AArch32: add a minimal secure payload (SP_MIN)

This patch adds SP_MIN, a minimal AArch32 secure payload. It relies on
the PSCI library to initialize the normal world context. It runs in
Monitor mode and uses the runtime service framework to handle SMCs. It
is added as a BL32 component in the Trusted Firmware source tree.

Change-Id: Icc04fa6b242025a769c1f6c7022fde19459c43e9
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
new file mode 100644
index 0000000..33d35b9
--- /dev/null
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+#include <xlat_tables.h>
+
+	.globl	sp_min_vector_table
+	.globl	sp_min_entrypoint
+	.globl	sp_min_warm_entrypoint
+
+func sp_min_vector_table
+	b	sp_min_entrypoint
+	b	plat_panic_handler	/* Undef */
+	b	handle_smc		/* SMC call */
+	b	plat_panic_handler	/* Prefetch abort */
+	b	plat_panic_handler	/* Data abort */
+	b	plat_panic_handler	/* Reserved */
+	b	plat_panic_handler	/* IRQ */
+	b	plat_panic_handler	/* FIQ */
+endfunc sp_min_vector_table
+
+func handle_smc
+	smcc_save_gp_mode_regs
+
+	/* r0 points to smc_context */
+	mov	r2, r0				/* handle */
+	ldcopr	r0, SCR
+
+	/* Save SCR in stack */
+	push	{r0}
+	and	r3, r0, #SCR_NS_BIT		/* flags */
+
+	/* Switch to the secure state */
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 call was issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f	/* SMC32 is detected */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	2f	/* Skip handling the SMC */
+1:
+	mov	r1, #0				/* cookie */
+	bl	handle_runtime_svc
+2:
+	/* r0 points to smc context */
+
+	/* Restore SCR from stack */
+	pop	{r1}
+	stcopr	r1, SCR
+	isb
+
+	b	sp_min_exit
+endfunc handle_smc
+
+/*
+ * The Cold boot/Reset entrypoint for SP_MIN
+ */
+func sp_min_entrypoint
+
+	/*
+	 * The caches and TLBs are disabled at reset. If any implementation
+	 * allows the caches/TLB to be hit while they are disabled, ensure
+	 * that they are invalidated here
+	 */
+
+	/* Make sure we are in the secure state */
+	ldcopr	r0, SCR
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+
+	/* Switch to monitor mode */
+	cps	#MODE32_mon
+	isb
+
+	/*
+	 * Set sane values for the NS SCTLR as well.
+	 * Switch to the non-secure state for this.
+	 */
+	ldr	r0, =(SCTLR_RES1)
+	ldcopr	r1, SCR
+	orr	r2, r1, #SCR_NS_BIT
+	stcopr	r2, SCR
+	isb
+
+	ldcopr	r2, SCTLR
+	orr	r0, r0, r2
+	stcopr	r0, SCTLR
+	isb
+
+	stcopr	r1, SCR
+	isb
+
+	/*
+	 * Set the CPU endianness before doing anything that might involve
+	 * memory reads or writes.
+	 */
+	ldcopr	r0, SCTLR
+	bic	r0, r0, #SCTLR_EE_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	/* Run the CPU Specific Reset handler */
+	bl	reset_handler
+
+	/*
+	 * Enable the instruction cache and data access
+	 * alignment checks
+	 */
+	ldcopr	r0, SCTLR
+	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
+	orr	r0, r0, r1
+	stcopr	r0, SCTLR
+	isb
+
+	/* Set the vector tables */
+	ldr	r0, =sp_min_vector_table
+	stcopr	r0, VBAR
+	stcopr	r0, MVBAR
+	isb
+
+	/*
+	 * Enable the SIF bit to disable instruction fetches
+	 * from Non-secure memory.
+	 */
+	ldcopr	r0, SCR
+	orr	r0, r0, #SCR_SIF_BIT
+	stcopr	r0, SCR
+
+	/*
+	 * Enable the SError interrupt now that the exception vectors have been
+	 * set up.
+	 */
+	cpsie   a
+	isb
+
+	/* Enable access to Advanced SIMD registers */
+	ldcopr	r0, NSACR
+	bic	r0, r0, #NSASEDIS_BIT
+	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
+	stcopr	r0, NSACR
+	isb
+
+	/*
+	 * Enable access to the Advanced SIMD, floating-point and Trace
+	 * functionality as well.
+	 */
+	ldcopr	r0, CPACR
+	bic	r0, r0, #ASEDIS_BIT
+	bic	r0, r0, #TRCDIS_BIT
+	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
+	stcopr	r0, CPACR
+	isb
+
+	vmrs	r0, FPEXC
+	orr	r0, r0, #FPEXC_EN_BIT
+	vmsr	FPEXC, r0
+
+	/* Detect whether Warm or Cold boot */
+	bl	plat_get_my_entrypoint
+	cmp	r0, #0
+	/* If warm boot detected, jump to warm boot entry */
+	bxne	r0
+
+	/* Setup C runtime stack */
+	bl	plat_set_my_stack
+
+	/* Perform platform specific memory initialization */
+	bl	platform_mem_init
+
+	/* Initialize the C Runtime Environment */
+
+	/*
+	 * Invalidate the RW memory used by SP_MIN image. This includes
+	 * the data and NOBITS sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty cache lines in a system
+	 * cache as a result of use by an earlier boot loader stage.
+	 */
+	ldr	r0, =__RW_START__
+	ldr	r1, =__RW_END__
+	sub	r1, r1, r0
+	bl	inv_dcache_range
+
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_SIZE__
+	bl	zeromem
+
+#if USE_COHERENT_MEM
+	ldr	r0, =__COHERENT_RAM_START__
+	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem
+#endif
+
+	/* Perform platform specific early arch. setup */
+	bl	sp_min_early_platform_setup
+	bl	sp_min_plat_arch_setup
+
+	/* Jump to the main function */
+	bl	sp_min_main
+
+	/* -------------------------------------------------------------
+	 * Clean the .data & .bss sections to main memory. This ensures
+	 * that any global data which was initialised by the primary CPU
+	 * is visible to secondary CPUs before they enable their data
+	 * caches and participate in coherency.
+	 * -------------------------------------------------------------
+	 */
+	ldr	r0, =__DATA_START__
+	ldr	r1, =__DATA_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	/* Program the registers in cpu_context and exit monitor mode */
+	mov	r0, #NON_SECURE
+	bl	cm_get_context
+
+	/* Restore the SCR */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
+	stcopr	r2, SCR
+	isb
+
+	/* Restore the SCTLR  */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
+	stcopr	r2, SCTLR
+
+	bl	smc_get_next_ctx
+	/* The other cpu_context registers have been copied to smc context */
+	b	sp_min_exit
+endfunc sp_min_entrypoint
+
+/*
+ * The Warm boot entrypoint for SP_MIN.
+ */
+func sp_min_warm_entrypoint
+
+	/* Setup C runtime stack */
+	bl	plat_set_my_stack
+
+	/* --------------------------------------------
+	 * Enable the MMU with the DCache disabled. It
+	 * is safe to use stacks allocated in normal
+	 * memory as a result. All memory accesses are
+	 * marked nGnRnE when the MMU is disabled. So
+	 * all the stack writes will make it to memory.
+	 * All memory accesses are marked Non-cacheable
+	 * when the MMU is enabled but D$ is disabled.
+	 * So used stack memory is guaranteed to be
+	 * visible immediately after the MMU is enabled.
+	 * Enabling the DCache at the same time as the
+	 * MMU can lead to speculatively fetched and
+	 * possibly stale stack memory being read from
+	 * other caches. This can lead to coherency
+	 * issues.
+	 * --------------------------------------------
+	 */
+	mov	r0, #DISABLE_DCACHE
+	bl	bl32_plat_enable_mmu
+
+	bl	sp_min_warm_boot
+
+	/* Program the registers in cpu_context and exit monitor mode */
+	mov	r0, #NON_SECURE
+	bl	cm_get_context
+
+	/* Restore the SCR */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
+	stcopr	r2, SCR
+	isb
+
+	/* Restore the SCTLR  */
+	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
+	stcopr	r2, SCTLR
+
+	bl	smc_get_next_ctx
+
+	/* The other cpu_context registers have been copied to smc context */
+	b	sp_min_exit
+endfunc sp_min_warm_entrypoint
+
+/*
+ * The function to restore the registers from the SMC context and return
+ * to the mode indicated by the SPSR.
+ *
+ * Arguments : r0 must point to the SMC context to restore from.
+ */
+func sp_min_exit
+	smcc_restore_gp_mode_regs
+	eret
+endfunc sp_min_exit
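
The SMC32/SMC64 check above tests the calling-convention field of the function
ID (bit 30, per the SMC Calling Convention); SP_MIN rejects SMC64 calls with
SMC_UNK. The services that `handle_runtime_svc` dispatches to are registered
by placing descriptors in the `rt_svc_descs` section, which the linker script
in this patch KEEPs. Below is a minimal sketch of such a registration, for
illustration only: the service name and function ID are invented, and the
handler prototype assumes the `u_register_t`-based signature from
`runtime_svc.h`.

    #include <runtime_svc.h>
    #include <smcc_helpers.h>

    /* Hypothetical SiP service; the FID below is made up. */
    #define EXAMPLE_SVC_FID		0x8200ff00	/* SMC32 fast call */

    static int32_t example_svc_setup(void)
    {
    	return 0;	/* nothing to initialize */
    }

    static uintptr_t example_svc_handler(uint32_t smc_fid,
    				u_register_t x1, u_register_t x2,
    				u_register_t x3, u_register_t x4,
    				void *cookie, void *handle,
    				u_register_t flags)
    {
    	if (smc_fid == EXAMPLE_SVC_FID)
    		SMC_RET1(handle, 0);		/* success */

    	SMC_RET1(handle, SMC_UNK);		/* unknown function */
    }

    /* DECLARE_RT_SVC emits the descriptor into the 'rt_svc_descs' section. */
    DECLARE_RT_SVC(
    	example_svc,
    	OEN_SIP_START,
    	OEN_SIP_END,
    	SMC_TYPE_FAST,
    	example_svc_setup,
    	example_svc_handler
    );
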
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
new file mode 100644
index 0000000..b158db1
--- /dev/null
+++ b/bl32/sp_min/sp_min.ld.S
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(elf32-littlearm)
+OUTPUT_ARCH(arm)
+ENTRY(sp_min_vector_table)
+
+MEMORY {
+    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
+}
+
+
+SECTIONS
+{
+    . = BL32_BASE;
+    ASSERT(. == ALIGN(4096),
+           "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+    } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
+    ro . : {
+        __RO_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        __RO_END_UNALIGNED__ = .;
+
+        /*
+         * Memory page(s) mapped to this section will be marked as
+         * read-only, executable.  No RW data from the next section must
+         * creep in.  Ensure the rest of the current memory block is unused.
+         */
+        . = NEXT(4096);
+        __RO_END__ = .;
+    } >RAM
+#endif
+
+    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+           "cpu_ops not defined for this platform.")
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    .data . : {
+        __DATA_START__ = .;
+        *(.data*)
+        __DATA_END__ = .;
+    } >RAM
+
+    stacks (NOLOAD) : {
+        __STACKS_START__ = .;
+        *(tzfw_normal_stacks)
+        __STACKS_END__ = .;
+    } >RAM
+
+    /*
+     * The .bss section gets initialised to 0 at runtime.
+     * Its base address must be 16-byte aligned.
+     */
+    .bss (NOLOAD) : ALIGN(16) {
+        __BSS_START__ = .;
+        *(.bss*)
+        *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks;
+         * the remaining cache lines are allocated by the linker script.
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
+
+#if ENABLE_PMF
+        /*
+         * Time-stamps are stored in normal .bss memory
+         *
+         * The compiler will allocate enough memory for one CPU's time-stamps;
+         * the remaining memory for the other CPUs is allocated by the
+         * linker script.
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_TIMESTAMP_START__ = .;
+        KEEP(*(pmf_timestamp_array))
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_PERCPU_TIMESTAMP_END__ = .;
+        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
+        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __PMF_TIMESTAMP_END__ = .;
+#endif /* ENABLE_PMF */
+
+        __BSS_END__ = .;
+    } >RAM
+
+    /*
+     * The xlat_table section is for full, aligned page tables (4K).
+     * Removing them from .bss avoids forcing 4K alignment on
+     * the .bss section and eliminates the unnecessary zero init.
+     */
+    xlat_table (NOLOAD) : {
+        *(xlat_table)
+    } >RAM
+
+    __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+    /*
+     * The base address of the coherent memory section must be page-aligned (4K)
+     * to guarantee that the coherent data are stored on their own pages and
+     * are not mixed with normal data.  This is required to set up the correct
+     * memory attributes for the coherent data page tables.
+     */
+    coherent_ram (NOLOAD) : ALIGN(4096) {
+        __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
+        *(tzfw_coherent_mem)
+        __COHERENT_RAM_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked
+         * as device memory.  No other unexpected data must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __COHERENT_RAM_END__ = .;
+    } >RAM
+
+    __COHERENT_RAM_UNALIGNED_SIZE__ =
+        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
+
+    __BL32_END__ = .;
+}
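
One subtlety in the bakery lock region above: the compiler allocates only one
CPU's worth of `bakery_lock` data, and the linker reserves
PLATFORM_CORE_COUNT - 1 further copies at the same per-CPU stride. A minimal
sketch, assuming consuming code along the lines of the bakery lock
implementation (the helper name is invented), of how a core would locate its
own instance of a lock:

    #include <stddef.h>

    /* Linker-defined symbol; its *address* encodes the per-CPU stride. */
    extern char __PERCPU_BAKERY_LOCK_SIZE__;

    /* Return core 'core_pos's per-CPU instance of 'lock'. */
    static inline void *get_bakery_info_example(unsigned int core_pos,
    					void *lock)
    {
    	size_t stride = (size_t)&__PERCPU_BAKERY_LOCK_SIZE__;

    	return (char *)lock + (stride * core_pos);
    }
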
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
new file mode 100644
index 0000000..a8b572e
--- /dev/null
+++ b/bl32/sp_min/sp_min.mk
@@ -0,0 +1,63 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+ifneq (${ARCH}, aarch32)
+  $(error SP_MIN is only supported on AArch32 platforms)
+endif
+
+include lib/psci/psci_lib.mk
+
+INCLUDES		+=	-Iinclude/bl32/sp_min
+
+BL32_SOURCES		+=	bl32/sp_min/sp_min_main.c		\
+				bl32/sp_min/aarch32/entrypoint.S	\
+				common/runtime_svc.c			\
+				services/std_svc/std_svc_setup.c	\
+				${PSCI_LIB_SOURCES}
+
+ifeq (${ENABLE_PMF}, 1)
+BL32_SOURCES		+=	lib/pmf/pmf_main.c
+endif
+
+BL32_LINKERFILE	:=	bl32/sp_min/sp_min.ld.S
+
+# Include the platform-specific SP_MIN Makefile
+# If no platform-specific SP_MIN Makefile exists, SP_MIN is not supported
+# on this platform.
+SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
+ifeq (,${SP_MIN_PLAT_MAKEFILE})
+  $(error SP_MIN is not supported on platform ${PLAT})
+else
+  include ${SP_MIN_PLAT_MAKEFILE}
+endif
+
+RESET_TO_SP_MIN	:= 1
+$(eval $(call add_define,RESET_TO_SP_MIN))
+$(eval $(call assert_boolean,RESET_TO_SP_MIN))
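
Since the build aborts when no platform hook is found, every port must provide
sp_min/sp_min-<plat>.mk under its platform directory. A hypothetical example
(the platform name and source file below are invented):

    # sp_min-myplat.mk -- platform glue for SP_MIN
    BL32_SOURCES	+=	plat/myvendor/myplat/sp_min/myplat_sp_min_setup.c

That setup file would implement the mandatory functions declared in
include/bl32/sp_min/platform_sp_min.h, added later in this patch.
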
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
new file mode 100644
index 0000000..31cab3d
--- /dev/null
+++ b/bl32/sp_min/sp_min_main.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <platform_sp_min.h>
+#include <psci.h>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <types.h>
+#include "sp_min_private.h"
+
+/* Pointers to per-core cpu contexts */
+static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+/* SP_MIN stores only the non-secure SMC context */
+static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the SMCC helper library APIs
+ *****************************************************************************/
+void *smc_get_ctx(int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+void smc_set_next_ctx(int security_state)
+{
+	assert(security_state == NON_SECURE);
+	/* SP_MIN stores only the non-secure SMC context. Nothing to do here. */
+}
+
+void *smc_get_next_ctx(void)
+{
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[cpu_idx];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[cpu_idx] = context;
+}
+
+static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
+				smc_ctx_t *next_smc_ctx)
+{
+	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+}
+
+/*******************************************************************************
+ * This function invokes the PSCI library interface to initialize the
+ * non-secure cpu context and copies the relevant cpu_context register values
+ * to the SMC context, which is programmed during `sp_min_exit`.
+ ******************************************************************************/
+static void sp_min_prepare_next_image_entry(void)
+{
+	entry_point_info_t *next_image_info;
+
+	/* Program system registers to proceed to non-secure */
+	next_image_info = sp_min_plat_get_bl33_ep_info();
+	assert(next_image_info);
+	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
+
+	INFO("SP_MIN: Preparing exit to normal world\n");
+
+	psci_prepare_next_non_secure_ctx(next_image_info);
+	smc_set_next_ctx(NON_SECURE);
+
+	/* Copy r0, lr and spsr from cpu context to SMC context */
+	copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			smc_get_next_ctx());
+}
+
+/******************************************************************************
+ * The SP_MIN main function. Do the platform and PSCI Library setup. Also
+ * initialize the runtime service framework.
+ *****************************************************************************/
+void sp_min_main(void)
+{
+	/* Perform the platform setup for SP_MIN */
+	sp_min_platform_setup();
+
+	/*
+	 * Initialize the PSCI library and perform the remaining generic
+	 * architectural setup from PSCI.
+	 */
+	psci_setup((uintptr_t)sp_min_warm_entrypoint);
+
+	/*
+	 * Initialize the runtime services, e.g. PSCI.
+	 * This is where the Monitor mode SMC handlers get registered.
+	 */
+	INFO("SP_MIN: Initializing runtime services\n");
+	runtime_svc_init();
+
+	/*
+	 * We are ready to enter the next EL. Prepare entry into the image
+	 * corresponding to the desired security state after the next ERET.
+	 */
+	sp_min_prepare_next_image_entry();
+}
+
+/******************************************************************************
+ * This function is invoked during warm boot. It invokes the PSCI library
+ * warm boot entry point, which takes care of the architectural and platform
+ * setup/restore. It then copies the relevant cpu_context register values to
+ * the SMC context, which is programmed during `sp_min_exit`.
+ *****************************************************************************/
+void sp_min_warm_boot(void)
+{
+	smc_ctx_t *next_smc_ctx;
+
+	psci_warmboot_entrypoint();
+
+	smc_set_next_ctx(NON_SECURE);
+
+	next_smc_ctx = smc_get_next_ctx();
+	memset(next_smc_ctx, 0, sizeof(smc_ctx_t));
+
+	copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			next_smc_ctx);
+}
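
The `cm_set_context()`/`cm_get_context()` hooks above exist so that the PSCI
library can bind its per-CPU `cpu_context` structures to each core during
`psci_setup()`. A rough sketch of that binding, assuming a per-CPU context
array like the one the PSCI library keeps internally (names invented for
illustration):

    #include <bl_common.h>
    #include <context.h>
    #include <context_mgmt.h>
    #include <platform.h>
    #include <platform_def.h>

    /* Per-CPU non-secure contexts, as the PSCI library would allocate them. */
    static cpu_context_t example_ns_context[PLATFORM_CORE_COUNT];

    static void example_bind_context(void)
    {
    	/* Make the calling core's context retrievable via cm_get_context() */
    	cm_set_context(&example_ns_context[plat_my_core_pos()], NON_SECURE);
    }
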
diff --git a/bl32/sp_min/sp_min_private.h b/bl32/sp_min/sp_min_private.h
new file mode 100644
index 0000000..0042f40
--- /dev/null
+++ b/bl32/sp_min/sp_min_private.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SP_MIN_H__
+#define __SP_MIN_H__
+
+void sp_min_warm_entrypoint(void);
+void sp_min_main(void);
+void sp_min_warm_boot(void);
+
+#endif /* __SP_MIN_H__ */
diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h
new file mode 100644
index 0000000..ae9dd58
--- /dev/null
+++ b/include/bl32/sp_min/platform_sp_min.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_SP_MIN_H__
+#define __PLATFORM_SP_MIN_H__
+
+/*******************************************************************************
+ * Mandatory SP_MIN functions
+ ******************************************************************************/
+void sp_min_early_platform_setup(void);
+void sp_min_plat_arch_setup(void);
+void sp_min_platform_setup(void);
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
+
+#endif /* __PLATFORM_SP_MIN_H__ */
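
For reference, a platform port would provide implementations along these
lines. Everything below is a hypothetical sketch (the static data and comments
are invented); it includes bl_common.h explicitly, since platform_sp_min.h
itself does not pull in the entry_point_info_t definition:

    #include <bl_common.h>
    #include <platform_sp_min.h>

    /* Entry point information for BL33, populated during early setup. */
    static entry_point_info_t bl33_ep_info;

    void sp_min_early_platform_setup(void)
    {
    	/* e.g. initialize the console and populate bl33_ep_info */
    }

    void sp_min_plat_arch_setup(void)
    {
    	/* e.g. set up the translation tables and enable the MMU */
    }

    void sp_min_platform_setup(void)
    {
    	/* e.g. initialize the GIC and platform power management */
    }

    entry_point_info_t *sp_min_plat_get_bl33_ep_info(void)
    {
    	return &bl33_ep_info;
    }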