Merge pull request #19 from johnstultz-work/bootloader-update

Update the armtf branch AOSP uses to the upstream armtf code
diff --git a/.checkpatch.conf b/.checkpatch.conf
new file mode 100644
index 0000000..0c84fcd
--- /dev/null
+++ b/.checkpatch.conf
@@ -0,0 +1,84 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# Configure how the Linux checkpatch script should be invoked in the context of
+# the Trusted Firmware source tree.
+#
+
+# This is not Linux so don't expect a Linux tree!
+--no-tree
+
+# This clarifies the line indications in the report.
+#
+# E.g.:
+# Without this option, we have the following output:
+#      #333: FILE: drivers/arm/gic/arm_gic.c:160:
+# So we have 2 line indications (333 and 160), which is confusing.
+# We only care about the position in the source file.
+#
+# With this option, it becomes:
+#      drivers/arm/gic/arm_gic.c:160:
+--showfile
+
+#
+# Ignore the following message types, as they don't necessarily make sense in
+# the context of the Trusted Firmware.
+#
+
+# COMPLEX_MACRO generates false positives.
+--ignore COMPLEX_MACRO
+
+# Commit messages might contain a Gerrit Change-Id.
+--ignore GERRIT_CHANGE_ID
+
+# Do not check the format of commit messages, as GitHub's merge commits do not
+# observe it.
+--ignore GIT_COMMIT_ID
+
+# FILE_PATH_CHANGES reports this kind of message:
+# "added, moved or deleted file(s), does MAINTAINERS need updating?"
+# We do not use this MAINTAINERS file process in TF.
+--ignore FILE_PATH_CHANGES
+
+# AVOID_EXTERNS reports this kind of message:
+# "externs should be avoided in .c files"
+# We don't follow this convention in TF.
+--ignore AVOID_EXTERNS
+
+# NEW_TYPEDEFS reports this kind of message:
+# "do not add new typedefs"
+# We allow adding new typedefs in TF.
+--ignore NEW_TYPEDEFS
+
+# VOLATILE reports this kind of message:
+# "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt"
+# We allow the usage of the volatile keyword in TF.
+--ignore VOLATILE
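
As a rough sketch of how this file takes effect (the checkpatch location and the
file path are illustrative): recent versions of checkpatch.pl read
.checkpatch.conf from the current working directory, so a per-file check run
from the top of the tree behaves as if every option above had been passed on
the command line:

    # effectively:
    ${CHECKPATCH} --no-tree --showfile --ignore COMPLEX_MACRO \
        --ignore GERRIT_CHANGE_ID --ignore GIT_COMMIT_ID \
        --ignore FILE_PATH_CHANGES --ignore AVOID_EXTERNS \
        --ignore NEW_TYPEDEFS --ignore VOLATILE -f drivers/arm/gic/arm_gic.c
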
diff --git a/.gitignore b/.gitignore
index d3567bc..4ece189 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 cscope.*
 *.swp
 *.patch
+*~
 .project
 .cproject
 
@@ -10,6 +11,16 @@
 
 # Ignore build products from tools
 tools/**/*.o
-tools/fip_create/fip_create
+tools/fip_create/
+tools/fiptool/fiptool
+tools/fiptool/fiptool.exe
 tools/cert_create/src/*.o
+tools/cert_create/src/**/*.o
 tools/cert_create/cert_create
+tools/cert_create/cert_create.exe
+
+# GNU GLOBAL files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
diff --git a/Makefile b/Makefile
index 6fb10b7..6b2efac 100644
--- a/Makefile
+++ b/Makefile
@@ -1,271 +1,120 @@
 #
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 #
 # Trusted Firmware Version
 #
-VERSION_MAJOR		:= 1
-VERSION_MINOR		:= 1
+VERSION_MAJOR			:= 1
+VERSION_MINOR			:= 4
 
-#
-# Default values for build configurations
-#
+# Default goal is build all images
+.DEFAULT_GOAL			:= all
 
-# Build verbosity
-V			:= 0
-# Debug build
-DEBUG			:= 0
-# Build architecture
-ARCH 			:= aarch64
-# Build platform
-DEFAULT_PLAT		:= fvp
-PLAT			:= ${DEFAULT_PLAT}
-# SPD choice
-SPD			:= none
-# Base commit to perform code check on
-BASE_COMMIT		:= origin/master
-# NS timer register save and restore
-NS_TIMER_SWITCH		:= 0
-# By default, Bl1 acts as the reset handler, not BL31
-RESET_TO_BL31		:= 0
-# Include FP registers in cpu context
-CTX_INCLUDE_FPREGS		:= 0
-# Determine the version of ARM GIC architecture to use for interrupt management
-# in EL3. The platform port can change this value if needed.
-ARM_GIC_ARCH		:=	2
-# Flag used to indicate if ASM_ASSERTION should be enabled for the build.
-# This defaults to being present in DEBUG builds only.
-ASM_ASSERTION		:=	${DEBUG}
-# Build option to choose whether Trusted firmware uses Coherent memory or not.
-USE_COHERENT_MEM	:=	1
-# Default FIP file name
-FIP_NAME		:= fip.bin
-# By default, use the -pedantic option in the gcc command line
-DISABLE_PEDANTIC	:= 0
-# Flags to generate the Chain of Trust
-GENERATE_COT		:= 0
-CREATE_KEYS		:= 1
-# Flags to build TF with Trusted Boot support
-TRUSTED_BOARD_BOOT	:= 0
-AUTH_MOD		:= none
+# Avoid any implicit propagation of command line variable definitions to
+# sub-Makefiles, like CFLAGS that we reserved for the firmware images'
+# usage. Other command line options like "-s" are still propagated as usual.
+MAKEOVERRIDES =
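
A minimal sketch of why this matters, assuming a hypothetical invocation
"make CFLAGS=-Os fiptool" (the fiptool target and path appear further down this
Makefile):

    # Without the assignment above, GNU Make re-applies the command-line
    # definition CFLAGS=-Os inside every recursive make, so
    #     $(MAKE) --no-print-directory -C tools/fiptool
    # would build the host tool with the firmware's flags. With MAKEOVERRIDES
    # cleared, the sub-make only sees what is exported explicitly (for
    # instance Q and HOSTCC, exported later in this file).
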
 
-# Checkpatch ignores
-CHECK_IGNORE		=	--ignore COMPLEX_MACRO
+MAKE_HELPERS_DIRECTORY := make_helpers/
+include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
+include ${MAKE_HELPERS_DIRECTORY}build_env.mk
 
-CHECKPATCH_ARGS		=	--no-tree --no-signoff ${CHECK_IGNORE}
-CHECKCODE_ARGS		=	--no-patch --no-tree --no-signoff ${CHECK_IGNORE}
+################################################################################
+# Default values for build configurations, and their dependencies
+################################################################################
 
+ifdef ASM_ASSERTION
+        $(warning ASM_ASSERTION is removed, use ENABLE_ASSERTIONS instead.)
+endif
+
+include ${MAKE_HELPERS_DIRECTORY}defaults.mk
+
+# Assertions enabled for DEBUG builds by default
+ENABLE_ASSERTIONS		:= ${DEBUG}
+ENABLE_PMF			:= ${ENABLE_RUNTIME_INSTRUMENTATION}
+PLAT				:= ${DEFAULT_PLAT}
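
An illustration of how these defaults interact with the command line (the
platform name is only an example):

    make PLAT=fvp DEBUG=1 all                      # -> ENABLE_ASSERTIONS=1
    make PLAT=fvp DEBUG=1 ENABLE_ASSERTIONS=0 all  # -> ENABLE_ASSERTIONS=0
    # A command-line definition overrides the ":=" assignment above, so
    # assertions track DEBUG unless they are set explicitly.
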
+
+################################################################################
+# Checkpatch script options
+################################################################################
+
+CHECKCODE_ARGS		:=	--no-patch
+# Do not check the coding style on imported library files or documentation files
+INC_LIB_DIRS_TO_CHECK	:=	$(sort $(filter-out			\
+					include/lib/libfdt		\
+					include/lib/stdlib,		\
+					$(wildcard include/lib/*)))
+INC_DIRS_TO_CHECK	:=	$(sort $(filter-out			\
+					include/lib,			\
+					$(wildcard include/*)))
+LIB_DIRS_TO_CHECK	:=	$(sort $(filter-out			\
+					lib/compiler-rt			\
+					lib/libfdt%			\
+					lib/stdlib,			\
+					$(wildcard lib/*)))
+ROOT_DIRS_TO_CHECK	:=	$(sort $(filter-out			\
+					lib				\
+					include				\
+					docs				\
+					%.md,				\
+					$(wildcard *)))
+CHECK_PATHS		:=	${ROOT_DIRS_TO_CHECK}			\
+				${INC_DIRS_TO_CHECK}			\
+				${INC_LIB_DIRS_TO_CHECK}		\
+				${LIB_DIRS_TO_CHECK}
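
A worked example of the filter-out/wildcard pattern used above, for a
hypothetical listing where $(wildcard lib/*) returns "lib/compiler-rt lib/cpus
lib/libfdt lib/psci lib/stdlib":

    LIB_DIRS_TO_CHECK = lib/cpus lib/psci
    # Imported third-party code (compiler-rt, libfdt, stdlib) is left out of
    # the style checks, while everything else under lib/ is still checked.
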
+
+
+################################################################################
+# Process build options
+################################################################################
+
+# Verbose flag
 ifeq (${V},0)
-	Q=@
-	CHECKCODE_ARGS	+=	--no-summary --terse
+        Q:=@
+        CHECKCODE_ARGS	+=	--no-summary --terse
 else
-	Q=
+        Q:=
 endif
 export Q
 
+# Process Debug flag
+$(eval $(call add_define,DEBUG))
 ifneq (${DEBUG}, 0)
-	BUILD_TYPE	:=	debug
-	# Use LOG_LEVEL_INFO by default for debug builds
-	LOG_LEVEL	:=	40
+        BUILD_TYPE	:=	debug
+        TF_CFLAGS	+= 	-g
+        ASFLAGS		+= 	-g -Wa,--gdwarf-2
+        # Use LOG_LEVEL_INFO by default for debug builds
+        LOG_LEVEL	:=	40
 else
-	BUILD_TYPE	:=	release
-	# Use LOG_LEVEL_NOTICE by default for release builds
-	LOG_LEVEL	:=	20
+        BUILD_TYPE	:=	release
+        $(eval $(call add_define,NDEBUG))
+        # Use LOG_LEVEL_NOTICE by default for release builds
+        LOG_LEVEL	:=	20
 endif
 
 # Default build string (git branch and commit)
 ifeq (${BUILD_STRING},)
-	BUILD_STRING	:=	$(shell git log -n 1 --pretty=format:"%h")
+        BUILD_STRING	:=	$(shell git describe --always --dirty --tags 2> /dev/null)
 endif
-
 VERSION_STRING		:=	v${VERSION_MAJOR}.${VERSION_MINOR}(${BUILD_TYPE}):${BUILD_STRING}
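
For illustration, assuming a hypothetical "git describe" output, the pieces
combine as follows for a debug build:

    BUILD_STRING   = v1.4-rc0-24-g1a2b3c4-dirty
    VERSION_STRING = v1.4(debug):v1.4-rc0-24-g1a2b3c4-dirty
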
 
-BL_COMMON_SOURCES	:=	common/bl_common.c			\
-				common/tf_printf.c			\
-				common/aarch64/debug.S			\
-				lib/aarch64/cache_helpers.S		\
-				lib/aarch64/misc_helpers.S		\
-				lib/aarch64/xlat_helpers.c		\
-				lib/stdlib/std.c			\
-				plat/common/aarch64/platform_helpers.S
-
-BUILD_BASE		:=	./build
-BUILD_PLAT		:=	${BUILD_BASE}/${PLAT}/${BUILD_TYPE}
-
-PLATFORMS		:=	$(shell ls -I common plat/)
-SPDS			:=	$(shell ls -I none services/spd)
-HELP_PLATFORMS		:=	$(shell echo ${PLATFORMS} | sed 's/ /|/g')
-
-# Convenience function for adding build definitions
-# $(eval $(call add_define,FOO)) will have:
-# -DFOO if $(FOO) is empty; -DFOO=$(FOO) otherwise
-define add_define
-DEFINES			+=	-D$(1)$(if $(value $(1)),=$(value $(1)),)
-endef
-
-# Convenience function for verifying option has a boolean value
-# $(eval $(call assert_boolean,FOO)) will assert FOO is 0 or 1
-define assert_boolean
-$(and $(patsubst 0,,$(value $(1))),$(patsubst 1,,$(value $(1))),$(error $(1) must be boolean))
-endef
-
-ifeq (${PLAT},)
-  $(error "Error: Unknown platform. Please use PLAT=<platform name> to specify the platform.")
-endif
-ifeq ($(findstring ${PLAT},${PLATFORMS}),)
-  $(error "Error: Invalid platform. The following platforms are available: ${PLATFORMS}")
+# The cert_create tool cannot generate certificates individually, so we use the
+# target 'certificates' to create them all
+ifneq (${GENERATE_COT},0)
+        FIP_DEPS += certificates
+        FWU_FIP_DEPS += fwu_certificates
 endif
 
-all: msg_start
 
-msg_start:
-	@echo "Building ${PLAT}"
+################################################################################
+# Toolchain
+################################################################################
 
-include plat/${PLAT}/platform.mk
-
-# Include the CPU specific operations makefile. By default all CPU errata
-# workarounds and CPU specifc optimisations are disabled. This can be
-# overridden by the platform.
-include lib/cpus/cpu-ops.mk
-
-ifdef BL1_SOURCES
-NEED_BL1 := yes
-include bl1/bl1.mk
-endif
-
-ifdef BL2_SOURCES
-NEED_BL2 := yes
-include bl2/bl2.mk
-# Using the ARM Trusted Firmware BL2 implies that a BL3-3 image also need to be supplied for the FIP.
-# This flag can be overridden by the platform.
-NEED_BL33 ?= yes
-endif
-
-ifdef BL31_SOURCES
-NEED_BL31 := yes
-include bl31/bl31.mk
-endif
-
-# Include SPD Makefile if one has been specified
-ifneq (${SPD},none)
-  # We expect to locate an spd.mk under the specified SPD directory
-  SPD_MAKE		:=	$(shell m="services/spd/${SPD}/${SPD}.mk"; [ -f "$$m" ] && echo "$$m")
-
-  ifeq (${SPD_MAKE},)
-    $(error Error: No services/spd/${SPD}/${SPD}.mk located)
-  endif
-  $(info Including ${SPD_MAKE})
-  include ${SPD_MAKE}
-
-  # If there's BL3-2 companion for the chosen SPD, and the SPD wants to build the
-  # BL3-2 from source, we expect that the SPD's Makefile would set NEED_BL32
-  # variable to "yes". In case the BL3-2 is a binary which needs to be included in
-  # fip, then the NEED_BL32 needs to be set and BL3-2 would need to point to the bin.
-endif
-
-.PHONY:			all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip certtool
-.SUFFIXES:
-
-INCLUDES		+=	-Iinclude/bl31			\
-				-Iinclude/bl31/services		\
-				-Iinclude/common		\
-				-Iinclude/drivers		\
-				-Iinclude/drivers/arm		\
-				-Iinclude/drivers/io		\
-				-Iinclude/lib			\
-				-Iinclude/lib/aarch64		\
-				-Iinclude/lib/cpus/aarch64	\
-				-Iinclude/plat/common		\
-				-Iinclude/stdlib		\
-				-Iinclude/stdlib/sys		\
-				${PLAT_INCLUDES}		\
-				${SPD_INCLUDES}
-
-# Process DEBUG flag
-$(eval $(call assert_boolean,DEBUG))
-$(eval $(call add_define,DEBUG))
-ifeq (${DEBUG},0)
-  $(eval $(call add_define,NDEBUG))
-else
-CFLAGS			+= 	-g
-ASFLAGS			+= 	-g -Wa,--gdwarf-2
-endif
-
-# Process NS_TIMER_SWITCH flag
-$(eval $(call assert_boolean,NS_TIMER_SWITCH))
-$(eval $(call add_define,NS_TIMER_SWITCH))
-
-# Process RESET_TO_BL31 flag
-$(eval $(call assert_boolean,RESET_TO_BL31))
-$(eval $(call add_define,RESET_TO_BL31))
-
-# Process CTX_INCLUDE_FPREGS flag
-$(eval $(call assert_boolean,CTX_INCLUDE_FPREGS))
-$(eval $(call add_define,CTX_INCLUDE_FPREGS))
-
-# Process ARM_GIC_ARCH flag
-$(eval $(call add_define,ARM_GIC_ARCH))
-
-# Process ASM_ASSERTION flag
-$(eval $(call assert_boolean,ASM_ASSERTION))
-$(eval $(call add_define,ASM_ASSERTION))
-
-# Process LOG_LEVEL flag
-$(eval $(call add_define,LOG_LEVEL))
-
-# Process USE_COHERENT_MEM flag
-$(eval $(call assert_boolean,USE_COHERENT_MEM))
-$(eval $(call add_define,USE_COHERENT_MEM))
-
-# Process Generate CoT flags
-$(eval $(call assert_boolean,GENERATE_COT))
-$(eval $(call assert_boolean,CREATE_KEYS))
-
-# Process TRUSTED_BOARD_BOOT flag
-$(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
-$(eval $(call add_define,TRUSTED_BOARD_BOOT))
-
-ASFLAGS			+= 	-nostdinc -ffreestanding -Wa,--fatal-warnings	\
-				-Werror -Wmissing-include-dirs			\
-				-mgeneral-regs-only -D__ASSEMBLY__		\
-				${DEFINES} ${INCLUDES}
-CFLAGS			+= 	-nostdinc -ffreestanding -Wall			\
-				-Werror -Wmissing-include-dirs			\
-				-mgeneral-regs-only -mstrict-align		\
-				-std=c99 -c -Os	${DEFINES} ${INCLUDES} -fno-pic
-CFLAGS			+=	-ffunction-sections -fdata-sections		\
-				-fno-delete-null-pointer-checks
-
-LDFLAGS			+=	--fatal-warnings -O1
-LDFLAGS			+=	--gc-sections
-
+HOSTCC			:=	gcc
+export HOSTCC
 
 CC			:=	${CROSS_COMPILE}gcc
 CPP			:=	${CROSS_COMPILE}cpp
@@ -275,348 +124,552 @@
 OC			:=	${CROSS_COMPILE}objcopy
 OD			:=	${CROSS_COMPILE}objdump
 NM			:=	${CROSS_COMPILE}nm
-PP			:=	${CROSS_COMPILE}gcc -E ${CFLAGS}
+PP			:=	${CROSS_COMPILE}gcc -E
 
-# Variables for use with Firmware Image Package
-FIPTOOLPATH		?=	tools/fip_create
-FIPTOOL			?=	${FIPTOOLPATH}/fip_create
-fiptool:		${FIPTOOL}
-fip:			${BUILD_PLAT}/${FIP_NAME}
+ifeq ($(notdir $(CC)),armclang)
+TF_CFLAGS_aarch32	=	-target arm-arm-none-eabi -march=armv8-a
+TF_CFLAGS_aarch64	=	-target aarch64-arm-none-eabi -march=armv8-a
+else ifneq ($(findstring clang,$(notdir $(CC))),)
+TF_CFLAGS_aarch32	=	-target armv8a-none-eabi
+TF_CFLAGS_aarch64	=	-target aarch64-elf
+else
+TF_CFLAGS_aarch32	=	-march=armv8-a
+TF_CFLAGS_aarch64	=	-march=armv8-a
+endif
+
+TF_CFLAGS_aarch64	+=	-mgeneral-regs-only -mstrict-align
+
+ASFLAGS_aarch32		=	-march=armv8-a
+ASFLAGS_aarch64		=	-march=armv8-a
+
+CPPFLAGS		=	${DEFINES} ${INCLUDES} -nostdinc		\
+				-Wmissing-include-dirs -Werror
+ASFLAGS			+=	$(CPPFLAGS) $(ASFLAGS_$(ARCH))			\
+				-D__ASSEMBLY__ -ffreestanding 			\
+				-Wa,--fatal-warnings
+TF_CFLAGS		+=	$(CPPFLAGS) $(TF_CFLAGS_$(ARCH))		\
+				-ffreestanding -fno-builtin -Wall -std=gnu99	\
+				-Os -ffunction-sections -fdata-sections -fno-pic
+
+TF_LDFLAGS		+=	--fatal-warnings -O1
+TF_LDFLAGS		+=	--gc-sections
+TF_LDFLAGS		+=	$(TF_LDFLAGS_$(ARCH))
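
Putting the fragments above together, a release aarch64 GCC build ends up
compiling firmware sources with roughly the following flags (a sketch; DEFINES
and INCLUDES expand to the -D and -I lists built elsewhere in this Makefile):

    TF_CFLAGS = $(DEFINES) $(INCLUDES) -nostdinc -Wmissing-include-dirs -Werror \
                -march=armv8-a -mgeneral-regs-only -mstrict-align \
                -ffreestanding -fno-builtin -Wall -std=gnu99 -Os \
                -ffunction-sections -fdata-sections -fno-pic
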
+
+################################################################################
+# Common sources and include directories
+################################################################################
+include lib/compiler-rt/compiler-rt.mk
+include lib/stdlib/stdlib.mk
+
+BL_COMMON_SOURCES	+=	common/bl_common.c			\
+				common/tf_log.c				\
+				common/tf_printf.c			\
+				common/tf_snprintf.c			\
+				common/${ARCH}/debug.S			\
+				lib/${ARCH}/cache_helpers.S		\
+				lib/${ARCH}/misc_helpers.S		\
+				plat/common/plat_log_common.c		\
+				plat/common/${ARCH}/plat_common.c	\
+				plat/common/${ARCH}/platform_helpers.S	\
+				${COMPILER_RT_SRCS}			\
+				${STDLIB_SRCS}
+
+INCLUDES		+=	-Iinclude/bl1				\
+				-Iinclude/bl31				\
+				-Iinclude/common			\
+				-Iinclude/common/${ARCH}		\
+				-Iinclude/drivers			\
+				-Iinclude/drivers/arm			\
+				-Iinclude/drivers/auth			\
+				-Iinclude/drivers/io			\
+				-Iinclude/drivers/ti/uart		\
+				-Iinclude/lib				\
+				-Iinclude/lib/${ARCH}			\
+				-Iinclude/lib/cpus			\
+				-Iinclude/lib/cpus/${ARCH}		\
+				-Iinclude/lib/el3_runtime		\
+				-Iinclude/lib/el3_runtime/${ARCH}	\
+				-Iinclude/lib/pmf			\
+				-Iinclude/lib/psci			\
+				-Iinclude/lib/xlat_tables		\
+				-Iinclude/plat/common			\
+				-Iinclude/services			\
+				${PLAT_INCLUDES}			\
+				${SPD_INCLUDES}				\
+				-Iinclude/tools_share
+
+
+################################################################################
+# Generic definitions
+################################################################################
+
+include ${MAKE_HELPERS_DIRECTORY}plat_helpers.mk
+
+BUILD_BASE		:=	./build
+BUILD_PLAT		:=	${BUILD_BASE}/${PLAT}/${BUILD_TYPE}
+
+SPDS			:=	$(sort $(filter-out none, $(patsubst services/spd/%,%,$(wildcard services/spd/*))))
+
+# Platforms providing their own TBB makefile may override this value
+INCLUDE_TBBR_MK		:=	1
+
+
+################################################################################
+# Include SPD Makefile if one has been specified
+################################################################################
+
+ifneq (${SPD},none)
+ifeq (${ARCH},aarch32)
+	$(error "Error: SPD is incompatible with AArch32.")
+endif
+ifdef EL3_PAYLOAD_BASE
+        $(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.")
+        $(warning "The SPD and its BL32 companion will be present but ignored.")
+endif
+        # We expect to locate an spd.mk under the specified SPD directory
+        SPD_MAKE	:=	$(wildcard services/spd/${SPD}/${SPD}.mk)
+
+        ifeq (${SPD_MAKE},)
+                $(error Error: No services/spd/${SPD}/${SPD}.mk located)
+        endif
+        $(info Including ${SPD_MAKE})
+        include ${SPD_MAKE}
+
+        # If there's a BL32 companion for the chosen SPD, we expect that the SPD's
+        # Makefile would set NEED_BL32 to "yes". In this case, the build system
+        # supports two mutually exclusive options:
+        # * BL32 is built from source: then BL32_SOURCES must contain the list
+        #   of source files to build BL32
+        # * BL32 is a prebuilt binary: then BL32 must point to the image file
+        #   that will be included in the FIP
+        # If both BL32_SOURCES and BL32 are defined, the binary takes precedence
+        # over the sources.
+endif
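
Illustrative invocations of the two cases described above (the SPD and file
names are examples, not requirements of this Makefile):

    make PLAT=fvp SPD=tspd all fip               # BL32 built from BL32_SOURCES
    make PLAT=fvp SPD=opteed BL32=tee.bin fip    # prebuilt BL32 packaged in the FIP
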
+
+################################################################################
+# Include libraries' Makefile that are used in all BL
+################################################################################
+
+include lib/stack_protector/stack_protector.mk
+
+
+################################################################################
+# Include the platform specific Makefile after the SPD Makefile (the platform
+# makefile may use all previous definitions in this file)
+################################################################################
+
+include ${PLAT_MAKEFILE_FULL}
+
+# Platform compatibility is not supported in AArch32
+ifneq (${ARCH},aarch32)
+# If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
+ifndef ENABLE_PLAT_COMPAT
+ENABLE_PLAT_COMPAT := 1
+endif
+
+# Include the platform compatibility helpers for PSCI
+ifneq (${ENABLE_PLAT_COMPAT}, 0)
+include plat/compat/plat_compat.mk
+endif
+endif
+
+# Include the CPU specific operations makefile, which provides default
+# values for all CPU errata workarounds and CPU specific optimisations.
+# This can be overridden by the platform.
+include lib/cpus/cpu-ops.mk
+
+ifeq (${ARCH},aarch32)
+NEED_BL32 := yes
+
+################################################################################
+# Build `AARCH32_SP` as BL32 image for AArch32
+################################################################################
+ifneq (${AARCH32_SP},none)
+# We expect to locate an sp.mk under the specified AARCH32_SP directory
+AARCH32_SP_MAKE	:=	$(wildcard bl32/${AARCH32_SP}/${AARCH32_SP}.mk)
+
+ifeq (${AARCH32_SP_MAKE},)
+  $(error Error: No bl32/${AARCH32_SP}/${AARCH32_SP}.mk located)
+endif
+
+$(info Including ${AARCH32_SP_MAKE})
+include ${AARCH32_SP_MAKE}
+endif
+
+endif
+
+################################################################################
+# Check incompatible options
+################################################################################
+
+ifdef EL3_PAYLOAD_BASE
+        ifdef PRELOADED_BL33_BASE
+                $(warning "PRELOADED_BL33_BASE and EL3_PAYLOAD_BASE are \
+                incompatible build options. EL3_PAYLOAD_BASE has priority.")
+        endif
+        ifneq (${GENERATE_COT},0)
+                $(error "GENERATE_COT and EL3_PAYLOAD_BASE are incompatible build options.")
+        endif
+        ifneq (${TRUSTED_BOARD_BOOT},0)
+                $(error "TRUSTED_BOARD_BOOT and EL3_PAYLOAD_BASE are incompatible build options.")
+        endif
+endif
+
+ifeq (${NEED_BL33},yes)
+        ifdef EL3_PAYLOAD_BASE
+                $(warning "BL33 image is not needed when option \
+                EL3_PAYLOAD_BASE is used and won't be added to the FIP file.")
+        endif
+        ifdef PRELOADED_BL33_BASE
+                $(warning "BL33 image is not needed when option \
+                PRELOADED_BL33_BASE is used and won't be added to the FIP \
+                file.")
+        endif
+endif
+
+# For AArch32, LOAD_IMAGE_V2 must be enabled.
+ifeq (${ARCH},aarch32)
+    ifeq (${LOAD_IMAGE_V2}, 0)
+        $(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
+    endif
+endif
+
+# When building for systems with hardware-assisted coherency, there's no need to
+# use USE_COHERENT_MEM; require that it is set to 0 as well.
+ifeq ($(HW_ASSISTED_COHERENCY)-$(USE_COHERENT_MEM),1-1)
+$(error USE_COHERENT_MEM cannot be enabled with HW_ASSISTED_COHERENCY)
+endif
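
The check above joins the two values into a single word, so only the disallowed
combination matches:

    # HW_ASSISTED_COHERENCY=1, USE_COHERENT_MEM=1  -> "1-1" -> build error
    # HW_ASSISTED_COHERENCY=1, USE_COHERENT_MEM=0  -> "1-0" -> build continues
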
+
+################################################################################
+# Process platform overrideable behaviour
+################################################################################
+
+# Using the ARM Trusted Firmware BL2 implies that a BL33 image also needs to be
+# supplied for the FIP and Certificate generation tools. This flag can be
+# overridden by the platform.
+ifdef BL2_SOURCES
+        ifdef EL3_PAYLOAD_BASE
+                # If booting an EL3 payload there is no need for a BL33 image
+                # in the FIP file.
+                NEED_BL33		:=	no
+        else
+                ifdef PRELOADED_BL33_BASE
+                        # If booting a BL33 preloaded image there is no need of
+                        # another one in the FIP file.
+                        NEED_BL33		:=	no
+                else
+                        NEED_BL33		?=	yes
+                endif
+        endif
+endif
+
+# If SCP_BL2 is given, we always want FIP to include it.
+ifdef SCP_BL2
+        NEED_SCP_BL2		:=	yes
+endif
+
+# Process TBB related flags
+ifneq (${GENERATE_COT},0)
+        # Common cert_create options
+        ifneq (${CREATE_KEYS},0)
+                $(eval CRT_ARGS += -n)
+                $(eval FWU_CRT_ARGS += -n)
+                ifneq (${SAVE_KEYS},0)
+                        $(eval CRT_ARGS += -k)
+                        $(eval FWU_CRT_ARGS += -k)
+                endif
+        endif
+        # Include TBBR makefile (unless the platform indicates otherwise)
+        ifeq (${INCLUDE_TBBR_MK},1)
+                include make_helpers/tbbr/tbbr_tools.mk
+        endif
+endif
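
A sketch of what this block contributes for a trusted-boot build with
GENERATE_COT=1 and both CREATE_KEYS and SAVE_KEYS enabled (the per-image key
and certificate arguments are supplied by tbbr_tools.mk, included just above):

    CRT_ARGS     += -n -k
    FWU_CRT_ARGS += -n -k
    # i.e. cert_create is asked to create new keys (-n) and save them (-k).
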
+
+ifneq (${FIP_ALIGN},0)
+FIP_ARGS += --align ${FIP_ALIGN}
+endif
+
+################################################################################
+# Auxiliary tools (fiptool, cert_create, etc)
+################################################################################
 
 # Variables for use with Certificate Generation Tool
 CRTTOOLPATH		?=	tools/cert_create
-CRTTOOL			?=	${CRTTOOLPATH}/cert_create
-certtool:		${CRTTOOL}
+CRTTOOL			?=	${CRTTOOLPATH}/cert_create${BIN_EXT}
 
-# CoT generation tool default parameters
-TRUSTED_KEY_CERT	:=	${BUILD_PLAT}/trusted_key.crt
+# Variables for use with Firmware Image Package
+FIPTOOLPATH		?=	tools/fiptool
+FIPTOOL			?=	${FIPTOOLPATH}/fiptool${BIN_EXT}
 
-# Pass the private keys to the CoT generation tool in the command line
-# If CREATE_KEYS is set, the '-n' option will be added, indicating the tool to create new keys
-ifneq (${GENERATE_COT},0)
-    $(eval CERTS := yes)
-
-    $(eval FIP_DEPS += certificates)
-    $(eval FIP_ARGS += --trusted-key-cert ${TRUSTED_KEY_CERT})
-
-    ifneq (${CREATE_KEYS},0)
-        $(eval CRT_ARGS += -n)
-    endif
-    $(eval CRT_ARGS += $(if ${ROT_KEY}, --rot-key ${ROT_KEY}))
-    $(eval CRT_ARGS += $(if ${TRUSTED_WORLD_KEY}, --trusted-world-key ${TRUSTED_WORLD_KEY}))
-    $(eval CRT_ARGS += $(if ${NON_TRUSTED_WORLD_KEY}, --non-trusted-world-key ${NON_TRUSTED_WORLD_KEY}))
-    $(eval CRT_ARGS += --trusted-key-cert ${TRUSTED_KEY_CERT})
+################################################################################
+# Include BL specific makefiles
+################################################################################
+ifdef BL1_SOURCES
+NEED_BL1 := yes
+include bl1/bl1.mk
 endif
 
-# Check Trusted Board Boot options
-ifneq (${TRUSTED_BOARD_BOOT},0)
-    ifeq (${AUTH_MOD},none)
-        $(error Error: When TRUSTED_BOARD_BOOT=1, AUTH_MOD has to be the name of a valid authentication module)
-    else
-        # We expect to locate an *.mk file under the specified AUTH_MOD directory
-        AUTH_MAKE := $(shell m="common/auth/${AUTH_MOD}/${AUTH_MOD}.mk"; [ -f "$$m" ] && echo "$$m")
-        ifeq (${AUTH_MAKE},)
-            $(error Error: No common/auth/${AUTH_MOD}/${AUTH_MOD}.mk located)
-        endif
-        $(info Including ${AUTH_MAKE})
-        include ${AUTH_MAKE}
-    endif
-
-    BL_COMMON_SOURCES	+=	common/auth.c
+ifdef BL2_SOURCES
+NEED_BL2 := yes
+include bl2/bl2.mk
 endif
 
-# Check if -pedantic option should be used
-ifeq (${DISABLE_PEDANTIC},0)
-    CFLAGS		+= 	-pedantic
+ifdef BL2U_SOURCES
+NEED_BL2U := yes
+include bl2u/bl2u.mk
 endif
 
-locate-checkpatch:
-ifndef CHECKPATCH
-	$(error "Please set CHECKPATCH to point to the Linux checkpatch.pl file, eg: CHECKPATCH=../linux/script/checkpatch.pl")
+# For AArch32, BL31 is not currently supported.
+ifneq (${ARCH},aarch32)
+ifdef BL31_SOURCES
+# When booting an EL3 payload, there is no need to compile the BL31 image nor
+# put it in the FIP.
+ifndef EL3_PAYLOAD_BASE
+NEED_BL31 := yes
+include bl31/bl31.mk
+endif
+endif
+endif
+
+################################################################################
+# Build options checks
+################################################################################
+
+$(eval $(call assert_boolean,COLD_BOOT_SINGLE_CPU))
+$(eval $(call assert_boolean,CREATE_KEYS))
+$(eval $(call assert_boolean,CTX_INCLUDE_AARCH32_REGS))
+$(eval $(call assert_boolean,CTX_INCLUDE_FPREGS))
+$(eval $(call assert_boolean,DEBUG))
+$(eval $(call assert_boolean,DISABLE_PEDANTIC))
+$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
+$(eval $(call assert_boolean,ENABLE_PLAT_COMPAT))
+$(eval $(call assert_boolean,ENABLE_PMF))
+$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
+$(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
+$(eval $(call assert_boolean,ENABLE_SPE_FOR_LOWER_ELS))
+$(eval $(call assert_boolean,ERROR_DEPRECATED))
+$(eval $(call assert_boolean,GENERATE_COT))
+$(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
+$(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
+$(eval $(call assert_boolean,LOAD_IMAGE_V2))
+$(eval $(call assert_boolean,NS_TIMER_SWITCH))
+$(eval $(call assert_boolean,PL011_GENERIC_UART))
+$(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
+$(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
+$(eval $(call assert_boolean,RESET_TO_BL31))
+$(eval $(call assert_boolean,SAVE_KEYS))
+$(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
+$(eval $(call assert_boolean,SPIN_ON_BL1_EXIT))
+$(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
+$(eval $(call assert_boolean,USE_COHERENT_MEM))
+$(eval $(call assert_boolean,USE_TBBR_DEFS))
+$(eval $(call assert_boolean,WARMBOOT_ENABLE_DCACHE_EARLY))
+
+$(eval $(call assert_numeric,ARM_ARCH_MAJOR))
+$(eval $(call assert_numeric,ARM_ARCH_MINOR))
+
+################################################################################
+# Add definitions to the cpp preprocessor based on the current build options.
+# This is done after including the platform specific makefile to allow the
+# platform to overwrite the default options
+################################################################################
+
+$(eval $(call add_define,ARM_ARCH_MAJOR))
+$(eval $(call add_define,ARM_ARCH_MINOR))
+$(eval $(call add_define,ARM_GIC_ARCH))
+$(eval $(call add_define,COLD_BOOT_SINGLE_CPU))
+$(eval $(call add_define,CTX_INCLUDE_AARCH32_REGS))
+$(eval $(call add_define,CTX_INCLUDE_FPREGS))
+$(eval $(call add_define,ENABLE_ASSERTIONS))
+$(eval $(call add_define,ENABLE_PLAT_COMPAT))
+$(eval $(call add_define,ENABLE_PMF))
+$(eval $(call add_define,ENABLE_PSCI_STAT))
+$(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
+$(eval $(call add_define,ENABLE_SPE_FOR_LOWER_ELS))
+$(eval $(call add_define,ERROR_DEPRECATED))
+$(eval $(call add_define,GICV2_G0_FOR_EL3))
+$(eval $(call add_define,HW_ASSISTED_COHERENCY))
+$(eval $(call add_define,LOAD_IMAGE_V2))
+$(eval $(call add_define,LOG_LEVEL))
+$(eval $(call add_define,NS_TIMER_SWITCH))
+$(eval $(call add_define,PL011_GENERIC_UART))
+$(eval $(call add_define,PLAT_${PLAT}))
+$(eval $(call add_define,PROGRAMMABLE_RESET_ADDRESS))
+$(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
+$(eval $(call add_define,RESET_TO_BL31))
+$(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
+$(eval $(call add_define,SPD_${SPD}))
+$(eval $(call add_define,SPIN_ON_BL1_EXIT))
+$(eval $(call add_define,TRUSTED_BOARD_BOOT))
+$(eval $(call add_define,USE_COHERENT_MEM))
+$(eval $(call add_define,USE_TBBR_DEFS))
+$(eval $(call add_define,WARMBOOT_ENABLE_DCACHE_EARLY))
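
As an example of what these lines expand to, using the illustrative values
PLAT=fvp, SPD=none, DEBUG=0 and LOG_LEVEL=20: the add_define helper (defined in
the included make_helpers files) emits -DNAME=value when the variable has a
value and a bare -DNAME when it is empty, so the preprocessor sees roughly:

    DEFINES += -DDEBUG=0 -DLOG_LEVEL=20 -DPLAT_fvp -DSPD_none  # among others
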
+
+# Define the EL3_PAYLOAD_BASE flag only if it is provided.
+ifdef EL3_PAYLOAD_BASE
+        $(eval $(call add_define,EL3_PAYLOAD_BASE))
 else
-ifeq (,$(wildcard ${CHECKPATCH}))
-	$(error "The file CHECKPATCH points to cannot be found, use eg: CHECKPATCH=../linux/script/checkpatch.pl")
+        # Define the PRELOADED_BL33_BASE flag only if it is provided and
+        # EL3_PAYLOAD_BASE is not defined, as it has priority.
+        ifdef PRELOADED_BL33_BASE
+                $(eval $(call add_define,PRELOADED_BL33_BASE))
+        endif
 endif
+# Define the AARCH32/AARCH64 flag based on the ARCH flag
+ifeq (${ARCH},aarch32)
+        $(eval $(call add_define,AARCH32))
+else
+        $(eval $(call add_define,AARCH64))
 endif
 
-clean:
-			@echo "  CLEAN"
-			${Q}rm -rf ${BUILD_PLAT}
-			${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH} clean
-			${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH} clean
+################################################################################
+# Build targets
+################################################################################
 
-realclean distclean:
-			@echo "  REALCLEAN"
-			${Q}rm -rf ${BUILD_BASE}
-			${Q}rm -f ${CURDIR}/cscope.*
-			${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH} clean
-			${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH} clean
+.PHONY:	all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip fwu_fip certtool
+.SUFFIXES:
 
-checkcodebase:		locate-checkpatch
-			@echo "  CHECKING STYLE"
-			@if test -d .git ; then	\
-				git ls-files | grep -v stdlib | while read GIT_FILE ; do ${CHECKPATCH} ${CHECKCODE_ARGS} -f $$GIT_FILE ; done ;	\
-			 else			\
-				 find . -type f -not -iwholename "*.git*" -not -iwholename "*build*" -not -iwholename "*stdlib*" -exec ${CHECKPATCH} ${CHECKCODE_ARGS} -f {} \; ;	\
-			 fi
+all: msg_start
 
-checkpatch:		locate-checkpatch
-			@echo "  CHECKING STYLE"
-			@git format-patch --stdout ${BASE_COMMIT} | ${CHECKPATCH} ${CHECKPATCH_ARGS} - || true
+msg_start:
+	@echo "Building ${PLAT}"
 
-.PHONY: ${CRTTOOL}
-${CRTTOOL}:
-			${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH}
-			@echo
-			@echo "Built $@ successfully"
-			@echo
-
-.PHONY: ${FIPTOOL}
-${FIPTOOL}:
-			${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH}
-
-define match_goals
-$(strip $(foreach goal,$(1),$(filter $(goal),$(MAKECMDGOALS))))
-endef
-
-# List of rules that involve building things
-BUILD_TARGETS := all bl1 bl2 bl31 bl32 fip
-
-# Does the list of goals specified on the command line include a build target?
-ifneq ($(call match_goals,${BUILD_TARGETS}),)
-IS_ANYTHING_TO_BUILD := 1
+# Check if deprecated declarations should be treated as error or not.
+ifeq (${ERROR_DEPRECATED},0)
+    TF_CFLAGS		+= 	-Wno-error=deprecated-declarations
 endif
 
-define MAKE_C
-
-$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
-$(eval PREREQUISITES := $(patsubst %.o,%.d,$(OBJ)))
-
-$(OBJ) : $(2)
-	@echo "  CC      $$<"
-	$$(Q)$$(CC) $$(CFLAGS) -DIMAGE_BL$(3) -c $$< -o $$@
-
-
-$(PREREQUISITES) : $(2)
-	@echo "  DEPS    $$@"
-	@mkdir -p $(1)
-	$$(Q)$$(CC) $$(CFLAGS) -M -MT $(OBJ) -MF $$@ $$<
-
-ifdef IS_ANYTHING_TO_BUILD
--include $(PREREQUISITES)
-endif
-
-endef
-
-
-define MAKE_S
-
-$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
-$(eval PREREQUISITES := $(patsubst %.o,%.d,$(OBJ)))
-
-$(OBJ) : $(2)
-	@echo "  AS      $$<"
-	$$(Q)$$(AS) $$(ASFLAGS) -DIMAGE_BL$(3) -c $$< -o $$@
-
-$(PREREQUISITES) : $(2)
-	@echo "  DEPS    $$@"
-	@mkdir -p $(1)
-	$$(Q)$$(AS) $$(ASFLAGS) -M -MT $(OBJ) -MF $$@ $$<
-
-ifdef IS_ANYTHING_TO_BUILD
--include $(PREREQUISITES)
-endif
-
-endef
-
-
-define MAKE_LD
-
-$(eval PREREQUISITES := $(1).d)
-
-$(1) : $(2)
-	@echo "  PP      $$<"
-	$$(Q)$$(AS) $$(ASFLAGS) -P -E -D__LINKER__ -o $$@ $$<
-
-$(PREREQUISITES) : $(2)
-	@echo "  DEPS    $$@"
-	@mkdir -p $$(dir $$@)
-	$$(Q)$$(AS) $$(ASFLAGS) -M -MT $(1) -MF $$@ $$<
-
-ifdef IS_ANYTHING_TO_BUILD
--include $(PREREQUISITES)
-endif
-
-endef
-
-
-define MAKE_OBJS
-	$(eval C_OBJS := $(filter %.c,$(2)))
-	$(eval REMAIN := $(filter-out %.c,$(2)))
-	$(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3))))
-
-	$(eval S_OBJS := $(filter %.S,$(REMAIN)))
-	$(eval REMAIN := $(filter-out %.S,$(REMAIN)))
-	$(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3))))
-
-	$(and $(REMAIN),$(error Unexpected source files present: $(REMAIN)))
-endef
-
-
-# NOTE: The line continuation '\' is required in the next define otherwise we
-# end up with a line-feed characer at the end of the last c filename.
-# Also bare this issue in mind if extending the list of supported filetypes.
-define SOURCES_TO_OBJS
-	$(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \
-	$(notdir $(patsubst %.S,%.o,$(filter %.S,$(1))))
-endef
-
-
-# MAKE_TOOL_ARGS macro defines the command line arguments for the FIP and CRT
-# tools at each BL stage. Arguments:
-#   $(1) = BL stage (2, 30, 31, 32, 33)
-#   $(2) = Binary file
-#   $(3) = In FIP (false if empty)
-#   $(4) = Create certificates (false if empty)
-#   $(5) = Create key certificate (false if empty)
-#   $(6) = Private key (optional)
-define MAKE_TOOL_ARGS
-
-$(eval FIP_DEPS += $(if $3,$(2),))
-$(eval FIP_ARGS += $(if $3,--bl$(1) $(2),))
-$(eval FIP_ARGS += $(if $4,--bl$(1)-cert $(BUILD_PLAT)/bl$(1).crt))
-$(eval FIP_ARGS += $(if $4,$(if $5,--bl$(1)-key-cert $(BUILD_PLAT)/bl$(1)_key.crt)))
-
-$(eval CRT_DEPS += $(if $4,$(2),))
-$(eval CRT_DEPS += $(if $4,$(if $6,$(6),)))
-$(eval CRT_ARGS += $(if $4,--bl$(1) $(2)))
-$(eval CRT_ARGS += $(if $4,$(if $6,--bl$(1)-key $(6))))
-$(eval CRT_ARGS += $(if $4,--bl$(1)-cert $(BUILD_PLAT)/bl$(1).crt))
-$(eval CRT_ARGS += $(if $4,$(if $5,--bl$(1)-key-cert $(BUILD_PLAT)/bl$(1)_key.crt)))
-
-endef
-
-
-# MAKE_BL macro defines the targets and options to build each BL image.
-# Arguments:
-#   $(1) = BL stage (2, 30, 31, 32, 33)
-#   $(2) = In FIP (false if empty)
-#   $(3) = Create certificates (false if empty)
-#   $(4) = Create key certificate (false if empty)
-#   $(5) = Private key (optional)
-define MAKE_BL
-	$(eval BUILD_DIR  := ${BUILD_PLAT}/bl$(1))
-	$(eval SOURCES    := $(BL$(1)_SOURCES) $(BL_COMMON_SOURCES) $(PLAT_BL_COMMON_SOURCES))
-	$(eval OBJS       := $(addprefix $(BUILD_DIR)/,$(call SOURCES_TO_OBJS,$(SOURCES))))
-	$(eval LINKERFILE := $(BUILD_DIR)/bl$(1).ld)
-	$(eval MAPFILE    := $(BUILD_DIR)/bl$(1).map)
-	$(eval ELF        := $(BUILD_DIR)/bl$(1).elf)
-	$(eval DUMP       := $(BUILD_DIR)/bl$(1).dump)
-	$(eval BIN        := $(BUILD_PLAT)/bl$(1).bin)
-
-	$(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),$(1)))
-	$(eval $(call MAKE_LD,$(LINKERFILE),$(BL$(1)_LINKERFILE)))
-
-$(BUILD_DIR) :
-	$$(Q)mkdir -p "$$@"
-
-$(ELF) : $(OBJS) $(LINKERFILE)
-	@echo "  LD      $$@"
-	@echo 'const char build_message[] = "Built : "__TIME__", "__DATE__; \
-	       const char version_string[] = "${VERSION_STRING}";' | \
-		$$(CC) $$(CFLAGS) -xc - -o $(BUILD_DIR)/build_message.o
-	$$(Q)$$(LD) -o $$@ $$(LDFLAGS) -Map=$(MAPFILE) --script $(LINKERFILE) \
-					$(BUILD_DIR)/build_message.o $(OBJS)
-
-$(DUMP) : $(ELF)
-	@echo "  OD      $$@"
-	$${Q}$${OD} -dx $$< > $$@
-
-$(BIN) : $(ELF)
-	@echo "  BIN     $$@"
-	$$(Q)$$(OC) -O binary $$< $$@
-	@echo
-	@echo "Built $$@ successfully"
-	@echo
-
-.PHONY : bl$(1)
-bl$(1) : $(BUILD_DIR) $(BIN) $(DUMP)
-
-all : bl$(1)
-
-$(eval $(call MAKE_TOOL_ARGS,$(1),$(BIN),$(2),$(3),$(4),$(5)))
-
-endef
-
-
+# Expand build macros for the different images
 ifeq (${NEED_BL1},yes)
 $(eval $(call MAKE_BL,1))
 endif
 
 ifeq (${NEED_BL2},yes)
-$(if ${BL2}, $(eval $(call MAKE_TOOL_ARGS,2,${BL2},in_fip,${CERTS})),\
-	$(eval $(call MAKE_BL,2,in_fip,${CERTS})))
+$(if ${BL2}, $(eval $(call MAKE_TOOL_ARGS,2,${BL2},tb-fw)),\
+	$(eval $(call MAKE_BL,2,tb-fw)))
+endif
+
+ifeq (${NEED_SCP_BL2},yes)
+$(eval $(call FIP_ADD_IMG,SCP_BL2,--scp-fw))
 endif
 
 ifeq (${NEED_BL31},yes)
 BL31_SOURCES += ${SPD_SOURCES}
-$(if ${BL31}, $(eval $(call MAKE_TOOL_ARGS,31,${BL31},in_fip,${CERTS},${CERTS},${BL31_KEY})),\
-	$(eval $(call MAKE_BL,31,in_fip,${CERTS},${CERTS},${BL31_KEY})))
+$(if ${BL31}, $(eval $(call MAKE_TOOL_ARGS,31,${BL31},soc-fw)),\
+	$(eval $(call MAKE_BL,31,soc-fw)))
 endif
 
+# If a BL32 image is needed but neither BL32 nor BL32_SOURCES is defined, the
+# build system will call FIP_ADD_IMG to print a warning message and abort the
+# process. Note that the dependency on BL32 applies to the FIP only.
 ifeq (${NEED_BL32},yes)
-$(if ${BL32}, $(eval $(call MAKE_TOOL_ARGS,32,${BL32},in_fip,${CERTS},${CERTS},${BL32_KEY})),\
-	$(eval $(call MAKE_BL,32,in_fip,${CERTS},${CERTS},${BL32_KEY})))
+$(if ${BL32}, $(eval $(call MAKE_TOOL_ARGS,32,${BL32},tos-fw)),\
+	$(if ${BL32_SOURCES}, $(eval $(call MAKE_BL,32,tos-fw)),\
+		$(eval $(call FIP_ADD_IMG,BL32,--tos-fw))))
 endif
 
-ifeq (${NEED_BL30},yes)
-$(if ${BL30}, $(eval $(call MAKE_TOOL_ARGS,30,${BL30},in_fip,${CERTS},${CERTS},${BL30_KEY})))
-
-# If BL3-0 is needed by the platform then 'BL30' variable must be defined.
-check_bl30:
-	$(if ${BL30},,$(error "To build a FIP for platform ${PLAT}, please set BL30 to point to the SCP firmware"))
-else
-
-# If BL3-0 is not needed by the platform but the user still specified the path
-# to a BL3-0 image then warn him that it will be ignored.
-check_bl30:
-	$(if ${BL30},$(warning "BL3-0 is not supported on platform ${PLAT}, it will just be ignored"),)
-endif
-
+# Add the BL33 image if required by the platform
 ifeq (${NEED_BL33},yes)
-$(if ${BL33}, $(eval $(call MAKE_TOOL_ARGS,33,${BL33},in_fip,${CERTS},${CERTS},${BL33_KEY})))
+$(eval $(call FIP_ADD_IMG,BL33,--nt-fw))
+endif
 
-# If BL3-3 is needed by the platform then 'BL33' variable must be defined.
-check_bl33:
-	$(if ${BL33},,$(error "To build a FIP, please set BL33 to point to the Normal World binary, eg: BL33=../uefi/FVP_AARCH64_EFI.fd"))
+ifeq (${NEED_BL2U},yes)
+BL2U_PATH	:= $(if ${BL2U},${BL2U},$(call IMG_BIN,2u))
+$(if ${BL2U}, ,$(eval $(call MAKE_BL,2u)))
+$(eval $(call FWU_FIP_ADD_PAYLOAD,${BL2U_PATH},--ap-fwu-cfg))
+endif
+
+locate-checkpatch:
+ifndef CHECKPATCH
+	$(error "Please set CHECKPATCH to point to the Linux checkpatch.pl file, eg: CHECKPATCH=../linux/scripts/checkpatch.pl")
 else
-
-# If BL3-3 is not needed by the platform but the user still specified the path
-# to a BL3-3 image then warn him that it will be ignored.
-check_bl33:
-	$(if ${BL33},$(warning "BL3-3 is not supported on platform ${PLAT}, it will just be ignored"),)
+ifeq (,$(wildcard ${CHECKPATCH}))
+	$(error "The file CHECKPATCH points to cannot be found, use eg: CHECKPATCH=../linux/scripts/checkpatch.pl")
+endif
 endif
 
-# Add the dependency on the certificates
+clean:
+	@echo "  CLEAN"
+	$(call SHELL_REMOVE_DIR,${BUILD_PLAT})
+	${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH} clean
+	${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH} clean
+
+realclean distclean:
+	@echo "  REALCLEAN"
+	$(call SHELL_REMOVE_DIR,${BUILD_BASE})
+	$(call SHELL_DELETE_ALL, ${CURDIR}/cscope.*)
+	${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH} clean
+	${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH} clean
+
+checkcodebase:		locate-checkpatch
+	@echo "  CHECKING STYLE"
+	@if test -d .git ; then						\
+		git ls-files | grep -E -v 'libfdt|stdlib|docs|\.md' |	\
+		while read GIT_FILE ;					\
+		do ${CHECKPATCH} ${CHECKCODE_ARGS} -f $$GIT_FILE ;	\
+		done ;							\
+	else								\
+		 find . -type f -not -iwholename "*.git*"		\
+		 -not -iwholename "*build*"				\
+		 -not -iwholename "*libfdt*"				\
+		 -not -iwholename "*stdlib*"				\
+		 -not -iwholename "*docs*"				\
+		 -not -iwholename "*.md"				\
+		 -exec ${CHECKPATCH} ${CHECKCODE_ARGS} -f {} \; ;	\
+	fi
+
+checkpatch:		locate-checkpatch
+	@echo "  CHECKING STYLE"
+	${Q}git format-patch --stdout ${BASE_COMMIT}..HEAD -- ${CHECK_PATHS} | ${CHECKPATCH} - || true
+
+certtool: ${CRTTOOL}
+
+.PHONY: ${CRTTOOL}
+${CRTTOOL}:
+	${Q}${MAKE} PLAT=${PLAT} USE_TBBR_DEFS=${USE_TBBR_DEFS} --no-print-directory -C ${CRTTOOLPATH}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
 ifneq (${GENERATE_COT},0)
-    all: certificates
+certificates: ${CRT_DEPS} ${CRTTOOL}
+	${Q}${CRTTOOL} ${CRT_ARGS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@echo "Certificates can be found in ${BUILD_PLAT}"
+	@${ECHO_BLANK_LINE}
 endif
 
-certificates: ${CRT_DEPS} ${CRTTOOL} check_bl30 check_bl33
-			${Q}${CRTTOOL} ${CRT_ARGS}
-			@echo
-			@echo "Built $@ successfully"
-			@echo "Certificates can be found in ${BUILD_PLAT}"
-			@echo
+${BUILD_PLAT}/${FIP_NAME}: ${FIP_DEPS} ${FIPTOOL}
+	${Q}${FIPTOOL} create ${FIP_ARGS} $@
+	${Q}${FIPTOOL} info $@
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
 
-${BUILD_PLAT}/${FIP_NAME}: ${FIP_DEPS} ${FIPTOOL} check_bl30 check_bl33
-			${Q}${FIPTOOL} --dump \
-				${FIP_ARGS} \
-				$@
-			@echo
-			@echo "Built $@ successfully"
-			@echo
+ifneq (${GENERATE_COT},0)
+fwu_certificates: ${FWU_CRT_DEPS} ${CRTTOOL}
+	${Q}${CRTTOOL} ${FWU_CRT_ARGS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@echo "FWU certificates can be found in ${BUILD_PLAT}"
+	@${ECHO_BLANK_LINE}
+endif
 
+${BUILD_PLAT}/${FWU_FIP_NAME}: ${FWU_FIP_DEPS} ${FIPTOOL}
+	${Q}${FIPTOOL} create ${FWU_FIP_ARGS} $@
+	${Q}${FIPTOOL} info $@
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+fiptool: ${FIPTOOL}
+fip: ${BUILD_PLAT}/${FIP_NAME}
+fwu_fip: ${BUILD_PLAT}/${FWU_FIP_NAME}
+
+.PHONY: ${FIPTOOL}
+${FIPTOOL}:
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${FIPTOOLPATH}
 
 cscope:
 	@echo "  CSCOPE"
@@ -624,16 +677,27 @@
 	${Q}cscope -b -q -k
 
 help:
-	@echo "usage: ${MAKE} PLAT=<${HELP_PLATFORMS}> <all|bl1|bl2|bl31|distclean|clean|checkcodebase|checkpatch>"
+	@echo "usage: ${MAKE} PLAT=<${PLATFORM_LIST}> [OPTIONS] [TARGET]"
 	@echo ""
 	@echo "PLAT is used to specify which platform you wish to build."
 	@echo "If no platform is specified, PLAT defaults to: ${DEFAULT_PLAT}"
 	@echo ""
+	@echo "Please refer to the User Guide for a list of all supported options."
+	@echo "Note that the build system doesn't track dependencies for build "
+	@echo "options. Therefore, if any of the build options are changed "
+	@echo "from a previous build, a clean build must be performed."
+	@echo ""
 	@echo "Supported Targets:"
-	@echo "  all            Build the BL1, BL2 and BL31 binaries"
+	@echo "  all            Build all individual bootloader binaries"
 	@echo "  bl1            Build the BL1 binary"
 	@echo "  bl2            Build the BL2 binary"
+	@echo "  bl2u           Build the BL2U binary"
 	@echo "  bl31           Build the BL31 binary"
+	@echo "  bl32           Build the BL32 binary. If ARCH=aarch32, then "
+	@echo "                 this builds secure payload specified by AARCH32_SP"
+	@echo "  certificates   Build the certificates (requires 'GENERATE_COT=1')"
+	@echo "  fip            Build the Firmware Image Package (FIP)"
+	@echo "  fwu_fip        Build the FWU Firmware Image Package (FIP)"
 	@echo "  checkcodebase  Check the coding style of the entire source tree"
 	@echo "  checkpatch     Check the coding style on changes in the current"
 	@echo "                 branch against BASE_COMMIT (default origin/master)"
@@ -641,9 +705,9 @@
 	@echo "  cscope         Generate cscope index"
 	@echo "  distclean      Remove all build artifacts for all platforms"
 	@echo "  certtool       Build the Certificate generation tool"
-	@echo "  fiptool        Build the Firmware Image Package(FIP) creation tool"
+	@echo "  fiptool        Build the Firmware Image Package (FIP) creation tool"
 	@echo ""
-	@echo "note: most build targets require PLAT to be set to a specific platform."
+	@echo "Note: most build targets require PLAT to be set to a specific platform."
 	@echo ""
 	@echo "example: build all targets for the FVP platform:"
 	@echo "  CROSS_COMPILE=aarch64-none-elf- make PLAT=fvp all"
diff --git a/acknowledgements.md b/acknowledgements.md
deleted file mode 100644
index a428f2f..0000000
--- a/acknowledgements.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Contributor Acknowledgements
-============================
-
-Companies
----------
-Linaro Limited
-
-Individuals
------------
diff --git a/acknowledgements.rst b/acknowledgements.rst
new file mode 100644
index 0000000..59f569e
--- /dev/null
+++ b/acknowledgements.rst
@@ -0,0 +1,16 @@
+Contributor Acknowledgements
+============================
+
+Companies
+---------
+
+Linaro Limited
+
+NVIDIA Corporation
+
+Socionext Inc.
+
+Xilinx, Inc.
+
+Individuals
+-----------
diff --git a/bl1/aarch32/bl1_arch_setup.c b/bl1/aarch32/bl1_arch_setup.c
new file mode 100644
index 0000000..ce04aaa
--- /dev/null
+++ b/bl1/aarch32/bl1_arch_setup.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "../bl1_private.h"
+
+/*******************************************************************************
+ * TODO: Function that does the first bit of architectural setup.
+ ******************************************************************************/
+void bl1_arch_setup(void)
+{
+
+}
diff --git a/bl1/aarch32/bl1_context_mgmt.c b/bl1/aarch32/bl1_context_mgmt.c
new file mode 100644
index 0000000..6623dfc
--- /dev/null
+++ b/bl1/aarch32/bl1_context_mgmt.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <smcc_helpers.h>
+#include "../bl1_private.h"
+
+/*
+ * Following arrays will be used for context management.
+ * There are 2 instances, for the Secure and Non-Secure contexts.
+ */
+static cpu_context_t bl1_cpu_context[2];
+static smc_ctx_t bl1_smc_context[2];
+
+/* Following contains the next cpu context pointer. */
+static void *bl1_next_cpu_context_ptr;
+
+/* Following contains the next smc context pointer. */
+static void *bl1_next_smc_context_ptr;
+
+/* Following functions are used for SMC context handling */
+void *smc_get_ctx(unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	return &bl1_smc_context[security_state];
+}
+
+void smc_set_next_ctx(unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	bl1_next_smc_context_ptr = &bl1_smc_context[security_state];
+}
+
+void *smc_get_next_ctx(void)
+{
+	return bl1_next_smc_context_ptr;
+}
+
+/* Following functions are used for CPU context handling */
+void *cm_get_context(uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	return &bl1_cpu_context[security_state];
+}
+
+void cm_set_next_context(void *cpu_context)
+{
+	assert(cpu_context);
+	bl1_next_cpu_context_ptr = cpu_context;
+}
+
+void *cm_get_next_context(void)
+{
+	return bl1_next_cpu_context_ptr;
+}
+
+/*******************************************************************************
+ * Following function copies GP regs r0-r4, lr and spsr,
+ * from the CPU context to the SMC context structures.
+ ******************************************************************************/
+static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
+		smc_ctx_t *next_smc_ctx)
+{
+	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+	next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
+	next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
+	next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
+	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+	next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
+}
+
+/*******************************************************************************
+ * Following function flushes the SMC & CPU context pointer and its data.
+ ******************************************************************************/
+static void flush_smc_and_cpu_ctx(void)
+{
+	flush_dcache_range((uintptr_t)&bl1_next_smc_context_ptr,
+		sizeof(bl1_next_smc_context_ptr));
+	flush_dcache_range((uintptr_t)bl1_next_smc_context_ptr,
+		sizeof(smc_ctx_t));
+
+	flush_dcache_range((uintptr_t)&bl1_next_cpu_context_ptr,
+		sizeof(bl1_next_cpu_context_ptr));
+	flush_dcache_range((uintptr_t)bl1_next_cpu_context_ptr,
+		sizeof(cpu_context_t));
+}
+
+/*******************************************************************************
+ * This function prepares the context for Secure/Normal world images.
+ * Normal world images are transitioned to HYP (if supported), else SVC.
+ ******************************************************************************/
+void bl1_prepare_next_image(unsigned int image_id)
+{
+	unsigned int security_state;
+	image_desc_t *image_desc;
+	entry_point_info_t *next_bl_ep;
+
+	/* Get the image descriptor. */
+	image_desc = bl1_plat_get_image_desc(image_id);
+	assert(image_desc);
+
+	/* Get the entry point info. */
+	next_bl_ep = &image_desc->ep_info;
+
+	/* Get the image security state. */
+	security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);
+
+	/* Prepare the SPSR for the next BL image. */
+	if (security_state == SECURE) {
+		next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	} else {
+		/* Use HYP mode if supported else use SVC. */
+		if (GET_VIRT_EXT(read_id_pfr1())) {
+			next_bl_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM,
+				SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+		} else {
+			next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+				SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+		}
+	}
+
+	/* Allow the platform to make changes */
+	bl1_plat_set_ep_info(image_id, next_bl_ep);
+
+	/* Prepare the cpu context for the next BL image. */
+	cm_init_my_context(next_bl_ep);
+	cm_prepare_el3_exit(security_state);
+	cm_set_next_context(cm_get_context(security_state));
+
+	/* Prepare the smc context for the next BL image. */
+	smc_set_next_ctx(security_state);
+	copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
+		smc_get_next_ctx());
+
+	/*
+	 * If the next image is non-secure, then we need to program the banked
+	 * non-secure SCTLR. This is not required when the next image is secure
+	 * because in AArch32, we expect the secure world to have the same
+	 * SCTLR settings.
+	 */
+	if (security_state == NON_SECURE) {
+		cpu_context_t *ctx = cm_get_context(security_state);
+		u_register_t ns_sctlr;
+
+		/* Temporarily set the NS bit to access NS SCTLR */
+		write_scr(read_scr() | SCR_NS_BIT);
+		isb();
+
+		ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+		write_sctlr(ns_sctlr);
+		isb();
+
+		write_scr(read_scr() & ~SCR_NS_BIT);
+		isb();
+	}
+
+	/*
+	 * Flush the SMC & CPU context and the (next) pointers,
+	 * to access them after caches are disabled.
+	 */
+	flush_smc_and_cpu_ctx();
+
+	/* Indicate that image is in execution state. */
+	image_desc->state = IMAGE_STATE_EXECUTED;
+
+	print_entry_point_info(next_bl_ep);
+}
diff --git a/bl1/aarch32/bl1_entrypoint.S b/bl1/aarch32/bl1_entrypoint.S
new file mode 100644
index 0000000..7780626
--- /dev/null
+++ b/bl1/aarch32/bl1_entrypoint.S
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+
+	.globl	bl1_vector_table
+	.globl	bl1_entrypoint
+
+	/* -----------------------------------------------------
+	 * Setup the vector table to support SVC & MON mode.
+	 * -----------------------------------------------------
+	 */
+vector_base bl1_vector_table
+	b	bl1_entrypoint
+	b	report_exception	/* Undef */
+	b	bl1_aarch32_smc_handler	/* SMC call */
+	b	report_exception	/* Prefetch abort */
+	b	report_exception	/* Data abort */
+	b	report_exception	/* Reserved */
+	b	report_exception	/* IRQ */
+	b	report_exception	/* FIQ */
+
+	/* -----------------------------------------------------
+	 * bl1_entrypoint() is the entry point into the trusted
+	 * firmware code when a cpu is released from warm or
+	 * cold reset.
+	 * -----------------------------------------------------
+	 */
+
+func bl1_entrypoint
+	/* ---------------------------------------------------------------------
+	 * If the reset address is programmable then bl1_entrypoint() is
+	 * executed only on the cold boot path. Therefore, we can skip the warm
+	 * boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
+	 */
+	el3_entrypoint_common					\
+		_init_sctlr=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=bl1_vector_table
+
+	/* -----------------------------------------------------
+	 * Perform early platform setup and platform-
+	 * specific early architectural setup, e.g. MMU setup.
+	 * -----------------------------------------------------
+	 */
+	bl	bl1_early_platform_setup
+	bl	bl1_plat_arch_setup
+
+	/* -----------------------------------------------------
+	 * Jump to main function.
+	 * -----------------------------------------------------
+	 */
+	bl	bl1_main
+
+	/* -----------------------------------------------------
+	 * Jump to next image.
+	 * -----------------------------------------------------
+	 */
+
+	/*
+	 * Get the smc_context for the next BL image and save it in `r4`;
+	 * its GP/system registers are programmed when exiting monitor mode.
+	 */
+	bl	smc_get_next_ctx
+	mov	r4, r0
+
+	/* Only turn off the MMU if going to the secure world */
+	ldr	r5, [r4, #SMC_CTX_SCR]
+	tst	r5, #SCR_NS_BIT
+	bne	skip_mmu_off
+
+	/*
+	 * MMU needs to be disabled because both BL1 and BL2/BL2U execute
+	 * in PL1, and therefore share the same address space.
+	 * BL2/BL2U will initialize the address space according to its
+	 * own requirement.
+	 */
+	bl	disable_mmu_icache_secure
+	stcopr	r0, TLBIALL
+	dsb	sy
+	isb
+
+skip_mmu_off:
+	/* Restore smc_context from `r4` and exit secure monitor mode. */
+	mov	r0, r4
+	monitor_exit
+endfunc bl1_entrypoint
diff --git a/bl1/aarch32/bl1_exceptions.S b/bl1/aarch32/bl1_exceptions.S
new file mode 100644
index 0000000..f73db40
--- /dev/null
+++ b/bl1/aarch32/bl1_exceptions.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl1.h>
+#include <bl_common.h>
+#include <context.h>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+#include <xlat_tables.h>
+
+	.globl	bl1_aarch32_smc_handler
+
+
+func bl1_aarch32_smc_handler
+	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+	str	lr, [sp, #SMC_CTX_LR_MON]
+
+	/* ------------------------------------------------
+	 * SMC in BL1 is handled assuming that the MMU is
+	 * turned off by BL2.
+	 * ------------------------------------------------
+	 */
+
+	/* ----------------------------------------------
+	 * Detect if this is a RUN_IMAGE or other SMC.
+	 * ----------------------------------------------
+	 */
+	mov	lr, #BL1_SMC_RUN_IMAGE
+	cmp	lr, r0
+	bne	smc_handler
+
+	/* ------------------------------------------------
+	 * Make sure only Secure world reaches here.
+	 * ------------------------------------------------
+	 */
+	ldcopr  r8, SCR
+	tst	r8, #SCR_NS_BIT
+	blne	report_exception
+
+	/* ---------------------------------------------------------------------
+	 * Pass control to the next secure image.
+	 * Here r1 is expected to contain the address of an entry_point_info_t
+	 * structure describing the next BL entrypoint.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	r8, r1
+	mov	r0, r1
+	bl	bl1_print_next_bl_ep_info
+
+#if SPIN_ON_BL1_EXIT
+	bl	print_debug_loop_message
+debug_loop:
+	b	debug_loop
+#endif
+
+	mov	r0, r8
+	bl	bl1_plat_prepare_exit
+
+	stcopr	r0, TLBIALL
+	dsb	sy
+	isb
+
+	/*
+	 * Extract the PC and SPSR from the `entry_point_info_t` structure
+	 * and load them into the LR and SPSR registers respectively.
+	 */
+	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
+	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
+	msr	spsr, r1
+
+	add	r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
+	ldm	r8, {r0, r1, r2, r3}
+	eret
+endfunc bl1_aarch32_smc_handler
+
+	/* -----------------------------------------------------
+	 * Save Secure/Normal world context and jump to
+	 * BL1 SMC handler.
+	 * -----------------------------------------------------
+	 */
+func smc_handler
+	/* -----------------------------------------------------
+	 * Save the GP registers.
+	 * -----------------------------------------------------
+	 */
+	smcc_save_gp_mode_regs
+
+	/*
+	 * `sp` still points to `smc_ctx_t`. Save it to a register
+	 * and restore the C runtime stack pointer to `sp`.
+	 */
+	mov	r6, sp
+	ldr	sp, [r6, #SMC_CTX_SP_MON]
+
+	ldr	r0, [r6, #SMC_CTX_SCR]
+	and	r7, r0, #SCR_NS_BIT		/* flags */
+
+	/* Switch to Secure Mode */
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+
+	/* If caller is from Secure world then turn on the MMU */
+	tst	r7, #SCR_NS_BIT
+	bne	skip_mmu_on
+
+	/* Turn on the MMU */
+	mov	r0, #DISABLE_DCACHE
+	bl	enable_mmu_secure
+
+	/* Enable the data cache. */
+	ldcopr	r9, SCTLR
+	orr	r9, r9, #SCTLR_C_BIT
+	stcopr	r9, SCTLR
+	isb
+
+skip_mmu_on:
+	/* Prepare arguments for BL1 SMC wrapper. */
+	ldr	r0, [r6, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	mov	r1, #0				/* cookie */
+	mov	r2, r6				/* handle */
+	mov	r3, r7				/* flags */
+	bl	bl1_smc_wrapper
+
+	/* Get the smc_context for the next BL image */
+	bl	smc_get_next_ctx
+	mov	r4, r0
+
+	/* Only turn off the MMU if going to the secure world */
+	ldr	r5, [r4, #SMC_CTX_SCR]
+	tst	r5, #SCR_NS_BIT
+	bne	skip_mmu_off
+
+	/* Disable the MMU */
+	bl	disable_mmu_icache_secure
+	stcopr	r0, TLBIALL
+	dsb	sy
+	isb
+
+skip_mmu_off:
+	/* -----------------------------------------------------
+	 * Do the transition to next BL image.
+	 * -----------------------------------------------------
+	 */
+	mov	r0, r4
+	monitor_exit
+endfunc smc_handler
diff --git a/bl1/aarch64/bl1_arch_setup.c b/bl1/aarch64/bl1_arch_setup.c
index 6a3f062..624bd80 100644
--- a/bl1/aarch64/bl1_arch_setup.c
+++ b/bl1/aarch64/bl1_arch_setup.c
@@ -1,35 +1,12 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <arch_helpers.h>
+#include "../bl1_private.h"
 
 /*******************************************************************************
  * Function that does the first bit of architectural setup that affects
@@ -38,7 +15,7 @@
 void bl1_arch_setup(void)
 {
 	/* Set the next EL to be AArch64 */
-	write_scr_el3(SCR_RES1_BITS | SCR_RW_BIT);
+	write_scr_el3(read_scr_el3() | SCR_RW_BIT);
 }
 
 /*******************************************************************************
diff --git a/bl1/aarch64/bl1_context_mgmt.c b/bl1/aarch64/bl1_context_mgmt.c
new file mode 100644
index 0000000..b9304dc
--- /dev/null
+++ b/bl1/aarch64/bl1_context_mgmt.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include "../bl1_private.h"
+
+/*
+ * The following array is used for context management.
+ * There are 2 instances, for the Secure and Non-Secure contexts.
+ */
+static cpu_context_t bl1_cpu_context[2];
+
+/* The following array holds the CPU context pointers. */
+static void *bl1_cpu_context_ptr[2];
+
+
+void *cm_get_context(uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	return bl1_cpu_context_ptr[security_state];
+}
+
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+	bl1_cpu_context_ptr[security_state] = context;
+}
+
+/*******************************************************************************
+ * This function prepares the context for Secure/Normal world images.
+ * Normal world images are transitioned to EL2 (if supported), else EL1.
+ ******************************************************************************/
+void bl1_prepare_next_image(unsigned int image_id)
+{
+	unsigned int security_state;
+	image_desc_t *image_desc;
+	entry_point_info_t *next_bl_ep;
+
+#if CTX_INCLUDE_AARCH32_REGS
+	/*
+	 * Ensure that the build flag to save AArch32 system registers in CPU
+	 * context is not set for AArch64-only platforms.
+	 */
+	if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
+		ERROR("EL1 supports AArch64-only. Please set build flag "
+				"CTX_INCLUDE_AARCH32_REGS = 0");
+		panic();
+	}
+#endif
+
+	/* Get the image descriptor. */
+	image_desc = bl1_plat_get_image_desc(image_id);
+	assert(image_desc);
+
+	/* Get the entry point info. */
+	next_bl_ep = &image_desc->ep_info;
+
+	/* Get the image security state. */
+	security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);
+
+	/* Setup the Secure/Non-Secure context if not done already. */
+	if (!cm_get_context(security_state))
+		cm_set_context(&bl1_cpu_context[security_state], security_state);
+
+	/* Prepare the SPSR for the next BL image. */
+	if (security_state == SECURE) {
+		next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+				   DISABLE_ALL_EXCEPTIONS);
+	} else {
+		/* Use EL2 if supported; else use EL1. */
+		if (EL_IMPLEMENTED(2)) {
+			next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
+		} else {
+			next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+			   DISABLE_ALL_EXCEPTIONS);
+		}
+	}
+
+	/* Allow the platform to modify the entry point info. */
+	bl1_plat_set_ep_info(image_id, next_bl_ep);
+
+	/* Prepare the context for the next BL image. */
+	cm_init_my_context(next_bl_ep);
+	cm_prepare_el3_exit(security_state);
+
+	/* Indicate that image is in execution state. */
+	image_desc->state = IMAGE_STATE_EXECUTED;
+
+	print_entry_point_info(next_bl_ep);
+}
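Both the AArch32 and AArch64 variants call bl1_plat_set_ep_info() before initialising the context, giving the platform a chance to adjust the entry point information. A hypothetical platform override, for illustration only; the image ID check and the argument value below are assumptions, not part of this patch:

    /*
     * Hypothetical platform example: hand a platform-defined token to the
     * next BL image through arg1. BL2_IMAGE_ID and the value are purely
     * illustrative.
     */
    void bl1_plat_set_ep_info(unsigned int image_id,
    		entry_point_info_t *ep_info)
    {
    	if (image_id == BL2_IMAGE_ID)
    		ep_info->args.arg1 = 0x12345678;
    }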
diff --git a/bl1/aarch64/bl1_entrypoint.S b/bl1/aarch64/bl1_entrypoint.S
index cfc6292..f7e02e9 100644
--- a/bl1/aarch64/bl1_entrypoint.S
+++ b/bl1/aarch64/bl1_entrypoint.S
@@ -1,35 +1,11 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
-#include <asm_macros.S>
+#include <el3_common_macros.S>
 
 	.globl	bl1_entrypoint
 
@@ -42,116 +18,19 @@
 	 */
 
 func bl1_entrypoint
-	/* ---------------------------------------------
-	 * Set the CPU endianness before doing anything
-	 * that might involve memory reads or writes.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, sctlr_el3
-	bic	x0, x0, #SCTLR_EE_BIT
-	msr	sctlr_el3, x0
-	isb
-
-	/* ---------------------------------------------
-	 * Perform any processor specific actions upon
-	 * reset e.g. cache, tlb invalidations etc.
-	 * ---------------------------------------------
-	 */
-	bl	reset_handler
-
-	/* ---------------------------------------------
-	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks
-	 * ---------------------------------------------
-	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
-	mrs	x0, sctlr_el3
-	orr	x0, x0, x1
-	msr	sctlr_el3, x0
-	isb
-
-	/* ---------------------------------------------
-	 * Set the exception vector to something sane.
-	 * ---------------------------------------------
-	 */
-	adr	x0, bl1_exceptions
-	msr	vbar_el3, x0
-	isb
-
-	/* ---------------------------------------------
-	 * Enable the SError interrupt now that the
-	 * exception vectors have been setup.
-	 * ---------------------------------------------
-	 */
-	msr	daifclr, #DAIF_ABT_BIT
-
 	/* ---------------------------------------------------------------------
-	 * The initial state of the Architectural feature trap register
-	 * (CPTR_EL3) is unknown and it must be set to a known state. All
-	 * feature traps are disabled. Some bits in this register are marked as
-	 * Reserved and should not be modified.
-	 *
-	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
-	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
-	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
-	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
-	 *  access to trace functionality is not supported, this bit is RES0.
-	 * CPTR_EL3.TFP: This causes instructions that access the registers
-	 *  associated with Floating Point and Advanced SIMD execution to trap
-	 *  to EL3 when executed from any exception level, unless trapped to EL1
-	 *  or EL2.
+	 * If the reset address is programmable then bl1_entrypoint() is
+	 * executed only on the cold boot path. Therefore, we can skip the warm
+	 * boot mailbox mechanism.
 	 * ---------------------------------------------------------------------
 	 */
-	mrs	x0, cptr_el3
-	bic	w0, w0, #TCPAC_BIT
-	bic	w0, w0, #TTA_BIT
-	bic	w0, w0, #TFP_BIT
-	msr	cptr_el3, x0
-
-	/* -------------------------------------------------------
-	 * Will not return from this macro if it is a warm boot.
-	 * -------------------------------------------------------
-	 */
-	wait_for_entrypoint
-
-	bl	platform_mem_init
-
-	/* ---------------------------------------------
-	 * Init C runtime environment.
-	 *   - Zero-initialise the NOBITS sections.
-	 *     There are 2 of them:
-	 *       - the .bss section;
-	 *       - the coherent memory section.
-	 *   - Copy the data section from BL1 image
-	 *     (stored in ROM) to the correct location
-	 *     in RAM.
-	 * ---------------------------------------------
-	 */
-	ldr	x0, =__BSS_START__
-	ldr	x1, =__BSS_SIZE__
-	bl	zeromem16
-
-#if USE_COHERENT_MEM
-	ldr	x0, =__COHERENT_RAM_START__
-	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem16
-#endif
-
-	ldr	x0, =__DATA_RAM_START__
-	ldr	x1, =__DATA_ROM_START__
-	ldr	x2, =__DATA_SIZE__
-	bl	memcpy16
-
-	/* --------------------------------------------
-	 * Allocate a stack whose memory will be marked
-	 * as Normal-IS-WBWA when the MMU is enabled.
-	 * There is no risk of reading stale stack
-	 * memory after enabling the MMU as only the
-	 * primary cpu is running at the moment.
-	 * --------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
+	el3_entrypoint_common					\
+		_init_sctlr=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=bl1_exceptions
 
 	/* ---------------------------------------------
 	 * Architectural init. can be generic e.g.
@@ -166,9 +45,14 @@
 
 	/* --------------------------------------------------
 	 * Initialize platform and jump to our c-entry point
-	 * for this type of reset. Panic if it returns
+	 * for this type of reset.
 	 * --------------------------------------------------
 	 */
 	bl	bl1_main
-panic:
-	b	panic
+
+	/* --------------------------------------------------
+	 * Do the transition to next boot image.
+	 * --------------------------------------------------
+	 */
+	b	el3_exit
+endfunc bl1_entrypoint
diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S
index 1ca3a6c..eb98ffa 100644
--- a/bl1/aarch64/bl1_exceptions.S
+++ b/bl1/aarch64/bl1_exceptions.S
@@ -1,216 +1,276 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
+#include <bl1.h>
 #include <bl_common.h>
-#include <runtime_svc.h>
+#include <context.h>
 
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by BL1.
+ * -----------------------------------------------------------------------------
+ */
 	.globl	bl1_exceptions
 
-	.section	.vectors, "ax"; .align 11
+vector_base bl1_exceptions
 
 	/* -----------------------------------------------------
-	 * Very simple stackless exception handlers used by BL1.
-	 * -----------------------------------------------------
-	 */
-	.align	7
-bl1_exceptions:
-	/* -----------------------------------------------------
 	 * Current EL with SP0 : 0x0 - 0x200
 	 * -----------------------------------------------------
 	 */
-SynchronousExceptionSP0:
+vector_entry SynchronousExceptionSP0
 	mov	x0, #SYNC_EXCEPTION_SP_EL0
 	bl	plat_report_exception
-	b	SynchronousExceptionSP0
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionSP0
 
-	.align	7
-IrqSP0:
+vector_entry IrqSP0
 	mov	x0, #IRQ_SP_EL0
 	bl	plat_report_exception
-	b	IrqSP0
+	no_ret	plat_panic_handler
 	check_vector_size IrqSP0
 
-	.align	7
-FiqSP0:
+vector_entry FiqSP0
 	mov	x0, #FIQ_SP_EL0
 	bl	plat_report_exception
-	b	FiqSP0
+	no_ret	plat_panic_handler
 	check_vector_size FiqSP0
 
-	.align	7
-SErrorSP0:
+vector_entry SErrorSP0
 	mov	x0, #SERROR_SP_EL0
 	bl	plat_report_exception
-	b	SErrorSP0
+	no_ret	plat_panic_handler
 	check_vector_size SErrorSP0
 
 	/* -----------------------------------------------------
 	 * Current EL with SPx: 0x200 - 0x400
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionSPx:
+vector_entry SynchronousExceptionSPx
 	mov	x0, #SYNC_EXCEPTION_SP_ELX
 	bl	plat_report_exception
-	b	SynchronousExceptionSPx
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionSPx
 
-	.align	7
-IrqSPx:
+vector_entry IrqSPx
 	mov	x0, #IRQ_SP_ELX
 	bl	plat_report_exception
-	b	IrqSPx
+	no_ret	plat_panic_handler
 	check_vector_size IrqSPx
 
-	.align	7
-FiqSPx:
+vector_entry FiqSPx
 	mov	x0, #FIQ_SP_ELX
 	bl	plat_report_exception
-	b	FiqSPx
+	no_ret	plat_panic_handler
 	check_vector_size FiqSPx
 
-	.align	7
-SErrorSPx:
+vector_entry SErrorSPx
 	mov	x0, #SERROR_SP_ELX
 	bl	plat_report_exception
-	b	SErrorSPx
+	no_ret	plat_panic_handler
 	check_vector_size SErrorSPx
 
 	/* -----------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionA64:
+vector_entry SynchronousExceptionA64
 	/* Enable the SError interrupt */
 	msr	daifclr, #DAIF_ABT_BIT
 
-	/* ------------------------------------------------
-	 * Only a single SMC exception from BL2 to ask
-	 * BL1 to pass EL3 control to BL31 is expected
-	 * here.
-	 * It expects X0 with RUN_IMAGE SMC function id
-	 * X1 with address of a entry_point_info_t structure
-	 * describing the BL3-1 entrypoint
-	 * ------------------------------------------------
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 
-	mrs	x0, esr_el3
-	ubfx	x1, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
-	cmp	x1, #EC_AARCH64_SMC
-	b.ne	panic
+	/* Expect only SMC exceptions */
+	mrs	x30, esr_el3
+	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	cmp	x30, #EC_AARCH64_SMC
+	b.ne	unexpected_sync_exception
 
-	mov	x0, #RUN_IMAGE
-	cmp	x19, x0
-	b.ne	panic
-
-	mov	x0, x20
-	bl	display_boot_progress
-
-	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
-	msr	elr_el3, x0
-	msr	spsr_el3, x1
-	ubfx	x0, x1, #MODE_EL_SHIFT, #2
-	cmp	x0, #MODE_EL3
-	b.ne	panic
-
-	bl	disable_mmu_icache_el3
-	tlbi	alle3
-
-	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
-	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
-	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
-	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
-	eret
-panic:
-	mov	x0, #SYNC_EXCEPTION_AARCH64
-	bl	plat_report_exception
-
-	wfi
-	b	panic
+	b	smc_handler64
 	check_vector_size SynchronousExceptionA64
 
-	.align	7
-IrqA64:
+vector_entry IrqA64
 	mov	x0, #IRQ_AARCH64
 	bl	plat_report_exception
-	b	IrqA64
+	no_ret	plat_panic_handler
 	check_vector_size IrqA64
 
-	.align	7
-FiqA64:
+vector_entry FiqA64
 	mov	x0, #FIQ_AARCH64
 	bl	plat_report_exception
-	b	FiqA64
+	no_ret	plat_panic_handler
 	check_vector_size FiqA64
 
-	.align	7
-SErrorA64:
+vector_entry SErrorA64
 	mov	x0, #SERROR_AARCH64
 	bl	plat_report_exception
-	b   	SErrorA64
+	no_ret	plat_panic_handler
 	check_vector_size SErrorA64
 
 	/* -----------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionA32:
+vector_entry SynchronousExceptionA32
 	mov	x0, #SYNC_EXCEPTION_AARCH32
 	bl	plat_report_exception
-	b	SynchronousExceptionA32
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionA32
 
-	.align	7
-IrqA32:
+vector_entry IrqA32
 	mov	x0, #IRQ_AARCH32
 	bl	plat_report_exception
-	b	IrqA32
+	no_ret	plat_panic_handler
 	check_vector_size IrqA32
 
-	.align	7
-FiqA32:
+vector_entry FiqA32
 	mov	x0, #FIQ_AARCH32
 	bl	plat_report_exception
-	b	FiqA32
+	no_ret	plat_panic_handler
 	check_vector_size FiqA32
 
-	.align	7
-SErrorA32:
+vector_entry SErrorA32
 	mov	x0, #SERROR_AARCH32
 	bl	plat_report_exception
-	b	SErrorA32
+	no_ret	plat_panic_handler
 	check_vector_size SErrorA32
+
+
+func smc_handler64
+
+	/* ----------------------------------------------
+	 * Detect if this is a RUN_IMAGE or other SMC.
+	 * ----------------------------------------------
+	 */
+	mov	x30, #BL1_SMC_RUN_IMAGE
+	cmp	x30, x0
+	b.ne	smc_handler
+
+	/* ------------------------------------------------
+	 * Make sure only Secure world reaches here.
+	 * ------------------------------------------------
+	 */
+	mrs	x30, scr_el3
+	tst	x30, #SCR_NS_BIT
+	b.ne	unexpected_sync_exception
+
+	/* ----------------------------------------------
+	 * Handling RUN_IMAGE SMC. First switch back to
+	 * SP_EL0 for the C runtime stack.
+	 * ----------------------------------------------
+	 */
+	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+	msr	spsel, #0
+	mov	sp, x30
+
+	/* ---------------------------------------------------------------------
+	 * Pass EL3 control to the next BL image.
+	 * Here X1 is expected to hold the address of an entry_point_info_t
+	 * structure describing the next BL image entrypoint.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x20, x1
+
+	mov	x0, x20
+	bl	bl1_print_next_bl_ep_info
+
+	ldp	x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
+	msr	elr_el3, x0
+	msr	spsr_el3, x1
+	ubfx	x0, x1, #MODE_EL_SHIFT, #2
+	cmp	x0, #MODE_EL3
+	b.ne	unexpected_sync_exception
+
+	bl	disable_mmu_icache_el3
+	tlbi	alle3
+
+#if SPIN_ON_BL1_EXIT
+	bl	print_debug_loop_message
+debug_loop:
+	b	debug_loop
+#endif
+
+	mov	x0, x20
+	bl	bl1_plat_prepare_exit
+
+	ldp	x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
+	ldp	x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
+	ldp	x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
+	ldp	x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
+	eret
+endfunc smc_handler64
+
+unexpected_sync_exception:
+	mov	x0, #SYNC_EXCEPTION_AARCH64
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+
+	/* -----------------------------------------------------
+	 * Save Secure/Normal world context and jump to
+	 * BL1 SMC handler.
+	 * -----------------------------------------------------
+	 */
+smc_handler:
+	/* -----------------------------------------------------
+	 * Save the GP registers x0-x29.
+	 * TODO: Revisit to store only the SMCC-specified registers.
+	 * -----------------------------------------------------
+	 */
+	bl	save_gp_registers
+
+	/* -----------------------------------------------------
+	 * Populate the parameters for the SMC handler. We
+	 * already have x0-x4 in place. x5 will point to a
+	 * cookie (not used now). x6 will point to the context
+	 * structure (SP_EL3) and x7 will contain flags we need
+	 * to pass to the handler.
+	 * -----------------------------------------------------
+	 */
+	mov	x5, xzr
+	mov	x6, sp
+
+	/* -----------------------------------------------------
+	 * Restore the saved C runtime stack value which will
+	 * become the new SP_EL0 i.e. EL3 runtime stack. It was
+	 * saved in the 'cpu_context' structure prior to the last
+	 * ERET from EL3.
+	 * -----------------------------------------------------
+	 */
+	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+	/* ---------------------------------------------
+	 * Switch back to SP_EL0 for the C runtime stack.
+	 * ---------------------------------------------
+	 */
+	msr	spsel, #0
+	mov	sp, x12
+
+	/* -----------------------------------------------------
+	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
+	 * is a world switch during SMC handling.
+	 * -----------------------------------------------------
+	 */
+	mrs	x16, spsr_el3
+	mrs	x17, elr_el3
+	mrs	x18, scr_el3
+	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
+	bfi	x7, x18, #0, #1
+
+	/* -----------------------------------------------------
+	 * Go to BL1 SMC handler.
+	 * -----------------------------------------------------
+	 */
+	bl	bl1_smc_handler
+
+	/* -----------------------------------------------------
+	 * Do the transition to next BL image.
+	 * -----------------------------------------------------
+	 */
+	b	el3_exit
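The `bfi x7, x18, #0, #1` above copies SCR_EL3.NS into bit 0 of the flags argument; this is how the C-level SMC handlers in this patch later tell which world issued the call. A minimal sketch of the consumer side, assuming the GET_SECURITY_STATE() helper used throughout this patch; the function name is illustrative only:

    /*
     * Sketch: reject an operation unless the SMC came from the Normal world.
     */
    static int require_normal_world_caller(unsigned int flags)
    {
    	/* Bit 0 of flags mirrors SCR_EL3.NS at the time of the SMC. */
    	if (GET_SECURITY_STATE(flags) != NON_SECURE)
    		return -EPERM;

    	return 0;
    }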
diff --git a/bl1/bl1.ld.S b/bl1/bl1.ld.S
index d682384..2c453bd 100644
--- a/bl1/bl1.ld.S
+++ b/bl1/bl1.ld.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <platform_def.h>
@@ -45,12 +21,55 @@
     ASSERT(. == ALIGN(4096),
            "BL1_RO_BASE address is not aligned on a page boundary.")
 
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *bl1_entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+     } >ROM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PARSER_LIB_DESCS_START__ = .;
+        KEEP(*(.img_parser_lib_descs))
+        __PARSER_LIB_DESCS_END__ = .;
+
+        /*
+         * Ensure 8-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(8);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        /*
+         * No need to pad out the .rodata section to a page boundary. Next is
+         * the .data section, which can be mapped in ROM with the same memory
+         * attributes as the .rodata section.
+         */
+        __RODATA_END__ = .;
+    } >ROM
+#else
     ro . : {
         __RO_START__ = .;
         *bl1_entrypoint.o(.text*)
         *(.text*)
         *(.rodata*)
 
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PARSER_LIB_DESCS_START__ = .;
+        KEEP(*(.img_parser_lib_descs))
+        __PARSER_LIB_DESCS_END__ = .;
+
         /*
          * Ensure 8-byte alignment for cpu_ops so that its fields are also
          * aligned. Also ensure cpu_ops inclusion.
@@ -63,18 +82,25 @@
         *(.vectors)
         __RO_END__ = .;
     } >ROM
+#endif
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
            "cpu_ops not defined for this platform.")
 
+    . = BL1_RW_BASE;
+    ASSERT(BL1_RW_BASE == ALIGN(4096),
+           "BL1_RW_BASE address is not aligned on a page boundary.")
+
     /*
      * The .data section gets copied from ROM to RAM at runtime.
-     * Its LMA must be 16-byte aligned.
+     * Its LMA should be 16-byte aligned to allow efficient copying of 16-byte
+     * aligned regions in it.
      * Its VMA must be page-aligned as it marks the first read/write page.
+     *
+     * It must be placed at a lower address than the stacks if the stack
+     * protector is enabled. Alternatively, the .data.stack_protector_canary
+     * section can be placed independently of the main .data section.
      */
-    . = BL1_RW_BASE;
-    ASSERT(. == ALIGN(4096),
-           "BL1_RW_BASE address is not aligned on a page boundary.")
     .data . : ALIGN(16) {
         __DATA_RAM_START__ = .;
         *(.data*)
@@ -89,7 +115,8 @@
 
     /*
      * The .bss section gets initialised to 0 at runtime.
-     * Its base address must be 16-byte aligned.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
      */
     .bss : ALIGN(16) {
         __BSS_START__ = .;
@@ -133,12 +160,14 @@
 
     __DATA_ROM_START__ = LOADADDR(.data);
     __DATA_SIZE__ = SIZEOF(.data);
+
     /*
      * The .data section is the last PROGBITS section so its end marks the end
-     * of the read-only part of BL1's binary.
+     * of BL1's actual content in Trusted ROM.
      */
-    ASSERT(__DATA_ROM_START__ + __DATA_SIZE__ <= BL1_RO_LIMIT,
-           "BL1's RO section has exceeded its limit.")
+    __BL1_ROM_END__ =  __DATA_ROM_START__ + __DATA_SIZE__;
+    ASSERT(__BL1_ROM_END__ <= BL1_RO_LIMIT,
+           "BL1's ROM content has exceeded its limit.")
 
     __BSS_SIZE__ = SIZEOF(.bss);
 
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index 8e73bef..a026499 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -1,37 +1,26 @@
 #
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 BL1_SOURCES		+=	bl1/bl1_main.c				\
-				bl1/aarch64/bl1_arch_setup.c		\
-				bl1/aarch64/bl1_entrypoint.S		\
-				bl1/aarch64/bl1_exceptions.S		\
-				lib/cpus/aarch64/cpu_helpers.S
+				bl1/${ARCH}/bl1_arch_setup.c		\
+				bl1/${ARCH}/bl1_context_mgmt.c		\
+				bl1/${ARCH}/bl1_entrypoint.S		\
+				bl1/${ARCH}/bl1_exceptions.S		\
+				lib/cpus/${ARCH}/cpu_helpers.S		\
+				lib/cpus/errata_report.c		\
+				lib/el3_runtime/${ARCH}/context_mgmt.c	\
+				plat/common/plat_bl1_common.c		\
+				plat/common/${ARCH}/platform_up_stack.S
+
+ifeq (${ARCH},aarch64)
+BL1_SOURCES		+=	lib/el3_runtime/aarch64/context.S
+endif
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+BL1_SOURCES		+=	bl1/bl1_fwu.c
+endif
 
 BL1_LINKERFILE		:=	bl1/bl1.ld.S
diff --git a/bl1/bl1_fwu.c b/bl1/bl1_fwu.c
new file mode 100644
index 0000000..07a7fc8
--- /dev/null
+++ b/bl1/bl1_fwu.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl1.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+#include <utils.h>
+#include "bl1_private.h"
+
+/*
+ * Function declarations.
+ */
+static int bl1_fwu_image_copy(unsigned int image_id,
+			uintptr_t image_addr,
+			unsigned int block_size,
+			unsigned int image_size,
+			unsigned int flags);
+static int bl1_fwu_image_auth(unsigned int image_id,
+			uintptr_t image_addr,
+			unsigned int image_size,
+			unsigned int flags);
+static int bl1_fwu_image_execute(unsigned int image_id,
+			void **handle,
+			unsigned int flags);
+static register_t bl1_fwu_image_resume(register_t image_param,
+			void **handle,
+			unsigned int flags);
+static int bl1_fwu_sec_image_done(void **handle,
+			unsigned int flags);
+static int bl1_fwu_image_reset(unsigned int image_id,
+			unsigned int flags);
+__dead2 static void bl1_fwu_done(void *client_cookie, void *reserved);
+
+/*
+ * This keeps track of last executed secure image id.
+ */
+static unsigned int sec_exec_image_id = INVALID_IMAGE_ID;
+
+/* Authentication status of each image. */
+extern unsigned int auth_img_flags[];
+
+/*******************************************************************************
+ * Top level handler for servicing FWU SMCs.
+ ******************************************************************************/
+register_t bl1_fwu_smc_handler(unsigned int smc_fid,
+			register_t x1,
+			register_t x2,
+			register_t x3,
+			register_t x4,
+			void *cookie,
+			void *handle,
+			unsigned int flags)
+{
+
+	switch (smc_fid) {
+	case FWU_SMC_IMAGE_COPY:
+		SMC_RET1(handle, bl1_fwu_image_copy(x1, x2, x3, x4, flags));
+
+	case FWU_SMC_IMAGE_AUTH:
+		SMC_RET1(handle, bl1_fwu_image_auth(x1, x2, x3, flags));
+
+	case FWU_SMC_IMAGE_EXECUTE:
+		SMC_RET1(handle, bl1_fwu_image_execute(x1, &handle, flags));
+
+	case FWU_SMC_IMAGE_RESUME:
+		SMC_RET1(handle, bl1_fwu_image_resume(x1, &handle, flags));
+
+	case FWU_SMC_SEC_IMAGE_DONE:
+		SMC_RET1(handle, bl1_fwu_sec_image_done(&handle, flags));
+
+	case FWU_SMC_IMAGE_RESET:
+		SMC_RET1(handle, bl1_fwu_image_reset(x1, flags));
+
+	case FWU_SMC_UPDATE_DONE:
+		bl1_fwu_done((void *)x1, NULL);
+		/* We should never return from bl1_fwu_done() */
+
+	default:
+		assert(0);
+		break;
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/*******************************************************************************
+ * Utility functions to keep track of the images that are loaded at any time.
+ ******************************************************************************/
+
+#ifdef PLAT_FWU_MAX_SIMULTANEOUS_IMAGES
+#define FWU_MAX_SIMULTANEOUS_IMAGES	PLAT_FWU_MAX_SIMULTANEOUS_IMAGES
+#else
+#define FWU_MAX_SIMULTANEOUS_IMAGES	10
+#endif
+
+static int bl1_fwu_loaded_ids[FWU_MAX_SIMULTANEOUS_IMAGES] = {
+	[0 ... FWU_MAX_SIMULTANEOUS_IMAGES-1] = INVALID_IMAGE_ID
+};
+
+/*
+ * Adds an image_id to the bl1_fwu_loaded_ids array.
+ * Returns 0 on success, 1 on error.
+ */
+static int bl1_fwu_add_loaded_id(int image_id)
+{
+	int i;
+
+	/* Check if the ID is already in the list */
+	for (i = 0; i < FWU_MAX_SIMULTANEOUS_IMAGES; i++) {
+		if (bl1_fwu_loaded_ids[i] == image_id)
+			return 0;
+	}
+
+	/* Find an empty slot */
+	for (i = 0; i < FWU_MAX_SIMULTANEOUS_IMAGES; i++) {
+		if (bl1_fwu_loaded_ids[i] == INVALID_IMAGE_ID) {
+			bl1_fwu_loaded_ids[i] = image_id;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Removes an image_id from the bl1_fwu_loaded_ids array.
+ * Returns 0 on success, 1 on error.
+ */
+static int bl1_fwu_remove_loaded_id(int image_id)
+{
+	int i;
+
+	/* Find the ID */
+	for (i = 0; i < FWU_MAX_SIMULTANEOUS_IMAGES; i++) {
+		if (bl1_fwu_loaded_ids[i] == image_id) {
+			bl1_fwu_loaded_ids[i] = INVALID_IMAGE_ID;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*******************************************************************************
+ * This function checks if the specified image overlaps another image already
+ * loaded. It returns 0 if there is no overlap, a negative error code otherwise.
+ ******************************************************************************/
+static int bl1_fwu_image_check_overlaps(int image_id)
+{
+	const image_desc_t *image_desc, *checked_image_desc;
+	const image_info_t *info, *checked_info;
+
+	uintptr_t image_base, image_end;
+	uintptr_t checked_image_base, checked_image_end;
+
+	checked_image_desc = bl1_plat_get_image_desc(image_id);
+	checked_info = &checked_image_desc->image_info;
+
+	/* Image being checked mustn't be empty. */
+	assert(checked_info->image_size != 0);
+
+	checked_image_base = checked_info->image_base;
+	checked_image_end = checked_image_base + checked_info->image_size - 1;
+	/* No need to check for overflows, it's done in bl1_fwu_image_copy(). */
+
+	for (int i = 0; i < FWU_MAX_SIMULTANEOUS_IMAGES; i++) {
+
+		/* Skip INVALID_IMAGE_IDs and don't check image against itself */
+		if ((bl1_fwu_loaded_ids[i] == INVALID_IMAGE_ID) ||
+				(bl1_fwu_loaded_ids[i] == image_id))
+			continue;
+
+		image_desc = bl1_plat_get_image_desc(bl1_fwu_loaded_ids[i]);
+
+		/* Only check images that are loaded or being loaded. */
+		assert (image_desc && image_desc->state != IMAGE_STATE_RESET);
+
+		info = &image_desc->image_info;
+
+		/* There cannot be overlaps with an empty image. */
+		if (info->image_size == 0)
+			continue;
+
+		image_base = info->image_base;
+		image_end = image_base + info->image_size - 1;
+		/*
+		 * Overflows cannot happen. It is checked in
+		 * bl1_fwu_image_copy() when the image goes from RESET to
+		 * COPYING or COPIED.
+		 */
+		assert (image_end > image_base);
+
+		/* Check if there are overlaps. */
+		if (!(image_end < checked_image_base ||
+		    checked_image_end < image_base)) {
+			VERBOSE("Image with ID %d overlaps existing image with ID %d\n",
+				checked_image_desc->image_id, image_desc->image_id);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function is responsible for copying secure images in AP Secure RAM.
+ ******************************************************************************/
+static int bl1_fwu_image_copy(unsigned int image_id,
+			uintptr_t image_src,
+			unsigned int block_size,
+			unsigned int image_size,
+			unsigned int flags)
+{
+	uintptr_t dest_addr;
+	unsigned int remaining;
+
+	/* Get the image descriptor. */
+	image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
+	if (!image_desc) {
+		WARN("BL1-FWU: Invalid image ID %u\n", image_id);
+		return -EPERM;
+	}
+
+	/*
+	 * The request must originate from a non-secure caller and target a
+	 * secure image. Any other scenario is invalid.
+	 */
+	if (GET_SECURITY_STATE(flags) == SECURE) {
+		WARN("BL1-FWU: Copy not allowed from secure world.\n");
+		return -EPERM;
+	}
+	if (GET_SECURITY_STATE(image_desc->ep_info.h.attr) == NON_SECURE) {
+		WARN("BL1-FWU: Copy not allowed for non-secure images.\n");
+		return -EPERM;
+	}
+
+	/* Check whether the FWU state machine is in the correct state. */
+	if ((image_desc->state != IMAGE_STATE_RESET) &&
+	    (image_desc->state != IMAGE_STATE_COPYING)) {
+		WARN("BL1-FWU: Copy not allowed at this point of the FWU"
+			" process.\n");
+		return -EPERM;
+	}
+
+	if ((!image_src) || (!block_size) ||
+	    check_uptr_overflow(image_src, block_size - 1)) {
+		WARN("BL1-FWU: Copy not allowed due to invalid image source"
+			" or block size\n");
+		return -ENOMEM;
+	}
+
+	if (image_desc->state == IMAGE_STATE_COPYING) {
+		/*
+		 * There must have been at least 1 copy operation for this image
+		 * previously.
+		 */
+		assert(image_desc->copied_size != 0);
+		/*
+		 * The image size must have been recorded in the 1st copy
+		 * operation.
+		 */
+		image_size = image_desc->image_info.image_size;
+		assert(image_size != 0);
+		assert(image_desc->copied_size < image_size);
+
+		INFO("BL1-FWU: Continuing image copy in blocks\n");
+	} else { /* image_desc->state == IMAGE_STATE_RESET */
+		INFO("BL1-FWU: Initial call to copy an image\n");
+
+		/*
+		 * image_size is relevant only for the 1st copy request; it is
+		 * ignored for subsequent calls for this image.
+		 */
+		if (!image_size) {
+			WARN("BL1-FWU: Copy not allowed due to invalid image"
+				" size\n");
+			return -ENOMEM;
+		}
+
+#if LOAD_IMAGE_V2
+		/* Check that the image size to load is within limit */
+		if (image_size > image_desc->image_info.image_max_size) {
+			WARN("BL1-FWU: Image size out of bounds\n");
+			return -ENOMEM;
+		}
+#else
+		/*
+		 * Check that the image will fit into the free trusted RAM after BL1
+		 * load.
+		 */
+		const meminfo_t *mem_layout = bl1_plat_sec_mem_layout();
+		if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
+					image_desc->image_info.image_base,
+					image_size)) {
+			WARN("BL1-FWU: Copy not allowed due to insufficient"
+			     " resources.\n");
+			return -ENOMEM;
+		}
+#endif
+
+		/* Save the given image size. */
+		image_desc->image_info.image_size = image_size;
+
+		/* Make sure the image doesn't overlap other images. */
+		if (bl1_fwu_image_check_overlaps(image_id)) {
+			image_desc->image_info.image_size = 0;
+			WARN("BL1-FWU: This image overlaps another one\n");
+			return -EPERM;
+		}
+
+		/*
+		 * copied_size must be explicitly initialized here because the
+		 * FWU code doesn't necessarily do it when it resets the state
+		 * machine.
+		 */
+		image_desc->copied_size = 0;
+	}
+
+	/*
+	 * If the given block size is more than the remaining image size
+	 * then clip the former to the latter.
+	 */
+	remaining = image_size - image_desc->copied_size;
+	if (block_size > remaining) {
+		WARN("BL1-FWU: Block size is too big, clipping it.\n");
+		block_size = remaining;
+	}
+
+	/* Make sure the source image is mapped in memory. */
+	if (bl1_plat_mem_check(image_src, block_size, flags)) {
+		WARN("BL1-FWU: Source image is not mapped.\n");
+		return -ENOMEM;
+	}
+
+	if (bl1_fwu_add_loaded_id(image_id)) {
+		WARN("BL1-FWU: Too many images loaded at the same time.\n");
+		return -ENOMEM;
+	}
+
+	/* Everything looks sane. Go ahead and copy the block of data. */
+	dest_addr = image_desc->image_info.image_base + image_desc->copied_size;
+	memcpy((void *) dest_addr, (const void *) image_src, block_size);
+	flush_dcache_range(dest_addr, block_size);
+
+	image_desc->copied_size += block_size;
+	image_desc->state = (block_size == remaining) ?
+		IMAGE_STATE_COPIED : IMAGE_STATE_COPYING;
+
+	INFO("BL1-FWU: Copy operation successful.\n");
+	return 0;
+}
+
+/*******************************************************************************
+ * This function is responsible for authenticating Normal/Secure images.
+ ******************************************************************************/
+static int bl1_fwu_image_auth(unsigned int image_id,
+			uintptr_t image_src,
+			unsigned int image_size,
+			unsigned int flags)
+{
+	int result;
+	uintptr_t base_addr;
+	unsigned int total_size;
+
+	/* Get the image descriptor. */
+	image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
+	if (!image_desc)
+		return -EPERM;
+
+	if (GET_SECURITY_STATE(flags) == SECURE) {
+		if (image_desc->state != IMAGE_STATE_RESET) {
+			WARN("BL1-FWU: Authentication from secure world "
+				"while in invalid state\n");
+			return -EPERM;
+		}
+	} else {
+		if (GET_SECURITY_STATE(image_desc->ep_info.h.attr) == SECURE) {
+			if (image_desc->state != IMAGE_STATE_COPIED) {
+				WARN("BL1-FWU: Authentication of secure image "
+					"from non-secure world while not in copied state\n");
+				return -EPERM;
+			}
+		} else {
+			if (image_desc->state != IMAGE_STATE_RESET) {
+				WARN("BL1-FWU: Authentication of non-secure image "
+					"from non-secure world while in invalid state\n");
+				return -EPERM;
+			}
+		}
+	}
+
+	if (image_desc->state == IMAGE_STATE_COPIED) {
+		/*
+		 * Image is in COPIED state.
+		 * Use the stored address and size.
+		 */
+		base_addr = image_desc->image_info.image_base;
+		total_size = image_desc->image_info.image_size;
+	} else {
+		if ((!image_src) || (!image_size) ||
+		    check_uptr_overflow(image_src, image_size - 1)) {
+			WARN("BL1-FWU: Auth not allowed due to invalid"
+				" image source/size\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * Image is in RESET state.
+		 * Check the parameters and authenticate the source image in place.
+		 */
+		if (bl1_plat_mem_check(image_src, image_size,	\
+					image_desc->ep_info.h.attr)) {
+			WARN("BL1-FWU: Authentication arguments source/size not mapped\n");
+			return -ENOMEM;
+		}
+
+		if (bl1_fwu_add_loaded_id(image_id)) {
+			WARN("BL1-FWU: Too many images loaded at the same time.\n");
+			return -ENOMEM;
+		}
+
+		base_addr = image_src;
+		total_size = image_size;
+
+		/* Update the image size in the descriptor. */
+		image_desc->image_info.image_size = total_size;
+	}
+
+	/*
+	 * Authenticate the image.
+	 */
+	INFO("BL1-FWU: Authenticating image_id:%d\n", image_id);
+	result = auth_mod_verify_img(image_id, (void *)base_addr, total_size);
+	if (result != 0) {
+		WARN("BL1-FWU: Authentication Failed err=%d\n", result);
+
+		/*
+		 * Authentication has failed.
+		 * Clear the memory if the image was copied.
+		 * This is to prevent an attack where the memory still contains
+		 * malicious code that could somehow be executed later.
+		 */
+		if (image_desc->state == IMAGE_STATE_COPIED) {
+			/* Clear the memory.*/
+			zero_normalmem((void *)base_addr, total_size);
+			flush_dcache_range(base_addr, total_size);
+
+			/* Indicate that the image can be copied again. */
+			image_desc->state = IMAGE_STATE_RESET;
+		}
+
+		/*
+		 * Even if this fails it's ok because the ID isn't in the array.
+		 * The image cannot be in RESET state here, it is checked at the
+		 * beginning of the function.
+		 */
+		bl1_fwu_remove_loaded_id(image_id);
+		return -EAUTH;
+	}
+
+	/* Indicate that image is in authenticated state. */
+	image_desc->state = IMAGE_STATE_AUTHENTICATED;
+
+	/*
+	 * Flush image_info to memory so that other
+	 * secure world images can see changes.
+	 */
+	flush_dcache_range((unsigned long)&image_desc->image_info,
+		sizeof(image_info_t));
+
+	INFO("BL1-FWU: Authentication was successful\n");
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function is responsible for executing Secure images.
+ ******************************************************************************/
+static int bl1_fwu_image_execute(unsigned int image_id,
+			void **handle,
+			unsigned int flags)
+{
+	/* Get the image descriptor. */
+	image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
+
+	/*
+	 * Execution is NOT allowed if:
+	 * image_id is invalid OR
+	 * Caller is from Secure world OR
+	 * Image is Non-Secure OR
+	 * Image is Non-Executable OR
+	 * Image is NOT in AUTHENTICATED state.
+	 */
+	if ((!image_desc) ||
+	    (GET_SECURITY_STATE(flags) == SECURE) ||
+	    (GET_SECURITY_STATE(image_desc->ep_info.h.attr) == NON_SECURE) ||
+	    (EP_GET_EXE(image_desc->ep_info.h.attr) == NON_EXECUTABLE) ||
+	    (image_desc->state != IMAGE_STATE_AUTHENTICATED)) {
+		WARN("BL1-FWU: Execution not allowed due to invalid state/args\n");
+		return -EPERM;
+	}
+
+	INFO("BL1-FWU: Executing Secure image\n");
+
+#ifdef AARCH64
+	/* Save NS-EL1 system registers. */
+	cm_el1_sysregs_context_save(NON_SECURE);
+#endif
+
+	/* Prepare the image for execution. */
+	bl1_prepare_next_image(image_id);
+
+	/* Update the secure image id. */
+	sec_exec_image_id = image_id;
+
+#ifdef AARCH64
+	*handle = cm_get_context(SECURE);
+#else
+	*handle = smc_get_ctx(SECURE);
+#endif
+	return 0;
+}
+
+/*******************************************************************************
+ * This function is responsible for resuming execution in the other security
+ * world.
+ ******************************************************************************/
+static register_t bl1_fwu_image_resume(register_t image_param,
+			void **handle,
+			unsigned int flags)
+{
+	image_desc_t *image_desc;
+	unsigned int resume_sec_state;
+	unsigned int caller_sec_state = GET_SECURITY_STATE(flags);
+
+	/* Get the image descriptor for last executed secure image id. */
+	image_desc = bl1_plat_get_image_desc(sec_exec_image_id);
+	if (caller_sec_state == NON_SECURE) {
+		if (!image_desc) {
+			WARN("BL1-FWU: Resume not allowed due to no available"
+				"secure image\n");
+			return -EPERM;
+		}
+	} else {
+		/* image_desc must be valid for secure world callers */
+		assert(image_desc);
+	}
+
+	assert(GET_SECURITY_STATE(image_desc->ep_info.h.attr) == SECURE);
+	assert(EP_GET_EXE(image_desc->ep_info.h.attr) == EXECUTABLE);
+
+	if (caller_sec_state == SECURE) {
+		assert(image_desc->state == IMAGE_STATE_EXECUTED);
+
+		/* Update the flags. */
+		image_desc->state = IMAGE_STATE_INTERRUPTED;
+		resume_sec_state = NON_SECURE;
+	} else {
+		assert(image_desc->state == IMAGE_STATE_INTERRUPTED);
+
+		/* Update the flags. */
+		image_desc->state = IMAGE_STATE_EXECUTED;
+		resume_sec_state = SECURE;
+	}
+
+	INFO("BL1-FWU: Resuming %s world context\n",
+		(resume_sec_state == SECURE) ? "secure" : "normal");
+
+#ifdef AARCH64
+	/* Save the EL1 system registers of calling world. */
+	cm_el1_sysregs_context_save(caller_sec_state);
+
+	/* Restore the EL1 system registers of resuming world. */
+	cm_el1_sysregs_context_restore(resume_sec_state);
+
+	/* Update the next context. */
+	cm_set_next_eret_context(resume_sec_state);
+
+	*handle = cm_get_context(resume_sec_state);
+#else
+	/* Update the next context. */
+	cm_set_next_context(cm_get_context(resume_sec_state));
+
+	/* Prepare the smc context for the next BL image. */
+	smc_set_next_ctx(resume_sec_state);
+
+	*handle = smc_get_ctx(resume_sec_state);
+#endif
+	return image_param;
+}
+
+/*******************************************************************************
+ * This function is responsible for resuming normal world context.
+ ******************************************************************************/
+static int bl1_fwu_sec_image_done(void **handle, unsigned int flags)
+{
+	image_desc_t *image_desc;
+
+	/* Make sure caller is from the secure world */
+	if (GET_SECURITY_STATE(flags) == NON_SECURE) {
+		WARN("BL1-FWU: Image done not allowed from normal world\n");
+		return -EPERM;
+	}
+
+	/* Get the image descriptor for last executed secure image id */
+	image_desc = bl1_plat_get_image_desc(sec_exec_image_id);
+
+	/* image_desc must correspond to a valid secure executing image */
+	assert(image_desc);
+	assert(GET_SECURITY_STATE(image_desc->ep_info.h.attr) == SECURE);
+	assert(EP_GET_EXE(image_desc->ep_info.h.attr) == EXECUTABLE);
+	assert(image_desc->state == IMAGE_STATE_EXECUTED);
+
+#if ENABLE_ASSERTIONS
+	int rc = bl1_fwu_remove_loaded_id(sec_exec_image_id);
+	assert(rc == 0);
+#else
+	bl1_fwu_remove_loaded_id(sec_exec_image_id);
+#endif
+
+	/* Update the flags. */
+	image_desc->state = IMAGE_STATE_RESET;
+	sec_exec_image_id = INVALID_IMAGE_ID;
+
+	INFO("BL1-FWU: Resuming Normal world context\n");
+#ifdef AARCH64
+	/*
+	 * Secure world is done so no need to save the context.
+	 * Just restore the Non-Secure context.
+	 */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+
+	/* Update the next context. */
+	cm_set_next_eret_context(NON_SECURE);
+
+	*handle = cm_get_context(NON_SECURE);
+#else
+	/* Update the next context. */
+	cm_set_next_context(cm_get_context(NON_SECURE));
+
+	/* Prepare the smc context for the next BL image. */
+	smc_set_next_ctx(NON_SECURE);
+
+	*handle = smc_get_ctx(NON_SECURE);
+#endif
+	return 0;
+}
+
+/*******************************************************************************
+ * This function provides the opportunity for users to perform any
+ * platform specific handling after the Firmware update is done.
+ ******************************************************************************/
+__dead2 static void bl1_fwu_done(void *client_cookie, void *reserved)
+{
+	NOTICE("BL1-FWU: *******FWU Process Completed*******\n");
+
+	/*
+	 * Call platform done function.
+	 */
+	bl1_plat_fwu_done(client_cookie, reserved);
+	assert(0);
+}
+
+/*******************************************************************************
+ * This function resets an image to IMAGE_STATE_RESET. It fails if the image is
+ * being executed.
+ ******************************************************************************/
+static int bl1_fwu_image_reset(unsigned int image_id, unsigned int flags)
+{
+	image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
+
+	if ((!image_desc) || (GET_SECURITY_STATE(flags) == SECURE)) {
+		WARN("BL1-FWU: Reset not allowed due to invalid args\n");
+		return -EPERM;
+	}
+
+	switch (image_desc->state) {
+
+	case IMAGE_STATE_RESET:
+		/* Nothing to do. */
+		break;
+
+	case IMAGE_STATE_INTERRUPTED:
+	case IMAGE_STATE_AUTHENTICATED:
+	case IMAGE_STATE_COPIED:
+	case IMAGE_STATE_COPYING:
+
+		if (bl1_fwu_remove_loaded_id(image_id)) {
+			WARN("BL1-FWU: Image reset couldn't find the image ID\n");
+			return -EPERM;
+		}
+
+		if (image_desc->copied_size) {
+			/* Clear the memory if the image is copied */
+			assert(GET_SECURITY_STATE(image_desc->ep_info.h.attr) == SECURE);
+
+			zero_normalmem((void *)image_desc->image_info.image_base,
+					image_desc->copied_size);
+			flush_dcache_range(image_desc->image_info.image_base,
+					image_desc->copied_size);
+		}
+
+		/* Reset status variables */
+		image_desc->copied_size = 0;
+		image_desc->image_info.image_size = 0;
+		image_desc->state = IMAGE_STATE_RESET;
+
+		/* Clear authentication state */
+		auth_img_flags[image_id] = 0;
+
+		break;
+
+	case IMAGE_STATE_EXECUTED:
+	default:
+		assert(0);
+	}
+
+	return 0;
+}
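
The resume handler above toggles a single secure image between EXECUTED and INTERRUPTED depending on which world issued the SMC. Below is a minimal sketch of that transition, using stand-in enums that mirror only the state and security-state names appearing in bl1_fwu.c; the helper name fwu_resume_transition() is hypothetical and purely for illustration.

#include <assert.h>

enum image_state { IMAGE_STATE_EXECUTED, IMAGE_STATE_INTERRUPTED };
enum sec_state { SECURE, NON_SECURE };

/* Returns the world execution resumes in and flips the image state. */
static enum sec_state fwu_resume_transition(enum sec_state caller,
					    enum image_state *state)
{
	if (caller == SECURE) {
		/* Secure image yields: mark it interrupted, resume normal world. */
		assert(*state == IMAGE_STATE_EXECUTED);
		*state = IMAGE_STATE_INTERRUPTED;
		return NON_SECURE;
	}

	/* Normal world resumes the interrupted secure image. */
	assert(*state == IMAGE_STATE_INTERRUPTED);
	*state = IMAGE_STATE_EXECUTED;
	return SECURE;
}
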
diff --git a/bl1/bl1_main.c b/bl1/bl1_main.c
index 491fd5c..821b6a3 100644
--- a/bl1/bl1_main.c
+++ b/bl1/bl1_main.c
@@ -1,69 +1,32 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <auth.h>
+#include <auth_mod.h>
+#include <bl1.h>
 #include <bl_common.h>
+#include <console.h>
 #include <debug.h>
+#include <errata_report.h>
 #include <platform.h>
 #include <platform_def.h>
+#include <smcc_helpers.h>
+#include <utils.h>
+#include <uuid.h>
 #include "bl1_private.h"
 
-/*******************************************************************************
- * Runs BL2 from the given entry point. It results in dropping the
- * exception level
- ******************************************************************************/
-static void __dead2 bl1_run_bl2(entry_point_info_t *bl2_ep)
-{
-	bl1_arch_next_el_setup();
+/* BL1 Service UUID */
+DEFINE_SVC_UUID(bl1_svc_uid,
+	0xfd3967d4, 0x72cb, 0x4d9a, 0xb5, 0x75,
+	0x67, 0x15, 0xd6, 0xf4, 0xbb, 0x4a);
 
-	/* Tell next EL what we want done */
-	bl2_ep->args.arg0 = RUN_IMAGE;
 
-	if (GET_SECURITY_STATE(bl2_ep->h.attr) == NON_SECURE)
-		change_security_state(GET_SECURITY_STATE(bl2_ep->h.attr));
-
-	write_spsr_el3(bl2_ep->spsr);
-	write_elr_el3(bl2_ep->pc);
-
-	eret(bl2_ep->args.arg0,
-		bl2_ep->args.arg1,
-		bl2_ep->args.arg2,
-		bl2_ep->args.arg3,
-		bl2_ep->args.arg4,
-		bl2_ep->args.arg5,
-		bl2_ep->args.arg6,
-		bl2_ep->args.arg7);
-}
+static void bl1_load_bl2(void);
 
 /*******************************************************************************
  * The next function has a weak definition. Platform specific code can override
@@ -79,125 +42,158 @@
 void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
 			     meminfo_t *bl2_mem_layout)
 {
-	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
 
 	assert(bl1_mem_layout != NULL);
 	assert(bl2_mem_layout != NULL);
 
+#if LOAD_IMAGE_V2
+	/*
+	 * Remove BL1 RW data from the scope of memory visible to BL2.
+	 * This is assuming BL1 RW data is at the top of bl1_mem_layout.
+	 */
+	assert(BL1_RW_BASE > bl1_mem_layout->total_base);
+	bl2_mem_layout->total_base = bl1_mem_layout->total_base;
+	bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
+#else
 	/* Check that BL1's memory is lying outside of the free memory */
 	assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
-	       (BL1_RAM_BASE >= bl1_mem_layout->free_base + bl1_mem_layout->free_size));
+	       (BL1_RAM_BASE >= bl1_mem_layout->free_base +
+				bl1_mem_layout->free_size));
 
 	/* Remove BL1 RW data from the scope of memory visible to BL2 */
 	*bl2_mem_layout = *bl1_mem_layout;
 	reserve_mem(&bl2_mem_layout->total_base,
 		    &bl2_mem_layout->total_size,
 		    BL1_RAM_BASE,
-		    bl1_size);
+		    BL1_RAM_LIMIT - BL1_RAM_BASE);
+#endif /* LOAD_IMAGE_V2 */
 
 	flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
 }
 
 /*******************************************************************************
  * Function to perform late architectural and platform specific initialization.
- * It also locates and loads the BL2 raw binary image in the trusted DRAM. Only
- * called by the primary cpu after a cold boot.
- * TODO: Add support for alternative image load mechanism e.g using virtio/elf
- * loader etc.
- ******************************************************************************/
+ * It also queries the platform to load and run next BL image. Only called
+ * by the primary cpu after a cold boot.
+ ******************************************************************************/
 void bl1_main(void)
 {
+	unsigned int image_id;
+
 	/* Announce our arrival */
 	NOTICE(FIRMWARE_WELCOME_STR);
 	NOTICE("BL1: %s\n", version_string);
 	NOTICE("BL1: %s\n", build_message);
 
-	INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT);
+	INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE,
+					(void *)BL1_RAM_LIMIT);
 
-#if DEBUG
-	unsigned long sctlr_el3 = read_sctlr_el3();
-#endif
-	image_info_t bl2_image_info = { {0} };
-	entry_point_info_t bl2_ep = { {0} };
-	meminfo_t *bl1_tzram_layout;
-	meminfo_t *bl2_tzram_layout = 0x0;
-	int err;
+	print_errata_status();
 
+#if ENABLE_ASSERTIONS
+	u_register_t val;
 	/*
 	 * Ensure that MMU/Caches and coherency are turned on
 	 */
-	assert(sctlr_el3 | SCTLR_M_BIT);
-	assert(sctlr_el3 | SCTLR_C_BIT);
-	assert(sctlr_el3 | SCTLR_I_BIT);
+#ifdef AARCH32
+	val = read_sctlr();
+#else
+	val = read_sctlr_el3();
+#endif
+	assert(val & SCTLR_M_BIT);
+	assert(val & SCTLR_C_BIT);
+	assert(val & SCTLR_I_BIT);
+	/*
+	 * Check that Cache Writeback Granule (CWG) in CTR_EL0 matches the
+	 * provided platform value
+	 */
+	val = (read_ctr_el0() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+	/*
+	 * If CWG is zero, then no CWG information is available but we can
+	 * at least check the platform value is less than the architectural
+	 * maximum.
+	 */
+	if (val != 0)
+		assert(CACHE_WRITEBACK_GRANULE == SIZE_FROM_LOG2_WORDS(val));
+	else
+		assert(CACHE_WRITEBACK_GRANULE <= MAX_CACHE_LINE_SIZE);
+#endif /* ENABLE_ASSERTIONS */
 
 	/* Perform remaining generic architectural setup from EL3 */
 	bl1_arch_setup();
 
+#if TRUSTED_BOARD_BOOT
+	/* Initialize authentication module */
+	auth_mod_init();
+#endif /* TRUSTED_BOARD_BOOT */
+
 	/* Perform platform setup in BL1. */
 	bl1_platform_setup();
 
-	SET_PARAM_HEAD(&bl2_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
-	SET_PARAM_HEAD(&bl2_ep, PARAM_EP, VERSION_1, 0);
+	/* Get the image id of next image to load and run. */
+	image_id = bl1_plat_get_next_image_id();
+
+	/*
+	 * We currently interpret any image id other than
+	 * BL2_IMAGE_ID as the start of firmware update.
+	 */
+	if (image_id == BL2_IMAGE_ID)
+		bl1_load_bl2();
+	else
+		NOTICE("BL1-FWU: *******FWU Process Started*******\n");
+
+	bl1_prepare_next_image(image_id);
+
+	console_flush();
+}
+
+/*******************************************************************************
+ * This function locates and loads the BL2 raw binary image in the trusted SRAM.
+ * Called by the primary cpu after a cold boot.
+ * TODO: Add support for alternative image load mechanism e.g using virtio/elf
+ * loader etc.
+ ******************************************************************************/
+void bl1_load_bl2(void)
+{
+	image_desc_t *image_desc;
+	image_info_t *image_info;
+	entry_point_info_t *ep_info;
+	meminfo_t *bl1_tzram_layout;
+	meminfo_t *bl2_tzram_layout;
+	int err;
+
+	/* Get the image descriptor */
+	image_desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);
+	assert(image_desc);
+
+	/* Get the image info */
+	image_info = &image_desc->image_info;
+
+	/* Get the entry point info */
+	ep_info = &image_desc->ep_info;
 
 	/* Find out how much free trusted ram remains after BL1 load */
 	bl1_tzram_layout = bl1_plat_sec_mem_layout();
 
-#if TRUSTED_BOARD_BOOT
-	/* Initialize authentication module */
-	auth_init();
+	INFO("BL1: Loading BL2\n");
 
-	/*
-	 * Load the BL2 certificate into the BL2 region. This region will be
-	 * overwritten by the image, so the authentication module is responsible
-	 * for storing the relevant data from the certificate (keys, hashes,
-	 * etc.) so it can be used later.
-	 */
-	err = load_image(bl1_tzram_layout,
-			 BL2_CERT_NAME,
-			 BL2_BASE,
-			 &bl2_image_info,
-			 NULL);
-	if (err) {
-		ERROR("Failed to load BL2 certificate.\n");
-		panic();
-	}
-
-	err = auth_verify_obj(AUTH_BL2_IMG_CERT, bl2_image_info.image_base,
-			bl2_image_info.image_size);
-	if (err) {
-		ERROR("Failed to validate BL2 certificate.\n");
-		panic();
-	}
-#endif /* TRUSTED_BOARD_BOOT */
-
+#if LOAD_IMAGE_V2
+	err = load_auth_image(BL2_IMAGE_ID, image_info);
+#else
 	/* Load the BL2 image */
-	err = load_image(bl1_tzram_layout,
-			 BL2_IMAGE_NAME,
-			 BL2_BASE,
-			 &bl2_image_info,
-			 &bl2_ep);
+	err = load_auth_image(bl1_tzram_layout,
+			 BL2_IMAGE_ID,
+			 image_info->image_base,
+			 image_info,
+			 ep_info);
+
+#endif /* LOAD_IMAGE_V2 */
+
 	if (err) {
-		/*
-		 * TODO: print failure to load BL2 but also add a tzwdog timer
-		 * which will reset the system eventually.
-		 */
 		ERROR("Failed to load BL2 firmware.\n");
-		panic();
+		plat_error_handler(err);
 	}
 
-#if TRUSTED_BOARD_BOOT
-	err = auth_verify_obj(AUTH_BL2_IMG, bl2_image_info.image_base,
-				bl2_image_info.image_size);
-	if (err) {
-		ERROR("Failed to validate BL2 image.\n");
-		panic();
-	}
-
-	/* After working with data, invalidate the data cache */
-	inv_dcache_range(bl2_image_info.image_base,
-			(size_t)bl2_image_info.image_size);
-#endif /* TRUSTED_BOARD_BOOT */
-
 	/*
 	 * Create a new layout of memory for BL2 as seen by BL1 i.e.
 	 * tell it the amount of total and free memory available.
@@ -205,36 +201,98 @@
 	 * to BL2. BL2 will read the memory layout before using its
 	 * memory for other purposes.
 	 */
+#if LOAD_IMAGE_V2
+	bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->total_base;
+#else
 	bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base;
+#endif /* LOAD_IMAGE_V2 */
+
 	bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout);
 
-	bl1_plat_set_bl2_ep_info(&bl2_image_info, &bl2_ep);
-	bl2_ep.args.arg1 = (unsigned long)bl2_tzram_layout;
+	ep_info->args.arg1 = (uintptr_t)bl2_tzram_layout;
 	NOTICE("BL1: Booting BL2\n");
-	INFO("BL1: BL2 address = 0x%llx\n",
-		(unsigned long long) bl2_ep.pc);
-	INFO("BL1: BL2 spsr = 0x%x\n", bl2_ep.spsr);
-	VERBOSE("BL1: BL2 memory layout address = 0x%llx\n",
-		(unsigned long long) bl2_tzram_layout);
-
-	bl1_run_bl2(&bl2_ep);
-
-	return;
+	VERBOSE("BL1: BL2 memory layout address = %p\n",
+		(void *) bl2_tzram_layout);
 }
 
 /*******************************************************************************
- * Temporary function to print the fact that BL2 has done its job and BL31 is
- * about to be loaded. This is needed as long as printfs cannot be used
+ * Function called just before handing over to the next BL to inform the user
+ * about the boot progress. In debug mode, also print details about the BL
+ * image's execution context.
  ******************************************************************************/
-void display_boot_progress(entry_point_info_t *bl31_ep_info)
+void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
 {
-	NOTICE("BL1: Booting BL3-1\n");
-	INFO("BL1: BL3-1 address = 0x%llx\n",
-		(unsigned long long)bl31_ep_info->pc);
-	INFO("BL1: BL3-1 spsr = 0x%llx\n",
-		(unsigned long long)bl31_ep_info->spsr);
-	INFO("BL1: BL3-1 params address = 0x%llx\n",
-		(unsigned long long)bl31_ep_info->args.arg0);
-	INFO("BL1: BL3-1 plat params address = 0x%llx\n",
-		(unsigned long long)bl31_ep_info->args.arg1);
+#ifdef AARCH32
+	NOTICE("BL1: Booting BL32\n");
+#else
+	NOTICE("BL1: Booting BL31\n");
+#endif /* AARCH32 */
+	print_entry_point_info(bl_ep_info);
+}
+
+#if SPIN_ON_BL1_EXIT
+void print_debug_loop_message(void)
+{
+	NOTICE("BL1: Debug loop, spinning forever\n");
+	NOTICE("BL1: Please connect the debugger to continue\n");
+}
+#endif
+
+/*******************************************************************************
+ * Top level handler for servicing BL1 SMCs.
+ ******************************************************************************/
+register_t bl1_smc_handler(unsigned int smc_fid,
+	register_t x1,
+	register_t x2,
+	register_t x3,
+	register_t x4,
+	void *cookie,
+	void *handle,
+	unsigned int flags)
+{
+
+#if TRUSTED_BOARD_BOOT
+	/*
+	 * Dispatch FWU calls to FWU SMC handler and return its return
+	 * value
+	 */
+	if (is_fwu_fid(smc_fid)) {
+		return bl1_fwu_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+			handle, flags);
+	}
+#endif
+
+	switch (smc_fid) {
+	case BL1_SMC_CALL_COUNT:
+		SMC_RET1(handle, BL1_NUM_SMC_CALLS);
+
+	case BL1_SMC_UID:
+		SMC_UUID_RET(handle, bl1_svc_uid);
+
+	case BL1_SMC_VERSION:
+		SMC_RET1(handle, BL1_SMC_MAJOR_VER | BL1_SMC_MINOR_VER);
+
+	default:
+		break;
+	}
+
+	WARN("Unimplemented BL1 SMC Call: 0x%x\n", smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/*******************************************************************************
+ * BL1 SMC wrapper.  This function is only used in AArch32 mode to ensure ABI
+ * compliance when invoking bl1_smc_handler.
+ ******************************************************************************/
+register_t bl1_smc_wrapper(uint32_t smc_fid,
+	void *cookie,
+	void *handle,
+	unsigned int flags)
+{
+	register_t x1, x2, x3, x4;
+
+	assert(handle);
+
+	get_smc_params_from_ctx(handle, x1, x2, x3, x4);
+	return bl1_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
 }
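
bl1_smc_handler() answers BL1_SMC_VERSION with BL1_SMC_MAJOR_VER | BL1_SMC_MINOR_VER packed into a single register. A small caller-side decode sketch follows, assuming the conventional 16-bit major / 16-bit minor split; the shift and mask values are assumptions for the sketch, not taken from the headers in this diff.

#include <stdint.h>
#include <stdio.h>

static void print_bl1_smc_version(uint32_t version_word)
{
	unsigned int major = (version_word >> 16) & 0xffffu;	/* assumed packing */
	unsigned int minor = version_word & 0xffffu;		/* assumed packing */

	printf("BL1 SMC interface v%u.%u\n", major, minor);
}
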
diff --git a/bl1/bl1_private.h b/bl1/bl1_private.h
index 0a8fc45..6ac3b8c 100644
--- a/bl1/bl1_private.h
+++ b/bl1/bl1_private.h
@@ -1,44 +1,25 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __BL1_PRIVATE_H__
 #define __BL1_PRIVATE_H__
 
+#include <types.h>
+
 /*******************************************************************************
  * Declarations of linker defined symbols which will tell us where BL1 lives
- * in Trusted RAM
+ * in Trusted ROM and RAM
  ******************************************************************************/
-extern uint64_t __BL1_RAM_START__;
-extern uint64_t __BL1_RAM_END__;
-#define BL1_RAM_BASE (uint64_t)(&__BL1_RAM_START__)
-#define BL1_RAM_LIMIT (uint64_t)(&__BL1_RAM_END__)
+extern uintptr_t __BL1_ROM_END__;
+#define BL1_ROM_END (uintptr_t)(&__BL1_ROM_END__)
+
+extern uintptr_t __BL1_RAM_START__;
+extern uintptr_t __BL1_RAM_END__;
+#define BL1_RAM_BASE (uintptr_t)(&__BL1_RAM_START__)
+#define BL1_RAM_LIMIT (uintptr_t)(&__BL1_RAM_END__)
 
 /******************************************
  * Function prototypes
@@ -46,4 +27,14 @@
 void bl1_arch_setup(void);
 void bl1_arch_next_el_setup(void);
 
+void bl1_prepare_next_image(unsigned int image_id);
+
+register_t bl1_fwu_smc_handler(unsigned int smc_fid,
+		register_t x1,
+		register_t x2,
+		register_t x3,
+		register_t x4,
+		void *cookie,
+		void *handle,
+		unsigned int flags);
 #endif /* __BL1_PRIVATE_H__ */
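
BL1_ROM_END, BL1_RAM_BASE and BL1_RAM_LIMIT take the address of the linker-defined symbols rather than reading their values; the declared C type of the extern is a dummy, since only the symbol's address is meaningful. The same pattern in isolation, with a hypothetical symbol name:

#include <stdint.h>

/* Hypothetical symbol exported by a linker script, e.g. "__FOO_START__ = .;" */
extern uintptr_t __FOO_START__;

/* The symbol's *address* is the value of interest, not its contents. */
#define FOO_BASE ((uintptr_t)&__FOO_START__)
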
diff --git a/bl1/tbbr/tbbr_img_desc.c b/bl1/tbbr/tbbr_img_desc.c
new file mode 100644
index 0000000..f2ed1a1
--- /dev/null
+++ b/bl1/tbbr/tbbr_img_desc.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl1.h>
+#include <bl_common.h>
+#include <platform_def.h>
+
+image_desc_t bl1_tbbr_image_descs[] = {
+    {
+	    .image_id = FWU_CERT_ID,
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_1, image_info_t, 0),
+	    .image_info.image_base = BL2_BASE,
+#if LOAD_IMAGE_V2
+	    .image_info.image_max_size = BL2_LIMIT - BL2_BASE,
+#endif
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_1, entry_point_info_t, SECURE),
+    },
+#if NS_BL1U_BASE
+    {
+	    .image_id = NS_BL1U_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_1, entry_point_info_t, NON_SECURE | EXECUTABLE),
+	    .ep_info.pc = NS_BL1U_BASE,
+    },
+#endif
+#if SCP_BL2U_BASE
+    {
+	    .image_id = SCP_BL2U_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_1, image_info_t, 0),
+	    .image_info.image_base = SCP_BL2U_BASE,
+#if LOAD_IMAGE_V2
+	    .image_info.image_max_size = SCP_BL2U_LIMIT - SCP_BL2U_BASE,
+#endif
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_1, entry_point_info_t, SECURE),
+    },
+#endif
+#if BL2U_BASE
+    {
+	    .image_id = BL2U_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_1, image_info_t, 0),
+	    .image_info.image_base = BL2U_BASE,
+#if LOAD_IMAGE_V2
+	    .image_info.image_max_size = BL2U_LIMIT - BL2U_BASE,
+#endif
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),
+	    .ep_info.pc = BL2U_BASE,
+    },
+#endif
+#if NS_BL2U_BASE
+    {
+	    .image_id = NS_BL2U_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_1, entry_point_info_t, NON_SECURE),
+    },
+#endif
+	    BL2_IMAGE_DESC,
+
+    {
+	    .image_id = INVALID_IMAGE_ID,
+    }
+};
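
The descriptor table above ends with an INVALID_IMAGE_ID sentinel, which is what lets a platform's bl1_plat_get_image_desc() walk it without a separate length. The sketch below shows such a lookup under the assumption that the same headers as the file above provide image_desc_t and INVALID_IMAGE_ID; it is an illustration, not the implementation shipped in plat/common.

#include <bl1.h>
#include <bl_common.h>
#include <stddef.h>

/* Declared here in case the header does not already export the table. */
extern image_desc_t bl1_tbbr_image_descs[];

image_desc_t *tbbr_lookup_image_desc(unsigned int image_id)
{
	unsigned int i = 0;

	/* Stop at the sentinel entry that closes the table. */
	while (bl1_tbbr_image_descs[i].image_id != INVALID_IMAGE_ID) {
		if (bl1_tbbr_image_descs[i].image_id == image_id)
			return &bl1_tbbr_image_descs[i];
		i++;
	}

	return NULL;
}
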
diff --git a/bl2/aarch32/bl2_arch_setup.c b/bl2/aarch32/bl2_arch_setup.c
new file mode 100644
index 0000000..db8a068
--- /dev/null
+++ b/bl2/aarch32/bl2_arch_setup.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/*******************************************************************************
+ * Placeholder function to perform any Secure SVC specific architectural
+ * setup. At the moment there is nothing to do.
+ ******************************************************************************/
+void bl2_arch_setup(void)
+{
+
+}
diff --git a/bl2/aarch32/bl2_entrypoint.S b/bl2/aarch32/bl2_entrypoint.S
new file mode 100644
index 0000000..e6fa5b9
--- /dev/null
+++ b/bl2/aarch32/bl2_entrypoint.S
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+
+
+	.globl	bl2_vector_table
+	.globl	bl2_entrypoint
+
+
+vector_base bl2_vector_table
+	b	bl2_entrypoint
+	b	report_exception	/* Undef */
+	b	report_exception	/* SVC call */
+	b	report_exception	/* Prefetch abort */
+	b	report_exception	/* Data abort */
+	b	report_exception	/* Reserved */
+	b	report_exception	/* IRQ */
+	b	report_exception	/* FIQ */
+
+
+func bl2_entrypoint
+	/*---------------------------------------------
+	 * Save from r1 the extents of the trusted ram
+	 * available to BL2 for future use.
+	 * r0 is not currently used.
+	 * ---------------------------------------------
+	 */
+	mov	r11, r1
+
+	/* ---------------------------------------------
+	 * Set the exception vector to something sane.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =bl2_vector_table
+	stcopr	r0, VBAR
+	isb
+
+	/* -----------------------------------------------------
+	 * Enable the instruction cache
+	 * -----------------------------------------------------
+	 */
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_I_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	/* ---------------------------------------------
+	 * Since BL2 executes after BL1, it is assumed
+	 * here that BL1 has already done the
+	 * necessary register initializations.
+	 * ---------------------------------------------
+	 */
+
+	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL2
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =__RW_START__
+	ldr	r1, =__RW_END__
+	sub	r1, r1, r0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Zero out NOBITS sections. There are 2 of them:
+	 *   - the .bss section;
+	 *   - the coherent memory section.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_SIZE__
+	bl	zeromem
+
+#if USE_COHERENT_MEM
+	ldr	r0, =__COHERENT_RAM_START__
+	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	bl	zeromem
+#endif
+
+	/* --------------------------------------------
+	 * Allocate a stack whose memory will be marked
+	 * as Normal-IS-WBWA when the MMU is enabled.
+	 * There is no risk of reading stale stack
+	 * memory after enabling the MMU as only the
+	 * primary cpu is running at the moment.
+	 * --------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+	/* ---------------------------------------------
+	 * Initialize the stack protector canary before
+	 * any C code is called.
+	 * ---------------------------------------------
+	 */
+#if STACK_PROTECTOR_ENABLED
+	bl	update_stack_protector_canary
+#endif
+
+	/* ---------------------------------------------
+	 * Perform early platform setup & platform
+	 * specific early arch. setup e.g. mmu setup
+	 * ---------------------------------------------
+	 */
+	mov	r0, r11
+	bl	bl2_early_platform_setup
+	bl	bl2_plat_arch_setup
+
+	/* ---------------------------------------------
+	 * Jump to main function.
+	 * ---------------------------------------------
+	 */
+	bl	bl2_main
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+
+endfunc bl2_entrypoint
diff --git a/bl2/aarch64/bl2_arch_setup.c b/bl2/aarch64/bl2_arch_setup.c
index 0eafd15..038a075 100644
--- a/bl2/aarch64/bl2_arch_setup.c
+++ b/bl2/aarch64/bl2_arch_setup.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index 499dc37..3ab8b5a 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -39,13 +15,12 @@
 
 func bl2_entrypoint
 	/*---------------------------------------------
-	 * Store the extents of the tzram available to
-	 * BL2 for future use. Use the opcode param to
-	 * allow implement other functions if needed.
+	 * Save from x1 the extents of the tzram
+	 * available to BL2 for future use.
+	 * x0 is not currently used.
 	 * ---------------------------------------------
 	 */
-	mov	x20, x0
-	mov	x21, x1
+	mov	x20, x1
 
 	/* ---------------------------------------------
 	 * Set the exception vector to something sane.
@@ -74,12 +49,18 @@
 	isb
 
 	/* ---------------------------------------------
-	 * Check the opcodes out of paranoia.
+	 * Invalidate the RW memory used by the BL2
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
 	 * ---------------------------------------------
 	 */
-	mov	x0, #RUN_IMAGE
-	cmp	x0, x20
-	b.ne	_panic
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
 
 	/* ---------------------------------------------
 	 * Zero out NOBITS sections. There are 2 of them:
@@ -89,12 +70,12 @@
 	 */
 	ldr	x0, =__BSS_START__
 	ldr	x1, =__BSS_SIZE__
-	bl	zeromem16
+	bl	zeromem
 
 #if USE_COHERENT_MEM
 	ldr	x0, =__COHERENT_RAM_START__
 	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem16
+	bl	zeromem
 #endif
 
 	/* --------------------------------------------
@@ -105,15 +86,23 @@
 	 * primary cpu is running at the moment.
 	 * --------------------------------------------
 	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
+	bl	plat_set_my_stack
+
+	/* ---------------------------------------------
+	 * Initialize the stack protector canary before
+	 * any C code is called.
+	 * ---------------------------------------------
+	 */
+#if STACK_PROTECTOR_ENABLED
+	bl	update_stack_protector_canary
+#endif
 
 	/* ---------------------------------------------
 	 * Perform early platform setup & platform
 	 * specific early arch. setup e.g. mmu setup
 	 * ---------------------------------------------
 	 */
-	mov	x0, x21
+	mov	x0, x20
 	bl	bl2_early_platform_setup
 	bl	bl2_plat_arch_setup
 
@@ -122,5 +111,11 @@
 	 * ---------------------------------------------
 	 */
 	bl	bl2_main
-_panic:
-	b	_panic
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+
+endfunc bl2_entrypoint
diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S
index 9933339..f3ab706 100644
--- a/bl2/bl2.ld.S
+++ b/bl2/bl2.ld.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <platform_def.h>
@@ -45,11 +21,42 @@
     ASSERT(. == ALIGN(4096),
            "BL2_BASE address is not aligned on a page boundary.")
 
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *bl2_entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+     } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PARSER_LIB_DESCS_START__ = .;
+        KEEP(*(.img_parser_lib_descs))
+        __PARSER_LIB_DESCS_END__ = .;
+
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
     ro . : {
         __RO_START__ = .;
         *bl2_entrypoint.o(.text*)
         *(.text*)
         *(.rodata*)
+
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PARSER_LIB_DESCS_START__ = .;
+        KEEP(*(.img_parser_lib_descs))
+        __PARSER_LIB_DESCS_END__ = .;
+
         *(.vectors)
         __RO_END_UNALIGNED__ = .;
         /*
@@ -60,7 +67,19 @@
         . = NEXT(4096);
         __RO_END__ = .;
     } >RAM
+#endif
 
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    /*
+     * .data must be placed at a lower address than the stacks if the stack
+     * protector is enabled. Alternatively, the .data.stack_protector_canary
+     * section can be placed independently of the main .data section.
+     */
     .data . : {
         __DATA_START__ = .;
         *(.data*)
@@ -75,7 +94,8 @@
 
     /*
      * The .bss section gets initialised to 0 at runtime.
-     * Its base address must be 16-byte aligned.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
      */
     .bss : ALIGN(16) {
         __BSS_START__ = .;
@@ -114,6 +134,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
     __BL2_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 1e82078..32e3284 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,37 +1,23 @@
 #
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 BL2_SOURCES		+=	bl2/bl2_main.c				\
-				bl2/aarch64/bl2_entrypoint.S		\
-				bl2/aarch64/bl2_arch_setup.c		\
-				common/aarch64/early_exceptions.S	\
-				lib/locks/exclusive/spinlock.S
+				bl2/${ARCH}/bl2_entrypoint.S		\
+				bl2/${ARCH}/bl2_arch_setup.c		\
+				lib/locks/exclusive/${ARCH}/spinlock.S	\
+				plat/common/${ARCH}/platform_up_stack.S
+
+ifeq (${ARCH},aarch64)
+BL2_SOURCES		+=	common/aarch64/early_exceptions.S
+endif
+
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES		+=	bl2/bl2_image_load_v2.c
+else
+BL2_SOURCES		+=	bl2/bl2_image_load.c
+endif
 
 BL2_LINKERFILE		:=	bl2/bl2.ld.S
diff --git a/bl2/bl2_image_load.c b/bl2/bl2_image_load.c
new file mode 100644
index 0000000..d5d28a5
--- /dev/null
+++ b/bl2/bl2_image_load.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*
+ * Check for platforms that use obsolete image terminology
+ */
+#ifdef BL30_BASE
+# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
+#endif
+
+/*******************************************************************************
+ * Load the SCP_BL2 image if there's one.
+ * If a platform does not want to attempt to load SCP_BL2 image it must leave
+ * SCP_BL2_BASE undefined.
+ * Return 0 on success or if there's no SCP_BL2 image to load, a negative error
+ * code otherwise.
+ ******************************************************************************/
+static int load_scp_bl2(void)
+{
+	int e = 0;
+#ifdef SCP_BL2_BASE
+	meminfo_t scp_bl2_mem_info;
+	image_info_t scp_bl2_image_info;
+
+	/*
+	 * It is up to the platform to specify where SCP_BL2 should be loaded if
+	 * it exists. It could create space in the secure sram or point to a
+	 * completely different memory.
+	 *
+	 * The entry point information is not relevant in this case as the AP
+	 * won't execute the SCP_BL2 image.
+	 */
+	INFO("BL2: Loading SCP_BL2\n");
+	bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
+	scp_bl2_image_info.h.version = VERSION_1;
+	e = load_auth_image(&scp_bl2_mem_info,
+			    SCP_BL2_IMAGE_ID,
+			    SCP_BL2_BASE,
+			    &scp_bl2_image_info,
+			    NULL);
+
+	if (e == 0) {
+		/* The subsequent handling of SCP_BL2 is platform specific */
+		e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
+		if (e) {
+			ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
+		}
+	}
+#endif /* SCP_BL2_BASE */
+
+	return e;
+}
+
+#ifndef EL3_PAYLOAD_BASE
+/*******************************************************************************
+ * Load the BL31 image.
+ * The bl2_to_bl31_params and bl31_ep_info params will be updated with the
+ * relevant BL31 information.
+ * Return 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+static int load_bl31(bl31_params_t *bl2_to_bl31_params,
+		     entry_point_info_t *bl31_ep_info)
+{
+	meminfo_t *bl2_tzram_layout;
+	int e;
+
+	INFO("BL2: Loading BL31\n");
+	assert(bl2_to_bl31_params != NULL);
+	assert(bl31_ep_info != NULL);
+
+	/* Find out how much free trusted ram remains after BL2 load */
+	bl2_tzram_layout = bl2_plat_sec_mem_layout();
+
+	/* Set the X0 parameter to BL31 */
+	bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
+
+	/* Load the BL31 image */
+	e = load_auth_image(bl2_tzram_layout,
+			    BL31_IMAGE_ID,
+			    BL31_BASE,
+			    bl2_to_bl31_params->bl31_image_info,
+			    bl31_ep_info);
+
+	if (e == 0) {
+		bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
+					  bl31_ep_info);
+	}
+
+	return e;
+}
+
+/*******************************************************************************
+ * Load the BL32 image if there's one.
+ * The bl2_to_bl31_params param will be updated with the relevant BL32
+ * information.
+ * If a platform does not want to attempt to load BL32 image it must leave
+ * BL32_BASE undefined.
+ * Return 0 on success or if there's no BL32 image to load, a negative error
+ * code otherwise.
+ ******************************************************************************/
+static int load_bl32(bl31_params_t *bl2_to_bl31_params)
+{
+	int e = 0;
+#ifdef BL32_BASE
+	meminfo_t bl32_mem_info;
+
+	INFO("BL2: Loading BL32\n");
+	assert(bl2_to_bl31_params != NULL);
+
+	/*
+	 * It is up to the platform to specify where BL32 should be loaded if
+	 * it exists. It could create space in the secure sram or point to a
+	 * completely different memory.
+	 */
+	bl2_plat_get_bl32_meminfo(&bl32_mem_info);
+	e = load_auth_image(&bl32_mem_info,
+			    BL32_IMAGE_ID,
+			    BL32_BASE,
+			    bl2_to_bl31_params->bl32_image_info,
+			    bl2_to_bl31_params->bl32_ep_info);
+
+	if (e == 0) {
+		bl2_plat_set_bl32_ep_info(
+			bl2_to_bl31_params->bl32_image_info,
+			bl2_to_bl31_params->bl32_ep_info);
+	}
+#endif /* BL32_BASE */
+
+	return e;
+}
+
+#ifndef PRELOADED_BL33_BASE
+/*******************************************************************************
+ * Load the BL33 image.
+ * The bl2_to_bl31_params param will be updated with the relevant BL33
+ * information.
+ * Return 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+static int load_bl33(bl31_params_t *bl2_to_bl31_params)
+{
+	meminfo_t bl33_mem_info;
+	int e;
+
+	INFO("BL2: Loading BL33\n");
+	assert(bl2_to_bl31_params != NULL);
+
+	bl2_plat_get_bl33_meminfo(&bl33_mem_info);
+
+	/* Load the BL33 image in non-secure memory provided by the platform */
+	e = load_auth_image(&bl33_mem_info,
+			    BL33_IMAGE_ID,
+			    plat_get_ns_image_entrypoint(),
+			    bl2_to_bl31_params->bl33_image_info,
+			    bl2_to_bl31_params->bl33_ep_info);
+
+	if (e == 0) {
+		bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
+					  bl2_to_bl31_params->bl33_ep_info);
+	}
+
+	return e;
+}
+#endif /* PRELOADED_BL33_BASE */
+
+#endif /* EL3_PAYLOAD_BASE */
+
+/*******************************************************************************
+ * This function loads SCP_BL2/BL3x images and returns the ep_info for
+ * the next executable image.
+ ******************************************************************************/
+entry_point_info_t *bl2_load_images(void)
+{
+	bl31_params_t *bl2_to_bl31_params;
+	entry_point_info_t *bl31_ep_info;
+	int e;
+
+	e = load_scp_bl2();
+	if (e) {
+		ERROR("Failed to load SCP_BL2 (%i)\n", e);
+		plat_error_handler(e);
+	}
+
+	/* Perform platform setup in BL2 after loading SCP_BL2 */
+	bl2_platform_setup();
+
+	/*
+	 * Get a pointer to the memory the platform has set aside to pass
+	 * information to BL31.
+	 */
+	bl2_to_bl31_params = bl2_plat_get_bl31_params();
+	bl31_ep_info = bl2_plat_get_bl31_ep_info();
+
+#ifdef EL3_PAYLOAD_BASE
+	/*
+	 * In the case of an EL3 payload, we don't need to load any further
+	 * images. Just update the BL31 entrypoint info structure to make BL1
+	 * jump to the EL3 payload.
+	 * The pointer to the memory the platform has set aside to pass
+	 * information to BL31 in the normal boot flow is reused here, even
+	 * though only a fraction of the information contained in the
+	 * bl31_params_t structure makes sense in the context of EL3 payloads.
+	 * This will be refined in the future.
+	 */
+	INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
+	bl31_ep_info->pc = EL3_PAYLOAD_BASE;
+	bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
+	bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
+#else
+	e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
+	if (e) {
+		ERROR("Failed to load BL31 (%i)\n", e);
+		plat_error_handler(e);
+	}
+
+	e = load_bl32(bl2_to_bl31_params);
+	if (e) {
+		if (e == -EAUTH) {
+			ERROR("Failed to authenticate BL32\n");
+			plat_error_handler(e);
+		} else {
+			WARN("Failed to load BL32 (%i)\n", e);
+		}
+	}
+
+#ifdef PRELOADED_BL33_BASE
+	/*
+	 * In this case, don't load the BL33 image as it's already loaded in
+	 * memory. Update BL33 entrypoint information.
+	 */
+	INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
+	bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
+	bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
+#else
+	e = load_bl33(bl2_to_bl31_params);
+	if (e) {
+		ERROR("Failed to load BL33 (%i)\n", e);
+		plat_error_handler(e);
+	}
+#endif /* PRELOADED_BL33_BASE */
+
+#endif /* EL3_PAYLOAD_BASE */
+
+	/* Flush the params to be passed to memory */
+	bl2_plat_flush_bl31_params();
+
+	return bl31_ep_info;
+}
diff --git a/bl2/bl2_image_load_v2.c b/bl2/bl2_image_load_v2.c
new file mode 100644
index 0000000..6517703
--- /dev/null
+++ b/bl2/bl2_image_load_v2.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+
+/*******************************************************************************
+ * This function loads SCP_BL2/BL3x images and returns the ep_info for
+ * the next executable image.
+ ******************************************************************************/
+entry_point_info_t *bl2_load_images(void)
+{
+	bl_params_t *bl2_to_next_bl_params;
+	bl_load_info_t *bl2_load_info;
+	const bl_load_info_node_t *bl2_node_info;
+	int plat_setup_done = 0;
+	int err;
+
+	/*
+	 * Get information about the images to load.
+	 */
+	bl2_load_info = plat_get_bl_image_load_info();
+	assert(bl2_load_info);
+	assert(bl2_load_info->head);
+	assert(bl2_load_info->h.type == PARAM_BL_LOAD_INFO);
+	assert(bl2_load_info->h.version >= VERSION_2);
+	bl2_node_info = bl2_load_info->head;
+
+	while (bl2_node_info) {
+		/*
+		 * Perform platform setup before loading the image,
+		 * if indicated in the image attributes AND if NOT
+		 * already done before.
+		 */
+		if (bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_PLAT_SETUP) {
+			if (plat_setup_done) {
+				WARN("BL2: Platform setup already done!!\n");
+			} else {
+				INFO("BL2: Doing platform setup\n");
+				bl2_platform_setup();
+				plat_setup_done = 1;
+			}
+		}
+
+		if (!(bl2_node_info->image_info->h.attr & IMAGE_ATTRIB_SKIP_LOADING)) {
+			INFO("BL2: Loading image id %d\n", bl2_node_info->image_id);
+			err = load_auth_image(bl2_node_info->image_id,
+				bl2_node_info->image_info);
+			if (err) {
+				ERROR("BL2: Failed to load image (%i)\n", err);
+				plat_error_handler(err);
+			}
+		} else {
+			INFO("BL2: Skip loading image id %d\n", bl2_node_info->image_id);
+		}
+
+		/* Allow platform to handle image information. */
+		err = bl2_plat_handle_post_image_load(bl2_node_info->image_id);
+		if (err) {
+			ERROR("BL2: Failure in post image load handling (%i)\n", err);
+			plat_error_handler(err);
+		}
+
+		/* Go to next image */
+		bl2_node_info = bl2_node_info->next_load_info;
+	}
+
+	/*
+	 * Get information to pass to the next image.
+	 */
+	bl2_to_next_bl_params = plat_get_next_bl_params();
+	assert(bl2_to_next_bl_params);
+	assert(bl2_to_next_bl_params->head);
+	assert(bl2_to_next_bl_params->h.type == PARAM_BL_PARAMS);
+	assert(bl2_to_next_bl_params->h.version >= VERSION_2);
+	assert(bl2_to_next_bl_params->head->ep_info);
+
+	/* Populate arg0 for the next BL image */
+	bl2_to_next_bl_params->head->ep_info->args.arg0 = (u_register_t)bl2_to_next_bl_params;
+
+	/* Flush the parameters to be passed to next image */
+	plat_flush_next_bl_params();
+
+	return bl2_to_next_bl_params->head->ep_info;
+}
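
The LOAD_IMAGE_V2 walk above is driven entirely by per-node image attributes: IMAGE_ATTRIB_PLAT_SETUP triggers bl2_platform_setup() at most once, and IMAGE_ATTRIB_SKIP_LOADING bypasses load_auth_image() for that node. The reduced model below uses stand-in types so the decision logic can be read on its own; the struct layout and bit positions are simplifications, not the real desc_image_load.h definitions.

#include <stdio.h>

struct demo_node {
	unsigned int image_id;
	unsigned int attr;		/* stand-in for image_info->h.attr */
	const struct demo_node *next;	/* stand-in for next_load_info     */
};

#define DEMO_ATTRIB_PLAT_SETUP		(1u << 0)	/* assumed bit positions */
#define DEMO_ATTRIB_SKIP_LOADING	(1u << 1)

static void demo_walk(const struct demo_node *node)
{
	int plat_setup_done = 0;

	for (; node != NULL; node = node->next) {
		/* Run platform setup at most once, when an image requests it. */
		if ((node->attr & DEMO_ATTRIB_PLAT_SETUP) && !plat_setup_done) {
			printf("platform setup before image %u\n", node->image_id);
			plat_setup_done = 1;
		}

		if (node->attr & DEMO_ATTRIB_SKIP_LOADING)
			printf("skip loading image %u\n", node->image_id);
		else
			printf("load and authenticate image %u\n", node->image_id);
	}
}
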
diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c
index 5b1e69c..018deb3 100644
--- a/bl2/bl2_main.c
+++ b/bl2/bl2_main.c
@@ -1,397 +1,27 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <arch.h>
 #include <arch_helpers.h>
-#include <assert.h>
-#include <auth.h>
+#include <auth_mod.h>
+#include <bl1.h>
 #include <bl_common.h>
+#include <console.h>
 #include <debug.h>
 #include <platform.h>
-#include <platform_def.h>
 #include "bl2_private.h"
 
-#if TRUSTED_BOARD_BOOT
-
-#ifdef BL32_BASE
-static int bl32_cert_error;
-#endif
-
-/*
- * Load and authenticate the key and content certificates for a BL3-x image
- *
- * Parameters:
- *   key_cert_blob: key certificate blob id (see auth.h)
- *   key_cert_name: key certificate filename
- *   cont_cert_blob: content certificate blob id (see auth.h)
- *   cont_cert_name: content certificate filename
- *   mem_layout: Trusted SRAM memory layout
- *   load_addr: load the certificates at this address
- *
- * Return: 0 = success, Otherwise = error
- */
-static int load_cert_bl3x(int key_cert_blob, const char *key_cert_name,
-			  int cont_cert_blob, const char *cont_cert_name,
-			  meminfo_t *mem_layout, uint64_t load_addr)
-{
-	image_info_t image_info;
-	int err;
-
-	/* Load Key certificate */
-	image_info.h.version = VERSION_1;
-	err = load_image(mem_layout, key_cert_name, load_addr, &image_info, NULL);
-	if (err) {
-		ERROR("Cannot load %s.\n", key_cert_name);
-		return err;
-	}
-
-	err = auth_verify_obj(key_cert_blob, image_info.image_base,
-			image_info.image_size);
-	if (err) {
-		ERROR("Invalid key certificate %s.\n", key_cert_name);
-		return err;
-	}
-
-	/* Load Content certificate */
-	image_info.h.version = VERSION_1;
-	err = load_image(mem_layout, cont_cert_name, load_addr, &image_info, NULL);
-	if (err) {
-		ERROR("Cannot load %s.\n", cont_cert_name);
-		return err;
-	}
-
-	err = auth_verify_obj(cont_cert_blob, image_info.image_base,
-			image_info.image_size);
-	if (err) {
-		ERROR("Invalid content certificate %s.\n", cont_cert_name);
-		return err;
-	}
-
-	return 0;
-}
-
-/*
- * Load and authenticate the Trusted Key certificate the key and content
- * certificates for each of the BL3-x images.
- *
- * Return: 0 = success, Otherwise = error
- */
-static int load_certs(void)
-{
-	const uint64_t load_addr = BL31_BASE;
-	image_info_t image_info;
-	meminfo_t *mem_layout;
-	int err;
-
-	/* Find out how much free trusted ram remains after BL2 load */
-	mem_layout = bl2_plat_sec_mem_layout();
-
-	/* Load the Trusted Key certificate in the BL31 region */
-	image_info.h.version = VERSION_1;
-	err = load_image(mem_layout, TRUSTED_KEY_CERT_NAME, load_addr,
-			 &image_info, NULL);
-	if (err) {
-		ERROR("Failed to load Trusted Key certificate.\n");
-		return err;
-	}
-
-	/* Validate the certificate */
-	err = auth_verify_obj(AUTH_TRUSTED_KEY_CERT, image_info.image_base,
-			image_info.image_size);
-	if (err) {
-		ERROR("Invalid Trusted Key certificate.\n");
-		return err;
-	}
-
-	/* Load and validate Key and Content certificates for BL3-x images */
-#ifdef BL30_BASE
-	err = load_cert_bl3x(AUTH_BL30_KEY_CERT, BL30_KEY_CERT_NAME,
-			     AUTH_BL30_IMG_CERT, BL30_CERT_NAME,
-			     mem_layout, load_addr);
-	if (err) {
-		ERROR("Failed to verify BL3-0 authenticity\n");
-		return err;
-	}
-#endif /* BL30_BASE */
-
-	err = load_cert_bl3x(AUTH_BL31_KEY_CERT, BL31_KEY_CERT_NAME,
-			     AUTH_BL31_IMG_CERT, BL31_CERT_NAME,
-			     mem_layout, load_addr);
-	if (err) {
-		ERROR("Failed to verify BL3-1 authenticity\n");
-		return err;
-	}
-
-#ifdef BL32_BASE
-	/* BL3-2 image is optional, but keep the return value in case the
-	 * image is present but the certificate is missing */
-	err = load_cert_bl3x(AUTH_BL32_KEY_CERT, BL32_KEY_CERT_NAME,
-			     AUTH_BL32_IMG_CERT, BL32_CERT_NAME,
-			     mem_layout, load_addr);
-	if (err) {
-		WARN("Failed to verify BL3-2 authenticity\n");
-	}
-	bl32_cert_error = err;
-#endif /* BL32_BASE */
-
-	err = load_cert_bl3x(AUTH_BL33_KEY_CERT, BL33_KEY_CERT_NAME,
-			     AUTH_BL33_IMG_CERT, BL33_CERT_NAME,
-			     mem_layout, load_addr);
-	if (err) {
-		ERROR("Failed to verify BL3-3 authenticity\n");
-		return err;
-	}
-
-	return 0;
-}
-
-#endif /* TRUSTED_BOARD_BOOT */
-
-/*******************************************************************************
- * Load the BL3-0 image if there's one.
- * If a platform does not want to attempt to load BL3-0 image it must leave
- * BL30_BASE undefined.
- * Return 0 on success or if there's no BL3-0 image to load, a negative error
- * code otherwise.
- ******************************************************************************/
-static int load_bl30(void)
-{
-	int e = 0;
-#ifdef BL30_BASE
-	meminfo_t bl30_mem_info;
-	image_info_t bl30_image_info;
-
-	/*
-	 * It is up to the platform to specify where BL3-0 should be loaded if
-	 * it exists. It could create space in the secure sram or point to a
-	 * completely different memory.
-	 *
-	 * The entry point information is not relevant in this case as the AP
-	 * won't execute the BL3-0 image.
-	 */
-	INFO("BL2: Loading BL3-0\n");
-	bl2_plat_get_bl30_meminfo(&bl30_mem_info);
-	bl30_image_info.h.version = VERSION_1;
-	e = load_image(&bl30_mem_info,
-		       BL30_IMAGE_NAME,
-		       BL30_BASE,
-		       &bl30_image_info,
-		       NULL);
-
-	if (e == 0) {
-#if TRUSTED_BOARD_BOOT
-		e = auth_verify_obj(AUTH_BL30_IMG,
-				bl30_image_info.image_base,
-				bl30_image_info.image_size);
-		if (e) {
-			ERROR("Failed to authenticate BL3-0 image.\n");
-			panic();
-		}
-
-		/* After working with data, invalidate the data cache */
-		inv_dcache_range(bl30_image_info.image_base,
-				 (size_t)bl30_image_info.image_size);
-#endif /* TRUSTED_BOARD_BOOT */
-
-		/* The subsequent handling of BL3-0 is platform specific */
-		bl2_plat_handle_bl30(&bl30_image_info);
-	}
-#endif /* BL30_BASE */
-
-	return e;
-}
-
-/*******************************************************************************
- * Load the BL3-1 image.
- * The bl2_to_bl31_params and bl31_ep_info params will be updated with the
- * relevant BL3-1 information.
- * Return 0 on success, a negative error code otherwise.
- ******************************************************************************/
-static int load_bl31(bl31_params_t *bl2_to_bl31_params,
-		     entry_point_info_t *bl31_ep_info)
-{
-	meminfo_t *bl2_tzram_layout;
-	int e;
-
-	INFO("BL2: Loading BL3-1\n");
-	assert(bl2_to_bl31_params != NULL);
-	assert(bl31_ep_info != NULL);
-
-	/* Find out how much free trusted ram remains after BL2 load */
-	bl2_tzram_layout = bl2_plat_sec_mem_layout();
-
-	/* Set the X0 parameter to BL3-1 */
-	bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
-
-	/* Load the BL3-1 image */
-	e = load_image(bl2_tzram_layout,
-		       BL31_IMAGE_NAME,
-		       BL31_BASE,
-		       bl2_to_bl31_params->bl31_image_info,
-		       bl31_ep_info);
-
-	if (e == 0) {
-#if TRUSTED_BOARD_BOOT
-		e = auth_verify_obj(AUTH_BL31_IMG,
-			bl2_to_bl31_params->bl31_image_info->image_base,
-			bl2_to_bl31_params->bl31_image_info->image_size);
-		if (e) {
-			ERROR("Failed to authenticate BL3-1 image.\n");
-			panic();
-		}
-
-		/* After working with data, invalidate the data cache */
-		inv_dcache_range(bl2_to_bl31_params->bl31_image_info->image_base,
-			(size_t)bl2_to_bl31_params->bl31_image_info->image_size);
-#endif /* TRUSTED_BOARD_BOOT */
-
-		bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
-					  bl31_ep_info);
-	}
-
-	return e;
-}
-
-/*******************************************************************************
- * Load the BL3-2 image if there's one.
- * The bl2_to_bl31_params param will be updated with the relevant BL3-2
- * information.
- * If a platform does not want to attempt to load BL3-2 image it must leave
- * BL32_BASE undefined.
- * Return 0 on success or if there's no BL3-2 image to load, a negative error
- * code otherwise.
- ******************************************************************************/
-static int load_bl32(bl31_params_t *bl2_to_bl31_params)
-{
-	int e = 0;
-#ifdef BL32_BASE
-	meminfo_t bl32_mem_info;
-
-	INFO("BL2: Loading BL3-2\n");
-	assert(bl2_to_bl31_params != NULL);
-
-	/*
-	 * It is up to the platform to specify where BL3-2 should be loaded if
-	 * it exists. It could create space in the secure sram or point to a
-	 * completely different memory.
-	 */
-	bl2_plat_get_bl32_meminfo(&bl32_mem_info);
-	e = load_image(&bl32_mem_info,
-		       BL32_IMAGE_NAME,
-		       BL32_BASE,
-		       bl2_to_bl31_params->bl32_image_info,
-		       bl2_to_bl31_params->bl32_ep_info);
-
-	if (e == 0) {
-#if TRUSTED_BOARD_BOOT
-		/* Image is present. Check if there is a valid certificate */
-		if (bl32_cert_error) {
-			ERROR("Failed to authenticate BL3-2 certificates.\n");
-			panic();
-		}
-
-		e = auth_verify_obj(AUTH_BL32_IMG,
-			bl2_to_bl31_params->bl32_image_info->image_base,
-			bl2_to_bl31_params->bl32_image_info->image_size);
-		if (e) {
-			ERROR("Failed to authenticate BL3-2 image.\n");
-			panic();
-		}
-		/* After working with data, invalidate the data cache */
-		inv_dcache_range(bl2_to_bl31_params->bl32_image_info->image_base,
-			(size_t)bl2_to_bl31_params->bl32_image_info->image_size);
-#endif /* TRUSTED_BOARD_BOOT */
-
-		bl2_plat_set_bl32_ep_info(
-			bl2_to_bl31_params->bl32_image_info,
-			bl2_to_bl31_params->bl32_ep_info);
-	}
-#endif /* BL32_BASE */
-
-	return e;
-}
-
-/*******************************************************************************
- * Load the BL3-3 image.
- * The bl2_to_bl31_params param will be updated with the relevant BL3-3
- * information.
- * Return 0 on success, a negative error code otherwise.
- ******************************************************************************/
-static int load_bl33(bl31_params_t *bl2_to_bl31_params)
-{
-	meminfo_t bl33_mem_info;
-	int e;
-
-	INFO("BL2: Loading BL3-3\n");
-	assert(bl2_to_bl31_params != NULL);
-
-	bl2_plat_get_bl33_meminfo(&bl33_mem_info);
-
-	/* Load the BL3-3 image in non-secure memory provided by the platform */
-	e = load_image(&bl33_mem_info,
-		       BL33_IMAGE_NAME,
-		       plat_get_ns_image_entrypoint(),
-		       bl2_to_bl31_params->bl33_image_info,
-		       bl2_to_bl31_params->bl33_ep_info);
-
-	if (e == 0) {
-#if TRUSTED_BOARD_BOOT
-		e = auth_verify_obj(AUTH_BL33_IMG,
-				bl2_to_bl31_params->bl33_image_info->image_base,
-				bl2_to_bl31_params->bl33_image_info->image_size);
-		if (e) {
-			ERROR("Failed to authenticate BL3-3 image.\n");
-			panic();
-		}
-		/* After working with data, invalidate the data cache */
-		inv_dcache_range(bl2_to_bl31_params->bl33_image_info->image_base,
-			(size_t)bl2_to_bl31_params->bl33_image_info->image_size);
-#endif /* TRUSTED_BOARD_BOOT */
-
-		bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
-					  bl2_to_bl31_params->bl33_ep_info);
-	}
-
-	return e;
-}
 
 /*******************************************************************************
  * The only thing to do in BL2 is to load further images and pass control to
- * BL3-1. The memory occupied by BL2 will be reclaimed by BL3-x stages. BL2 runs
- * entirely in S-EL1.
+ * the next BL image. The memory occupied by BL2 will be reclaimed by the
+ * BL3x stages. BL2 runs entirely in S-EL1.
  ******************************************************************************/
 void bl2_main(void)
 {
-	bl31_params_t *bl2_to_bl31_params;
-	entry_point_info_t *bl31_ep_info;
-	int e;
+	entry_point_info_t *next_bl_ep_info;
 
 	NOTICE("BL2: %s\n", version_string);
 	NOTICE("BL2: %s\n", build_message);
@@ -401,59 +31,30 @@
 
 #if TRUSTED_BOARD_BOOT
 	/* Initialize authentication module */
-	auth_init();
-
-	/* Validate the certificates involved in the Chain of Trust */
-	e = load_certs();
-	if (e) {
-		ERROR("Chain of Trust invalid. Aborting...\n");
-		panic();
-	}
+	auth_mod_init();
 #endif /* TRUSTED_BOARD_BOOT */
 
-	/*
-	 * Load the subsequent bootloader images
-	 */
-	e = load_bl30();
-	if (e) {
-		ERROR("Failed to load BL3-0 (%i)\n", e);
-		ERROR("Please burn mcu image:\n");
-		ERROR("  sudo fastboot flash mcuimage mcuimage.bin\n");
-	}
+	/* Initialize the boot source */
+	bl2_plat_preload_setup();
 
-	/* Perform platform setup in BL2 after loading BL3-0 */
-	bl2_platform_setup();
+	/* Load the subsequent bootloader images. */
+	next_bl_ep_info = bl2_load_images();
+
+#ifdef AARCH32
+	/*
+	 * In the AArch32 state, BL1 and BL2 share the MMU setup.
+	 * Given that BL2 does not map the BL1 regions, the MMU
+	 * needs to be disabled in order to return to BL1.
+	 */
+	disable_mmu_icache_secure();
+#endif /* AARCH32 */
+
+	console_flush();
 
 	/*
-	 * Get a pointer to the memory the platform has set aside to pass
-	 * information to BL3-1.
+	 * Run the next BL image via an SMC to BL1. Information on how to pass
+	 * control to the BL32 (if present) and BL33 software images will
+	 * be passed to the next BL image as an argument.
 	 */
-	bl2_to_bl31_params = bl2_plat_get_bl31_params();
-	bl31_ep_info = bl2_plat_get_bl31_ep_info();
-
-	e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
-	if (e) {
-		ERROR("Failed to load BL3-1 (%i)\n", e);
-		panic();
-	}
-
-	e = load_bl32(bl2_to_bl31_params);
-	if (e)
-		WARN("Failed to load BL3-2 (%i)\n", e);
-
-	e = load_bl33(bl2_to_bl31_params);
-	if (e) {
-		ERROR("Failed to load BL3-3 (%i)\n", e);
-		panic();
-	}
-
-	/* Flush the params to be passed to memory */
-	bl2_plat_flush_bl31_params();
-
-	/*
-	 * Run BL3-1 via an SMC to BL1. Information on how to pass control to
-	 * the BL3-2 (if present) and BL3-3 software images will be passed to
-	 * BL3-1 as an argument.
-	 */
-	smc(RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0);
+	smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0);
 }
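
Note on the new flow above: bl2_plat_preload_setup() is a platform hook invoked before any image is loaded, intended for platforms that must prepare an alternative boot source (for example a recovery or firmware-update path). Platforms with nothing to prepare are assumed to rely on an empty weak default along the lines of this sketch (the header assumed to declare the hook, and the weak-default arrangement, are illustrative rather than a quote of the upstream sources):

    /* Illustrative weak default for the pre-load hook used by bl2_main(). */
    #include <platform.h>

    #pragma weak bl2_plat_preload_setup

    void bl2_plat_preload_setup(void)
    {
    	/* Nothing to do by default; a platform port overrides this
    	 * to initialise its alternative boot source. */
    }
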
diff --git a/bl2/bl2_private.h b/bl2/bl2_private.h
index 022d1e9..83b8047 100644
--- a/bl2/bl2_private.h
+++ b/bl2/bl2_private.h
@@ -1,39 +1,21 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __BL2_PRIVATE_H__
 #define __BL2_PRIVATE_H__
 
 /******************************************
+ * Forward declarations
+ *****************************************/
+struct entry_point_info;
+
+/******************************************
  * Function prototypes
  *****************************************/
 void bl2_arch_setup(void);
+struct entry_point_info *bl2_load_images(void);
 
 #endif /* __BL2_PRIVATE_H__ */
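
For orientation, the bl2_load_images() declared here replaces the per-image load_bl3x() helpers deleted above: it is expected to walk the platform's image list, load (and, with TRUSTED_BOARD_BOOT, authenticate) each image, and return the entry point information of the image BL1 should run next. The outline below is only a sketch of that contract; plat_get_bl2_image_ids(), plat_get_image_info(), plat_get_image_ep(), plat_get_next_image_id() and the load_auth_image() signature are assumptions for illustration, not the upstream API:

    /* Sketch only -- helper names and signatures are hypothetical. */
    #include <bl_common.h>
    #include <debug.h>
    #include <platform.h>
    #include "bl2_private.h"

    struct entry_point_info *bl2_load_images(void)
    {
    	unsigned int count, i;
    	const unsigned int *ids = plat_get_bl2_image_ids(&count);	/* hypothetical */

    	for (i = 0; i < count; i++) {
    		image_info_t *info = plat_get_image_info(ids[i]);	/* hypothetical */

    		/* Assumed to load the image and, when TRUSTED_BOARD_BOOT is
    		 * enabled, verify it against the chain of trust. */
    		if (load_auth_image(ids[i], info) != 0) {
    			ERROR("BL2: failed to load image %u\n", ids[i]);
    			panic();
    		}
    	}

    	/* Entry point of the image BL1 must run next (e.g. BL31). */
    	return plat_get_image_ep(plat_get_next_image_id());	/* hypothetical */
    }
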
diff --git a/bl2u/aarch32/bl2u_entrypoint.S b/bl2u/aarch32/bl2u_entrypoint.S
new file mode 100644
index 0000000..7fb64f3
--- /dev/null
+++ b/bl2u/aarch32/bl2u_entrypoint.S
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+
+
+	.globl	bl2u_vector_table
+	.globl	bl2u_entrypoint
+
+
+vector_base bl2u_vector_table
+	b	bl2u_entrypoint
+	b	report_exception	/* Undef */
+	b	report_exception	/* SVC call */
+	b	report_exception	/* Prefetch abort */
+	b	report_exception	/* Data abort */
+	b	report_exception	/* Reserved */
+	b	report_exception	/* IRQ */
+	b	report_exception	/* FIQ */
+
+
+func bl2u_entrypoint
+	/*---------------------------------------------
+	 * Save the extents of the trusted ram available
+	 * to BL2U (passed in r1) for future use.
+	 * r0 is not currently used.
+	 * ---------------------------------------------
+	 */
+	mov	r11, r1
+	mov	r10, r2
+
+	/* ---------------------------------------------
+	 * Set the exception vector to something sane.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =bl2u_vector_table
+	stcopr	r0, VBAR
+	isb
+
+	/* -----------------------------------------------------
+	 * Enable the instruction cache
+	 * -----------------------------------------------------
+	 */
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_I_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	/* ---------------------------------------------
+	 * Since BL2U executes after BL1, it is assumed
+	 * here that BL1 has already done the
+	 * necessary register initializations.
+	 * ---------------------------------------------
+	 */
+
+	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL2U
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =__RW_START__
+	ldr	r1, =__RW_END__
+	sub	r1, r1, r0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Zero out NOBITS sections. There are 2 of them:
+	 *   - the .bss section;
+	 *   - the coherent memory section.
+	 * ---------------------------------------------
+	 */
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_SIZE__
+	bl	zeromem
+
+	/* --------------------------------------------
+	 * Allocate a stack whose memory will be marked
+	 * as Normal-IS-WBWA when the MMU is enabled.
+	 * There is no risk of reading stale stack
+	 * memory after enabling the MMU as only the
+	 * primary cpu is running at the moment.
+	 * --------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+	/* ---------------------------------------------
+	 * Initialize the stack protector canary before
+	 * any C code is called.
+	 * ---------------------------------------------
+	 */
+#if STACK_PROTECTOR_ENABLED
+	bl	update_stack_protector_canary
+#endif
+
+	/* ---------------------------------------------
+	 * Perform early platform setup & platform
+	 * specific early arch. setup e.g. mmu setup
+	 * ---------------------------------------------
+	 */
+	mov	r0, r11
+	mov	r1, r10
+	bl	bl2u_early_platform_setup
+	bl	bl2u_plat_arch_setup
+
+	/* ---------------------------------------------
+	 * Jump to main function.
+	 * ---------------------------------------------
+	 */
+	bl	bl2u_main
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+
+endfunc bl2u_entrypoint
diff --git a/bl2u/aarch64/bl2u_entrypoint.S b/bl2u/aarch64/bl2u_entrypoint.S
new file mode 100644
index 0000000..8b9c2a6
--- /dev/null
+++ b/bl2u/aarch64/bl2u_entrypoint.S
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+
+
+	.globl	bl2u_entrypoint
+
+
+func bl2u_entrypoint
+	/*---------------------------------------------
+	 * Store the extents of the tzram available to
+	 * BL2U and other platform specific information
+	 * for future use. x0 is currently not used.
+	 * ---------------------------------------------
+	 */
+	mov	x20, x1
+	mov	x21, x2
+
+	/* ---------------------------------------------
+	 * Set the exception vector to something sane.
+	 * ---------------------------------------------
+	 */
+	adr	x0, early_exceptions
+	msr	vbar_el1, x0
+	isb
+
+	/* ---------------------------------------------
+	 * Enable the SError interrupt now that the
+	 * exception vectors have been setup.
+	 * ---------------------------------------------
+	 */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/* ---------------------------------------------
+	 * Enable the instruction cache, stack pointer
+	 * and data access alignment checks
+	 * ---------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el1
+	orr	x0, x0, x1
+	msr	sctlr_el1, x0
+	isb
+
+	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL2U
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Zero out NOBITS sections. There are 2 of them:
+	 *   - the .bss section;
+	 *   - the coherent memory section.
+	 * ---------------------------------------------
+	 */
+	ldr	x0, =__BSS_START__
+	ldr	x1, =__BSS_SIZE__
+	bl	zeromem
+
+	/* --------------------------------------------
+	 * Allocate a stack whose memory will be marked
+	 * as Normal-IS-WBWA when the MMU is enabled.
+	 * There is no risk of reading stale stack
+	 * memory after enabling the MMU as only the
+	 * primary cpu is running at the moment.
+	 * --------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+	/* ---------------------------------------------
+	 * Initialize the stack protector canary before
+	 * any C code is called.
+	 * ---------------------------------------------
+	 */
+#if STACK_PROTECTOR_ENABLED
+	bl	update_stack_protector_canary
+#endif
+
+	/* ---------------------------------------------
+	 * Perform early platform setup & platform
+	 * specific early arch. setup e.g. mmu setup
+	 * ---------------------------------------------
+	 */
+	mov	x0, x20
+	mov	x1, x21
+	bl	bl2u_early_platform_setup
+	bl	bl2u_plat_arch_setup
+
+	/* ---------------------------------------------
+	 * Jump to bl2u_main function.
+	 * ---------------------------------------------
+	 */
+	bl	bl2u_main
+
+	/* ---------------------------------------------
+	 * Should never reach this point.
+	 * ---------------------------------------------
+	 */
+	no_ret	plat_panic_handler
+
+endfunc bl2u_entrypoint
diff --git a/bl2u/bl2u.ld.S b/bl2u/bl2u.ld.S
new file mode 100644
index 0000000..efae293
--- /dev/null
+++ b/bl2u/bl2u.ld.S
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(bl2u_entrypoint)
+
+MEMORY {
+    RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
+}
+
+
+SECTIONS
+{
+    . = BL2U_BASE;
+    ASSERT(. == ALIGN(4096),
+           "BL2U_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *bl2u_entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+     } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
+    ro . : {
+        __RO_START__ = .;
+        *bl2u_entrypoint.o(.text*)
+        *(.text*)
+        *(.rodata*)
+
+        *(.vectors)
+        __RO_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked as
+         * read-only, executable.  No RW data from the next section must
+         * creep in.  Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __RO_END__ = .;
+    } >RAM
+#endif
+
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    /*
+     * .data must be placed at a lower address than the stacks if the stack
+     * protector is enabled. Alternatively, the .data.stack_protector_canary
+     * section can be placed independently of the main .data section.
+     */
+    .data . : {
+        __DATA_START__ = .;
+        *(.data*)
+        __DATA_END__ = .;
+    } >RAM
+
+    stacks (NOLOAD) : {
+        __STACKS_START__ = .;
+        *(tzfw_normal_stacks)
+        __STACKS_END__ = .;
+    } >RAM
+
+    /*
+     * The .bss section gets initialised to 0 at runtime.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
+     */
+    .bss : ALIGN(16) {
+        __BSS_START__ = .;
+        *(SORT_BY_ALIGNMENT(.bss*))
+        *(COMMON)
+        __BSS_END__ = .;
+    } >RAM
+
+    /*
+     * The xlat_table section is for full, aligned page tables (4K).
+     * Removing them from .bss avoids forcing 4K alignment on
+     * the .bss section and eliminates the unnecessary zero init.
+     */
+    xlat_table (NOLOAD) : {
+        *(xlat_table)
+    } >RAM
+
+#if USE_COHERENT_MEM
+    /*
+     * The base address of the coherent memory section must be page-aligned (4K)
+     * to guarantee that the coherent data are stored on their own pages and
+     * are not mixed with normal data.  This is required to set up the correct
+     * memory attributes for the coherent data page tables.
+     */
+    coherent_ram (NOLOAD) : ALIGN(4096) {
+        __COHERENT_RAM_START__ = .;
+        *(tzfw_coherent_mem)
+        __COHERENT_RAM_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked
+         * as device memory.  No other unexpected data must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __COHERENT_RAM_END__ = .;
+    } >RAM
+#endif
+
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
+    __BL2U_END__ = .;
+
+    __BSS_SIZE__ = SIZEOF(.bss);
+
+    ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
+}
diff --git a/bl2u/bl2u.mk b/bl2u/bl2u.mk
new file mode 100644
index 0000000..b4d7634
--- /dev/null
+++ b/bl2u/bl2u.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BL2U_SOURCES		+=	bl2u/bl2u_main.c			\
+				bl2u/${ARCH}/bl2u_entrypoint.S		\
+				plat/common/${ARCH}/platform_up_stack.S
+
+ifeq (${ARCH},aarch64)
+BL2U_SOURCES		+=	common/aarch64/early_exceptions.S
+endif
+
+BL2U_LINKERFILE		:=	bl2u/bl2u.ld.S
diff --git a/bl2u/bl2u_main.c b/bl2u/bl2u_main.c
new file mode 100644
index 0000000..09ad468
--- /dev/null
+++ b/bl2u/bl2u_main.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <auth_mod.h>
+#include <bl1.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * This function is responsible for:
+ * - loading SCP_BL2U if the platform has defined SCP_BL2U_BASE;
+ * - performing platform setup;
+ * - returning to EL3.
+ ******************************************************************************/
+void bl2u_main(void)
+{
+	NOTICE("BL2U: %s\n", version_string);
+	NOTICE("BL2U: %s\n", build_message);
+
+#if SCP_BL2U_BASE
+	int rc;
+	/* Load the subsequent bootloader images */
+	rc = bl2u_plat_handle_scp_bl2u();
+	if (rc) {
+		ERROR("Failed to load SCP_BL2U (%i)\n", rc);
+		panic();
+	}
+#endif
+
+	/* Perform platform setup in BL2U after loading SCP_BL2U */
+	bl2u_platform_setup();
+
+	console_flush();
+
+#ifdef AARCH32
+	/*
+	 * In the AArch32 state, BL1 and BL2U share the MMU setup.
+	 * Given that BL2U does not map the BL1 regions, the MMU
+	 * needs to be disabled in order to return to BL1.
+	 */
+	disable_mmu_icache_secure();
+#endif /* AARCH32 */
+
+	/*
+	 * Indicate that BL2U is done and return to the
+	 * normal world via an SMC to BL1.
+	 * x1 could be passed to the normal world,
+	 * so DO NOT pass any secret information.
+	 */
+	smc(FWU_SMC_SEC_IMAGE_DONE, 0, 0, 0, 0, 0, 0, 0);
+	wfi();
+}
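
The bl2u_plat_handle_scp_bl2u() call above is the platform hook that delivers SCP_BL2U to the SCP during a firmware update; the only contract visible here is that it returns 0 on success. A platform port might look roughly like the sketch below, where plat_load_scp_bl2u_image() and scp_transfer_image() are hypothetical placeholders for the platform's loader and SCP transport, and SCP_BL2U_LIMIT is an assumed companion to the SCP_BL2U_BASE used above:

    /* Hypothetical platform implementation -- placeholders, not upstream code. */
    #include <debug.h>
    #include <platform.h>
    #include <platform_def.h>

    int bl2u_plat_handle_scp_bl2u(void)
    {
    	int rc;

    	/* Load SCP_BL2U into the region reserved for it. */
    	rc = plat_load_scp_bl2u_image(SCP_BL2U_BASE,
    				      SCP_BL2U_LIMIT - SCP_BL2U_BASE);	/* hypothetical */
    	if (rc != 0) {
    		ERROR("BL2U: failed to load SCP_BL2U (%i)\n", rc);
    		return rc;
    	}

    	/* Hand the image to the SCP, e.g. over a mailbox/SCPI link. */
    	return scp_transfer_image((void *)SCP_BL2U_BASE);	/* hypothetical */
    }
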
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
deleted file mode 100644
index a88b029..0000000
--- a/bl31/aarch64/bl31_arch_setup.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <platform.h>
-
-/*******************************************************************************
- * This duplicates what the primary cpu did after a cold boot in BL1. The same
- * needs to be done when a cpu is hotplugged in. This function could also over-
- * ride any EL3 setup done by BL1 as this code resides in rw memory.
- ******************************************************************************/
-void bl31_arch_setup(void)
-{
-	/* Set the RES1 bits in the SCR_EL3 */
-	write_scr_el3(SCR_RES1_BITS);
-
-	/* Program the counter frequency */
-	write_cntfrq_el0(plat_get_syscnt_freq());
-}
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 01d7a7f..419927d 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -1,39 +1,18 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
-#include <asm_macros.S>
 #include <bl_common.h>
+#include <el3_common_macros.S>
+#include <pmf_asm_macros.S>
+#include <runtime_instr.h>
+#include <xlat_tables_defs.h>
 
 	.globl	bl31_entrypoint
-
+	.globl	bl31_warm_entrypoint
 
 	/* -----------------------------------------------------
 	 * bl31_entrypoint() is the cold boot entrypoint,
@@ -42,160 +21,68 @@
 	 */
 
 func bl31_entrypoint
+#if !RESET_TO_BL31
 	/* ---------------------------------------------------------------
 	 * Preceding bootloader has populated x0 with a pointer to a
 	 * 'bl31_params' structure & x1 with a pointer to platform
 	 * specific structure
 	 * ---------------------------------------------------------------
 	 */
-#if !RESET_TO_BL31
 	mov	x20, x0
 	mov	x21, x1
-#else
-	/* ---------------------------------------------
-	 * Set the CPU endianness before doing anything
-	 * that might involve memory reads or writes.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, sctlr_el3
-	bic	x0, x0, #SCTLR_EE_BIT
-	msr	sctlr_el3, x0
-	isb
-#endif
-
-	/* ---------------------------------------------
-	 * When RESET_TO_BL31 is true, perform any
-	 * processor specific actions upon reset e.g.
-	 * cache, tlb invalidations, errata workarounds
-	 * etc.
-	 * When RESET_TO_BL31 is false, perform any
-	 * processor specific actions which undo or are
-	 * in addition to the actions performed by the
-	 * reset handler in the Boot ROM (BL1).
-	 * ---------------------------------------------
-	 */
-	bl	reset_handler
-
-	/* ---------------------------------------------
-	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks
-	 * ---------------------------------------------
-	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
-	mrs	x0, sctlr_el3
-	orr	x0, x0, x1
-	msr	sctlr_el3, x0
-	isb
-
-	/* ---------------------------------------------
-	 * Initialise cpu_data early to enable crash
-	 * reporting to have access to crash stack.
-	 * Since crash reporting depends on cpu_data to
-	 * report the unhandled exception, not
-	 * doing so can lead to recursive exceptions due
-	 * to a NULL TPIDR_EL3
-	 * ---------------------------------------------
-	 */
-	bl	init_cpu_data_ptr
-
-	/* ---------------------------------------------
-	 * Set the exception vector.
-	 * ---------------------------------------------
-	 */
-	adr	x1, runtime_exceptions
-	msr	vbar_el3, x1
-	isb
-
-	/* ---------------------------------------------
-	 * Enable the SError interrupt now that the
-	 * exception vectors have been setup.
-	 * ---------------------------------------------
-	 */
-	msr	daifclr, #DAIF_ABT_BIT
 
 	/* ---------------------------------------------------------------------
-	 * The initial state of the Architectural feature trap register
-	 * (CPTR_EL3) is unknown and it must be set to a known state. All
-	 * feature traps are disabled. Some bits in this register are marked as
-	 * Reserved and should not be modified.
+	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
+	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
+	 * and primary/secondary CPU logic should not be executed in this case.
 	 *
-	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
-	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
-	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
-	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
-	 *  access to trace functionality is not supported, this bit is RES0.
-	 * CPTR_EL3.TFP: This causes instructions that access the registers
-	 *  associated with Floating Point and Advanced SIMD execution to trap
-	 *  to EL3 when executed from any exception level, unless trapped to EL1
-	 *  or EL2.
+	 * Also, assume that the previous bootloader has already initialised the
+	 * SCTLR_EL3, including the endianness, and has initialised the memory.
 	 * ---------------------------------------------------------------------
 	 */
-	mrs	x1, cptr_el3
-	bic	w1, w1, #TCPAC_BIT
-	bic	w1, w1, #TTA_BIT
-	bic	w1, w1, #TFP_BIT
-	msr	cptr_el3, x1
+	el3_entrypoint_common					\
+		_init_sctlr=0					\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=1				\
+		_exception_vectors=runtime_exceptions
 
-#if RESET_TO_BL31
-	/* -------------------------------------------------------
-	 * Will not return from this macro if it is a warm boot.
-	 * -------------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * Relay the previous bootloader's arguments to the platform layer
+	 * ---------------------------------------------------------------------
 	 */
-	wait_for_entrypoint
-	bl	platform_mem_init
-#endif
-
-	/* ---------------------------------------------
-	 * Zero out NOBITS sections. There are 2 of them:
-	 *   - the .bss section;
-	 *   - the coherent memory section.
-	 * ---------------------------------------------
+	mov	x0, x20
+	mov	x1, x21
+#else
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_BL31 systems which have a programmable reset address,
+	 * bl31_entrypoint() is executed only on the cold boot path so we can
+	 * skip the warm boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
 	 */
-	ldr	x0, =__BSS_START__
-	ldr	x1, =__BSS_SIZE__
-	bl	zeromem16
+	el3_entrypoint_common					\
+		_init_sctlr=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=runtime_exceptions
 
-#if USE_COHERENT_MEM
-	ldr	x0, =__COHERENT_RAM_START__
-	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem16
-#endif
-
-	/* ---------------------------------------------
-	 * Initialize the cpu_ops pointer.
-	 * ---------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
+	 * there's no argument to relay from a previous bootloader. Zero the
+	 * arguments passed to the platform layer to reflect that.
+	 * ---------------------------------------------------------------------
 	 */
-	bl	init_cpu_ops
-
-	/* ---------------------------------------------
-	 * Use SP_EL0 for the C runtime stack.
-	 * ---------------------------------------------
-	 */
-	msr	spsel, #0
-
-	/* --------------------------------------------
-	 * Allocate a stack whose memory will be marked
-	 * as Normal-IS-WBWA when the MMU is enabled.
-	 * There is no risk of reading stale stack
-	 * memory after enabling the MMU as only the
-	 * primary cpu is running at the moment.
-	 * --------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
+	mov	x0, 0
+	mov	x1, 0
+#endif /* RESET_TO_BL31 */
 
 	/* ---------------------------------------------
 	 * Perform platform specific early arch. setup
 	 * ---------------------------------------------
 	 */
-#if RESET_TO_BL31
-	mov	x0, 0
-	mov	x1, 0
-#else
-	mov	x0, x20
-	mov	x1, x21
-#endif
-
 	bl	bl31_early_platform_setup
 	bl	bl31_plat_arch_setup
 
@@ -205,4 +92,115 @@
 	 */
 	bl	bl31_main
 
+	/* -------------------------------------------------------------
+	 * Clean the .data & .bss sections to main memory. This ensures
+	 * that any global data which was initialised by the primary CPU
+	 * is visible to secondary CPUs before they enable their data
+	 * caches and participate in coherency.
+	 * -------------------------------------------------------------
+	 */
+	adr	x0, __DATA_START__
+	adr	x1, __DATA_END__
+	sub	x1, x1, x0
+	bl	clean_dcache_range
+
+	adr	x0, __BSS_START__
+	adr	x1, __BSS_END__
+	sub	x1, x1, x0
+	bl	clean_dcache_range
+
 	b	el3_exit
+endfunc bl31_entrypoint
+
+	/* --------------------------------------------------------------------
+	 * This CPU has been physically powered up. It is either resuming from
+	 * suspend or has simply been turned on. In both cases, call the BL31
+	 * warmboot entrypoint
+	 * --------------------------------------------------------------------
+	 */
+func bl31_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+	/*
+	 * This timestamp update happens with cache off.  The next
+	 * timestamp collection will need to do cache maintenance prior
+	 * to timestamp update.
+	 */
+	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
+	mrs	x1, cntpct_el0
+	str	x1, [x0]
+#endif
+
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
+	 *    programming the reset address do we need to initialise SCTLR_EL3.
+	 *    In other cases, we assume this has been taken care of by the
+	 *    entrypoint code.
+	 *
+	 *  - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 *  - Do not try to distinguish between primary and secondary CPUs, as
+	 *    this notion only exists for a cold boot.
+	 *
+	 *  - No need to initialise the memory or the C runtime environment,
+	 *    it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=runtime_exceptions
+
+	/*
+	 * We're about to enable MMU and participate in PSCI state coordination.
+	 *
+	 * The PSCI implementation invokes platform routines that enable CPUs to
+	 * participate in coherency. On a system where CPUs are not
+	 * cache-coherent without appropriate platform specific programming,
+	 * having caches enabled until such time might lead to coherency issues
+	 * (resulting from stale data getting speculatively fetched, among
+	 * others). Therefore we keep data caches disabled even after enabling
+	 * the MMU for such platforms.
+	 *
+	 * On systems with hardware-assisted coherency, or on single cluster
+	 * platforms, such platform specific programming is not required to
+	 * enter coherency (as CPUs already are); and there's no reason to have
+	 * caches disabled either.
+	 */
+	mov	x0, #DISABLE_DCACHE
+	bl	bl31_plat_enable_mmu
+
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+	mrs	x0, sctlr_el3
+	orr	x0, x0, #SCTLR_C_BIT
+	msr	sctlr_el3, x0
+	isb
+#endif
+
+	bl	psci_warmboot_entrypoint
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
+	mov	x19, x0
+
+	/*
+	 * Invalidate before updating timestamp to ensure previous timestamp
+	 * updates on the same cache line with caches disabled are properly
+	 * seen by the same core. Without the cache invalidate, the core might
+	 * write into a stale cache line.
+	 */
+	mov	x1, #PMF_TS_SIZE
+	mov	x20, x30
+	bl	inv_dcache_range
+	mov	x30, x20
+
+	mrs	x0, cntpct_el0
+	str	x0, [x19]
+#endif
+	b	el3_exit
+endfunc bl31_warm_entrypoint
diff --git a/bl31/aarch64/context.S b/bl31/aarch64/context.S
deleted file mode 100644
index b127480..0000000
--- a/bl31/aarch64/context.S
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <context.h>
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
- * to save EL1 system register context. It assumes that
- * 'x0' is pointing to a 'el1_sys_regs' structure where
- * the register context will be saved.
- * -----------------------------------------------------
- */
-	.global el1_sysregs_context_save
-func el1_sysregs_context_save
-
-	mrs	x9, spsr_el1
-	mrs	x10, elr_el1
-	stp	x9, x10, [x0, #CTX_SPSR_EL1]
-
-	mrs	x11, spsr_abt
-	mrs	x12, spsr_und
-	stp	x11, x12, [x0, #CTX_SPSR_ABT]
-
-	mrs	x13, spsr_irq
-	mrs	x14, spsr_fiq
-	stp	x13, x14, [x0, #CTX_SPSR_IRQ]
-
-	mrs	x15, sctlr_el1
-	mrs	x16, actlr_el1
-	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
-
-	mrs	x17, cpacr_el1
-	mrs	x9, csselr_el1
-	stp	x17, x9, [x0, #CTX_CPACR_EL1]
-
-	mrs	x10, sp_el1
-	mrs	x11, esr_el1
-	stp	x10, x11, [x0, #CTX_SP_EL1]
-
-	mrs	x12, ttbr0_el1
-	mrs	x13, ttbr1_el1
-	stp	x12, x13, [x0, #CTX_TTBR0_EL1]
-
-	mrs	x14, mair_el1
-	mrs	x15, amair_el1
-	stp	x14, x15, [x0, #CTX_MAIR_EL1]
-
-	mrs	x16, tcr_el1
-	mrs	x17, tpidr_el1
-	stp	x16, x17, [x0, #CTX_TCR_EL1]
-
-	mrs	x9, tpidr_el0
-	mrs	x10, tpidrro_el0
-	stp	x9, x10, [x0, #CTX_TPIDR_EL0]
-
-	mrs	x11, dacr32_el2
-	mrs	x12, ifsr32_el2
-	stp	x11, x12, [x0, #CTX_DACR32_EL2]
-
-	mrs	x13, par_el1
-	mrs	x14, far_el1
-	stp	x13, x14, [x0, #CTX_PAR_EL1]
-
-	mrs	x15, afsr0_el1
-	mrs	x16, afsr1_el1
-	stp	x15, x16, [x0, #CTX_AFSR0_EL1]
-
-	mrs	x17, contextidr_el1
-	mrs	x9, vbar_el1
-	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
-
-	/* Save NS timer registers if the build has instructed so */
-#if NS_TIMER_SWITCH
-	mrs	x10, cntp_ctl_el0
-	mrs	x11, cntp_cval_el0
-	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
-
-	mrs	x12, cntv_ctl_el0
-	mrs	x13, cntv_cval_el0
-	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
-
-	mrs	x14, cntkctl_el1
-	str	x14, [x0, #CTX_CNTKCTL_EL1]
-#endif
-
-	mrs	x15, fpexc32_el2
-	str	x15, [x0, #CTX_FP_FPEXC32_EL2]
-
-	ret
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
- * to restore EL1 system register context.  It assumes
- * that 'x0' is pointing to a 'el1_sys_regs' structure
- * from where the register context will be restored
- * -----------------------------------------------------
- */
-	.global el1_sysregs_context_restore
-func el1_sysregs_context_restore
-
-	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
-	msr	spsr_el1, x9
-	msr	elr_el1, x10
-
-	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
-	msr	spsr_abt, x11
-	msr	spsr_und, x12
-
-	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
-	msr	spsr_irq, x13
-	msr	spsr_fiq, x14
-
-	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
-	msr	sctlr_el1, x15
-	msr	actlr_el1, x16
-
-	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
-	msr	cpacr_el1, x17
-	msr	csselr_el1, x9
-
-	ldp	x10, x11, [x0, #CTX_SP_EL1]
-	msr	sp_el1, x10
-	msr	esr_el1, x11
-
-	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
-	msr	ttbr0_el1, x12
-	msr	ttbr1_el1, x13
-
-	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
-	msr	mair_el1, x14
-	msr	amair_el1, x15
-
-	ldp	x16, x17, [x0, #CTX_TCR_EL1]
-	msr	tcr_el1, x16
-	msr	tpidr_el1, x17
-
-	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
-	msr	tpidr_el0, x9
-	msr	tpidrro_el0, x10
-
-	ldp	x11, x12, [x0, #CTX_DACR32_EL2]
-	msr	dacr32_el2, x11
-	msr	ifsr32_el2, x12
-
-	ldp	x13, x14, [x0, #CTX_PAR_EL1]
-	msr	par_el1, x13
-	msr	far_el1, x14
-
-	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
-	msr	afsr0_el1, x15
-	msr	afsr1_el1, x16
-
-	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
-	msr	contextidr_el1, x17
-	msr	vbar_el1, x9
-
-	/* Restore NS timer registers if the build has instructed so */
-#if NS_TIMER_SWITCH
-	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
-	msr	cntp_ctl_el0, x10
-	msr	cntp_cval_el0, x11
-
-	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
-	msr	cntv_ctl_el0, x12
-	msr	cntv_cval_el0, x13
-
-	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
-	msr	cntkctl_el1, x14
-#endif
-
-	ldr	x15, [x0, #CTX_FP_FPEXC32_EL2]
-	msr	fpexc32_el2, x15
-
-	/* No explict ISB required here as ERET covers it */
-
-	ret
-
-/* -----------------------------------------------------
- * The following function follows the aapcs_64 strictly
- * to use x9-x17 (temporary caller-saved registers
- * according to AArch64 PCS) to save floating point
- * register context. It assumes that 'x0' is pointing to
- * a 'fp_regs' structure where the register context will
- * be saved.
- *
- * Access to VFP registers will trap if CPTR_EL3.TFP is
- * set.  However currently we don't use VFP registers
- * nor set traps in Trusted Firmware, and assume it's
- * cleared
- *
- * TODO: Revisit when VFP is used in secure world
- * -----------------------------------------------------
- */
-#if CTX_INCLUDE_FPREGS
-	.global fpregs_context_save
-func fpregs_context_save
-	stp	q0, q1, [x0, #CTX_FP_Q0]
-	stp	q2, q3, [x0, #CTX_FP_Q2]
-	stp	q4, q5, [x0, #CTX_FP_Q4]
-	stp	q6, q7, [x0, #CTX_FP_Q6]
-	stp	q8, q9, [x0, #CTX_FP_Q8]
-	stp	q10, q11, [x0, #CTX_FP_Q10]
-	stp	q12, q13, [x0, #CTX_FP_Q12]
-	stp	q14, q15, [x0, #CTX_FP_Q14]
-	stp	q16, q17, [x0, #CTX_FP_Q16]
-	stp	q18, q19, [x0, #CTX_FP_Q18]
-	stp	q20, q21, [x0, #CTX_FP_Q20]
-	stp	q22, q23, [x0, #CTX_FP_Q22]
-	stp	q24, q25, [x0, #CTX_FP_Q24]
-	stp	q26, q27, [x0, #CTX_FP_Q26]
-	stp	q28, q29, [x0, #CTX_FP_Q28]
-	stp	q30, q31, [x0, #CTX_FP_Q30]
-
-	mrs	x9, fpsr
-	str	x9, [x0, #CTX_FP_FPSR]
-
-	mrs	x10, fpcr
-	str	x10, [x0, #CTX_FP_FPCR]
-
-	ret
-
-/* -----------------------------------------------------
- * The following function follows the aapcs_64 strictly
- * to use x9-x17 (temporary caller-saved registers
- * according to AArch64 PCS) to restore floating point
- * register context. It assumes that 'x0' is pointing to
- * a 'fp_regs' structure from where the register context
- * will be restored.
- *
- * Access to VFP registers will trap if CPTR_EL3.TFP is
- * set.  However currently we don't use VFP registers
- * nor set traps in Trusted Firmware, and assume it's
- * cleared
- *
- * TODO: Revisit when VFP is used in secure world
- * -----------------------------------------------------
- */
-	.global fpregs_context_restore
-func fpregs_context_restore
-	ldp	q0, q1, [x0, #CTX_FP_Q0]
-	ldp	q2, q3, [x0, #CTX_FP_Q2]
-	ldp	q4, q5, [x0, #CTX_FP_Q4]
-	ldp	q6, q7, [x0, #CTX_FP_Q6]
-	ldp	q8, q9, [x0, #CTX_FP_Q8]
-	ldp	q10, q11, [x0, #CTX_FP_Q10]
-	ldp	q12, q13, [x0, #CTX_FP_Q12]
-	ldp	q14, q15, [x0, #CTX_FP_Q14]
-	ldp	q16, q17, [x0, #CTX_FP_Q16]
-	ldp	q18, q19, [x0, #CTX_FP_Q18]
-	ldp	q20, q21, [x0, #CTX_FP_Q20]
-	ldp	q22, q23, [x0, #CTX_FP_Q22]
-	ldp	q24, q25, [x0, #CTX_FP_Q24]
-	ldp	q26, q27, [x0, #CTX_FP_Q26]
-	ldp	q28, q29, [x0, #CTX_FP_Q28]
-	ldp	q30, q31, [x0, #CTX_FP_Q30]
-
-	ldr	x9, [x0, #CTX_FP_FPSR]
-	msr	fpsr, x9
-
-	str	x10, [x0, #CTX_FP_FPCR]
-	msr	fpcr, x10
-
-	/*
-	 * No explict ISB required here as ERET to
-	 * swtich to secure EL1 or non-secure world
-	 * covers it
-	 */
-
-	ret
-#endif /* CTX_INCLUDE_FPREGS */
diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S
deleted file mode 100644
index feb51d6..0000000
--- a/bl31/aarch64/cpu_data.S
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm_macros.S>
-#include <cpu_data.h>
-
-.globl	init_cpu_data_ptr
-.globl	_cpu_data_by_mpidr
-.globl	_cpu_data_by_index
-
-/* -----------------------------------------------------------------
- * void init_cpu_data_ptr(void)
- *
- * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
- * for the calling CPU. This must be called before cm_get_cpu_data()
- *
- * This can be called without a valid stack.
- * clobbers: x0, x1, x9, x10
- * -----------------------------------------------------------------
- */
-func init_cpu_data_ptr
-	mov	x10, x30
-	mrs	x0, mpidr_el1
-	bl	_cpu_data_by_mpidr
-	msr	tpidr_el3, x0
-	ret	x10
-
-
-/* -----------------------------------------------------------------
- * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
- *
- * Return the cpu_data structure for the CPU with given MPIDR
- *
- * This can be called without a valid stack. It assumes that
- * platform_get_core_pos() does not clobber register x9.
- * clobbers: x0, x1, x9
- * -----------------------------------------------------------------
- */
-func _cpu_data_by_mpidr
-	mov	x9, x30
-	bl	platform_get_core_pos
-	mov	x30, x9
-	b	_cpu_data_by_index
-
-
-/* -----------------------------------------------------------------
- * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
- *
- * Return the cpu_data structure for the CPU with given linear index
- *
- * This can be called without a valid stack.
- * clobbers: x0, x1
- * -----------------------------------------------------------------
- */
-func _cpu_data_by_index
-	adr	x1, percpu_data
-	add	x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
-	ret
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
index 68fe256..34e4dcd 100644
--- a/bl31/aarch64/crash_reporting.S
+++ b/bl31/aarch64/crash_reporting.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <arch.h>
 #include <asm_macros.S>
@@ -70,7 +46,8 @@
 		"tpidrro_el0", "dacr32_el2", "ifsr32_el2", "par_el1",\
 		"mpidr_el1", "afsr0_el1", "afsr1_el1", "contextidr_el1",\
 		"vbar_el1", "cntp_ctl_el0", "cntp_cval_el0", "cntv_ctl_el0",\
-		"cntv_cval_el0", "cntkctl_el1", "fpexc32_el2", "sp_el0", ""
+		"cntv_cval_el0", "cntkctl_el1", "fpexc32_el2", "sp_el0",\
+		"isr_el1", ""
 
 panic_msg:
 	.asciz "PANIC in EL3 at x30 = 0x"
@@ -85,6 +62,7 @@
 func print_newline
 	mov	x0, '\n'
 	b	plat_crash_console_putc
+endfunc print_newline
 
 	/*
 	 * Helper function to print from crash buf.
@@ -122,6 +100,7 @@
 exit_size_print:
 	mov	x30, sp
 	ret
+endfunc size_controlled_print
 
 	/*
 	 * Helper function to store x8 - x15 registers to
@@ -140,6 +119,7 @@
 	stp	x12, x13, [x0, #REG_SIZE * 4]
 	stp	x14, x15, [x0, #REG_SIZE * 6]
 	b	size_controlled_print
+endfunc str_in_crash_buf_print
 
 	/* ------------------------------------------------------
 	 * This macro calculates the offset to crash buf from
@@ -176,6 +156,7 @@
 	mov	sp, x0
 	/* This call will not return */
 	b	do_crash_reporting
+endfunc report_unhandled_exception
 
 
 	/* -----------------------------------------------------
@@ -192,6 +173,7 @@
 	mov	sp, x0
 	/* This call will not return */
 	b	do_crash_reporting
+endfunc report_unhandled_interrupt
 
 	/* -----------------------------------------------------
 	 * This function allows to report a crash (if crash
@@ -208,6 +190,7 @@
 	mov	sp, x0
 	/* This call will not return */
 	b	do_crash_reporting
+endfunc el3_panic
 
 	/* ------------------------------------------------------------
 	 * The common crash reporting functionality. It requires x0
@@ -332,27 +315,30 @@
 	mrs	x8, cntkctl_el1
 	mrs	x9, fpexc32_el2
 	mrs	x10, sp_el0
+	mrs	x11, isr_el1
 	bl	str_in_crash_buf_print
 
 	/* Get the cpu specific registers to report */
 	bl	do_cpu_reg_dump
 	bl	str_in_crash_buf_print
 
-	/* Print the gic registers */
-	plat_print_gic_regs
+	/* Print some platform registers */
+	plat_crash_print_regs
 
-	/* Print the interconnect registers */
-	plat_print_interconnect_regs
+	bl	plat_crash_console_flush
 
 	/* Done reporting */
-	b	crash_panic
+	no_ret	plat_panic_handler
+endfunc do_crash_reporting
 
 #else	/* CRASH_REPORTING */
 func report_unhandled_exception
 report_unhandled_interrupt:
-	b	crash_panic
-#endif	/* CRASH_REPORING */
+	no_ret	plat_panic_handler
+endfunc report_unhandled_exception
+#endif	/* CRASH_REPORTING */
 
 
 func crash_panic
-	b	crash_panic
+	no_ret	plat_panic_handler
+endfunc crash_panic
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 3265862..d8fbb9b 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,76 +1,63 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
 #include <context.h>
+#include <cpu_data.h>
 #include <interrupt_mgmt.h>
 #include <platform_def.h>
 #include <runtime_svc.h>
 
 	.globl	runtime_exceptions
-	.globl	el3_exit
 
-	/* -----------------------------------------------------
-	 * Handle SMC exceptions separately from other sync.
-	 * exceptions.
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * This macro handles Synchronous exceptions.
+	 * Only SMC exceptions are supported.
+	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_sync_exception
 	/* Enable the SError interrupt */
 	msr	daifclr, #DAIF_ABT_BIT
 
 	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	/*
+	 * Read the timestamp value and store it in per-cpu data. The value
+	 * will be extracted from per-cpu data by the C level SMC handler and
+	 * saved to the PMF timestamp region.
+	 */
+	mrs	x30, cntpct_el0
+	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	mrs	x29, tpidr_el3
+	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
+	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+
 	mrs	x30, esr_el3
 	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
 
+	/* Handle SMC exceptions separately from other synchronous exceptions */
 	cmp	x30, #EC_AARCH32_SMC
 	b.eq	smc_handler32
 
 	cmp	x30, #EC_AARCH64_SMC
 	b.eq	smc_handler64
 
-	/* -----------------------------------------------------
-	 * The following code handles any synchronous exception
-	 * that is not an SMC.
-	 * -----------------------------------------------------
-	 */
-
-	bl	report_unhandled_exception
+	/* Other kinds of synchronous exceptions are not handled */
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	b	report_unhandled_exception
 	.endm
 
 
-	/* -----------------------------------------------------
-	 * This macro handles FIQ or IRQ interrupts i.e. EL3,
-	 * S-EL1 and NS interrupts.
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
+	 * interrupts.
+	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_interrupt_exception label
 	/* Enable the SError interrupt */
@@ -79,6 +66,11 @@
 	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	bl	save_gp_registers
 
+	/* Save the EL3 system registers needed to return from this exception */
+	mrs	x0, spsr_el3
+	mrs	x1, elr_el3
+	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
 	/* Switch to the runtime stack i.e. SP_EL0 */
 	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
 	mov	x20, sp
@@ -86,44 +78,40 @@
 	mov	sp, x2
 
 	/*
-	 * Find out whether this is a valid interrupt type. If the
-	 * interrupt controller reports a spurious interrupt then
-	 * return to where we came from.
+	 * Find out whether this is a valid interrupt type.
+	 * If the interrupt controller reports a spurious interrupt then return
+	 * to where we came from.
 	 */
 	bl	plat_ic_get_pending_interrupt_type
 	cmp	x0, #INTR_TYPE_INVAL
 	b.eq	interrupt_exit_\label
 
 	/*
-	 * Get the registered handler for this interrupt type. A
-	 * NULL return value implies that an interrupt was generated
-	 * for which there is no handler registered or the interrupt
-	 * was routed incorrectly. This is a problem of the framework
-	 * so report it as an error.
+	 * Get the registered handler for this interrupt type.
+	 * A NULL return value could be due to one of the following conditions:
+	 *
+	 * a. An interrupt of a type was routed correctly but a handler for its
+	 *    type was not registered.
+	 *
+	 * b. An interrupt of a type was not routed correctly so a handler for
+	 *    its type was not registered.
+	 *
+	 * c. An interrupt of a type was routed correctly to EL3, but was
+	 *    deasserted before its pending state could be read. Another
+	 *    interrupt of a different type pended at the same time and its
+	 *    type was reported as pending instead. However, a handler for this
+	 *    type was not registered.
+	 *
+	 * a. and b. can only happen due to a programming error. The
+	 * occurrence of c. could be beyond the control of Trusted Firmware.
+	 * It makes sense to return from this exception instead of reporting an
+	 * error.
 	 */
 	bl	get_interrupt_type_handler
-	cbz	x0, interrupt_error_\label
+	cbz	x0, interrupt_exit_\label
 	mov	x21, x0
 
 	mov	x0, #INTR_ID_UNAVAILABLE
-#if IMF_READ_INTERRUPT_ID
-	/*
-	 * Read the id of the highest priority pending interrupt. If
-	 * no interrupt is asserted then return to where we came from.
-	 */
-	mov	x19,  #INTR_ID_UNAVAILABLE
-	bl	plat_ic_get_pending_interrupt_id
-	cmp	x19, x0
-	b.eq	interrupt_exit_\label
-#endif
-
-	/*
-	 * Save the EL3 system registers needed to return from
-	 * this exception.
-	 */
-	mrs	x3, spsr_el3
-	mrs	x4, elr_el3
-	stp	x3, x4, [x20, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
 
 	/* Set the current security state in the 'flags' parameter */
 	mrs	x2, scr_el3
@@ -132,7 +120,7 @@
 	/* Restore the reference to the 'handle' i.e. SP_EL3 */
 	mov	x2, x20
 
-	/*  x3 will point to a cookie (not used now) */
+	/* x3 will point to a cookie (not used now) */
 	mov	x3, xzr
 
 	/* Call the interrupt type handler */
@@ -142,13 +130,6 @@
 	/* Return from exception, possibly in a different security state */
 	b	el3_exit
 
-	/*
-	 * This label signifies a problem with the interrupt management
-	 * framework where it is not safe to go back to the instruction
-	 * where the interrupt was generated.
-	 */
-interrupt_error_\label:
-	bl	report_unhandled_interrupt
 	.endm
 
 
@@ -163,172 +144,143 @@
 	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 	.endm
 
-	.section	.vectors, "ax"; .align 11
-	.align	7
-runtime_exceptions:
-	/* -----------------------------------------------------
-	 * Current EL with _sp_el0 : 0x0 - 0x200
-	 * -----------------------------------------------------
+
+vector_base runtime_exceptions
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
 	 */
-sync_exception_sp_el0:
-	/* -----------------------------------------------------
-	 * We don't expect any synchronous exceptions from EL3
-	 * -----------------------------------------------------
-	 */
-	bl	report_unhandled_exception
+vector_entry sync_exception_sp_el0
+	/* We don't expect any synchronous exceptions from EL3 */
+	b	report_unhandled_exception
 	check_vector_size sync_exception_sp_el0
 
-	.align	7
-	/* -----------------------------------------------------
-	 * EL3 code is non-reentrant. Any asynchronous exception
-	 * is a serious error. Loop infinitely.
-	 * -----------------------------------------------------
+vector_entry irq_sp_el0
+	/*
+	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
+	 * error. Loop infinitely.
 	 */
-irq_sp_el0:
-	bl	report_unhandled_interrupt
+	b	report_unhandled_interrupt
 	check_vector_size irq_sp_el0
 
-	.align	7
-fiq_sp_el0:
-	bl	report_unhandled_interrupt
+
+vector_entry fiq_sp_el0
+	b	report_unhandled_interrupt
 	check_vector_size fiq_sp_el0
 
-	.align	7
-serror_sp_el0:
-	bl	report_unhandled_exception
+
+vector_entry serror_sp_el0
+	b	report_unhandled_exception
 	check_vector_size serror_sp_el0
 
-	/* -----------------------------------------------------
-	 * Current EL with SPx: 0x200 - 0x400
-	 * -----------------------------------------------------
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
 	 */
-	.align	7
-sync_exception_sp_elx:
-	/* -----------------------------------------------------
-	 * This exception will trigger if anything went wrong
-	 * during a previous exception entry or exit or while
-	 * handling an earlier unexpected synchronous exception.
-	 * There is a high probability that SP_EL3 is corrupted.
-	 * -----------------------------------------------------
+vector_entry sync_exception_sp_elx
+	/*
+	 * This exception will trigger if anything went wrong during a previous
+	 * exception entry or exit or while handling an earlier unexpected
+	 * synchronous exception. There is a high probability that SP_EL3 is
+	 * corrupted.
 	 */
-	bl	report_unhandled_exception
+	b	report_unhandled_exception
 	check_vector_size sync_exception_sp_elx
 
-	.align	7
-irq_sp_elx:
-	bl	report_unhandled_interrupt
+vector_entry irq_sp_elx
+	b	report_unhandled_interrupt
 	check_vector_size irq_sp_elx
 
-	.align	7
-fiq_sp_elx:
-	bl	report_unhandled_interrupt
+vector_entry fiq_sp_elx
+	b	report_unhandled_interrupt
 	check_vector_size fiq_sp_elx
 
-	.align	7
-serror_sp_elx:
-	bl	report_unhandled_exception
+vector_entry serror_sp_elx
+	b	report_unhandled_exception
 	check_vector_size serror_sp_elx
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
-	 * -----------------------------------------------------
+	 * ---------------------------------------------------------------------
 	 */
-	.align	7
-sync_exception_aarch64:
-	/* -----------------------------------------------------
-	 * This exception vector will be the entry point for
-	 * SMCs and traps that are unhandled at lower ELs most
-	 * commonly. SP_EL3 should point to a valid cpu context
-	 * where the general purpose and system register state
-	 * can be saved.
-	 * -----------------------------------------------------
+vector_entry sync_exception_aarch64
+	/*
+	 * This exception vector will be the entry point for SMCs and traps
+	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
+	 * to a valid cpu context where the general purpose and system register
+	 * state can be saved.
 	 */
 	handle_sync_exception
 	check_vector_size sync_exception_aarch64
 
-	.align	7
-	/* -----------------------------------------------------
-	 * Asynchronous exceptions from lower ELs are not
-	 * currently supported. Report their occurrence.
-	 * -----------------------------------------------------
-	 */
-irq_aarch64:
+vector_entry irq_aarch64
 	handle_interrupt_exception irq_aarch64
 	check_vector_size irq_aarch64
 
-	.align	7
-fiq_aarch64:
+vector_entry fiq_aarch64
 	handle_interrupt_exception fiq_aarch64
 	check_vector_size fiq_aarch64
 
-	.align	7
-serror_aarch64:
-	bl	report_unhandled_exception
+vector_entry serror_aarch64
+	/*
+	 * SError exceptions from lower ELs are not currently supported.
+	 * Report their occurrence.
+	 */
+	b	report_unhandled_exception
 	check_vector_size serror_aarch64
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
-	 * -----------------------------------------------------
+	 * ---------------------------------------------------------------------
 	 */
-	.align	7
-sync_exception_aarch32:
-	/* -----------------------------------------------------
-	 * This exception vector will be the entry point for
-	 * SMCs and traps that are unhandled at lower ELs most
-	 * commonly. SP_EL3 should point to a valid cpu context
-	 * where the general purpose and system register state
-	 * can be saved.
-	 * -----------------------------------------------------
+vector_entry sync_exception_aarch32
+	/*
+	 * This exception vector will be the entry point for SMCs and traps
+	 * that are unhandled at lower ELs most commonly. SP_EL3 should point
+	 * to a valid cpu context where the general purpose and system register
+	 * state can be saved.
 	 */
 	handle_sync_exception
 	check_vector_size sync_exception_aarch32
 
-	.align	7
-	/* -----------------------------------------------------
-	 * Asynchronous exceptions from lower ELs are not
-	 * currently supported. Report their occurrence.
-	 * -----------------------------------------------------
-	 */
-irq_aarch32:
+vector_entry irq_aarch32
 	handle_interrupt_exception irq_aarch32
 	check_vector_size irq_aarch32
 
-	.align	7
-fiq_aarch32:
+vector_entry fiq_aarch32
 	handle_interrupt_exception fiq_aarch32
 	check_vector_size fiq_aarch32
 
-	.align	7
-serror_aarch32:
-	bl	report_unhandled_exception
+vector_entry serror_aarch32
+	/*
+	 * SError exceptions from lower ELs are not currently supported.
+	 * Report their occurrence.
+	 */
+	b	report_unhandled_exception
 	check_vector_size serror_aarch32
 
-	.align	7
 
-	/* -----------------------------------------------------
+	/* ---------------------------------------------------------------------
 	 * The following code handles secure monitor calls.
-	 * Depending upon the execution state from where the SMC
-	 * has been invoked, it frees some general purpose
-	 * registers to perform the remaining tasks. They
-	 * involve finding the runtime service handler that is
-	 * the target of the SMC & switching to runtime stacks
-	 * (SP_EL0) before calling the handler.
+	 * Depending upon the execution state from where the SMC has been
+	 * invoked, it frees some general purpose registers to perform the
+	 * remaining tasks. They involve finding the runtime service handler
+	 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
+	 * before calling the handler.
 	 *
-	 * Note that x30 has been explicitly saved and can be
-	 * used here
-	 * -----------------------------------------------------
+	 * Note that x30 has been explicitly saved and can be used here
+	 * ---------------------------------------------------------------------
 	 */
 func smc_handler
 smc_handler32:
 	/* Check whether aarch32 issued an SMC64 */
 	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited
 
-	/* -----------------------------------------------------
-	 * Since we're are coming from aarch32, x8-x18 need to
-	 * be saved as per SMC32 calling convention. If a lower
-	 * EL in aarch64 is making an SMC32 call then it must
-	 * have saved x8-x17 already therein.
-	 * -----------------------------------------------------
+	/*
+	 * Since we are coming from aarch32, x8-x18 need to be saved as per
+	 * SMC32 calling convention. If a lower EL in aarch64 is making an
+	 * SMC32 call then it must have saved x8-x17 already therein.
 	 */
 	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
@@ -339,15 +291,14 @@
 	/* x4-x7, x18, sp_el0 are saved below */
 
 smc_handler64:
-	/* -----------------------------------------------------
-	 * Populate the parameters for the SMC handler. We
-	 * already have x0-x4 in place. x5 will point to a
-	 * cookie (not used now). x6 will point to the context
-	 * structure (SP_EL3) and x7 will contain flags we need
-	 * to pass to the handler Hence save x5-x7. Note that x4
-	 * only needs to be preserved for AArch32 callers but we
-	 * do it for AArch64 callers as well for convenience
-	 * -----------------------------------------------------
+	/*
+	 * Populate the parameters for the SMC handler.
+	 * We already have x0-x4 in place. x5 will point to a cookie (not used
+	 * now). x6 will point to the context structure (SP_EL3) and x7 will
+	 * contain flags we need to pass to the handler. Hence save x5-x7.
+	 *
+	 * Note: x4 only needs to be preserved for AArch32 callers but we do it
+	 *       for AArch64 callers as well for convenience.
 	 */
 	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
@@ -369,12 +320,10 @@
 	adr	x14, rt_svc_descs_indices
 	ldrb	w15, [x14, x16]
 
-	/* -----------------------------------------------------
-	 * Restore the saved C runtime stack value which will
-	 * become the new SP_EL0 i.e. EL3 runtime stack. It was
-	 * saved in the 'cpu_context' structure prior to the last
-	 * ERET from EL3.
-	 * -----------------------------------------------------
+	/*
+	 * Restore the saved C runtime stack value which will become the new
+	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
+	 * structure prior to the last ERET from EL3.
 	 */
 	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
 
@@ -387,22 +336,19 @@
 	/* Switch to SP_EL0 */
 	msr	spsel, #0
 
-	/* -----------------------------------------------------
+	/*
 	 * Get the descriptor using the index
 	 * x11 = (base + off), x15 = index
 	 *
 	 * handler = (base + off) + (index << log2(size))
-	 * -----------------------------------------------------
 	 */
 	lsl	w10, w15, #RT_SVC_SIZE_LOG2
 	ldr	x15, [x11, w10, uxtw]
 
-	/* -----------------------------------------------------
-	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
-	 * is a world switch during SMC handling.
-	 * TODO: Revisit if all system registers can be saved
-	 * later.
-	 * -----------------------------------------------------
+	/*
+	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
+	 * switch during SMC handling.
+	 * TODO: Revisit if all system registers can be saved later.
 	 */
 	mrs	x16, spsr_el3
 	mrs	x17, elr_el3
@@ -415,50 +361,17 @@
 
 	mov	sp, x12
 
-	/* -----------------------------------------------------
-	 * Call the Secure Monitor Call handler and then drop
-	 * directly into el3_exit() which will program any
-	 * remaining architectural state prior to issuing the
-	 * ERET to the desired lower EL.
-	 * -----------------------------------------------------
+	/*
+	 * Call the Secure Monitor Call handler and then drop directly into
+	 * el3_exit() which will program any remaining architectural state
+	 * prior to issuing the ERET to the desired lower EL.
 	 */
 #if DEBUG
 	cbz	x15, rt_svc_fw_critical_error
 #endif
 	blr	x15
 
-	/* -----------------------------------------------------
-	 * This routine assumes that the SP_EL3 is pointing to
-	 * a valid context structure from where the gp regs and
-	 * other special registers can be retrieved.
-	 *
-	 * Keep it in the same section as smc_handler as this
-	 * function uses a fall-through to el3_exit
-	 * -----------------------------------------------------
-	 */
-el3_exit: ; .type el3_exit, %function
-	/* -----------------------------------------------------
-	 * Save the current SP_EL0 i.e. the EL3 runtime stack
-	 * which will be used for handling the next SMC. Then
-	 * switch to SP_EL3
-	 * -----------------------------------------------------
-	 */
-	mov	x17, sp
-	msr	spsel, #1
-	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-
-	/* -----------------------------------------------------
-	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
-	 * -----------------------------------------------------
-	 */
-	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
-	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
-	msr	scr_el3, x18
-	msr	spsr_el3, x16
-	msr	elr_el3, x17
-
-	/* Restore saved general purpose registers and return */
-	b	restore_gp_registers_eret
+	b	el3_exit
 
 smc_unknown:
 	/*
@@ -466,7 +379,7 @@
 	 * callers will find the registers contents unchanged, but AArch64
 	 * callers will find the registers modified (with stale earlier NS
 	 * content). Either way, we aren't leaking any secure information
-	 * through them
+	 * through them.
 	 */
 	mov	w0, #SMC_UNK
 	b	restore_gp_registers_callee_eret
@@ -477,51 +390,7 @@
 	eret
 
 rt_svc_fw_critical_error:
-	msr	spsel, #1 /* Switch to SP_ELx */
-	bl	report_unhandled_exception
-
-	/* -----------------------------------------------------
-	 * The following functions are used to saved and restore
-	 * all the general pupose registers. Ideally we would
-	 * only save and restore the callee saved registers when
-	 * a world switch occurs but that type of implementation
-	 * is more complex. So currently we will always save and
-	 * restore these registers on entry and exit of EL3.
-	 * These are not macros to ensure their invocation fits
-	 * within the 32 instructions per exception vector.
-	 * -----------------------------------------------------
-	 */
-func save_gp_registers
-	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	save_x18_to_x29_sp_el0
-	ret
-
-func restore_gp_registers_eret
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
-restore_gp_registers_callee_eret:
-	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	eret
+	/* Switch to SP_ELx */
+	msr	spsel, #1
+	no_ret	report_unhandled_exception
+endfunc smc_handler
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 3327f31..9ff774b 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <platform_def.h>
@@ -39,6 +15,9 @@
     RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
 }
 
+#ifdef PLAT_EXTRA_LD_SCRIPT
+#include <plat.ld.S>
+#endif
 
 SECTIONS
 {
@@ -46,6 +25,51 @@
     ASSERT(. == ALIGN(4096),
            "BL31_BASE address is not aligned on a page boundary.")
 
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *bl31_entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+    } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+#if ENABLE_PMF
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PMF_SVC_DESCS_START__ = .;
+        KEEP(*(pmf_svc_descs))
+        __PMF_SVC_DESCS_END__ = .;
+#endif /* ENABLE_PMF */
+
+        /*
+         * Ensure 8-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(8);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        /* Place pubsub sections for events */
+        . = ALIGN(8);
+#include <pubsub_events.h>
+
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
     ro . : {
         __RO_START__ = .;
         *bl31_entrypoint.o(.text*)
@@ -58,6 +82,14 @@
         KEEP(*(rt_svc_descs))
         __RT_SVC_DESCS_END__ = .;
 
+#if ENABLE_PMF
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __PMF_SVC_DESCS_START__ = .;
+        KEEP(*(pmf_svc_descs))
+        __PMF_SVC_DESCS_END__ = .;
+#endif /* ENABLE_PMF */
+
         /*
          * Ensure 8-byte alignment for cpu_ops so that its fields are also
          * aligned. Also ensure cpu_ops inclusion.
@@ -67,6 +99,10 @@
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;
 
+        /* Place pubsub sections for events */
+        . = ALIGN(8);
+#include <pubsub_events.h>
+
         *(.vectors)
         __RO_END_UNALIGNED__ = .;
         /*
@@ -77,18 +113,30 @@
         . = NEXT(4096);
         __RO_END__ = .;
     } >RAM
+#endif
 
     ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
            "cpu_ops not defined for this platform.")
 
-    .data . : {
+    /*
+     * Define a linker symbol to mark the start of the RW memory area for
+     * this image.
+     */
+    __RW_START__ = . ;
+
+    /*
+     * .data must be placed at a lower address than the stacks if the stack
+     * protector is enabled. Alternatively, the .data.stack_protector_canary
+     * section can be placed independently of the main .data section.
+     */
+    .data . : {
         __DATA_START__ = .;
         *(.data*)
         __DATA_END__ = .;
     } >RAM
 
 #ifdef BL31_PROGBITS_LIMIT
-    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL3-1 progbits has exceeded its limit.")
+    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
 #endif
 
     stacks (NOLOAD) : {
@@ -99,12 +147,52 @@
 
     /*
      * The .bss section gets initialised to 0 at runtime.
-     * Its base address must be 16-byte aligned.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
      */
-    .bss : ALIGN(16) {
+    .bss (NOLOAD) : ALIGN(16) {
         __BSS_START__ = .;
         *(.bss*)
         *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks;
+         * the remaining cache lines are allocated by the linker script.
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
+
+#if ENABLE_PMF
+        /*
+         * Time-stamps are stored in normal .bss memory
+         *
+         * The compiler will allocate enough memory for one CPU's time-stamps;
+         * the remaining memory for the other CPUs is allocated by the
+         * linker script.
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_TIMESTAMP_START__ = .;
+        KEEP(*(pmf_timestamp_array))
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_PERCPU_TIMESTAMP_END__ = .;
+        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
+        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __PMF_TIMESTAMP_END__ = .;
+#endif /* ENABLE_PMF */
         __BSS_END__ = .;
     } >RAM
 
@@ -126,6 +214,12 @@
      */
     coherent_ram (NOLOAD) : ALIGN(4096) {
         __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
         *(tzfw_coherent_mem)
         __COHERENT_RAM_END_UNALIGNED__ = .;
         /*
@@ -138,6 +232,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark the end of the RW memory area for
+     * this image.
+     */
+    __RW_END__ = .;
     __BL31_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
@@ -146,5 +245,5 @@
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
 #endif
 
-    ASSERT(. <= BL31_LIMIT, "BL3-1 image has exceeded its limit.")
+    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
 }
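Editorial note: the bakery_lock and pmf_timestamp_array carve-outs in the linker script above rely on objects being emitted into those named input sections, with the script reserving the remaining per-CPU copies. The fragment below is a minimal sketch only (hypothetical type and variable names) of how one CPU's worth of lock state would land in that region.

/*
 * Illustrative sketch: one CPU's lock state placed in the "bakery_lock"
 * input section; the linker script above aligns the section to
 * CACHE_WRITEBACK_GRANULE and pads it for the remaining CPUs.
 */
typedef struct {
	unsigned int ticket_number;	/* this CPU's bakery ticket */
} demo_bakery_lock_t;

static demo_bakery_lock_t demo_console_lock
	__attribute__((section("bakery_lock"), used));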
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 4c25a60..0c9e393 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,74 +1,30 @@
 #
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
+# SPDX-License-Identifier: BSD-3-Clause
 #
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
+
+include lib/psci/psci_lib.mk
 
 BL31_SOURCES		+=	bl31/bl31_main.c				\
-				bl31/context_mgmt.c				\
-				bl31/cpu_data_array.c				\
-				bl31/runtime_svc.c				\
 				bl31/interrupt_mgmt.c				\
-				bl31/aarch64/bl31_arch_setup.c			\
 				bl31/aarch64/bl31_entrypoint.S			\
-				bl31/aarch64/context.S				\
-				bl31/aarch64/cpu_data.S				\
 				bl31/aarch64/runtime_exceptions.S		\
 				bl31/aarch64/crash_reporting.S			\
-				lib/cpus/aarch64/cpu_helpers.S			\
-				lib/locks/exclusive/spinlock.S			\
+				bl31/bl31_context_mgmt.c			\
+				common/runtime_svc.c				\
+				plat/common/aarch64/platform_mp_stack.S		\
 				services/std_svc/std_svc_setup.c		\
-				services/std_svc/psci/psci_afflvl_off.c		\
-				services/std_svc/psci/psci_afflvl_on.c		\
-				services/std_svc/psci/psci_afflvl_suspend.c	\
-				services/std_svc/psci/psci_common.c		\
-				services/std_svc/psci/psci_entry.S		\
-				services/std_svc/psci/psci_helpers.S		\
-				services/std_svc/psci/psci_main.c		\
-				services/std_svc/psci/psci_setup.c		\
-				services/std_svc/psci/psci_system_off.c
+				${PSCI_LIB_SOURCES}
 
-ifeq (${USE_COHERENT_MEM}, 1)
-BL31_SOURCES		+=	lib/locks/bakery/bakery_lock_coherent.c
-else
-BL31_SOURCES		+=	lib/locks/bakery/bakery_lock_normal.c
+ifeq (${ENABLE_PMF}, 1)
+BL31_SOURCES		+=	lib/pmf/pmf_main.c
 endif
 
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
 
-# Flag used by the generic interrupt management framework to  determine if
-# upon the assertion of an interrupt, it should pass the interrupt id or not
-IMF_READ_INTERRUPT_ID	:=	0
-
-$(eval $(call assert_boolean,IMF_READ_INTERRUPT_ID))
-$(eval $(call add_define,IMF_READ_INTERRUPT_ID))
-
-# Flag used to inidicate if Crash reporting via console should be included
-# in BL3-1. This defaults to being present in DEBUG builds only
+# Flag used to indicate whether crash reporting via console should be included
+# in BL31. This defaults to being present in DEBUG builds only.
 ifndef CRASH_REPORTING
 CRASH_REPORTING		:=	$(DEBUG)
 endif
diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c
new file mode 100644
index 0000000..05bf4e1
--- /dev/null
+++ b/bl31/bl31_context_mgmt.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cpu_data.h>
+#include <platform.h>
+
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	return get_cpu_data(cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	set_cpu_data(cpu_context[security_state], context);
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+				unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+				unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
+}
+
+#if !ERROR_DEPRECATED
+/*
+ * These context management helpers are deprecated but are maintained for use
+ * by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
+ * is enabled, these are excluded from the build so as to force users to
+ * migrate to the new API.
+ */
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by MPIDR that was set as the context for the specified
+ * security state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by MPIDR
+ ******************************************************************************/
+void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	cm_set_context_by_index(platform_get_core_pos(mpidr),
+						 context, security_state);
+}
+
+/*******************************************************************************
+ * The following function is a compatibility wrapper for SPDs using the
+ * existing cm library routines. This function is expected to be invoked for
+ * initializing the cpu_context for the CPU specified by MPIDR for first use.
+ ******************************************************************************/
+void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
+{
+	if ((mpidr & MPIDR_AFFINITY_MASK) ==
+			(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
+		cm_init_my_context(ep);
+	else
+		cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
+}
+#endif
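Editorial note: the new bl31_context_mgmt.c keeps the context pointers in per-CPU data and exposes the index-based accessors shown above. The following is a minimal usage sketch, not part of the patch; the array and function names are hypothetical.

/*
 * Illustrative sketch: register a non-secure cpu_context for a core and
 * read it back through the index-based API added above.
 * demo_ns_context and demo_register_ns_context are hypothetical.
 */
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform_def.h>

static cpu_context_t demo_ns_context[PLATFORM_CORE_COUNT];

static void demo_register_ns_context(unsigned int core_idx)
{
	cm_set_context_by_index(core_idx, &demo_ns_context[core_idx],
				NON_SECURE);

	/* The getter returns NULL only if no context was ever registered. */
	assert(cm_get_context_by_index(core_idx, NON_SECURE) ==
			&demo_ns_context[core_idx]);
}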
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 19f3774..4a88bd7 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -1,44 +1,28 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <bl_common.h>
 #include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
 #include <context_mgmt.h>
 #include <debug.h>
 #include <platform.h>
+#include <pmf.h>
+#include <runtime_instr.h>
 #include <runtime_svc.h>
 #include <string.h>
 
+#if ENABLE_RUNTIME_INSTRUMENTATION
+PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID,
+	RT_INSTR_TOTAL_IDS, PMF_STORE_ENABLE)
+#endif
+
 /*******************************************************************************
  * This function pointer is used to initialise the BL32 image. It's initialized
  * by SPD calling bl31_register_bl32_init after setting up all things necessary
@@ -53,6 +37,21 @@
  ******************************************************************************/
 static uint32_t next_image_type = NON_SECURE;
 
+/*
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ */
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+	/* Setup the arguments for PSCI Library */
+	DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, bl31_warm_entrypoint);
+
+	/* PSCI is the only ARM Standard Service implemented */
+	assert(svc_mask == PSCI_FID_MASK);
+
+	return (uintptr_t)&psci_args;
+}
+
 /*******************************************************************************
  * Simple function to initialise all BL31 helper libraries.
  ******************************************************************************/
@@ -71,25 +70,19 @@
  ******************************************************************************/
 void bl31_main(void)
 {
-	NOTICE("BL3-1: %s\n", version_string);
-	NOTICE("BL3-1: %s\n", build_message);
+	NOTICE("BL31: %s\n", version_string);
+	NOTICE("BL31: %s\n", build_message);
 
-	/* Perform remaining generic architectural setup from EL3 */
-	bl31_arch_setup();
-
-	/* Perform platform setup in BL1 */
+	/* Perform platform setup in BL31 */
 	bl31_platform_setup();
 
 	/* Initialise helper libraries */
 	bl31_lib_init();
 
-	/* Initialize the runtime services e.g. psci */
-	INFO("BL3-1: Initializing runtime services\n");
+	/* Initialize the runtime services e.g. psci. */
+	INFO("BL31: Initializing runtime services\n");
 	runtime_svc_init();
 
-	/* Clean caches before re-entering normal world */
-	dcsw_op_all(DCCSW);
-
 	/*
 	 * All the cold boot actions on the primary cpu are done. We now need to
 	 * decide which is the next image (BL32 or BL33) and how to execute it.
@@ -104,7 +97,7 @@
 	 * If SPD had registered an init hook, invoke it.
 	 */
 	if (bl32_init) {
-		INFO("BL3-1: Initializing BL3-2\n");
+		INFO("BL31: Initializing BL32\n");
 		(*bl32_init)();
 	}
 	/*
@@ -112,6 +105,14 @@
 	 * corresponding to the desired security state after the next ERET.
 	 */
 	bl31_prepare_next_image_entry();
+
+	console_flush();
+
+	/*
+	 * Perform any platform specific runtime setup prior to cold boot exit
+	 * from BL31
+	 */
+	bl31_plat_runtime_setup();
 }
 
 /*******************************************************************************
@@ -142,6 +143,18 @@
 	entry_point_info_t *next_image_info;
 	uint32_t image_type;
 
+#if CTX_INCLUDE_AARCH32_REGS
+	/*
+	 * Ensure that the build flag to save AArch32 system registers in CPU
+	 * context is not set for AArch64-only platforms.
+	 */
+	if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
+		ERROR("EL1 supports AArch64-only. Please set build flag "
+				"CTX_INCLUDE_AARCH32_REGS = 0");
+		panic();
+	}
+#endif
+
 	/* Determine which image to execute next */
 	image_type = bl31_get_next_image_type();
 
@@ -150,12 +163,10 @@
 	assert(next_image_info);
 	assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
 
-	INFO("BL3-1: Preparing for EL3 exit to %s world\n",
+	INFO("BL31: Preparing for EL3 exit to %s world\n",
 		(image_type == SECURE) ? "secure" : "normal");
-	INFO("BL3-1: Next image address = 0x%llx\n",
-		(unsigned long long) next_image_info->pc);
-	INFO("BL3-1: Next image spsr = 0x%x\n", next_image_info->spsr);
-	cm_init_context(read_mpidr_el1(), next_image_info);
+	print_entry_point_info(next_image_info);
+	cm_init_my_context(next_image_info);
 	cm_prepare_el3_exit(image_type);
 }
 
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
deleted file mode 100644
index 6f27176..0000000
--- a/bl31/context_mgmt.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <cpu_data.h>
-#include <interrupt_mgmt.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <runtime_svc.h>
-#include <string.h>
-
-
-/*******************************************************************************
- * Context management library initialisation routine. This library is used by
- * runtime services to share pointers to 'cpu_context' structures for the secure
- * and non-secure states. Management of the structures and their associated
- * memory is not done by the context management library e.g. the PSCI service
- * manages the cpu context used for entry from and exit to the non-secure state.
- * The Secure payload dispatcher service manages the context(s) corresponding to
- * the secure state. It also uses this library to get access to the non-secure
- * state cpu context pointers.
- * Lastly, this library provides the api to make SP_EL3 point to the cpu context
- * which will used for programming an entry into a lower EL. The same context
- * will used to save state upon exception entry from that EL.
- ******************************************************************************/
-void cm_init(void)
-{
-	/*
-	 * The context management library has only global data to intialize, but
-	 * that will be done when the BSS is zeroed out
-	 */
-}
-
-/*******************************************************************************
- * This function returns a pointer to the most recent 'cpu_context' structure
- * for the CPU identified by MPIDR that was set as the context for the specified
- * security state. NULL is returned if no such structure has been specified.
- ******************************************************************************/
-void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
-{
-	assert(sec_state_is_valid(security_state));
-
-	return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
-}
-
-/*******************************************************************************
- * This function sets the pointer to the current 'cpu_context' structure for the
- * specified security state for the CPU identified by MPIDR
- ******************************************************************************/
-void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
-{
-	assert(sec_state_is_valid(security_state));
-
-	set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
-}
-
-/*******************************************************************************
- * This function is used to program the context that's used for exception
- * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
- * the required security state
- ******************************************************************************/
-static inline void cm_set_next_context(void *context)
-{
-#if DEBUG
-	uint64_t sp_mode;
-
-	/*
-	 * Check that this function is called with SP_EL0 as the stack
-	 * pointer
-	 */
-	__asm__ volatile("mrs	%0, SPSel\n"
-			 : "=r" (sp_mode));
-
-	assert(sp_mode == MODE_SP_EL0);
-#endif
-
-	__asm__ volatile("msr	spsel, #1\n"
-			 "mov	sp, %0\n"
-			 "msr	spsel, #0\n"
-			 : : "r" (context));
-}
-
-/*******************************************************************************
- * The following function initializes a cpu_context for the current CPU for
- * first use, and sets the initial entrypoint state as specified by the
- * entry_point_info structure.
- *
- * The security state to initialize is determined by the SECURE attribute
- * of the entry_point_info. The function returns a pointer to the initialized
- * context and sets this as the next context to return to.
- *
- * The EE and ST attributes are used to configure the endianess and secure
- * timer availability for the new excution context.
- *
- * To prepare the register state for entry call cm_prepare_el3_exit() and
- * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
- * cm_e1_sysreg_context_restore().
- ******************************************************************************/
-void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
-{
-	uint32_t security_state;
-	cpu_context_t *ctx;
-	uint32_t scr_el3;
-	el3_state_t *state;
-	gp_regs_t *gp_regs;
-	unsigned long sctlr_elx;
-
-	security_state = GET_SECURITY_STATE(ep->h.attr);
-	ctx = cm_get_context_by_mpidr(mpidr, security_state);
-	assert(ctx);
-
-	/* Clear any residual register values from the context */
-	memset(ctx, 0, sizeof(*ctx));
-
-	/*
-	 * Base the context SCR on the current value, adjust for entry point
-	 * specific requirements and set trap bits from the IMF
-	 * TODO: provide the base/global SCR bits using another mechanism?
-	 */
-	scr_el3 = read_scr();
-	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
-			SCR_ST_BIT | SCR_HCE_BIT);
-
-	if (security_state != SECURE)
-		scr_el3 |= SCR_NS_BIT;
-
-	if (GET_RW(ep->spsr) == MODE_RW_64)
-		scr_el3 |= SCR_RW_BIT;
-
-	if (EP_GET_ST(ep->h.attr))
-		scr_el3 |= SCR_ST_BIT;
-
-	scr_el3 |= get_scr_el3_from_routing_model(security_state);
-
-	/*
-	 * Set up SCTLR_ELx for the target exception level:
-	 * EE bit is taken from the entrpoint attributes
-	 * M, C and I bits must be zero (as required by PSCI specification)
-	 *
-	 * The target exception level is based on the spsr mode requested.
-	 * If execution is requested to EL2 or hyp mode, HVC is enabled
-	 * via SCR_EL3.HCE.
-	 *
-	 * Always compute the SCTLR_EL1 value and save in the cpu_context
-	 * - the EL2 registers are set up by cm_preapre_ns_entry() as they
-	 * are not part of the stored cpu_context
-	 *
-	 * TODO: In debug builds the spsr should be validated and checked
-	 * against the CPU support, security state, endianess and pc
-	 */
-	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
-	if (GET_RW(ep->spsr) == MODE_RW_64)
-		sctlr_elx |= SCTLR_EL1_RES1;
-	else
-		sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
-
-	if ((GET_RW(ep->spsr) == MODE_RW_64
-	     && GET_EL(ep->spsr) == MODE_EL2)
-	    || (GET_RW(ep->spsr) != MODE_RW_64
-		&& GET_M32(ep->spsr) == MODE32_hyp)) {
-		scr_el3 |= SCR_HCE_BIT;
-	}
-
-	/* Populate EL3 state so that we've the right context before doing ERET */
-	state = get_el3state_ctx(ctx);
-	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
-	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
-	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
-
-	/*
-	 * Store the X0-X7 value from the entrypoint into the context
-	 * Use memcpy as we are in control of the layout of the structures
-	 */
-	gp_regs = get_gpregs_ctx(ctx);
-	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
-}
-
-/*******************************************************************************
- * Prepare the CPU system registers for first entry into secure or normal world
- *
- * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
- * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
- * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
- * For all entries, the EL1 registers are initialized from the cpu_context
- ******************************************************************************/
-void cm_prepare_el3_exit(uint32_t security_state)
-{
-	uint32_t sctlr_elx, scr_el3, cptr_el2;
-	cpu_context_t *ctx = cm_get_context(security_state);
-
-	assert(ctx);
-
-	if (security_state == NON_SECURE) {
-		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
-		if (scr_el3 & SCR_HCE_BIT) {
-			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
-			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
-						 CTX_SCTLR_EL1);
-			sctlr_elx &= ~SCTLR_EE_BIT;
-			sctlr_elx |= SCTLR_EL2_RES1;
-			write_sctlr_el2(sctlr_elx);
-		} else if (read_id_aa64pfr0_el1() &
-			   (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
-			/* EL2 present but unused, need to disable safely */
-
-			/* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
-			write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
-
-			/* SCTLR_EL2 : can be ignored when bypassing */
-
-			/* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
-			cptr_el2 = read_cptr_el2();
-			cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
-			write_cptr_el2(cptr_el2);
-
-			/* Enable EL1 access to timer */
-			write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
-
-			/* Reset CNTVOFF_EL2 */
-			write_cntvoff_el2(0);
-
-			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
-			write_vpidr_el2(read_midr_el1());
-			write_vmpidr_el2(read_mpidr_el1());
-		}
-	}
-
-	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
-
-	cm_set_next_context(ctx);
-}
-
-/*******************************************************************************
- * The next four functions are used by runtime services to save and restore
- * EL1 context on the 'cpu_context' structure for the specified security
- * state.
- ******************************************************************************/
-void cm_el1_sysregs_context_save(uint32_t security_state)
-{
-	cpu_context_t *ctx;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	el1_sysregs_context_save(get_sysregs_ctx(ctx));
-}
-
-void cm_el1_sysregs_context_restore(uint32_t security_state)
-{
-	cpu_context_t *ctx;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
-}
-
-/*******************************************************************************
- * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
- * given security state with the given entrypoint
- ******************************************************************************/
-void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
-{
-	cpu_context_t *ctx;
-	el3_state_t *state;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	/* Populate EL3 state so that ERET jumps to the correct entry */
-	state = get_el3state_ctx(ctx);
-	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
-}
-
-/*******************************************************************************
- * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
- * pertaining to the given security state
- ******************************************************************************/
-void cm_set_elr_spsr_el3(uint32_t security_state,
-			 uint64_t entrypoint, uint32_t spsr)
-{
-	cpu_context_t *ctx;
-	el3_state_t *state;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	/* Populate EL3 state so that ERET jumps to the correct entry */
-	state = get_el3state_ctx(ctx);
-	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
-	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
-}
-
-/*******************************************************************************
- * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
- * pertaining to the given security state using the value and bit position
- * specified in the parameters. It preserves all other bits.
- ******************************************************************************/
-void cm_write_scr_el3_bit(uint32_t security_state,
-			  uint32_t bit_pos,
-			  uint32_t value)
-{
-	cpu_context_t *ctx;
-	el3_state_t *state;
-	uint32_t scr_el3;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	/* Ensure that the bit position is a valid one */
-	assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
-
-	/* Ensure that the 'value' is only a bit wide */
-	assert(value <= 1);
-
-	/*
-	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
-	 * and set it to its new value.
-	 */
-	state = get_el3state_ctx(ctx);
-	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
-	scr_el3 &= ~(1 << bit_pos);
-	scr_el3 |= value << bit_pos;
-	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
-}
-
-/*******************************************************************************
- * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
- * given security state.
- ******************************************************************************/
-uint32_t cm_get_scr_el3(uint32_t security_state)
-{
-	cpu_context_t *ctx;
-	el3_state_t *state;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	/* Populate EL3 state so that ERET jumps to the correct entry */
-	state = get_el3state_ctx(ctx);
-	return read_ctx_reg(state, CTX_SCR_EL3);
-}
-
-/*******************************************************************************
- * This function is used to program the context that's used for exception
- * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
- * the required security state
- ******************************************************************************/
-void cm_set_next_eret_context(uint32_t security_state)
-{
-	cpu_context_t *ctx;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	cm_set_next_context(ctx);
-}
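As an aside for reviewers (not part of the patch): the comment above notes that runtime services use these save/restore helpers around a world switch. A minimal hedged sketch of that usage in C follows; example_enter_secure_world() and example_sp_entry() are hypothetical stand-ins for dispatcher-specific code, and the include set is assumed.

#include <bl_common.h>
#include <context_mgmt.h>
#include <stdint.h>

/* Hypothetical assembly trampoline into the secure payload */
extern uint64_t example_sp_entry(void);

static uint64_t example_enter_secure_world(uint64_t secure_ep)
{
	/* Preserve the EL1 state of the world being left */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Make the secure context the target of the next ERET */
	cm_set_elr_el3(SECURE, secure_ep);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Enter the secure payload; control returns via an SMC */
	return example_sp_entry();
}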
diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c
deleted file mode 100644
index 4cba118..0000000
--- a/bl31/cpu_data_array.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <cassert.h>
-#include <cpu_data.h>
-#include <platform_def.h>
-
-/* The per_cpu_ptr_cache_t space allocation */
-cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/bl31/interrupt_mgmt.c b/bl31/interrupt_mgmt.c
index 5478902..b885a66 100644
--- a/bl31/interrupt_mgmt.c
+++ b/bl31/interrupt_mgmt.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -67,18 +43,15 @@
 static intr_type_desc_t intr_type_descs[MAX_INTR_TYPES];
 
 /*******************************************************************************
- * This function validates the interrupt type. EL3 interrupts are currently not
- * supported.
+ * This function validates the interrupt type.
  ******************************************************************************/
 static int32_t validate_interrupt_type(uint32_t type)
 {
-	if (type == INTR_TYPE_EL3)
-		return -ENOTSUP;
+	if (type == INTR_TYPE_S_EL1 || type == INTR_TYPE_NS ||
+			type == INTR_TYPE_EL3)
+		return 0;
 
-	if (type != INTR_TYPE_S_EL1 && type != INTR_TYPE_NS)
-		return -EINVAL;
-
-	return 0;
+	return -EINVAL;
 }
 
 /*******************************************************************************
@@ -95,6 +68,9 @@
 	if (type == INTR_TYPE_NS)
 		return validate_ns_interrupt_rm(flags);
 
+	if (type == INTR_TYPE_EL3)
+		return validate_el3_interrupt_rm(flags);
+
 	return -EINVAL;
 }
 
@@ -129,7 +105,12 @@
 	flag = get_interrupt_rm_flag(interrupt_type_flags, security_state);
 	bit_pos = plat_interrupt_type_to_line(type, security_state);
 	intr_type_descs[type].scr_el3[security_state] = flag << bit_pos;
-	cm_write_scr_el3_bit(security_state, bit_pos, flag);
+
+	/*
+	 * Update scr_el3 only if there is a context available. If not, it
+	 * will be updated later during context initialization, which will
+	 * obtain the scr_el3 value to be used via
+	 * get_scr_el3_from_routing_model().
+	 */
+	if (cm_get_context(security_state))
+		cm_write_scr_el3_bit(security_state, bit_pos, flag);
 }
 
 /*******************************************************************************
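For context (not part of the patch), here is a hedged sketch of how a runtime service registers an interrupt handler and routing model with this framework; the handler body and the choice of flags are purely illustrative, and the registration helpers are assumed from interrupt_mgmt.h.

#include <bl_common.h>
#include <debug.h>
#include <interrupt_mgmt.h>

/* Illustrative handler; a real service would do interrupt-specific work */
static uint64_t example_sel1_interrupt_handler(uint32_t id, uint32_t flags,
					       void *handle, void *cookie)
{
	return 0;
}

static void example_register_sel1_handler(void)
{
	uint32_t flags = 0;

	/* Route S-EL1 interrupts to EL3 while executing in the normal world */
	set_interrupt_rm_flag(flags, NON_SECURE);

	if (register_interrupt_type_handler(INTR_TYPE_S_EL1,
					    example_sel1_interrupt_handler,
					    flags) != 0)
		panic();
}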
diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c
deleted file mode 100644
index c33748f..0000000
--- a/bl31/runtime_svc.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <debug.h>
-#include <errno.h>
-#include <runtime_svc.h>
-#include <string.h>
-
-/*******************************************************************************
- * The 'rt_svc_descs' array holds the runtime service descriptors exported by
- * services by placing them in the 'rt_svc_descs' linker section.
- * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
- * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
- * type[31] bit in the function id are combined to get an index into the
- * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
- * 'rt_svc_descs' array which contains the SMC handler.
- ******************************************************************************/
-#define RT_SVC_DESCS_START	((uint64_t) (&__RT_SVC_DESCS_START__))
-#define RT_SVC_DESCS_END	((uint64_t) (&__RT_SVC_DESCS_END__))
-uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
-static rt_svc_desc_t *rt_svc_descs;
-
-/*******************************************************************************
- * Simple routine to sanity check a runtime service descriptor before using it
- ******************************************************************************/
-static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc)
-{
-	if (desc == NULL)
-		return -EINVAL;
-
-	if (desc->start_oen > desc->end_oen)
-		return -EINVAL;
-
-	if (desc->end_oen >= OEN_LIMIT)
-		return -EINVAL;
-
-	if (desc->call_type != SMC_TYPE_FAST && desc->call_type != SMC_TYPE_STD)
-		return -EINVAL;
-
-	/* A runtime service having no init or handle function doesn't make sense */
-	if (desc->init == NULL && desc->handle == NULL)
-		return -EINVAL;
-
-	return 0;
-}
-
-/*******************************************************************************
- * This function calls the initialisation routine in the descriptor exported by
- * a runtime service. Once a descriptor has been validated, its start & end
- * owning entity numbers and the call type are combined to form a unique oen.
- * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
- * The index of the runtime service descriptor is stored at this index.
- ******************************************************************************/
-void runtime_svc_init(void)
-{
-	int32_t rc = 0;
-	uint32_t index, start_idx, end_idx;
-	uint64_t rt_svc_descs_num;
-
-	/* If no runtime services are implemented then simply bail out */
-	rt_svc_descs_num = RT_SVC_DESCS_END - RT_SVC_DESCS_START;
-	rt_svc_descs_num /= sizeof(rt_svc_desc_t);
-	if (rt_svc_descs_num == 0)
-		return;
-
-	/* Initialise internal variables to invalid state */
-	memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
-
-	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
-	for (index = 0; index < rt_svc_descs_num; index++) {
-
-		/*
-		 * An invalid descriptor is an error condition since it is
-		 * difficult to predict the system behaviour in the absence
-		 * of this service.
-		 */
-		rc = validate_rt_svc_desc(&rt_svc_descs[index]);
-		if (rc) {
-			ERROR("Invalid runtime service descriptor 0x%x (%s)\n",
-					&rt_svc_descs[index],
-					rt_svc_descs[index].name);
-			goto error;
-		}
-
-		/*
-		 * The runtime service may have seperate rt_svc_desc_t
-		 * for its fast smc and standard smc. Since the service itself
-		 * need to be initialized only once, only one of them will have
-		 * an initialisation routine defined. Call the initialisation
-		 * routine for this runtime service, if it is defined.
-		 */
-		if (rt_svc_descs[index].init) {
-			rc = rt_svc_descs[index].init();
-			if (rc) {
-				ERROR("Error initializing runtime service %s\n",
-						rt_svc_descs[index].name);
-				continue;
-			}
-		}
-
-		/*
-		 * Fill the indices corresponding to the start and end
-		 * owning entity numbers with the index of the
-		 * descriptor which will handle the SMCs for this owning
-		 * entity range.
-		 */
-		start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
-				rt_svc_descs[index].call_type);
-		end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
-				rt_svc_descs[index].call_type);
-
-		for (; start_idx <= end_idx; start_idx++)
-			rt_svc_descs_indices[start_idx] = index;
-	}
-
-	return;
-error:
-	panic();
-}
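The OEN/call-type indexing scheme described in the header comment of this (relocated) file can be paraphrased in a few lines of C. This is a hedged sketch assuming the SMC Calling Convention layout (OEN in bits [29:24], call type in bit [31]); the canonical helper is get_unique_oen(), not the function below.

#include <stdint.h>

static unsigned int example_fid_to_index(uint32_t smc_fid)
{
	unsigned int oen  = (smc_fid >> 24) & 0x3fU;	/* OEN, bits [29:24] */
	unsigned int fast = (smc_fid >> 31) & 0x1U;	/* call type, bit [31] */

	/* Six OEN bits plus one call-type bit form a unique index into
	 * rt_svc_descs_indices[] */
	return (fast << 6) | oen;
}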
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
new file mode 100644
index 0000000..cd9fe5c
--- /dev/null
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <smcc_macros.S>
+#include <xlat_tables_defs.h>
+
+	.globl	sp_min_vector_table
+	.globl	sp_min_entrypoint
+	.globl	sp_min_warm_entrypoint
+
+	.macro route_fiq_to_sp_min reg
+		/* -----------------------------------------------------
+		 * FIQs are secure interrupts trapped by Monitor and non
+		 * secure is not allowed to mask the FIQs.
+		 * -----------------------------------------------------
+		 */
+		ldcopr	\reg, SCR
+		orr	\reg, \reg, #SCR_FIQ_BIT
+		bic	\reg, \reg, #SCR_FW_BIT
+		stcopr	\reg, SCR
+	.endm
+
+vector_base sp_min_vector_table
+	b	sp_min_entrypoint
+	b	plat_panic_handler	/* Undef */
+	b	handle_smc		/* Syscall */
+	b	plat_panic_handler	/* Prefetch abort */
+	b	plat_panic_handler	/* Data abort */
+	b	plat_panic_handler	/* Reserved */
+	b	plat_panic_handler	/* IRQ */
+	b	handle_fiq		/* FIQ */
+
+
+/*
+ * The Cold boot/Reset entrypoint for SP_MIN
+ */
+func sp_min_entrypoint
+#if !RESET_TO_SP_MIN
+	/* ---------------------------------------------------------------
+	 * Preceding bootloader has populated r0 with a pointer to a
+	 * 'bl_params_t' structure & r1 with a pointer to platform
+	 * specific structure
+	 * ---------------------------------------------------------------
+	 */
+	mov	r11, r0
+	mov	r12, r1
+
+	/* ---------------------------------------------------------------------
+	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+	 * and primary/secondary CPU logic should not be executed in this case.
+	 *
+	 * Also, assume that the previous bootloader has already initialised the
+	 * SCTLR, including the CPU endianness, and has initialised the memory.
+	 * ---------------------------------------------------------------------
+	 */
+	el3_entrypoint_common					\
+		_init_sctlr=0					\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=1				\
+		_exception_vectors=sp_min_vector_table
+
+	/* ---------------------------------------------------------------------
+	 * Relay the previous bootloader's arguments to the platform layer
+	 * ---------------------------------------------------------------------
+	 */
+	mov	r0, r11
+	mov	r1, r12
+#else
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
+	 * sp_min_entrypoint() is executed only on the cold boot path so we can
+	 * skip the warm boot mailbox mechanism.
+	 * ---------------------------------------------------------------------
+	 */
+	el3_entrypoint_common					\
+		_init_sctlr=1					\
+		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+		_init_memory=1					\
+		_init_c_runtime=1				\
+		_exception_vectors=sp_min_vector_table
+
+	/* ---------------------------------------------------------------------
+	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
+	 * to run so there's no argument to relay from a previous bootloader.
+	 * Zero the arguments passed to the platform layer to reflect that.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	r0, #0
+	mov	r1, #0
+#endif /* RESET_TO_SP_MIN */
+
+#if SP_MIN_WITH_SECURE_FIQ
+	route_fiq_to_sp_min r4
+#endif
+
+	bl	sp_min_early_platform_setup
+	bl	sp_min_plat_arch_setup
+
+	/* Jump to the main function */
+	bl	sp_min_main
+
+	/* -------------------------------------------------------------
+	 * Clean the .data & .bss sections to main memory. This ensures
+	 * that any global data which was initialised by the primary CPU
+	 * is visible to secondary CPUs before they enable their data
+	 * caches and participate in coherency.
+	 * -------------------------------------------------------------
+	 */
+	ldr	r0, =__DATA_START__
+	ldr	r1, =__DATA_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	ldr	r0, =__BSS_START__
+	ldr	r1, =__BSS_END__
+	sub	r1, r1, r0
+	bl	clean_dcache_range
+
+	bl	smc_get_next_ctx
+
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+	b	sp_min_exit
+endfunc sp_min_entrypoint
+
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func handle_smc
+	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+	str	lr, [sp, #SMC_CTX_LR_MON]
+
+	smcc_save_gp_mode_regs
+
+	/*
+	 * `sp` still points to `smc_ctx_t`. Save it to a register
+	 * and restore the C runtime stack pointer to `sp`.
+	 */
+	mov	r2, sp				/* handle */
+	ldr	sp, [r2, #SMC_CTX_SP_MON]
+
+	ldr	r0, [r2, #SMC_CTX_SCR]
+	and	r3, r0, #SCR_NS_BIT		/* flags */
+
+	/* Switch to Secure Mode */
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+
+	/*
+	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
+	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
+	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
+	 */
+	ldcopr	r0, PMCR
+	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
+	stcopr	r0, PMCR
+
+	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
+	/* Check whether an SMC64 is issued */
+	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+	beq	1f
+	/* SMC32 is not detected. Return error back to caller */
+	mov	r0, #SMC_UNK
+	str	r0, [r2, #SMC_CTX_GPREG_R0]
+	mov	r0, r2
+	b	sp_min_exit
+1:
+	/* SMC32 is detected */
+	mov	r1, #0				/* cookie */
+	bl	handle_runtime_svc
+
+	/* `r0` points to `smc_ctx_t` */
+	b	sp_min_exit
+endfunc handle_smc
+
+/*
+ * Secure Interrupts handling function for SP_MIN.
+ */
+func handle_fiq
+#if !SP_MIN_WITH_SECURE_FIQ
+	b plat_panic_handler
+#else
+	/* FIQ has a +4 offset for lr compared to preferred return address */
+	sub	lr, lr, #4
+	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+	str	lr, [sp, #SMC_CTX_LR_MON]
+
+	smcc_save_gp_mode_regs
+
+	/*
+	 * AArch32 architectures need to clear the exclusive access when
+	 * entering Monitor mode.
+	 */
+	clrex
+
+	/* load run-time stack */
+	mov	r2, sp
+	ldr	sp, [r2, #SMC_CTX_SP_MON]
+
+	/* Switch to Secure Mode */
+	ldr	r0, [r2, #SMC_CTX_SCR]
+	bic	r0, #SCR_NS_BIT
+	stcopr	r0, SCR
+	isb
+
+	/*
+	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
+	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
+	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
+	 */
+	ldcopr	r0, PMCR
+	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
+	stcopr	r0, PMCR
+
+	push	{r2, r3}
+	bl	sp_min_fiq
+	pop	{r0, r3}
+
+	b	sp_min_exit
+#endif
+endfunc handle_fiq
+
+/*
+ * The Warm boot entrypoint for SP_MIN.
+ */
+func sp_min_warm_entrypoint
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+	 *    programming the reset address do we need to initialise the SCTLR.
+	 *    In other cases, we assume this has already been taken care of by
+	 *    the entrypoint code.
+	 *
+	 *  - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 *  - Do not try to distinguish between primary and secondary CPUs, this
+	 *    notion only exists for a cold boot.
+	 *
+	 *  - No need to initialise the memory or the C runtime environment,
+	 *    it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=sp_min_vector_table
+
+	/*
+	 * We're about to enable MMU and participate in PSCI state coordination.
+	 *
+	 * The PSCI implementation invokes platform routines that enable CPUs to
+	 * participate in coherency. On a system where CPUs are not
+	 * cache-coherent without appropriate platform specific programming,
+	 * having caches enabled until such time might lead to coherency issues
+	 * (resulting from stale data getting speculatively fetched, among
+	 * others). Therefore we keep data caches disabled even after enabling
+	 * the MMU for such platforms.
+	 *
+	 * On systems with hardware-assisted coherency, or on single cluster
+	 * platforms, such platform specific programming is not required to
+	 * enter coherency (as CPUs already are); and there's no reason to have
+	 * caches disabled either.
+	 */
+	mov	r0, #DISABLE_DCACHE
+	bl	bl32_plat_enable_mmu
+
+#if SP_MIN_WITH_SECURE_FIQ
+	route_fiq_to_sp_min r0
+#endif
+
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_C_BIT
+	stcopr	r0, SCTLR
+	isb
+#endif
+
+	bl	sp_min_warm_boot
+	bl	smc_get_next_ctx
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+	b	sp_min_exit
+endfunc sp_min_warm_entrypoint
+
+/*
+ * The function to restore the registers from SMC context and return
+ * to the mode restored to SPSR.
+ *
+ * Arguments : r0 must point to the SMC context to restore from.
+ */
+func sp_min_exit
+	monitor_exit
+endfunc sp_min_exit
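For reference (not part of the patch), the calling-convention check performed by handle_smc above can be restated in C. The constants are assumptions of this sketch taken from the SMC Calling Convention, where bit [30] of the function ID selects SMC64, which SP_MIN, being AArch32-only, rejects with SMC_UNK.

#include <stdint.h>

#define EXAMPLE_FUNCID_CC_SHIFT	30U
#define EXAMPLE_SMC_UNK		0xffffffffU	/* "unknown SMC" error code */

static uint32_t example_check_calling_convention(uint32_t smc_fid)
{
	if ((smc_fid >> EXAMPLE_FUNCID_CC_SHIFT) & 0x1U)
		return EXAMPLE_SMC_UNK;	/* SMC64 not supported on AArch32 */

	return 0U;	/* SMC32: dispatch via handle_runtime_svc */
}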
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
new file mode 100644
index 0000000..fc44d52
--- /dev/null
+++ b/bl32/sp_min/sp_min.ld.S
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(elf32-littlearm)
+OUTPUT_ARCH(arm)
+ENTRY(sp_min_vector_table)
+
+MEMORY {
+    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
+}
+
+
+SECTIONS
+{
+    . = BL32_BASE;
+   ASSERT(. == ALIGN(4096),
+          "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+    } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        /* Place pubsub sections for events */
+        . = ALIGN(8);
+#include <pubsub_events.h>
+
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
+    ro . : {
+        __RO_START__ = .;
+        *entrypoint.o(.text*)
+        *(.text*)
+        *(.rodata*)
+
+        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(4);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 4-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(4);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        /* Place pubsub sections for events */
+        . = ALIGN(8);
+#include <pubsub_events.h>
+
+        *(.vectors)
+        __RO_END_UNALIGNED__ = .;
+
+        /*
+         * Memory page(s) mapped to this section will be marked as
+         * read-only, executable.  No RW data from the next section must
+         * creep in.  Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __RO_END__ = .;
+    } >RAM
+#endif
+
+    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+           "cpu_ops not defined for this platform.")
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    .data . : {
+        __DATA_START__ = .;
+        *(.data*)
+        __DATA_END__ = .;
+    } >RAM
+
+    stacks (NOLOAD) : {
+        __STACKS_START__ = .;
+        *(tzfw_normal_stacks)
+        __STACKS_END__ = .;
+    } >RAM
+
+    /*
+     * The .bss section gets initialised to 0 at runtime.
+     * Its base address should be 8-byte aligned for better performance of the
+     * zero-initialization code.
+     */
+    .bss (NOLOAD) : ALIGN(8) {
+        __BSS_START__ = .;
+        *(.bss*)
+        *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks,
+         * the remaining cache lines are allocated by the linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
+
+#if ENABLE_PMF
+        /*
+         * Time-stamps are stored in normal .bss memory
+         *
+         * The compiler will allocate enough memory for one CPU's time-stamps,
+         * the remaining memory for other CPU's is allocated by the
+         * linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_TIMESTAMP_START__ = .;
+        KEEP(*(pmf_timestamp_array))
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PMF_PERCPU_TIMESTAMP_END__ = .;
+        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
+        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __PMF_TIMESTAMP_END__ = .;
+#endif /* ENABLE_PMF */
+
+        __BSS_END__ = .;
+    } >RAM
+
+    /*
+     * The xlat_table section is for full, aligned page tables (4K).
+     * Removing them from .bss avoids forcing 4K alignment on
+     * the .bss section and eliminates the unnecessary zero init.
+     */
+    xlat_table (NOLOAD) : {
+        *(xlat_table)
+    } >RAM
+
+     __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+    /*
+     * The base address of the coherent memory section must be page-aligned (4K)
+     * to guarantee that the coherent data are stored on their own pages and
+     * are not mixed with normal data.  This is required to set up the correct
+     * memory attributes for the coherent data page tables.
+     */
+    coherent_ram (NOLOAD) : ALIGN(4096) {
+        __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
+        *(tzfw_coherent_mem)
+        __COHERENT_RAM_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked
+         * as device memory.  No other unexpected data must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __COHERENT_RAM_END__ = .;
+    } >RAM
+
+    __COHERENT_RAM_UNALIGNED_SIZE__ =
+        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
+
+   __BL32_END__ = .;
+}
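A hedged illustration (not part of the patch) of how the per-CPU bakery-lock area laid out above is typically consumed from C: each CPU owns one slice of __PERCPU_BAKERY_LOCK_SIZE__ bytes starting at __BAKERY_LOCK_START__. The helper name is made up and the symbol-import style is one common pattern, not necessarily the exact code in this tree.

#include <stdint.h>

/* Linker-defined symbols; declared as arrays so their addresses (i.e. the
 * symbol values) can be taken from C */
extern char __BAKERY_LOCK_START__[];
extern char __PERCPU_BAKERY_LOCK_SIZE__[];	/* ABSOLUTE() value */

static inline uintptr_t example_percpu_bakery_base(unsigned int cpu_idx)
{
	uintptr_t base  = (uintptr_t)__BAKERY_LOCK_START__;
	uintptr_t slice = (uintptr_t)__PERCPU_BAKERY_LOCK_SIZE__;

	/* CPU 0 uses the compiler-allocated cache lines; CPUs 1..N-1 use the
	 * extra lines reserved by the linker script above */
	return base + ((uintptr_t)cpu_idx * slice);
}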
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
new file mode 100644
index 0000000..39588ce
--- /dev/null
+++ b/bl32/sp_min/sp_min.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH}, aarch32)
+	$(error SP_MIN is only supported on AArch32 platforms)
+endif
+
+include lib/psci/psci_lib.mk
+
+INCLUDES		+=	-Iinclude/bl32/sp_min
+
+BL32_SOURCES		+=	bl32/sp_min/sp_min_main.c		\
+				bl32/sp_min/aarch32/entrypoint.S	\
+				common/runtime_svc.c			\
+				services/std_svc/std_svc_setup.c	\
+				${PSCI_LIB_SOURCES}
+
+ifeq (${ENABLE_PMF}, 1)
+BL32_SOURCES		+=	lib/pmf/pmf_main.c
+endif
+
+BL32_LINKERFILE	:=	bl32/sp_min/sp_min.ld.S
+
+# Include the platform-specific SP_MIN Makefile
+# If no platform-specific SP_MIN Makefile exists, it means SP_MIN is not supported
+# on this platform.
+SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
+ifeq (,${SP_MIN_PLAT_MAKEFILE})
+  $(error SP_MIN is not supported on platform ${PLAT})
+else
+  include ${SP_MIN_PLAT_MAKEFILE}
+endif
+
+RESET_TO_SP_MIN	:= 0
+$(eval $(call add_define,RESET_TO_SP_MIN))
+$(eval $(call assert_boolean,RESET_TO_SP_MIN))
+
+# Flag to allow SP_MIN to handle FIQ interrupts in monitor mode. The platform
+# port is free to override this value. It is disabled by default.
+SP_MIN_WITH_SECURE_FIQ 	?= 0
+$(eval $(call add_define,SP_MIN_WITH_SECURE_FIQ))
+$(eval $(call assert_boolean,SP_MIN_WITH_SECURE_FIQ))
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
new file mode 100644
index 0000000..4e8e685
--- /dev/null
+++ b/bl32/sp_min/sp_min_main.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <platform_sp_min.h>
+#include <psci.h>
+#include <runtime_svc.h>
+#include <smcc_helpers.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <types.h>
+#include <utils.h>
+#include "sp_min_private.h"
+
+/* Pointers to per-core cpu contexts */
+static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+/* SP_MIN only stores the non-secure SMC context */
+static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the smcc helper library APIs.
+ *****************************************************************************/
+void *smc_get_ctx(unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+void smc_set_next_ctx(unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	/* SP_MIN stores only the non-secure SMC context. Nothing to do here */
+}
+
+void *smc_get_next_ctx(void)
+{
+	return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	return sp_min_cpu_ctx_ptr[cpu_idx];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+				unsigned int security_state)
+{
+	assert(security_state == NON_SECURE);
+	sp_min_cpu_ctx_ptr[cpu_idx] = context;
+}
+
+static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
+				smc_ctx_t *next_smc_ctx)
+{
+	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+	next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
+}
+
+/*******************************************************************************
+ * This function invokes the PSCI library interface to initialize the
+ * non secure cpu context and copies the relevant cpu context register values
+ * to smc context. These registers will get programmed during `smc_exit`.
+ ******************************************************************************/
+static void sp_min_prepare_next_image_entry(void)
+{
+	entry_point_info_t *next_image_info;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	u_register_t ns_sctlr;
+
+	/* Program system registers to proceed to non-secure */
+	next_image_info = sp_min_plat_get_bl33_ep_info();
+	assert(next_image_info);
+	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
+
+	INFO("SP_MIN: Preparing exit to normal world\n");
+
+	psci_prepare_next_non_secure_ctx(next_image_info);
+	smc_set_next_ctx(NON_SECURE);
+
+	/* Copy r0, lr and spsr from cpu context to SMC context */
+	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			smc_get_next_ctx());
+
+	/* Temporarily set the NS bit to access NS SCTLR */
+	write_scr(read_scr() | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+	write_sctlr(ns_sctlr);
+	isb();
+
+	write_scr(read_scr() & ~SCR_NS_BIT);
+	isb();
+}
+
+/******************************************************************************
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ *****************************************************************************/
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+	/* Setup the arguments for PSCI Library */
+	DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, sp_min_warm_entrypoint);
+
+	/* PSCI is the only ARM Standard Service implemented */
+	assert(svc_mask == PSCI_FID_MASK);
+
+	return (uintptr_t)&psci_args;
+}
+
+/******************************************************************************
+ * The SP_MIN main function. Do the platform and PSCI Library setup. Also
+ * initialize the runtime service framework.
+ *****************************************************************************/
+void sp_min_main(void)
+{
+	NOTICE("SP_MIN: %s\n", version_string);
+	NOTICE("SP_MIN: %s\n", build_message);
+
+	/* Perform the SP_MIN platform setup */
+	sp_min_platform_setup();
+
+	/* Initialize the runtime services e.g. psci */
+	INFO("SP_MIN: Initializing runtime services\n");
+	runtime_svc_init();
+
+	/*
+	 * We are ready to enter the next EL. Prepare entry into the image
+	 * corresponding to the desired security state after the next ERET.
+	 */
+	sp_min_prepare_next_image_entry();
+
+	/*
+	 * Perform any platform specific runtime setup prior to cold boot exit
+	 * from SP_MIN.
+	 */
+	sp_min_plat_runtime_setup();
+
+	console_flush();
+}
+
+/******************************************************************************
+ * This function is invoked during warm boot. Invoke the PSCI library
+ * warm boot entry point which takes care of Architectural and platform setup/
+ * restore. Copy the relevant cpu_context register values to smc context which
+ * will get programmed during `smc_exit`.
+ *****************************************************************************/
+void sp_min_warm_boot(void)
+{
+	smc_ctx_t *next_smc_ctx;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	u_register_t ns_sctlr;
+
+	psci_warmboot_entrypoint();
+
+	smc_set_next_ctx(NON_SECURE);
+
+	next_smc_ctx = smc_get_next_ctx();
+	zeromem(next_smc_ctx, sizeof(smc_ctx_t));
+
+	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+			next_smc_ctx);
+
+	/* Temporarily set the NS bit to access NS SCTLR */
+	write_scr(read_scr() | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+	write_sctlr(ns_sctlr);
+	isb();
+
+	write_scr(read_scr() & ~SCR_NS_BIT);
+	isb();
+}
+
+#if SP_MIN_WITH_SECURE_FIQ
+/******************************************************************************
+ * This function is invoked on secure interrupts. By construction of the
+ * SP_MIN, secure interrupts can only be handled when the core executes in
+ * the non-secure state.
+ *****************************************************************************/
+void sp_min_fiq(void)
+{
+	uint32_t id;
+
+	id = plat_ic_acknowledge_interrupt();
+	sp_min_plat_fiq_handler(id);
+	plat_ic_end_of_interrupt(id);
+}
+#endif /* SP_MIN_WITH_SECURE_FIQ */
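As background (not part of the patch): runtime_svc_init(), called from sp_min_main() above, discovers services through descriptors placed in the rt_svc_descs linker section. A hedged sketch of such a registration follows; the service name, function ID and handler body are invented for illustration, and the exact macro and handler prototypes are those defined by runtime_svc.h in this tree.

#include <runtime_svc.h>
#include <smcc_helpers.h>

#define EXAMPLE_SVC_FID		0x8200ff00	/* hypothetical SiP function ID */

static int32_t example_svc_setup(void)
{
	/* One-time initialisation for the service */
	return 0;
}

static uintptr_t example_svc_smc_handler(uint32_t smc_fid,
					 u_register_t x1, u_register_t x2,
					 u_register_t x3, u_register_t x4,
					 void *cookie, void *handle,
					 u_register_t flags)
{
	if (smc_fid == EXAMPLE_SVC_FID)
		SMC_RET1(handle, 0);

	SMC_RET1(handle, SMC_UNK);
}

/* Placed in the rt_svc_descs section so runtime_svc_init() can find it */
DECLARE_RT_SVC(
	example_svc,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_FAST,
	example_svc_setup,
	example_svc_smc_handler
);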
diff --git a/bl32/sp_min/sp_min_private.h b/bl32/sp_min/sp_min_private.h
new file mode 100644
index 0000000..1836af9
--- /dev/null
+++ b/bl32/sp_min/sp_min_private.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP_MIN_H__
+#define __SP_MIN_H__
+
+void sp_min_warm_entrypoint(void);
+void sp_min_main(void);
+void sp_min_warm_boot(void);
+void sp_min_fiq(void);
+
+#endif /* __SP_MIN_H__ */
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 2714282..489183c 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -1,37 +1,13 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
 #include <tsp.h>
-#include <xlat_tables.h>
+#include <xlat_tables_defs.h>
 #include "../tsp_private.h"
 
 
@@ -67,10 +43,7 @@
 	msr	spsr_el1, \reg2
 	.endm
 
-	.section	.text, "ax"
-	.align 3
-
-func tsp_entrypoint
+func tsp_entrypoint _align=3
 
 	/* ---------------------------------------------
 	 * Set the exception vector to something sane.
@@ -99,6 +72,20 @@
 	isb
 
 	/* ---------------------------------------------
+	 * Invalidate the RW memory used by the BL32
+	 * image. This includes the data and NOBITS
+	 * sections. This is done to safeguard against
+	 * possible corruption of this memory by dirty
+	 * cache lines in a system cache as a result of
+	 * use by an earlier boot loader stage.
+	 * ---------------------------------------------
+	 */
+	adr	x0, __RW_START__
+	adr	x1, __RW_END__
+	sub	x1, x1, x0
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
 	 * Zero out NOBITS sections. There are 2 of them:
 	 *   - the .bss section;
 	 *   - the coherent memory section.
@@ -106,12 +93,12 @@
 	 */
 	ldr	x0, =__BSS_START__
 	ldr	x1, =__BSS_SIZE__
-	bl	zeromem16
+	bl	zeromem
 
 #if USE_COHERENT_MEM
 	ldr	x0, =__COHERENT_RAM_START__
 	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
-	bl	zeromem16
+	bl	zeromem
 #endif
 
 	/* --------------------------------------------
@@ -122,8 +109,16 @@
 	 * primary cpu is running at the moment.
 	 * --------------------------------------------
 	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
+	bl	plat_set_my_stack
+
+	/* ---------------------------------------------
+	 * Initialize the stack protector canary before
+	 * any C code is called.
+	 * ---------------------------------------------
+	 */
+#if STACK_PROTECTOR_ENABLED
+	bl	update_stack_protector_canary
+#endif
 
 	/* ---------------------------------------------
 	 * Perform early platform setup & platform
@@ -149,6 +144,7 @@
 
 tsp_entrypoint_panic:
 	b	tsp_entrypoint_panic
+endfunc tsp_entrypoint
 
 
 	/* -------------------------------------------
@@ -157,15 +153,17 @@
 	 * -------------------------------------------
 	 */
 func tsp_vector_table
-	b	tsp_std_smc_entry
+	b	tsp_yield_smc_entry
 	b	tsp_fast_smc_entry
 	b	tsp_cpu_on_entry
 	b	tsp_cpu_off_entry
 	b	tsp_cpu_resume_entry
 	b	tsp_cpu_suspend_entry
-	b	tsp_fiq_entry
+	b	tsp_sel1_intr_entry
 	b	tsp_system_off_entry
 	b	tsp_system_reset_entry
+	b	tsp_abort_yield_smc_entry
+endfunc tsp_vector_table
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when this
@@ -180,6 +178,7 @@
 func tsp_cpu_off_entry
 	bl	tsp_cpu_off_main
 	restore_args_call_smc
+endfunc tsp_cpu_off_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when the
@@ -191,6 +190,7 @@
 func tsp_system_off_entry
 	bl	tsp_system_off_main
 	restore_args_call_smc
+endfunc tsp_system_off_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when the
@@ -202,6 +202,7 @@
 func tsp_system_reset_entry
 	bl	tsp_system_reset_main
 	restore_args_call_smc
+endfunc tsp_system_reset_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when this
@@ -243,8 +244,7 @@
 	 * enabled.
 	 * --------------------------------------------
 	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
+	bl	plat_set_my_stack
 
 	/* --------------------------------------------
 	 * Enable the MMU with the DCache disabled. It
@@ -292,6 +292,7 @@
 	/* Should never reach here */
 tsp_cpu_on_entry_panic:
 	b	tsp_cpu_on_entry_panic
+endfunc tsp_cpu_on_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when this
@@ -305,14 +306,17 @@
 func tsp_cpu_suspend_entry
 	bl	tsp_cpu_suspend_main
 	restore_args_call_smc
+endfunc tsp_cpu_suspend_entry
 
-	/*---------------------------------------------
+	/*-------------------------------------------------
 	 * This entrypoint is used by the TSPD to pass
-	 * control for handling a pending S-EL1 FIQ.
-	 * 'x0' contains a magic number which indicates
-	 * this. TSPD expects control to be handed back
-	 * at the end of FIQ processing. This is done
-	 * through an SMC. The handover agreement is:
+	 * control for synchronously handling an S-EL1
+	 * interrupt which was triggered while executing
+	 * in the normal world. 'x0' contains a magic number
+	 * which indicates this. TSPD expects control to
+	 * be handed back at the end of interrupt
+	 * processing. This is done through an SMC.
+	 * The handover agreement is:
 	 *
 	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
 	 *    the ELR_EL3 from the non-secure state.
@@ -324,39 +328,54 @@
 	 * 4. TSP can use 'x0-x18' to enable its C
 	 *    runtime.
 	 * 5. TSP returns to TSPD using an SMC with
-	 *    'x0' = TSP_HANDLED_S_EL1_FIQ
-	 * ---------------------------------------------
+	 *    'x0' = TSP_HANDLED_S_EL1_INTR
+	 * ------------------------------------------------
 	 */
-func	tsp_fiq_entry
+func	tsp_sel1_intr_entry
 #if DEBUG
-	mov	x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
-	movk	x2, #(TSP_HANDLE_FIQ_AND_RETURN &  0xffff)
+	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
 	cmp	x0, x2
-	b.ne	tsp_fiq_entry_panic
+	b.ne	tsp_sel1_int_entry_panic
 #endif
-	/*---------------------------------------------
+	/*-------------------------------------------------
 	 * Save any previous context needed to perform
 	 * an exception return from S-EL1 e.g. context
-	 * from a previous IRQ. Update statistics and
-	 * handle the FIQ before returning to the TSPD.
+	 * from a previous non-secure interrupt.
+	 * Update statistics and handle the S-EL1
+	 * interrupt before returning to the TSPD.
 	 * IRQ/FIQs are not enabled since that will
 	 * complicate the implementation. Execution
 	 * will be transferred back to the normal world
-	 * in any case. A non-zero return value from the
-	 * fiq handler is an error.
-	 * ---------------------------------------------
+	 * in any case. The handler can return 0
+	 * if the interrupt was handled or TSP_PREEMPTED
+	 * if the expected interrupt was preempted
+	 * by an interrupt that should be handled in EL3
+	 * e.g. Group 0 interrupt in GICv3. In both
+	 * the cases switch to EL3 using SMC with id
+	 * TSP_HANDLED_S_EL1_INTR. Any other return value
+	 * from the handler will result in panic.
+	 * ------------------------------------------------
 	 */
 	save_eret_context x2 x3
-	bl	tsp_update_sync_fiq_stats
-	bl	tsp_fiq_handler
-	cbnz	x0, tsp_fiq_entry_panic
+	bl	tsp_update_sync_sel1_intr_stats
+	bl	tsp_common_int_handler
+	/* Check if the S-EL1 interrupt has been handled */
+	cbnz	x0, tsp_sel1_intr_check_preemption
+	b	tsp_sel1_intr_return
+tsp_sel1_intr_check_preemption:
+	/* Check if the S-EL1 interrupt has been preempted */
+	mov_imm	x1, TSP_PREEMPTED
+	cmp	x0, x1
+	b.ne	tsp_sel1_int_entry_panic
+tsp_sel1_intr_return:
+	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
 	restore_eret_context x2 x3
-	mov	x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
-	movk	x0, #(TSP_HANDLED_S_EL1_FIQ &  0xffff)
 	smc	#0
 
-tsp_fiq_entry_panic:
-	b	tsp_fiq_entry_panic
+	/* Should never reach here */
+tsp_sel1_int_entry_panic:
+	no_ret	plat_panic_handler
+endfunc tsp_sel1_intr_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD when this
@@ -371,8 +390,10 @@
 func tsp_cpu_resume_entry
 	bl	tsp_cpu_resume_main
 	restore_args_call_smc
-tsp_cpu_resume_panic:
-	b	tsp_cpu_resume_panic
+
+	/* Should never reach here */
+	no_ret	plat_panic_handler
+endfunc tsp_cpu_resume_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD to ask
@@ -382,20 +403,51 @@
 func tsp_fast_smc_entry
 	bl	tsp_smc_handler
 	restore_args_call_smc
-tsp_fast_smc_entry_panic:
-	b	tsp_fast_smc_entry_panic
+
+	/* Should never reach here */
+	no_ret	plat_panic_handler
+endfunc tsp_fast_smc_entry
 
 	/*---------------------------------------------
 	 * This entrypoint is used by the TSPD to ask
-	 * the TSP to service a std smc request.
+	 * the TSP to service a Yielding SMC request.
 	 * We will enable preemption during execution
 	 * of tsp_smc_handler.
 	 * ---------------------------------------------
 	 */
-func tsp_std_smc_entry
+func tsp_yield_smc_entry
 	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
 	bl	tsp_smc_handler
 	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
 	restore_args_call_smc
-tsp_std_smc_entry_panic:
-	b	tsp_std_smc_entry_panic
+
+	/* Should never reach here */
+	no_ret	plat_panic_handler
+endfunc tsp_yield_smc_entry
+
+	/*---------------------------------------------------------------------
+	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
+	 * SMC. It could be on behalf of non-secure world or because a CPU
+	 * suspend/CPU off request needs to abort the preempted SMC.
+	 * --------------------------------------------------------------------
+	 */
+func tsp_abort_yield_smc_entry
+
+	/*
+	 * Exception masking is already done by the TSPD when entering this
+	 * hook so there is no need to do it here.
+	 */
+
+	/* Reset the stack used by the pre-empted SMC */
+	bl	plat_set_my_stack
+
+	/*
+	 * Allow some cleanup such as releasing locks.
+	 */
+	bl	tsp_abort_smc_handler
+
+	restore_args_call_smc
+
+	/* Should never reach here */
+	bl	plat_panic_handler
+endfunc tsp_abort_yield_smc_entry
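For orientation (not part of the patch): the TSPD consumes tsp_vector_table above as a table of single-instruction entry points, in the same order as the branches. The structure below simply mirrors that order for clarity; the canonical definition is the vector structure in tsp.h, and the type and field names here are illustrative.

#include <stdint.h>

typedef uint32_t example_tsp_vector_isn_t;	/* one branch instruction per slot */

typedef struct example_tsp_vectors {
	example_tsp_vector_isn_t yield_smc_entry;
	example_tsp_vector_isn_t fast_smc_entry;
	example_tsp_vector_isn_t cpu_on_entry;
	example_tsp_vector_isn_t cpu_off_entry;
	example_tsp_vector_isn_t cpu_resume_entry;
	example_tsp_vector_isn_t cpu_suspend_entry;
	example_tsp_vector_isn_t sel1_intr_entry;
	example_tsp_vector_isn_t system_off_entry;
	example_tsp_vector_isn_t system_reset_entry;
	example_tsp_vector_isn_t abort_yield_smc_entry;
} example_tsp_vectors_t;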
diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S
index 4c0d436..4b2ad75 100644
--- a/bl32/tsp/aarch64/tsp_exceptions.S
+++ b/bl32/tsp/aarch64/tsp_exceptions.S
@@ -1,37 +1,13 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <bl_common.h>
 #include <arch.h>
-#include <tsp.h>
 #include <asm_macros.S>
+#include <bl_common.h>
+#include <tsp.h>
 
 
 	/* ----------------------------------------------------
@@ -70,144 +46,118 @@
 	add	sp, sp, SCRATCH_REG_SIZE
 	.endm
 
+	/* ----------------------------------------------------
+	 * Common TSP interrupt handling routine
+	 * ----------------------------------------------------
+	 */
+	.macro	handle_tsp_interrupt label
+	/* Enable the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	save_caller_regs_and_lr
+	bl	tsp_common_int_handler
+	cbz	x0, interrupt_exit_\label
+
+	/*
+	 * This interrupt was not targeted at S-EL1, so send it to
+	 * the monitor and wait for execution to resume.
+	 */
+	smc	#0
+interrupt_exit_\label:
+	restore_caller_regs_and_lr
+	eret
+	.endm
+
 	.globl	tsp_exceptions
 
 	/* -----------------------------------------------------
 	 * TSP exception handlers.
 	 * -----------------------------------------------------
 	 */
-	.section	.vectors, "ax"; .align 11
-
-	.align	7
-tsp_exceptions:
+vector_base tsp_exceptions
 	/* -----------------------------------------------------
-	 * Current EL with _sp_el0 : 0x0 - 0x180. No exceptions
+	 * Current EL with _sp_el0 : 0x0 - 0x200. No exceptions
 	 * are expected and treated as irrecoverable errors.
 	 * -----------------------------------------------------
 	 */
-sync_exception_sp_el0:
-	wfi
-	b	sync_exception_sp_el0
+vector_entry sync_exception_sp_el0
+	b	plat_panic_handler
 	check_vector_size sync_exception_sp_el0
 
-	.align	7
-
-irq_sp_el0:
-	b	irq_sp_el0
+vector_entry irq_sp_el0
+	b	plat_panic_handler
 	check_vector_size irq_sp_el0
 
-	.align	7
-fiq_sp_el0:
-	b	fiq_sp_el0
+vector_entry fiq_sp_el0
+	b	plat_panic_handler
 	check_vector_size fiq_sp_el0
 
-	.align	7
-serror_sp_el0:
-	b	serror_sp_el0
+vector_entry serror_sp_el0
+	b	plat_panic_handler
 	check_vector_size serror_sp_el0
 
 
 	/* -----------------------------------------------------
-	 * Current EL with SPx: 0x200 - 0x380. Only IRQs/FIQs
+	 * Current EL with SPx: 0x200 - 0x400. Only IRQs/FIQs
 	 * are expected and handled
 	 * -----------------------------------------------------
 	 */
-	.align	7
-sync_exception_sp_elx:
-	wfi
-	b	sync_exception_sp_elx
+vector_entry sync_exception_sp_elx
+	b	plat_panic_handler
 	check_vector_size sync_exception_sp_elx
 
-	.align	7
-irq_sp_elx:
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	save_caller_regs_and_lr
-	/* We just update some statistics in the handler */
-	bl	tsp_irq_received
-	/* Hand over control to the normal world to handle the IRQ */
-	smc	#0
-	/* The resume std smc starts from here */
-	restore_caller_regs_and_lr
-	eret
+vector_entry irq_sp_elx
+	handle_tsp_interrupt irq_sp_elx
 	check_vector_size irq_sp_elx
 
-	.align	7
-fiq_sp_elx:
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	save_caller_regs_and_lr
-	bl	tsp_fiq_handler
-	cbz	x0, fiq_sp_elx_done
-
-	/*
-	 * This FIQ was not targetted to S-EL1 so send it to
-	 * the monitor and wait for execution to resume.
-	 */
-	smc	#0
-fiq_sp_elx_done:
-	restore_caller_regs_and_lr
-	eret
+vector_entry fiq_sp_elx
+	handle_tsp_interrupt fiq_sp_elx
 	check_vector_size fiq_sp_elx
 
-	.align	7
-serror_sp_elx:
-	b	serror_sp_elx
+vector_entry serror_sp_elx
+	b	plat_panic_handler
 	check_vector_size serror_sp_elx
 
 
 	/* -----------------------------------------------------
-	 * Lower EL using AArch64 : 0x400 - 0x580. No exceptions
+	 * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
 	 * are handled since TSP does not implement a lower EL
 	 * -----------------------------------------------------
 	 */
-	.align	7
-sync_exception_aarch64:
-	wfi
-	b	sync_exception_aarch64
+vector_entry sync_exception_aarch64
+	b	plat_panic_handler
 	check_vector_size sync_exception_aarch64
 
-	.align	7
-irq_aarch64:
-	b	irq_aarch64
+vector_entry irq_aarch64
+	b	plat_panic_handler
 	check_vector_size irq_aarch64
 
-	.align	7
-fiq_aarch64:
-	b	fiq_aarch64
+vector_entry fiq_aarch64
+	b	plat_panic_handler
 	check_vector_size fiq_aarch64
 
-	.align	7
-serror_aarch64:
-	b	serror_aarch64
+vector_entry serror_aarch64
+	b	plat_panic_handler
 	check_vector_size serror_aarch64
 
 
 	/* -----------------------------------------------------
-	 * Lower EL using AArch32 : 0x600 - 0x780. No exceptions
+	 * Lower EL using AArch32 : 0x600 - 0x800. No exceptions
 	 * handled since the TSP does not implement a lower EL.
 	 * -----------------------------------------------------
 	 */
-	.align	7
-sync_exception_aarch32:
-	wfi
-	b	sync_exception_aarch32
+vector_entry sync_exception_aarch32
+	b	plat_panic_handler
 	check_vector_size sync_exception_aarch32
 
-	.align	7
-irq_aarch32:
-	b	irq_aarch32
+vector_entry irq_aarch32
+	b	plat_panic_handler
 	check_vector_size irq_aarch32
 
-	.align	7
-fiq_aarch32:
-	b	fiq_aarch32
+vector_entry fiq_aarch32
+	b	plat_panic_handler
 	check_vector_size fiq_aarch32
 
-	.align	7
-serror_aarch32:
-	b	serror_aarch32
+vector_entry serror_aarch32
+	b	plat_panic_handler
 	check_vector_size serror_aarch32
-	.align	7
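The new handle_tsp_interrupt macro funnels both the IRQ and FIQ vectors into tsp_common_int_handler and lets the return value decide whether execution resumes in the TSP or traps to EL3. A minimal C sketch of that decision; smc_to_el3() is an illustrative stand-in for the `smc #0` instruction and is not part of the TSP sources:

    #include <stdint.h>

    int32_t tsp_common_int_handler(void);   /* provided by bl32/tsp/tsp_interrupt.c */
    void smc_to_el3(void);                   /* hypothetical stand-in for 'smc #0' */

    /*
     * Body of each interrupt vector after the caller-saved registers have
     * been stashed: a zero return means the S-EL1 interrupt was handled
     * locally, any other value means EL3 must take over before the TSP
     * can resume.
     */
    static void tsp_interrupt_vector_body(void)
    {
        if (tsp_common_int_handler() != 0)
            smc_to_el3();
        /* restore the saved registers and eret to the interrupted context */
    }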
diff --git a/bl32/tsp/aarch64/tsp_request.S b/bl32/tsp/aarch64/tsp_request.S
index 6aa0873..2261f87 100644
--- a/bl32/tsp/aarch64/tsp_request.S
+++ b/bl32/tsp/aarch64/tsp_request.S
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <asm_macros.S>
@@ -56,6 +32,7 @@
 	stp	x0, x1, [x4, #0]
 
 	ret
+endfunc tsp_get_magic
 
 	.align 2
 _tsp_fid_get_magic:
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
index d411ad0..2b672ef 100644
--- a/bl32/tsp/tsp.ld.S
+++ b/bl32/tsp/tsp.ld.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <platform_def.h>
@@ -46,6 +22,23 @@
     ASSERT(. == ALIGN(4096),
            "BL32_BASE address is not aligned on a page boundary.")
 
+#if SEPARATE_CODE_AND_RODATA
+    .text . : {
+        __TEXT_START__ = .;
+        *tsp_entrypoint.o(.text*)
+        *(.text*)
+        *(.vectors)
+        . = NEXT(4096);
+        __TEXT_END__ = .;
+    } >RAM
+
+    .rodata . : {
+        __RODATA_START__ = .;
+        *(.rodata*)
+        . = NEXT(4096);
+        __RODATA_END__ = .;
+    } >RAM
+#else
     ro . : {
         __RO_START__ = .;
         *tsp_entrypoint.o(.text*)
@@ -61,6 +54,13 @@
         . = NEXT(4096);
         __RO_END__ = .;
     } >RAM
+#endif
+
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
 
     .data . : {
         __DATA_START__ = .;
@@ -80,7 +80,8 @@
 
     /*
      * The .bss section gets initialised to 0 at runtime.
-     * Its base address must be 16-byte aligned.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
      */
     .bss : ALIGN(16) {
         __BSS_START__ = .;
@@ -119,6 +120,11 @@
     } >RAM
 #endif
 
+    /*
+     * Define a linker symbol to mark the end of the RW memory area for this
+     * image.
+     */
+    __RW_END__ = .;
     __BL32_END__ = .;
 
     __BSS_SIZE__ = SIZEOF(.bss);
@@ -127,5 +133,5 @@
         __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
 #endif
 
-    ASSERT(. <= BL32_LIMIT, "BL3-2 image has exceeded its limit.")
+    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
 }
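The __RW_START__ and __RW_END__ symbols added above exist only in the linker's symbol table; C code reads them by taking their addresses. A minimal sketch of the usual consumption pattern (the helper name is illustrative):

    #include <stdint.h>

    /* Linker-defined markers; only their addresses carry meaning. */
    extern char __RW_START__[];
    extern char __RW_END__[];

    /* Size of the image's read-write region, e.g. for MMU mapping decisions. */
    static inline uintptr_t tsp_rw_region_size(void)
    {
        return (uintptr_t)__RW_END__ - (uintptr_t)__RW_START__;
    }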
diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk
index f17ef1e..4ea3dfb 100644
--- a/bl32/tsp/tsp.mk
+++ b/bl32/tsp/tsp.mk
@@ -1,31 +1,7 @@
 #
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 INCLUDES		+=	-Iinclude/bl32/tsp
@@ -37,12 +13,12 @@
 				bl32/tsp/tsp_interrupt.c		\
 				bl32/tsp/tsp_timer.c			\
 				common/aarch64/early_exceptions.S	\
-				lib/locks/exclusive/spinlock.S
+				lib/locks/exclusive/aarch64/spinlock.S
 
 BL32_LINKERFILE		:=	bl32/tsp/tsp.ld.S
 
-# This flag determines if the TSPD initializes BL3-2 in tspd_init() (synchronous
-# method) or configures BL3-1 to pass control to BL3-2 instead of BL3-3
+# This flag determines if the TSPD initializes BL32 in tspd_init() (synchronous
+# method) or configures BL31 to pass control to BL32 instead of BL33
 # (asynchronous method).
 TSP_INIT_ASYNC         :=      0
 
@@ -52,8 +28,8 @@
 # Include the platform-specific TSP Makefile
 # If no platform-specific TSP Makefile exists, it means TSP is not supported
 # on this platform.
-TSP_PLAT_MAKEFILE := plat/${PLAT}/tsp/tsp-${PLAT}.mk
-ifeq (,$(wildcard ${TSP_PLAT_MAKEFILE}))
+TSP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/tsp/tsp-${PLAT}.mk)
+ifeq (,${TSP_PLAT_MAKEFILE})
   $(error TSP is not supported on platform ${PLAT})
 else
   include ${TSP_PLAT_MAKEFILE}
diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c
index 7163bad..cbfc152 100644
--- a/bl32/tsp/tsp_interrupt.c
+++ b/bl32/tsp/tsp_interrupt.c
@@ -1,99 +1,98 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
 #include <assert.h>
 #include <debug.h>
-#include <gic_v2.h>
 #include <platform.h>
 #include <platform_def.h>
 #include <tsp.h>
 #include "tsp_private.h"
 
 /*******************************************************************************
- * This function updates the TSP statistics for FIQs handled synchronously i.e
- * the ones that have been handed over by the TSPD. It also keeps count of the
- * number of times control was passed back to the TSPD after handling an FIQ.
- * In the future it will be possible that the TSPD hands over an FIQ to the TSP
- * but does not expect it to return execution. This statistic will be useful to
- * distinguish between these two models of synchronous FIQ handling.
- * The 'elr_el3' parameter contains the address of the instruction in normal
- * world where this FIQ was generated.
+ * This function updates the TSP statistics for S-EL1 interrupts handled
+ * synchronously, i.e. the ones that have been handed over by the TSPD. It also
+ * keeps count of the number of times control was passed back to the TSPD
+ * after handling the interrupt. In the future it will be possible that the
+ * TSPD hands over an S-EL1 interrupt to the TSP but does not expect it to
+ * return execution. This statistic will be useful to distinguish between these
+ * two models of synchronous S-EL1 interrupt handling. The 'elr_el3' parameter
+ * contains the address of the instruction in normal world where this S-EL1
+ * interrupt was generated.
  ******************************************************************************/
-void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
+void tsp_update_sync_sel1_intr_stats(uint32_t type, uint64_t elr_el3)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
-	tsp_stats[linear_id].sync_fiq_count++;
-	if (type == TSP_HANDLE_FIQ_AND_RETURN)
-		tsp_stats[linear_id].sync_fiq_ret_count++;
+	tsp_stats[linear_id].sync_sel1_intr_count++;
+	if (type == TSP_HANDLE_SEL1_INTR_AND_RETURN)
+		tsp_stats[linear_id].sync_sel1_intr_ret_count++;
 
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
 	spin_lock(&console_lock);
-	VERBOSE("TSP: cpu 0x%x sync fiq request from 0x%llx\n",
-		mpidr, elr_el3);
-	VERBOSE("TSP: cpu 0x%x: %d sync fiq requests, %d sync fiq returns\n",
-		mpidr,
-		tsp_stats[linear_id].sync_fiq_count,
-		tsp_stats[linear_id].sync_fiq_ret_count);
+	VERBOSE("TSP: cpu 0x%lx sync s-el1 interrupt request from 0x%lx\n",
+		read_mpidr(), elr_el3);
+	VERBOSE("TSP: cpu 0x%lx: %d sync s-el1 interrupt requests,"
+		" %d sync s-el1 interrupt returns\n",
+		read_mpidr(),
+		tsp_stats[linear_id].sync_sel1_intr_count,
+		tsp_stats[linear_id].sync_sel1_intr_ret_count);
 	spin_unlock(&console_lock);
 #endif
 }
 
-/*******************************************************************************
- * TSP FIQ handler called as a part of both synchronous and asynchronous
- * handling of FIQ interrupts. It returns 0 upon successfully handling a S-EL1
- * FIQ and treats all other FIQs as EL3 interrupts. It assumes that the GIC
- * architecture version in v2.0 and the secure physical timer interrupt is the
- * only S-EL1 interrupt that it needs to handle.
- ******************************************************************************/
-int32_t tsp_fiq_handler(void)
+/******************************************************************************
+ * This function is invoked when a non S-EL1 interrupt is received and causes
+ * the preemption of the TSP. This function returns TSP_PREEMPTED and results
+ * in control being handed over to EL3 for handling the interrupt.
+ *****************************************************************************/
+int32_t tsp_handle_preemption(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr), id;
+	uint32_t linear_id = plat_my_core_pos();
+
+	tsp_stats[linear_id].preempt_intr_count++;
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	spin_lock(&console_lock);
+	VERBOSE("TSP: cpu 0x%lx: %d preempt interrupt requests\n",
+		read_mpidr(), tsp_stats[linear_id].preempt_intr_count);
+	spin_unlock(&console_lock);
+#endif
+	return TSP_PREEMPTED;
+}
+
+/*******************************************************************************
+ * TSP interrupt handler is called as a part of both synchronous and
+ * asynchronous handling of TSP interrupts. Currently the physical timer
+ * interrupt is the only S-EL1 interrupt that this handler expects. It returns
+ * 0 upon successfully handling the expected interrupt; all other
+ * interrupts are treated as normal world or EL3 interrupts.
+ ******************************************************************************/
+int32_t tsp_common_int_handler(void)
+{
+	uint32_t linear_id = plat_my_core_pos(), id;
 
 	/*
 	 * Get the highest priority pending interrupt id and see if it is the
 	 * secure physical generic timer interrupt in which case, handle it.
 	 * Otherwise throw this interrupt at the EL3 firmware.
+	 *
+	 * There is a small time window between reading the highest priority
+	 * pending interrupt and acknowledging it during which another
+	 * interrupt of higher priority could become the highest pending
+	 * interrupt. This is not expected to happen currently for TSP.
 	 */
 	id = plat_ic_get_pending_interrupt_id();
 
 	/* TSP can only handle the secure physical timer interrupt */
 	if (id != TSP_IRQ_SEC_PHY_TIMER)
-		return TSP_EL3_FIQ;
+		return tsp_handle_preemption();
 
 	/*
-	 * Handle the interrupt. Also sanity check if it has been preempted by
-	 * another secure interrupt through an assertion.
+	 * Acknowledge and handle the secure timer interrupt. Also sanity check
+	 * if it has been preempted by another interrupt through an assertion.
 	 */
 	id = plat_ic_acknowledge_interrupt();
 	assert(id == TSP_IRQ_SEC_PHY_TIMER);
@@ -101,30 +100,14 @@
 	plat_ic_end_of_interrupt(id);
 
 	/* Update the statistics and print some messages */
-	tsp_stats[linear_id].fiq_count++;
+	tsp_stats[linear_id].sel1_intr_count++;
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
 	spin_lock(&console_lock);
-	VERBOSE("TSP: cpu 0x%x handled fiq %d\n",
-	       mpidr, id);
-	VERBOSE("TSP: cpu 0x%x: %d fiq requests\n",
-	     mpidr, tsp_stats[linear_id].fiq_count);
+	VERBOSE("TSP: cpu 0x%lx handled S-EL1 interrupt %d\n",
+	       read_mpidr(), id);
+	VERBOSE("TSP: cpu 0x%lx: %d S-EL1 requests\n",
+	     read_mpidr(), tsp_stats[linear_id].sel1_intr_count);
 	spin_unlock(&console_lock);
 #endif
 	return 0;
 }
-
-int32_t tsp_irq_received(void)
-{
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
-
-	tsp_stats[linear_id].irq_count++;
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-	spin_lock(&console_lock);
-	VERBOSE("TSP: cpu 0x%x received irq\n", mpidr);
-	VERBOSE("TSP: cpu 0x%x: %d irq requests\n",
-		mpidr, tsp_stats[linear_id].irq_count);
-	spin_unlock(&console_lock);
-#endif
-	return TSP_PREEMPTED;
-}
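Every statistics update in the rewritten handlers follows the same shape: the counter lives in a per-CPU slot indexed by plat_my_core_pos() and needs no lock, while the verbose console output is serialised with console_lock. Condensed into one helper for reference; the function name is illustrative and the declarations mirror tsp_private.h and tsp_main.c, assuming the same headers as tsp_interrupt.c above:

    extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
    extern spinlock_t console_lock;

    static void tsp_count_sel1_interrupt(uint32_t id)
    {
        uint32_t linear_id = plat_my_core_pos();  /* this core's private slot */

        tsp_stats[linear_id].sel1_intr_count++;   /* per-CPU, so no lock needed */

    #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
        spin_lock(&console_lock);                 /* the console is shared */
        VERBOSE("TSP: cpu 0x%lx handled S-EL1 interrupt %d\n", read_mpidr(), id);
        spin_unlock(&console_lock);
    #endif
    }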
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index c6000e1..0de0ca8 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -38,12 +14,6 @@
 #include <tsp.h>
 #include "tsp_private.h"
 
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __BL32_END__;
 
 /*******************************************************************************
  * Lock to control access to the console
@@ -62,12 +32,12 @@
 work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
 
 /*******************************************************************************
- * The BL32 memory footprint starts with an RO sections and ends
- * with the linker symbol __BL32_END__. Use it to find the memory size
+ * The TSP memory footprint starts at address BL32_BASE and ends with the
+ * linker symbol __BL32_END__. Use these addresses to compute the TSP image
+ * size.
  ******************************************************************************/
-#define BL32_TOTAL_BASE (unsigned long)(&__RO_START__)
-
 #define BL32_TOTAL_LIMIT (unsigned long)(&__BL32_END__)
+#define BL32_TOTAL_SIZE (BL32_TOTAL_LIMIT - (unsigned long) BL32_BASE)
 
 static tsp_args_t *set_smc_args(uint64_t arg0,
 			     uint64_t arg1,
@@ -78,7 +48,6 @@
 			     uint64_t arg6,
 			     uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id;
 	tsp_args_t *pcpu_smc_args;
 
@@ -86,7 +55,7 @@
 	 * Return to Secure Monitor by raising an SMC. The results of the
 	 * service are passed as an arguments to the SMC
 	 */
-	linear_id = platform_get_core_pos(mpidr);
+	linear_id = plat_my_core_pos();
 	pcpu_smc_args = &tsp_smc_args[linear_id];
 	write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
 	write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
@@ -109,12 +78,10 @@
 {
 	NOTICE("TSP: %s\n", version_string);
 	NOTICE("TSP: %s\n", build_message);
-	INFO("TSP: Total memory base : 0x%x\n", (unsigned long)BL32_TOTAL_BASE);
-	INFO("TSP: Total memory size : 0x%x bytes\n",
-			 (unsigned long)(BL32_TOTAL_LIMIT - BL32_TOTAL_BASE));
+	INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
+	INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
 
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Initialize the platform */
 	tsp_platform_setup();
@@ -129,7 +96,8 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+	     read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count,
 	     tsp_stats[linear_id].cpu_on_count);
@@ -145,8 +113,7 @@
  ******************************************************************************/
 tsp_args_t *tsp_cpu_on_main(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Initialize secure/applications state here */
 	tsp_generic_timer_start();
@@ -158,8 +125,9 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x turned on\n", mpidr);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
+	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_on_count);
@@ -182,8 +150,7 @@
 			   uint64_t arg6,
 			   uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/*
 	 * This cpu is being turned off, so disable the timer to prevent the
@@ -199,8 +166,9 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x off request\n", mpidr);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets %d cpu off requests\n", mpidr,
+	INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
+		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_off_count);
@@ -225,8 +193,7 @@
 			       uint64_t arg6,
 			       uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/*
 	 * Save the time context and disable it to prevent the secure timer
@@ -242,8 +209,8 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n",
-		mpidr,
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
+		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_suspend_count);
@@ -259,7 +226,7 @@
  * cpu's architectural state has been restored after wakeup from an earlier psci
  * cpu_suspend request.
  ******************************************************************************/
-tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
+tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
 			      uint64_t arg1,
 			      uint64_t arg2,
 			      uint64_t arg3,
@@ -268,8 +235,7 @@
 			      uint64_t arg6,
 			      uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Restore the generic timer context */
 	tsp_generic_timer_restore();
@@ -281,10 +247,10 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x resumed. suspend level %d\n",
-		mpidr, suspend_level);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n",
-		mpidr,
+	INFO("TSP: cpu 0x%lx resumed. maximum off power level %ld\n",
+	     read_mpidr(), max_off_pwrlvl);
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
+		read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count,
 		tsp_stats[linear_id].cpu_suspend_count);
@@ -307,8 +273,7 @@
 				uint64_t arg6,
 				uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
@@ -316,8 +281,8 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x SYSTEM_OFF request\n", mpidr);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets requests\n", mpidr,
+	INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count);
 	spin_unlock(&console_lock);
@@ -340,8 +305,7 @@
 				uint64_t arg6,
 				uint64_t arg7)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
@@ -349,8 +313,8 @@
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
 	spin_lock(&console_lock);
-	INFO("TSP: cpu 0x%x SYSTEM_RESET request\n", mpidr);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets requests\n", mpidr,
+	INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
 	     tsp_stats[linear_id].smc_count,
 	     tsp_stats[linear_id].eret_count);
 	spin_unlock(&console_lock);
@@ -377,17 +341,16 @@
 {
 	uint64_t results[2];
 	uint64_t service_args[2];
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 
-	INFO("TSP: cpu 0x%x received %s smc 0x%x\n", read_mpidr(),
-		((func >> 31) & 1) == 1 ? "fast" : "standard",
+	INFO("TSP: cpu 0x%lx received %s smc 0x%lx\n", read_mpidr(),
+		((func >> 31) & 1) == 1 ? "fast" : "yielding",
 		func);
-	INFO("TSP: cpu 0x%x: %d smcs, %d erets\n", mpidr,
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
 		tsp_stats[linear_id].smc_count,
 		tsp_stats[linear_id].eret_count);
 
@@ -429,3 +392,20 @@
 			    0, 0, 0, 0);
 }
 
+/*******************************************************************************
+ * TSP SMC abort handler. This function is called when aborting a preempted
+ * yielding SMC request. It should clean up all resources owned by the SMC
+ * handler, such as locks or dynamically allocated memory, so that subsequent
+ * SMC requests are executed in a clean environment.
+ ******************************************************************************/
+tsp_args_t *tsp_abort_smc_handler(uint64_t func,
+				  uint64_t arg1,
+				  uint64_t arg2,
+				  uint64_t arg3,
+				  uint64_t arg4,
+				  uint64_t arg5,
+				  uint64_t arg6,
+				  uint64_t arg7)
+{
+	return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
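set_smc_args is the single exit path used by all of these handlers: each core packs its eight return values into its own slot of a per-CPU buffer, so no locking is needed when returning to the secure monitor. A condensed sketch of that pattern, based on the code above, with arguments 2 to 7 elided and the function renamed to mark it as illustrative:

    static tsp_args_t tsp_smc_args[PLATFORM_CORE_COUNT];  /* one slot per core */

    static tsp_args_t *set_smc_args_sketch(uint64_t arg0, uint64_t arg1)
    {
        tsp_args_t *pcpu_smc_args = &tsp_smc_args[plat_my_core_pos()];

        write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
        write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
        /* ... TSP_ARG2 through TSP_ARG7 are written the same way ... */

        return pcpu_smc_args;
    }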
diff --git a/bl32/tsp/tsp_private.h b/bl32/tsp/tsp_private.h
index 39fb5f6..b11570c 100644
--- a/bl32/tsp/tsp_private.h
+++ b/bl32/tsp/tsp_private.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __TSP_PRIVATE_H__
@@ -54,10 +30,14 @@
 
 
 typedef struct work_statistics {
-	uint32_t fiq_count;		/* Number of FIQs on this cpu */
-	uint32_t irq_count;		/* Number of IRQs on this cpu */
-	uint32_t sync_fiq_count;	/* Number of sync. fiqs on this cpu */
-	uint32_t sync_fiq_ret_count;	/* Number of fiq returns on this cpu */
+	/* Number of s-el1 interrupts on this cpu */
+	uint32_t sel1_intr_count;
+	/* Number of non s-el1 interrupts on this cpu which preempted TSP */
+	uint32_t preempt_intr_count;
+	/* Number of sync s-el1 interrupts on this cpu */
+	uint32_t sync_sel1_intr_count;
+	/* Number of sync s-el1 interrupt returns on this cpu */
+	uint32_t sync_sel1_intr_ret_count;
 	uint32_t smc_count;		/* Number of returns on this cpu */
 	uint32_t eret_count;		/* Number of entries on this cpu */
 	uint32_t cpu_on_count;		/* Number of cpu on requests */
@@ -115,8 +95,8 @@
 void tsp_generic_timer_save(void);
 void tsp_generic_timer_restore(void);
 
-/* FIQ management functions */
-void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3);
+/* S-EL1 interrupt management functions */
+void tsp_update_sync_sel1_intr_stats(uint32_t type, uint64_t elr_el3);
 
 
 /* Data structure to keep track of TSP statistics */
diff --git a/bl32/tsp/tsp_timer.c b/bl32/tsp/tsp_timer.c
index f196021..ebe7f0d 100644
--- a/bl32/tsp/tsp_timer.c
+++ b/bl32/tsp/tsp_timer.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <arch_helpers.h>
 #include <assert.h>
@@ -92,7 +68,7 @@
  ******************************************************************************/
 void tsp_generic_timer_save(void)
 {
-	uint32_t linear_id = platform_get_core_pos(read_mpidr());
+	uint32_t linear_id = plat_my_core_pos();
 
 	pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
 	pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
@@ -105,7 +81,7 @@
  ******************************************************************************/
 void tsp_generic_timer_restore(void)
 {
-	uint32_t linear_id = platform_get_core_pos(read_mpidr());
+	uint32_t linear_id = plat_my_core_pos();
 
 	write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
 	write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);
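The timer save/restore pair now also indexes its per-CPU context with plat_my_core_pos(). The shape of the pattern, condensed for reference; the context type is an assumed layout mirroring tsp_timer.c, and the functions are renamed to mark them as sketches:

    typedef struct {
        uint64_t cval;   /* CNTPS_CVAL_EL1: secure physical timer compare value */
        uint32_t ctl;    /* CNTPS_CTL_EL1:  secure physical timer control */
    } timer_context_t;

    static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT];

    void tsp_generic_timer_save_sketch(void)
    {
        uint32_t linear_id = plat_my_core_pos();

        pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
        pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
    }

    void tsp_generic_timer_restore_sketch(void)
    {
        uint32_t linear_id = plat_my_core_pos();

        write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
        write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);
    }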
diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S
new file mode 100644
index 0000000..583ee4a
--- /dev/null
+++ b/common/aarch32/debug.S
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	asm_assert
+	.globl	do_panic
+	.globl	report_exception
+
+/* Since the maximum decimal input (a 16-bit line number) is 65535 */
+#define MAX_DEC_DIVISOR		10000
+
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM	'0'
+
+	.section .rodata.panic_str, "aS"
+panic_msg:
+	.asciz "PANIC at PC : 0x"
+panic_end:
+	.asciz "\r\n"
+
+	/***********************************************************
+	 * The common implementation of do_panic for all BL stages
+	 ***********************************************************/
+func do_panic
+	/* Have LR copy point to PC at the time of panic */
+	sub	r6, lr, #4
+
+	/* Initialize crash console and verify success */
+	bl	plat_crash_console_init
+	cmp	r0, #0
+	beq	1f
+
+	/* Print panic message */
+	ldr	r4, =panic_msg
+	bl	asm_print_str
+
+	/* Print LR in hex */
+	mov	r4, r6
+	bl	asm_print_hex
+
+	/* Print new line */
+	ldr	r4, =panic_end
+	bl	asm_print_str
+
+	bl	plat_crash_console_flush
+
+1:
+	mov	lr, r6
+	b	plat_panic_handler
+endfunc do_panic
+
+	/***********************************************************
+	 * This function is called from the vector table for
+	 * unhandled exceptions. It reads the current mode and
+	 * passes it to platform.
+	 * passes it to the platform.
+func report_exception
+	mrs	r0, cpsr
+	and	r0, #MODE32_MASK
+	bl	plat_report_exception
+	no_ret	plat_panic_handler
+endfunc report_exception
+
+#if ENABLE_ASSERTIONS
+.section .rodata.assert_str, "aS"
+assert_msg1:
+	.asciz "ASSERT: File "
+assert_msg2:
+	.asciz " Line "
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * r0 - File name
+ * r1 - Line no
+ * Clobber list : lr, r0 - r6
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+	/*
+	 * Only print the output if LOG_LEVEL is higher or equal to
+	 * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+	 */
+	/* Stash the parameters already in r0 and r1 */
+	mov	r5, r0
+	mov	r6, r1
+
+	/* Initialize crash console and verify success */
+	bl	plat_crash_console_init
+	cmp	r0, #0
+	beq	1f
+
+	/* Print file name */
+	ldr	r4, =assert_msg1
+	bl	asm_print_str
+	mov	r4, r5
+	bl	asm_print_str
+
+	/* Print line number string */
+	ldr	r4, =assert_msg2
+	bl	asm_print_str
+
+	/* Test for maximum supported line number */
+	ldr	r4, =~0xffff
+	tst	r6, r4
+	bne	1f
+	mov	r4, r6
+
+	/* Print line number in decimal */
+	mov	r6, #10			/* Divide by 10 after every loop iteration */
+	ldr	r5, =MAX_DEC_DIVISOR
+dec_print_loop:
+	udiv	r0, r4, r5			/* Quotient */
+	mls	r4, r0, r5, r4			/* Remainder */
+	add	r0, r0, #ASCII_OFFSET_NUM	/* Convert to ASCII */
+	bl	plat_crash_console_putc
+	udiv	r5, r5, r6			/* Reduce divisor */
+	cmp	r5, #0
+	bne	dec_print_loop
+
+	bl	plat_crash_console_flush
+
+1:
+#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
+	no_ret	plat_panic_handler
+endfunc asm_assert
+#endif /* ENABLE_ASSERTIONS */
+
+/*
+ * This function prints a string from address in r4
+ * Clobber: lr, r0 - r4
+ */
+func asm_print_str
+	mov	r3, lr
+1:
+	ldrb	r0, [r4], #0x1
+	cmp	r0, #0
+	beq	2f
+	bl	plat_crash_console_putc
+	b	1b
+2:
+	bx	r3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in r4.
+ * In: r4 = the hexadecimal to print.
+ * Clobber: lr, r0 - r3, r5
+ */
+func asm_print_hex
+	mov	r3, lr
+	mov	r5, #32  /* No of bits to convert to ascii */
+1:
+	sub	r5, r5, #4
+	lsr	r0, r4, r5
+	and	r0, r0, #0xf
+	cmp	r0, #0xa
+	blo	2f
+	/* Add 0x27 on top of ASCII_OFFSET_NUM to get the
+	 * ASCII characters 'a' - 'f'.
+	 */
+	add	r0, r0, #0x27
+2:
+	add	r0, r0, #ASCII_OFFSET_NUM
+	bl	plat_crash_console_putc
+	cmp	r5, #0
+	bne	1b
+	bx	r3
+endfunc asm_print_hex
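The decimal print loop in asm_assert walks a divisor down from MAX_DEC_DIVISOR to 1, emitting one digit per iteration (udiv for the quotient, mls for the remainder). The same algorithm in C, for reference; leading zeroes are printed, exactly as the assembly does:

    #define MAX_DEC_DIVISOR   10000   /* enough for line numbers up to 65535 */
    #define ASCII_OFFSET_NUM  '0'

    int plat_crash_console_putc(int c);   /* provided by the platform */

    static void print_line_dec(unsigned int value)   /* value must fit in 16 bits */
    {
        unsigned int divisor = MAX_DEC_DIVISOR;

        while (divisor != 0) {
            unsigned int digit = value / divisor;   /* udiv r0, r4, r5 */
            value -= digit * divisor;               /* mls  r4, r0, r5, r4 */
            plat_crash_console_putc(digit + ASCII_OFFSET_NUM);
            divisor /= 10;                          /* udiv r5, r5, r6 */
        }
    }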
diff --git a/common/aarch64/debug.S b/common/aarch64/debug.S
index fcf5f26..4478d0d 100644
--- a/common/aarch64/debug.S
+++ b/common/aarch64/debug.S
@@ -1,35 +1,12 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
+#include <debug.h>
 
 	.globl	asm_print_str
 	.globl	asm_print_hex
@@ -41,7 +18,7 @@
 /* The offset to add to get ascii for numerals '0 - 9' */
 #define ASCII_OFFSET_NUM	0x30
 
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 .section .rodata.assert_str, "aS"
 assert_msg1:
 	.asciz "ASSERT: File "
@@ -78,6 +55,11 @@
  * ---------------------------------------------------------------------------
  */
 func asm_assert
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+	/*
+	 * Only print the output if LOG_LEVEL is higher or equal to
+	 * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+	 */
 	mov	x5, x0
 	mov	x6, x1
 	/* Ensure the console is initialized */
@@ -96,9 +78,12 @@
 	b.ne	_assert_loop
 	mov	x4, x6
 	asm_print_line_dec
+	bl	plat_crash_console_flush
 _assert_loop:
-	b	_assert_loop
-#endif
+#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
+	no_ret	plat_panic_handler
+endfunc asm_assert
+#endif /* ENABLE_ASSERTIONS */
 
 /*
  * This function prints a string from address in x4.
@@ -114,11 +99,12 @@
 	b	1b
 2:
 	ret	x3
+endfunc asm_print_str
 
 /*
  * This function prints a hexadecimal number in x4.
  * In: x4 = the hexadecimal to print.
- * Clobber: x30, x0, x5, x1, x2, x3
+ * Clobber: x30, x0 - x3, x5
  */
 func asm_print_hex
 	mov	x3, x30
@@ -138,6 +124,7 @@
 	bl	plat_crash_console_putc
 	cbnz	x5, 1b
 	ret	x3
+endfunc asm_print_hex
 
 	/***********************************************************
 	 * The common implementation of do_panic for all BL stages
@@ -175,7 +162,7 @@
 	mov	x6, x30
 	bl	plat_crash_console_init
 	/* Check if the console is initialized */
-	cbz	x0, _panic_loop
+	cbz	x0, _panic_handler
 	/* The console is initialized */
 	adr	x4, panic_msg
 	bl	asm_print_str
@@ -183,6 +170,12 @@
 	/* The panic location is lr -4 */
 	sub	x4, x4, #4
 	bl	asm_print_hex
-_panic_loop:
-	b	_panic_loop
 
+	bl	plat_crash_console_flush
+
+_panic_handler:
+	/* Pass to plat_panic_handler the address from where el3_panic was
+	 * called, not the address of the call from el3_panic. */
+	mov	x30, x6
+	b	plat_panic_handler
+endfunc do_panic
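Both the AArch32 and AArch64 asm_assert implementations now share the same flow: print the file name and, if it fits in 16 bits, the line number on the crash console, flush, then fall into plat_panic_handler instead of spinning. A C rendering of that flow; print_str is an illustrative stand-in for asm_print_str, and print_line_dec is the decimal loop sketched after common/aarch32/debug.S:

    void asm_assert_sketch(const char *file, unsigned int line)
    {
    #if LOG_LEVEL >= LOG_LEVEL_INFO
        if (plat_crash_console_init() != 0) {   /* crash console is available */
            print_str("ASSERT: File ");
            print_str(file);
            print_str(" Line ");
            if (line <= 0xffff) {               /* maximum supported line number */
                print_line_dec(line);
                plat_crash_console_flush();
            }
        }
    #endif
        plat_panic_handler();                   /* does not return */
    }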
diff --git a/common/aarch64/early_exceptions.S b/common/aarch64/early_exceptions.S
index 90f5421..19cc35d 100644
--- a/common/aarch64/early_exceptions.S
+++ b/common/aarch64/early_exceptions.S
@@ -1,171 +1,129 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <asm_macros.S>
-#include <runtime_svc.h>
+#include <bl_common.h>
 
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by BL2 and BL31 stages.
+ * BL31 uses them before stacks are set up. BL2 uses them throughout.
+ * -----------------------------------------------------------------------------
+ */
 	.globl	early_exceptions
 
-	.section	.vectors, "ax"; .align 11
+vector_base early_exceptions
 
 	/* -----------------------------------------------------
-	 * Very simple stackless exception handlers used by BL2
-	 * and BL3-1 bootloader stages. BL3-1 uses them before
-	 * stacks are setup. BL2 uses them throughout.
+	 * Current EL with SP0 : 0x0 - 0x200
 	 * -----------------------------------------------------
 	 */
-	.align	7
-early_exceptions:
-	/* -----------------------------------------------------
-	 * Current EL with SP0 : 0x0 - 0x180
-	 * -----------------------------------------------------
-	 */
-SynchronousExceptionSP0:
+vector_entry SynchronousExceptionSP0
 	mov	x0, #SYNC_EXCEPTION_SP_EL0
 	bl	plat_report_exception
-	b	SynchronousExceptionSP0
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionSP0
 
-	.align	7
-IrqSP0:
+vector_entry IrqSP0
 	mov	x0, #IRQ_SP_EL0
 	bl	plat_report_exception
-	b	IrqSP0
+	no_ret	plat_panic_handler
 	check_vector_size IrqSP0
 
-	.align	7
-FiqSP0:
+vector_entry FiqSP0
 	mov	x0, #FIQ_SP_EL0
 	bl	plat_report_exception
-	b	FiqSP0
+	no_ret	plat_panic_handler
 	check_vector_size FiqSP0
 
-	.align	7
-SErrorSP0:
+vector_entry SErrorSP0
 	mov	x0, #SERROR_SP_EL0
 	bl	plat_report_exception
-	b	SErrorSP0
+	no_ret	plat_panic_handler
 	check_vector_size SErrorSP0
 
 	/* -----------------------------------------------------
-	 * Current EL with SPx: 0x200 - 0x380
+	 * Current EL with SPx: 0x200 - 0x400
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionSPx:
+vector_entry SynchronousExceptionSPx
 	mov	x0, #SYNC_EXCEPTION_SP_ELX
 	bl	plat_report_exception
-	b	SynchronousExceptionSPx
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionSPx
 
-	.align	7
-IrqSPx:
+vector_entry IrqSPx
 	mov	x0, #IRQ_SP_ELX
 	bl	plat_report_exception
-	b	IrqSPx
+	no_ret	plat_panic_handler
 	check_vector_size IrqSPx
 
-	.align	7
-FiqSPx:
+vector_entry FiqSPx
 	mov	x0, #FIQ_SP_ELX
 	bl	plat_report_exception
-	b	FiqSPx
+	no_ret	plat_panic_handler
 	check_vector_size FiqSPx
 
-	.align	7
-SErrorSPx:
+vector_entry SErrorSPx
 	mov	x0, #SERROR_SP_ELX
 	bl	plat_report_exception
-	b	SErrorSPx
+	no_ret	plat_panic_handler
 	check_vector_size SErrorSPx
 
 	/* -----------------------------------------------------
-	 * Lower EL using AArch64 : 0x400 - 0x580
+	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionA64:
+vector_entry SynchronousExceptionA64
 	mov	x0, #SYNC_EXCEPTION_AARCH64
 	bl	plat_report_exception
-	b	SynchronousExceptionA64
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionA64
 
-	.align	7
-IrqA64:
+vector_entry IrqA64
 	mov	x0, #IRQ_AARCH64
 	bl	plat_report_exception
-	b	IrqA64
+	no_ret	plat_panic_handler
 	check_vector_size IrqA64
 
-	.align	7
-FiqA64:
+vector_entry FiqA64
 	mov	x0, #FIQ_AARCH64
 	bl	plat_report_exception
-	b	FiqA64
+	no_ret	plat_panic_handler
 	check_vector_size FiqA64
 
-	.align	7
-SErrorA64:
+vector_entry SErrorA64
 	mov	x0, #SERROR_AARCH64
 	bl	plat_report_exception
-	b   	SErrorA64
+	no_ret	plat_panic_handler
 	check_vector_size SErrorA64
 
 	/* -----------------------------------------------------
-	 * Lower EL using AArch32 : 0x0 - 0x180
+	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * -----------------------------------------------------
 	 */
-	.align	7
-SynchronousExceptionA32:
+vector_entry SynchronousExceptionA32
 	mov	x0, #SYNC_EXCEPTION_AARCH32
 	bl	plat_report_exception
-	b	SynchronousExceptionA32
+	no_ret	plat_panic_handler
 	check_vector_size SynchronousExceptionA32
 
-	.align	7
-IrqA32:
+vector_entry IrqA32
 	mov	x0, #IRQ_AARCH32
 	bl	plat_report_exception
-	b	IrqA32
+	no_ret	plat_panic_handler
 	check_vector_size IrqA32
 
-	.align	7
-FiqA32:
+vector_entry FiqA32
 	mov	x0, #FIQ_AARCH32
 	bl	plat_report_exception
-	b	FiqA32
+	no_ret	plat_panic_handler
 	check_vector_size FiqA32
 
-	.align	7
-SErrorA32:
+vector_entry SErrorA32
 	mov	x0, #SERROR_AARCH32
 	bl	plat_report_exception
-	b	SErrorA32
+	no_ret	plat_panic_handler
 	check_vector_size SErrorA32
diff --git a/common/auth.c b/common/auth.c
deleted file mode 100644
index 37234b8..0000000
--- a/common/auth.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <auth.h>
-#include <debug.h>
-
-/*
- * Initialize the authentication module
- */
-void auth_init(void)
-{
-	assert(auth_mod.name);
-	assert(auth_mod.init);
-	assert(auth_mod.verify);
-
-	INFO("Using authentication module '%s'\n", auth_mod.name);
-	if (auth_mod.init() != 0)
-		assert(0);
-}
-
-/*
- * Authenticate a certificate/image
- *
- * Return: 0 = success, Otherwise = error
- */
-int auth_verify_obj(unsigned int obj_id, uintptr_t obj_buf, size_t len)
-{
-	assert(obj_id < AUTH_NUM_OBJ);
-	assert(obj_buf != 0);
-	assert(auth_mod.verify);
-
-	return auth_mod.verify(obj_id, obj_buf, len);
-}
diff --git a/common/auth/polarssl/polarssl.c b/common/auth/polarssl/polarssl.c
deleted file mode 100644
index e099f50..0000000
--- a/common/auth/polarssl/polarssl.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* Authentication module based on PolarSSL */
-
-#include <stddef.h>
-
-#include <assert.h>
-#include <auth.h>
-#include <debug.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <platform_oid.h>
-
-#include <polarssl/memory_buffer_alloc.h>
-#include <polarssl/oid.h>
-#include <polarssl/platform.h>
-#include <polarssl/sha256.h>
-#include <polarssl/x509_crt.h>
-
-/*
- * At each authentication stage, the module is responsible for extracting and
- * storing those elements (keys, hashes, etc.) that will be needed later on
- * during the Trusted Boot process.
- */
-
-/* SHA256 algorithm */
-#define SHA_BYTES			32
-
-/*
- * An 8 KB stack has been proven to be enough for the current Trusted Boot
- * process
- */
-#define POLARSSL_HEAP_SIZE		(8*1024)
-static unsigned char heap[POLARSSL_HEAP_SIZE];
-
-/*
- * RSA public keys:
- *  SubjectPublicKeyInfo  ::=  SEQUENCE  {          1 + 3
- *       algorithm            AlgorithmIdentifier,  1 + 1 (sequence)
- *                                                + 1 + 1 + 9 (rsa oid)
- *                                                + 1 + 1 (params null)
- *       subjectPublicKey     BIT STRING }          1 + 3 + (1 + below)
- *  RSAPublicKey ::= SEQUENCE {                     1 + 3
- *      modulus           INTEGER,  -- n            1 + 3 + MPI_MAX + 1
- *      publicExponent    INTEGER   -- e            1 + 3 + MPI_MAX + 1
- *  }
- *
- * POLARSSL_MPI_MAX_SIZE is set to 256 bytes (RSA-2048 bit keys) in the
- * configuration file
- */
-#define RSA_PUB_DER_MAX_BYTES   38 + 2 * POLARSSL_MPI_MAX_SIZE
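
Summing the fixed per-field byte counts in the comment above gives the 38-byte ASN.1 overhead, so for the RSA-2048 keys this module targets the buffer size works out as follows (a worked check, not part of the original sources):

/* Worked size check for RSA_PUB_DER_MAX_BYTES with RSA-2048 keys */
#define EXAMPLE_MPI_MAX_SIZE	256				/* POLARSSL_MPI_MAX_SIZE */
#define EXAMPLE_PK_DER_MAX	(38 + 2 * EXAMPLE_MPI_MAX_SIZE)	/* = 550 bytes */

/* Fails to compile if the arithmetic above ever drifts from 550: */
typedef char example_pk_der_max_check[(EXAMPLE_PK_DER_MAX == 550) ? 1 : -1];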
-
-/*
- * Buffer for storing public keys extracted from certificates while they are
- * verified
- */
-static unsigned char pk_buf[RSA_PUB_DER_MAX_BYTES];
-
-/* We use this variable to parse and authenticate the certificates */
-static x509_crt cert;
-
-/* BL specific variables */
-#if IMAGE_BL1
-static unsigned char sha_bl2[SHA_BYTES];
-#elif IMAGE_BL2
-/* Buffers to store the hash of BL3-x images */
-static unsigned char sha_bl30[SHA_BYTES];
-static unsigned char sha_bl31[SHA_BYTES];
-static unsigned char sha_bl32[SHA_BYTES];
-static unsigned char sha_bl33[SHA_BYTES];
-/* Buffers to store the Trusted and Non-Trusted world public keys */
-static unsigned char tz_world_pk[RSA_PUB_DER_MAX_BYTES];
-static unsigned char ntz_world_pk[RSA_PUB_DER_MAX_BYTES];
-static size_t tz_world_pk_len, ntz_world_pk_len;
-/* Buffer to store the BL3-x public keys */
-static unsigned char content_pk[RSA_PUB_DER_MAX_BYTES];
-static size_t content_pk_len;
-#endif
-
-
-static int x509_get_crt_ext_data(const unsigned char **ext_data,
-				 size_t *ext_len,
-				 x509_crt *crt,
-				 const char *oid)
-{
-	int ret;
-	size_t len;
-	unsigned char *end_ext_data, *end_ext_octet;
-	unsigned char *p;
-	const unsigned char *end;
-	char oid_str[64];
-
-	p = crt->v3_ext.p;
-	end = crt->v3_ext.p + crt->v3_ext.len;
-
-	ret = asn1_get_tag(&p, end, &len, ASN1_CONSTRUCTED | ASN1_SEQUENCE);
-	if (ret != 0)
-		return POLARSSL_ERR_X509_INVALID_EXTENSIONS + ret;
-
-	if (end != p + len)
-		return POLARSSL_ERR_X509_INVALID_EXTENSIONS +
-				POLARSSL_ERR_ASN1_LENGTH_MISMATCH;
-
-	while (p < end) {
-		/*
-		 * Extension  ::=  SEQUENCE  {
-		 *      extnID      OBJECT IDENTIFIER,
-		 *      critical    BOOLEAN DEFAULT FALSE,
-		 *      extnValue   OCTET STRING  }
-		 */
-		x509_buf extn_oid = {0, 0, NULL};
-		int is_critical = 0; /* DEFAULT FALSE */
-
-		ret = asn1_get_tag(&p, end, &len,
-				ASN1_CONSTRUCTED | ASN1_SEQUENCE);
-		if (ret != 0)
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS + ret;
-
-		end_ext_data = p + len;
-
-		/* Get extension ID */
-		extn_oid.tag = *p;
-
-		ret = asn1_get_tag(&p, end, &extn_oid.len, ASN1_OID);
-		if (ret != 0)
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS + ret;
-
-		extn_oid.p = p;
-		p += extn_oid.len;
-
-		if ((end - p) < 1)
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS +
-					POLARSSL_ERR_ASN1_OUT_OF_DATA;
-
-		/* Get optional critical */
-		ret = asn1_get_bool(&p, end_ext_data, &is_critical);
-		if (ret != 0 && (ret != POLARSSL_ERR_ASN1_UNEXPECTED_TAG))
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS + ret;
-
-		/* Data should be octet string type */
-		ret = asn1_get_tag(&p, end_ext_data, &len, ASN1_OCTET_STRING);
-		if (ret != 0)
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS + ret;
-
-		end_ext_octet = p + len;
-
-		if (end_ext_octet != end_ext_data)
-			return POLARSSL_ERR_X509_INVALID_EXTENSIONS +
-					POLARSSL_ERR_ASN1_LENGTH_MISMATCH;
-
-		/* Detect requested extension */
-		oid_get_numeric_string(oid_str, 64, &extn_oid);
-		if (memcmp(oid, oid_str, sizeof(oid)) == 0) {
-			*ext_data = p;
-			*ext_len = len;
-			return 0;
-		}
-
-		/* Next */
-		p = end_ext_octet;
-	}
-
-	if (p != end)
-		return POLARSSL_ERR_X509_INVALID_EXTENSIONS +
-				POLARSSL_ERR_ASN1_LENGTH_MISMATCH;
-
-	return POLARSSL_ERR_X509_UNKNOWN_OID;
-}
-
-#if IMAGE_BL1
-/*
- * Parse and verify the BL2 certificate
- *
- * This function verifies the integrity of the BL2 certificate, checks that it
- * has been signed with the ROT key and extracts the BL2 hash stored in the
- * certificate so it can be matched later against the calculated hash.
- *
- * Return: 0 = success, Otherwise = error
- */
-static int check_bl2_cert(unsigned char *buf, size_t len)
-{
-	const unsigned char *p;
-	size_t sz;
-	int err, flags;
-
-	x509_crt_init(&cert);
-
-	/* Parse the BL2 certificate */
-	err = x509_crt_parse(&cert, buf, len);
-	if (err) {
-		ERROR("BL2 certificate parse error %d.\n", err);
-		goto error;
-	}
-
-	/* Check that it has been signed with the ROT key */
-	err = pk_write_pubkey_der(&cert.pk, pk_buf, sizeof(pk_buf));
-	if (err < 0) {
-		ERROR("Error loading ROT key in DER format %d.\n", err);
-		goto error;
-	}
-
-	sz = (size_t)err;
-	p = pk_buf + sizeof(pk_buf) - sz;
-
-	err = plat_match_rotpk(p, sz);
-	if (err) {
-		ERROR("ROT and BL2 certificate key mismatch\n");
-		goto error;
-	}
-
-	/* Verify certificate */
-	err = x509_crt_verify(&cert, &cert, NULL, NULL, &flags, NULL, NULL);
-	if (err) {
-		ERROR("BL2 certificate verification error %d. Flags: 0x%x.\n",
-				err, flags);
-		goto error;
-	}
-
-	/* Extract BL2 image hash from certificate */
-	err = x509_get_crt_ext_data(&p, &sz, &cert, BL2_HASH_OID);
-	if (err) {
-		ERROR("Cannot read BL2 hash from certificate\n");
-		goto error;
-	}
-
-	assert(sz == SHA_BYTES + 2);
-
-	/* Skip the tag and length bytes and copy the hash */
-	p += 2;
-	memcpy(sha_bl2, p, SHA_BYTES);
-
-error:
-	x509_crt_free(&cert);
-
-	return err;
-}
-#endif /* IMAGE_BL1 */
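
check_bl2_cert() delegates the actual root-of-trust comparison to the platform via plat_match_rotpk(), handing it the DER-encoded public key extracted from the certificate and expecting 0 on a match. A rough sketch of what a platform port might provide; the stored-hash variable and its provisioning are hypothetical, and the sha256() helper is the same one this module already uses for image hashes:

#include <stddef.h>
#include <string.h>

/* Illustrative only: compare SHA-256(candidate DER key) against a ROT key
 * hash provisioned by the platform in trusted storage/OTP. */
extern const unsigned char plat_rotpk_hash[32];	/* hypothetical storage */

int plat_match_rotpk(const unsigned char *key_ptr, size_t key_len)
{
	unsigned char hash[32];

	sha256(key_ptr, key_len, hash, 0);
	return memcmp(hash, plat_rotpk_hash, sizeof(hash)) != 0;	/* 0 = match */
}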
-
-#if IMAGE_BL2
-static int check_trusted_key_cert(unsigned char *buf, size_t len)
-{
-	const unsigned char *p;
-	size_t sz;
-	int err, flags;
-
-	x509_crt_init(&cert);
-
-	/* Parse the Trusted Key certificate */
-	err = x509_crt_parse(&cert, buf, len);
-	if (err) {
-		ERROR("Trusted Key certificate parse error %d.\n", err);
-		goto error;
-	}
-
-	/* Verify Trusted Key certificate */
-	err = x509_crt_verify(&cert, &cert, NULL, NULL, &flags, NULL, NULL);
-	if (err) {
-		ERROR("Trusted Key certificate verification error %d. Flags: "
-				"0x%x.\n", err, flags);
-		goto error;
-	}
-
-	/* Check that it has been signed with the ROT key */
-	err = pk_write_pubkey_der(&cert.pk, pk_buf, sizeof(pk_buf));
-	if (err < 0) {
-		ERROR("Error loading ROT key in DER format %d.\n", err);
-		goto error;
-	}
-
-	sz = (size_t)err;
-	p = pk_buf + sizeof(pk_buf) - sz;
-
-	if (plat_match_rotpk(p, sz)) {
-		ERROR("ROT and Trusted Key certificate key mismatch\n");
-		goto error;
-	}
-
-	/* Extract Trusted World key from extensions */
-	err = x509_get_crt_ext_data(&p, &tz_world_pk_len,
-			&cert, TZ_WORLD_PK_OID);
-	if (err) {
-		ERROR("Cannot read Trusted World key\n");
-		goto error;
-	}
-
-	assert(tz_world_pk_len <= RSA_PUB_DER_MAX_BYTES);
-	memcpy(tz_world_pk, p, tz_world_pk_len);
-
-	/* Extract Non-Trusted World key from extensions */
-	err = x509_get_crt_ext_data(&p, &ntz_world_pk_len,
-			&cert, NTZ_WORLD_PK_OID);
-	if (err) {
-		ERROR("Cannot read Non-Trusted World key\n");
-		goto error;
-	}
-
-	assert(tz_world_pk_len <= RSA_PUB_DER_MAX_BYTES);
-	memcpy(ntz_world_pk, p, ntz_world_pk_len);
-
-error:
-	x509_crt_free(&cert);
-
-	return err;
-}
-
-static int check_bl3x_key_cert(const unsigned char *buf, size_t len,
-			       const unsigned char *i_key, size_t i_key_len,
-			       unsigned char *s_key, size_t *s_key_len,
-			       const char *key_oid)
-{
-	const unsigned char *p;
-	size_t sz;
-	int err, flags;
-
-	x509_crt_init(&cert);
-
-	/* Parse key certificate */
-	err = x509_crt_parse(&cert, buf, len);
-	if (err) {
-		ERROR("Key certificate parse error %d.\n", err);
-		goto error;
-	}
-
-	/* Verify certificate */
-	err = x509_crt_verify(&cert, &cert, NULL, NULL, &flags, NULL, NULL);
-	if (err) {
-		ERROR("Key certificate verification error %d. Flags: "
-				"0x%x.\n", err, flags);
-		goto error;
-	}
-
-	/* Check that the certificate has been signed by the issuer */
-	err = pk_write_pubkey_der(&cert.pk, pk_buf, sizeof(pk_buf));
-	if (err < 0) {
-		ERROR("Error loading key in DER format %d.\n", err);
-		goto error;
-	}
-
-	sz = (size_t)err;
-	p = pk_buf + sizeof(pk_buf) - sz;
-	if ((sz != i_key_len) || memcmp(p, i_key, sz)) {
-		ERROR("Key certificate not signed with issuer key\n");
-		err = 1;
-		goto error;
-	}
-
-	/* Get the content certificate key */
-	err = x509_get_crt_ext_data(&p, &sz, &cert, key_oid);
-	if (err) {
-		ERROR("Extension %s not found in Key certificate\n", key_oid);
-		goto error;
-	}
-
-	assert(sz <= RSA_PUB_DER_MAX_BYTES);
-	memcpy(s_key, p, sz);
-	*s_key_len = sz;
-
-error:
-	x509_crt_free(&cert);
-
-	return err;
-}
-
-static int check_bl3x_cert(unsigned char *buf, size_t len,
-		       const unsigned char *i_key, size_t i_key_len,
-		       const char *hash_oid, unsigned char *sha)
-{
-	const unsigned char *p;
-	size_t sz;
-	int err, flags;
-
-	x509_crt_init(&cert);
-
-	/* Parse BL31 content certificate */
-	err = x509_crt_parse(&cert, buf, len);
-	if (err) {
-		ERROR("Content certificate parse error %d.\n", err);
-		goto error;
-	}
-
-	/* Verify certificate */
-	err = x509_crt_verify(&cert, &cert, NULL, NULL, &flags, NULL, NULL);
-	if (err) {
-		ERROR("Content certificate verification error %d. Flags: "
-				"0x%x.\n", err, flags);
-		goto error;
-	}
-
-	/* Check that content certificate has been signed with the content
-	 * certificate key corresponding to this image */
-	sz = pk_write_pubkey_der(&cert.pk, pk_buf, sizeof(pk_buf));
-	p = pk_buf + sizeof(pk_buf) - sz;
-
-	if ((sz != i_key_len) || memcmp(p, i_key, sz)) {
-		ERROR("Content certificate not signed with content "
-				"certificate key\n");
-		err = 1;
-		goto error;
-	}
-
-	/* Extract image hash from certificate */
-	err = x509_get_crt_ext_data(&p, &sz, &cert, hash_oid);
-	if (err) {
-		ERROR("Cannot read hash from certificate\n");
-		goto error;
-	}
-
-	assert(sz == SHA_BYTES + 2);
-
-	/* Skip the tag and length bytes and copy the hash */
-	p += 2;
-	memcpy(sha, p, SHA_BYTES);
-
-error:
-	x509_crt_free(&cert);
-
-	return err;
-}
-#endif /* IMAGE_BL2 */
-
-/*
- * Calculate the hash of the image and check it against the hash extracted
- * previously from the certificate
- *
- * Parameters:
- *   buf: buffer where image is loaded
- *   len: size of the image
- *   sha: matching hash (extracted from the image certificate)
- *
- * Return: 0 = match, Otherwise = mismatch
- */
-static int check_bl_img(unsigned char *buf, size_t len,
-			const unsigned char *sha)
-{
-	unsigned char img_sha[SHA_BYTES];
-
-	/* Calculate the hash of the image */
-	sha256(buf, len, img_sha, 0);
-
-	/* Match the hash with the one extracted from the certificate */
-	if (memcmp(img_sha, sha, SHA_BYTES)) {
-		ERROR("Image hash mismatch\n");
-		return 1;
-	}
-
-	return 0;
-}
-
-/*
- * Object verification function
- *
- * The id parameter will indicate the expected format of the object
- * (certificate, image, etc).
- *
- * Return: 0 = success, Otherwise = error
- */
-static int polarssl_mod_verify(unsigned int id, uintptr_t obj, size_t len)
-{
-	int ret;
-
-	switch (id) {
-#if IMAGE_BL1
-	case AUTH_BL2_IMG_CERT:
-		ret = check_bl2_cert((unsigned char *)obj, len);
-		break;
-	case AUTH_BL2_IMG:
-		ret = check_bl_img((unsigned char *)obj, len, sha_bl2);
-		break;
-#endif /* IMAGE_BL1 */
-
-#if IMAGE_BL2
-	case AUTH_TRUSTED_KEY_CERT:
-		ret = check_trusted_key_cert((unsigned char *)obj, len);
-		break;
-	case AUTH_BL30_KEY_CERT:
-		ret = check_bl3x_key_cert((unsigned char *)obj, len,
-				tz_world_pk, tz_world_pk_len,
-				content_pk, &content_pk_len,
-				BL30_CONTENT_CERT_PK_OID);
-		break;
-	case AUTH_BL31_KEY_CERT:
-		ret = check_bl3x_key_cert((unsigned char *)obj, len,
-				tz_world_pk, tz_world_pk_len,
-				content_pk, &content_pk_len,
-				BL31_CONTENT_CERT_PK_OID);
-		break;
-	case AUTH_BL32_KEY_CERT:
-		ret = check_bl3x_key_cert((unsigned char *)obj, len,
-				tz_world_pk, tz_world_pk_len,
-				content_pk, &content_pk_len,
-				BL32_CONTENT_CERT_PK_OID);
-		break;
-	case AUTH_BL33_KEY_CERT:
-		ret = check_bl3x_key_cert((unsigned char *)obj, len,
-				ntz_world_pk, ntz_world_pk_len,
-				content_pk, &content_pk_len,
-				BL33_CONTENT_CERT_PK_OID);
-		break;
-	case AUTH_BL30_IMG_CERT:
-		ret = check_bl3x_cert((unsigned char *)obj, len,
-				content_pk, content_pk_len,
-				BL30_HASH_OID, sha_bl30);
-		break;
-	case AUTH_BL31_IMG_CERT:
-		ret = check_bl3x_cert((unsigned char *)obj, len,
-				content_pk, content_pk_len,
-				BL31_HASH_OID, sha_bl31);
-		break;
-	case AUTH_BL32_IMG_CERT:
-		ret = check_bl3x_cert((unsigned char *)obj, len,
-				content_pk, content_pk_len,
-				BL32_HASH_OID, sha_bl32);
-		break;
-	case AUTH_BL33_IMG_CERT:
-		ret = check_bl3x_cert((unsigned char *)obj, len,
-				content_pk, content_pk_len,
-				BL33_HASH_OID, sha_bl33);
-		break;
-	case AUTH_BL30_IMG:
-		ret = check_bl_img((unsigned char *)obj, len, sha_bl30);
-		break;
-	case AUTH_BL31_IMG:
-		ret = check_bl_img((unsigned char *)obj, len, sha_bl31);
-		break;
-	case AUTH_BL32_IMG:
-		ret = check_bl_img((unsigned char *)obj, len, sha_bl32);
-		break;
-	case AUTH_BL33_IMG:
-		ret = check_bl_img((unsigned char *)obj, len, sha_bl33);
-		break;
-#endif /* IMAGE_BL2 */
-	default:
-		ret = -1;
-		break;
-	}
-
-	return ret;
-}
-
-/*
- * Module initialization function
- *
- * Return: 0 = success, Otherwise = error
- */
-static int polarssl_mod_init(void)
-{
-	/* Initialize the PolarSSL heap */
-	return memory_buffer_alloc_init(heap, POLARSSL_HEAP_SIZE);
-}
-
-const auth_mod_t auth_mod = {
-	.name = "PolarSSL",
-	.init = polarssl_mod_init,
-	.verify = polarssl_mod_verify
-};
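
Read together with common/auth/auth.c above, the switch in polarssl_mod_verify() encodes a fixed chain of trust: Trusted Key certificate first, then the per-image key certificate, then the content certificate, then the image hash itself. A hypothetical BL2-side call sequence for the BL3-1 path, using the object IDs from the switch (the buffer variables and error handling are illustrative; the real driver code lived in the BL2 loader):

/* Illustrative BL2 sequence for authenticating BL3-1 with this module */
auth_init();	/* runs polarssl_mod_init() -> memory_buffer_alloc_init() */

if (auth_verify_obj(AUTH_TRUSTED_KEY_CERT, tkey_cert_base, tkey_cert_len))
	panic();	/* extracts the Trusted/Non-Trusted World keys */
if (auth_verify_obj(AUTH_BL31_KEY_CERT, bl31_key_cert_base, bl31_key_cert_len))
	panic();	/* extracts the BL3-1 content certificate key */
if (auth_verify_obj(AUTH_BL31_IMG_CERT, bl31_img_cert_base, bl31_img_cert_len))
	panic();	/* extracts the expected BL3-1 hash */
if (auth_verify_obj(AUTH_BL31_IMG, bl31_base, bl31_size))
	panic();	/* hashes the loaded image and compares */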
diff --git a/common/auth/polarssl/polarssl.mk b/common/auth/polarssl/polarssl.mk
deleted file mode 100644
index f7d92ea..0000000
--- a/common/auth/polarssl/polarssl.mk
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# POLARSSL_DIR must be set to the PolarSSL main directory (it must contain
-# the 'include' and 'library' subdirectories).
-ifeq (${POLARSSL_DIR},)
-  $(error Error: POLARSSL_DIR not set)
-endif
-
-INCLUDES		+=	-I${POLARSSL_DIR}/include		\
-				-Icommon/auth/polarssl
-
-POLARSSL_CONFIG_FILE	:=	"<polarssl_config.h>"
-$(eval $(call add_define,POLARSSL_CONFIG_FILE))
-
-POLARSSL_SOURCES	:=	$(addprefix ${POLARSSL_DIR}/library/,	\
-				asn1parse.c 				\
-				asn1write.c 				\
-				bignum.c				\
-				md.c					\
-				md_wrap.c				\
-				memory_buffer_alloc.c			\
-				oid.c 					\
-				pk.c 					\
-				pk_wrap.c 				\
-				pkparse.c 				\
-				pkwrite.c 				\
-				platform.c 				\
-				rsa.c 					\
-				sha1.c					\
-				sha256.c				\
-				x509.c 					\
-				x509_crt.c 				\
-				)
-
-BL1_SOURCES		+=	${POLARSSL_SOURCES} 			\
-				common/auth/polarssl/polarssl.c
-
-BL2_SOURCES		+=	${POLARSSL_SOURCES} 			\
-				common/auth/polarssl/polarssl.c
-
-DISABLE_PEDANTIC	:=	1
diff --git a/common/auth/polarssl/polarssl_config.h b/common/auth/polarssl/polarssl_config.h
deleted file mode 100644
index 531e084..0000000
--- a/common/auth/polarssl/polarssl_config.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __POLARSSL_CONFIG_H__
-#define __POLARSSL_CONFIG_H__
-
-
-/*
- * Configuration file to build PolarSSL with the required features for
- * Trusted Boot
- */
-
-#define POLARSSL_PLATFORM_MEMORY
-#define POLARSSL_PLATFORM_NO_STD_FUNCTIONS
-
-#define POLARSSL_PKCS1_V15
-#define POLARSSL_PKCS1_V21
-
-#define POLARSSL_X509_ALLOW_UNSUPPORTED_CRITICAL_EXTENSION
-#define POLARSSL_X509_CHECK_KEY_USAGE
-#define POLARSSL_X509_CHECK_EXTENDED_KEY_USAGE
-
-#define POLARSSL_ASN1_PARSE_C
-#define POLARSSL_ASN1_WRITE_C
-
-#define POLARSSL_BASE64_C
-#define POLARSSL_BIGNUM_C
-
-#define POLARSSL_ERROR_C
-#define POLARSSL_MD_C
-
-#define POLARSSL_MEMORY_BUFFER_ALLOC_C
-#define POLARSSL_OID_C
-
-#define POLARSSL_PK_C
-#define POLARSSL_PK_PARSE_C
-#define POLARSSL_PK_WRITE_C
-
-#define POLARSSL_PLATFORM_C
-
-#define POLARSSL_RSA_C
-#define POLARSSL_SHA1_C
-#define POLARSSL_SHA256_C
-
-#define POLARSSL_VERSION_C
-
-#define POLARSSL_X509_USE_C
-#define POLARSSL_X509_CRT_PARSE_C
-
-/* MPI / BIGNUM options */
-#define POLARSSL_MPI_WINDOW_SIZE              2
-#define POLARSSL_MPI_MAX_SIZE               256
-
-/* Memory buffer allocator options */
-#define POLARSSL_MEMORY_ALIGN_MULTIPLE        8
-
-#include "polarssl/check_config.h"
-
-#endif /* __POLARSSL_CONFIG_H__ */
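
This header was injected through the POLARSSL_CONFIG_FILE define added by polarssl.mk above, so the library sources compiled against it instead of their stock configuration. A sketch of the selection logic this relied on (assumption: this mirrors the PolarSSL 1.3-era config handling; consult the library headers for the exact form):

/* How a PolarSSL source/header would pick up the custom configuration */
#if !defined(POLARSSL_CONFIG_FILE)
#include "polarssl/config.h"		/* library default */
#else
#include POLARSSL_CONFIG_FILE		/* here: "<polarssl_config.h>" */
#endif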
diff --git a/common/bl_common.c b/common/bl_common.c
index 8c241ec..e4473ed 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -1,95 +1,87 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
+#include <auth_mod.h>
 #include <bl_common.h>
 #include <debug.h>
 #include <errno.h>
 #include <io_storage.h>
 #include <platform.h>
+#include <string.h>
+#include <utils.h>
+#include <xlat_tables_defs.h>
 
-unsigned long page_align(unsigned long value, unsigned dir)
+uintptr_t page_align(uintptr_t value, unsigned dir)
 {
-	unsigned long page_size = 1 << FOUR_KB_SHIFT;
-
 	/* Round up the limit to the next page boundary */
-	if (value & (page_size - 1)) {
-		value &= ~(page_size - 1);
+	if (value & (PAGE_SIZE - 1)) {
+		value &= ~(PAGE_SIZE - 1);
 		if (dir == UP)
-			value += page_size;
+			value += PAGE_SIZE;
 	}
 
 	return value;
 }
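
With 4 KB pages, the reworked page_align() leaves aligned addresses untouched and otherwise rounds to the neighbouring page boundary in the requested direction, for example (assuming the usual UP/DOWN direction constants; any value other than UP rounds down):

/* Worked examples, PAGE_SIZE == 0x1000 */
page_align(0x80001234, UP);	/* -> 0x80002000 */
page_align(0x80001234, DOWN);	/* -> 0x80001000 */
page_align(0x80002000, UP);	/* -> 0x80002000, already aligned */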
 
-static inline unsigned int is_page_aligned (unsigned long addr) {
-	const unsigned long page_size = 1 << FOUR_KB_SHIFT;
-
-	return (addr & (page_size - 1)) == 0;
-}
-
-void change_security_state(unsigned int target_security_state)
-{
-	unsigned long scr = read_scr();
-
-	assert(sec_state_is_valid(target_security_state));
-	if (target_security_state == SECURE)
-		scr &= ~SCR_NS_BIT;
-	else
-		scr |= SCR_NS_BIT;
-
-	write_scr(scr);
-}
-
 /******************************************************************************
  * Determine whether the memory region delimited by 'addr' and 'size' is free,
  * given the extents of free memory.
- * Return 1 if it is free, 0 otherwise.
+ * Return 1 if it is free, 0 if it is not free or if the input values are
+ * invalid.
  *****************************************************************************/
-static int is_mem_free(uint64_t free_base, size_t free_size,
-		       uint64_t addr, size_t size)
+int is_mem_free(uintptr_t free_base, size_t free_size,
+		uintptr_t addr, size_t size)
 {
-	return (addr >= free_base) && (addr + size <= free_base + free_size);
+	uintptr_t free_end, requested_end;
+
+	/*
+	 * Handle corner cases first.
+	 *
+	 * The order of the 2 tests is important, because if there's no space
+	 * left (i.e. free_size == 0) but we don't ask for any memory
+	 * (i.e. size == 0) then we should report that the memory is free.
+	 */
+	if (size == 0)
+		return 1;	/* A zero-byte region is always free */
+	if (free_size == 0)
+		return 0;
+
+	/*
+	 * Check that the end addresses don't overflow.
+	 * If they do, consider that this memory region is not free, as this
+	 * is an invalid scenario.
+	 */
+	if (check_uptr_overflow(free_base, free_size - 1))
+		return 0;
+	free_end = free_base + (free_size - 1);
+
+	if (check_uptr_overflow(addr, size - 1))
+		return 0;
+	requested_end = addr + (size - 1);
+
+	/*
+	 * Finally, check that the requested memory region lies within the free
+	 * region.
+	 */
+	return (addr >= free_base) && (requested_end <= free_end);
 }
 
+#if !LOAD_IMAGE_V2
 /******************************************************************************
  * Inside a given memory region, determine whether a sub-region of memory is
  * closer from the top or the bottom of the encompassing region. Return the
  * size of the smallest chunk of free memory surrounding the sub-region in
  * 'small_chunk_size'.
  *****************************************************************************/
-static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end,
-				   uint64_t submem_start, uint64_t submem_end,
-				   size_t *small_chunk_size)
+static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end,
+				  uintptr_t submem_start, uintptr_t submem_end,
+				  size_t *small_chunk_size)
 {
 	size_t top_chunk_size, bottom_chunk_size;
 
@@ -114,10 +106,11 @@
  * Reserve the memory region delimited by 'addr' and 'size'. The extents of free
  * memory are passed in 'free_base' and 'free_size' and they will be updated to
  * reflect the memory usage.
- * The caller must ensure the memory to reserve is free.
+ * The caller must ensure the memory to reserve is free and that the addresses
+ * and sizes passed in arguments are sane.
  *****************************************************************************/
-void reserve_mem(uint64_t *free_base, size_t *free_size,
-		 uint64_t addr, size_t size)
+void reserve_mem(uintptr_t *free_base, size_t *free_size,
+		 uintptr_t addr, size_t size)
 {
 	size_t discard_size;
 	size_t reserved_size;
@@ -127,8 +120,13 @@
 	assert(free_size != NULL);
 	assert(is_mem_free(*free_base, *free_size, addr, size));
 
-	pos = choose_mem_pos(*free_base, *free_base + *free_size,
-			     addr, addr + size,
+	if (size == 0) {
+		WARN("Nothing to allocate, requested size is zero\n");
+		return;
+	}
+
+	pos = choose_mem_pos(*free_base, *free_base + (*free_size - 1),
+			     addr, addr + (size - 1),
 			     &discard_size);
 
 	reserved_size = size + discard_size;
@@ -137,56 +135,55 @@
 	if (pos == BOTTOM)
 		*free_base = addr + size;
 
-	VERBOSE("Reserved %u bytes (discarded %u bytes %s)\n",
+	VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n",
 	     reserved_size, discard_size,
 	     pos == TOP ? "above" : "below");
 }
 
-static void dump_load_info(unsigned long image_load_addr,
-			   unsigned long image_size,
+static void dump_load_info(uintptr_t image_load_addr,
+			   size_t image_size,
 			   const meminfo_t *mem_layout)
 {
-	INFO("Trying to load image at address 0x%lx, size = 0x%lx\n",
-		image_load_addr, image_size);
+	INFO("Trying to load image at address %p, size = 0x%zx\n",
+		(void *)image_load_addr, image_size);
 	INFO("Current memory layout:\n");
-	INFO("  total region = [0x%lx, 0x%lx]\n", mem_layout->total_base,
-			mem_layout->total_base + mem_layout->total_size);
-	INFO("  free region = [0x%lx, 0x%lx]\n", mem_layout->free_base,
-			mem_layout->free_base + mem_layout->free_size);
+	INFO("  total region = [base = %p, size = 0x%zx]\n",
+		(void *) mem_layout->total_base, mem_layout->total_size);
+	INFO("  free region = [base = %p, size = 0x%zx]\n",
+		(void *) mem_layout->free_base, mem_layout->free_size);
 }
+#endif /* LOAD_IMAGE_V2 */
 
 /* Generic function to return the size of an image */
-unsigned long image_size(const char *image_name)
+size_t image_size(unsigned int image_id)
 {
 	uintptr_t dev_handle;
 	uintptr_t image_handle;
 	uintptr_t image_spec;
 	size_t image_size = 0;
-	int io_result = IO_FAIL;
-
-	assert(image_name != NULL);
+	int io_result;
 
 	/* Obtain a reference to the image by querying the platform layer */
-	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
-	if (io_result != IO_SUCCESS) {
-		WARN("Failed to obtain reference to image '%s' (%i)\n",
-			image_name, io_result);
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
 		return 0;
 	}
 
 	/* Attempt to access the image */
 	io_result = io_open(dev_handle, image_spec, &image_handle);
-	if (io_result != IO_SUCCESS) {
-		WARN("Failed to access image '%s' (%i)\n",
-			image_name, io_result);
+	if (io_result != 0) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
 		return 0;
 	}
 
 	/* Find the size of the image */
 	io_result = io_size(image_handle, &image_size);
-	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
-		WARN("Failed to determine the size of the image '%s' file (%i)\n",
-			image_name, io_result);
+	if ((io_result != 0) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
 	}
 	io_result = io_close(image_handle);
 	/* Ignore improbable/unrecoverable error in 'close' */
@@ -200,18 +197,194 @@
 	return image_size;
 }
 
+#if LOAD_IMAGE_V2
+
 /*******************************************************************************
- * Generic function to load an image at a specific address given a name and
- * extents of free memory. It updates the memory layout if the load is
- * successful, as well as the image information and the entry point information.
- * The caller might pass a NULL pointer for the entry point if it is not
- * interested in this information, e.g. because the image just needs to be
- * loaded in memory but won't ever be executed.
+ * Generic function to load an image at a specific address given
+ * an image ID and extents of free memory.
+ *
+ * If the load is successful then the image information is updated.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+int load_image(unsigned int image_id, image_info_t *image_data)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	uintptr_t image_base;
+	size_t image_size;
+	size_t bytes_read;
+	int io_result;
+
+	assert(image_data != NULL);
+	assert(image_data->h.version >= VERSION_2);
+
+	image_base = image_data->image_base;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != 0) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id,
+		(void *) image_base);
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != 0) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+		goto exit;
+	}
+
+	/* Check that the image size to load is within limit */
+	if (image_size > image_data->image_max_size) {
+		WARN("Image id=%u size out of bounds\n", image_id);
+		io_result = -EFBIG;
+		goto exit;
+	}
+
+	image_data->image_size = image_size;
+
+	/* We have enough space so load the image now */
+	/* TODO: Consider whether to try to recover/retry a partially successful read */
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != 0) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		goto exit;
+	}
+
+#if !TRUSTED_BOARD_BOOT
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 * When TBB is enabled the image is flushed later, after image
+	 * authentication.
+	 */
+	flush_dcache_range(image_base, image_size);
+#endif /* TRUSTED_BOARD_BOOT */
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *) image_base,
+	     (void *) (image_base + image_size));
+
+exit:
+	io_close(image_handle);
+	/* Ignore improbable/unrecoverable error in 'close' */
+
+	/* TODO: Consider maintaining open device connection from this bootloader stage */
+	io_dev_close(dev_handle);
+	/* Ignore improbable/unrecoverable error in 'dev_close' */
+
+	return io_result;
+}
+
+static int load_auth_image_internal(unsigned int image_id,
+				    image_info_t *image_data,
+				    int is_parent_image)
+{
+	int rc;
+
+#if TRUSTED_BOARD_BOOT
+	unsigned int parent_id;
+
+	/* Use recursion to authenticate parent images */
+	rc = auth_mod_get_parent_id(image_id, &parent_id);
+	if (rc == 0) {
+		rc = load_auth_image_internal(parent_id, image_data, 1);
+		if (rc != 0) {
+			return rc;
+		}
+	}
+#endif /* TRUSTED_BOARD_BOOT */
+
+	/* Load the image */
+	rc = load_image(image_id, image_data);
+	if (rc != 0) {
+		return rc;
+	}
+
+#if TRUSTED_BOARD_BOOT
+	/* Authenticate it */
+	rc = auth_mod_verify_img(image_id,
+				 (void *)image_data->image_base,
+				 image_data->image_size);
+	if (rc != 0) {
+		/* Authentication error, zero memory and flush it right away. */
+		zero_normalmem((void *)image_data->image_base,
+		       image_data->image_size);
+		flush_dcache_range(image_data->image_base,
+				   image_data->image_size);
+		return -EAUTH;
+	}
+
+	/*
+	 * File has been successfully loaded and authenticated.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 * Do it only for child images, not for the parents (certificates).
+	 */
+	if (!is_parent_image) {
+		flush_dcache_range(image_data->image_base,
+				   image_data->image_size);
+	}
+#endif /* TRUSTED_BOARD_BOOT */
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Generic function to load and authenticate an image. The image is actually
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
+ ******************************************************************************/
+int load_auth_image(unsigned int image_id, image_info_t *image_data)
+{
+	int err;
+
+	do {
+		err = load_auth_image_internal(image_id, image_data, 0);
+	} while (err != 0 && plat_try_next_boot_source());
+
+	return err;
+}
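
In the LOAD_IMAGE_V2 flow the caller owns the destination window: it fills in image_base and image_max_size before the call and reads image_size back afterwards, while load_auth_image() retries across boot sources. A hypothetical BL2-style caller; BL31_IMAGE_ID, BL31_BASE/BL31_LIMIT and PARAM_IMAGE_BINARY stand in for whatever the platform actually defines:

image_info_t bl31_info;

SET_PARAM_HEAD(&bl31_info, PARAM_IMAGE_BINARY, VERSION_2, 0);
bl31_info.image_base = BL31_BASE;			/* platform load address */
bl31_info.image_max_size = BL31_LIMIT - BL31_BASE;	/* available window */

if (load_auth_image(BL31_IMAGE_ID, &bl31_info) != 0)
	panic();

INFO("BL31 loaded, size = 0x%zx\n", bl31_info.image_size);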
+
+#else /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * Generic function to load an image at a specific address given an image ID and
+ * extents of free memory.
+ *
+ * If the load is successful then the image information is updated.
+ *
+ * If the entry_point_info argument is not NULL then this function also updates:
+ * - the memory layout to mark the memory as reserved;
+ * - the entry point information.
+ *
+ * The caller might pass a NULL pointer for the entry point if they are not
+ * interested in this information. This is typically the case for non-executable
+ * images (e.g. certificates) and executable images that won't ever be executed
+ * on the application processor (e.g. additional microcontroller firmware).
+ *
  * Returns 0 on success, a negative error code otherwise.
  ******************************************************************************/
 int load_image(meminfo_t *mem_layout,
-	       const char *image_name,
-	       uint64_t image_base,
+	       unsigned int image_id,
+	       uintptr_t image_base,
 	       image_info_t *image_data,
 	       entry_point_info_t *entry_point_info)
 {
@@ -220,44 +393,44 @@
 	uintptr_t image_spec;
 	size_t image_size;
 	size_t bytes_read;
-	int io_result = IO_FAIL;
+	int io_result;
 
 	assert(mem_layout != NULL);
-	assert(image_name != NULL);
 	assert(image_data != NULL);
-	assert(image_data->h.version >= VERSION_1);
+	assert(image_data->h.version == VERSION_1);
 
 	/* Obtain a reference to the image by querying the platform layer */
-	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
-	if (io_result != IO_SUCCESS) {
-		WARN("Failed to obtain reference to image '%s' (%i)\n",
-			image_name, io_result);
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
 		return io_result;
 	}
 
 	/* Attempt to access the image */
 	io_result = io_open(dev_handle, image_spec, &image_handle);
-	if (io_result != IO_SUCCESS) {
-		WARN("Failed to access image '%s' (%i)\n",
-			image_name, io_result);
+	if (io_result != 0) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
 		return io_result;
 	}
 
-	INFO("Loading file '%s' at address 0x%lx\n", image_name, image_base);
+	INFO("Loading image id=%u at address %p\n", image_id,
+		(void *) image_base);
 
 	/* Find the size of the image */
 	io_result = io_size(image_handle, &image_size);
-	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
-		WARN("Failed to determine the size of the image '%s' file (%i)\n",
-			image_name, io_result);
+	if ((io_result != 0) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
 		goto exit;
 	}
 
 	/* Check that the memory where the image will be loaded is free */
 	if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
 			 image_base, image_size)) {
-		WARN("Failed to reserve memory: 0x%lx - 0x%lx\n",
-			image_base, image_base + image_size);
+		WARN("Failed to reserve region [base = %p, size = 0x%zx]\n",
+		     (void *) image_base, image_size);
 		dump_load_info(image_base, image_size, mem_layout);
 		io_result = -ENOMEM;
 		goto exit;
@@ -266,11 +439,14 @@
 	/* We have enough space so load the image now */
 	/* TODO: Consider whether to try to recover/retry a partially successful read */
 	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
-	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
-		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
+	if ((io_result != 0) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
 		goto exit;
 	}
 
+	image_data->image_base = image_base;
+	image_data->image_size = image_size;
+
 	/*
 	 * Update the memory usage info.
 	 * This is done after the actual loading so that it is not updated when
@@ -281,25 +457,25 @@
 	if (entry_point_info != NULL) {
 		reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
 				image_base, image_size);
+		entry_point_info->pc = image_base;
 	} else {
-		INFO("Skip reserving memory: 0x%lx - 0x%lx\n",
-				image_base, image_base + image_size);
+		INFO("Skip reserving region [base = %p, size = 0x%zx]\n",
+		     (void *) image_base, image_size);
 	}
 
-	image_data->image_base = image_base;
-	image_data->image_size = image_size;
-
-	if (entry_point_info != NULL)
-		entry_point_info->pc = image_base;
-
+#if !TRUSTED_BOARD_BOOT
 	/*
 	 * File has been successfully loaded.
-	 * Flush the image in TZRAM so that the next EL can see it.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 * When TBB is enabled the image is flushed later, after image
+	 * authentication.
 	 */
 	flush_dcache_range(image_base, image_size);
+#endif /* TRUSTED_BOARD_BOOT */
 
-	INFO("File '%s' loaded: 0x%lx - 0x%lx\n", image_name, image_base,
-	     image_base + image_size);
+	INFO("Image id=%u loaded at address %p, size = 0x%zx\n", image_id,
+		(void *) image_base, image_size);
 
 exit:
 	io_close(image_handle);
@@ -311,3 +487,111 @@
 
 	return io_result;
 }
+
+static int load_auth_image_internal(meminfo_t *mem_layout,
+				    unsigned int image_id,
+				    uintptr_t image_base,
+				    image_info_t *image_data,
+				    entry_point_info_t *entry_point_info,
+				    int is_parent_image)
+{
+	int rc;
+
+#if TRUSTED_BOARD_BOOT
+	unsigned int parent_id;
+
+	/* Use recursion to authenticate parent images */
+	rc = auth_mod_get_parent_id(image_id, &parent_id);
+	if (rc == 0) {
+		rc = load_auth_image_internal(mem_layout, parent_id, image_base,
+				     image_data, NULL, 1);
+		if (rc != 0) {
+			return rc;
+		}
+	}
+#endif /* TRUSTED_BOARD_BOOT */
+
+	/* Load the image */
+	rc = load_image(mem_layout, image_id, image_base, image_data,
+			entry_point_info);
+	if (rc != 0) {
+		return rc;
+	}
+
+#if TRUSTED_BOARD_BOOT
+	/* Authenticate it */
+	rc = auth_mod_verify_img(image_id,
+				 (void *)image_data->image_base,
+				 image_data->image_size);
+	if (rc != 0) {
+		/* Authentication error, zero memory and flush it right away. */
+		zero_normalmem((void *)image_data->image_base,
+		       image_data->image_size);
+		flush_dcache_range(image_data->image_base,
+				   image_data->image_size);
+		return -EAUTH;
+	}
+	/*
+	 * File has been successfully loaded and authenticated.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 * Do it only for child images, not for the parents (certificates).
+	 */
+	if (!is_parent_image) {
+		flush_dcache_range(image_data->image_base,
+				   image_data->image_size);
+	}
+#endif /* TRUSTED_BOARD_BOOT */
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Generic function to load and authenticate an image. The image is actually
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
+ ******************************************************************************/
+int load_auth_image(meminfo_t *mem_layout,
+		    unsigned int image_id,
+		    uintptr_t image_base,
+		    image_info_t *image_data,
+		    entry_point_info_t *entry_point_info)
+{
+	int err;
+
+	do {
+		err = load_auth_image_internal(mem_layout, image_id, image_base,
+					       image_data, entry_point_info, 0);
+	} while (err != 0 && plat_try_next_boot_source());
+
+	return err;
+}
+
+#endif /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * Print the content of an entry_point_info_t structure.
+ ******************************************************************************/
+void print_entry_point_info(const entry_point_info_t *ep_info)
+{
+	INFO("Entry point address = %p\n", (void *)ep_info->pc);
+	INFO("SPSR = 0x%x\n", ep_info->spsr);
+
+#define PRINT_IMAGE_ARG(n)					\
+	VERBOSE("Argument #" #n " = 0x%llx\n",			\
+		(unsigned long long) ep_info->args.arg##n)
+
+	PRINT_IMAGE_ARG(0);
+	PRINT_IMAGE_ARG(1);
+	PRINT_IMAGE_ARG(2);
+	PRINT_IMAGE_ARG(3);
+#ifndef AARCH32
+	PRINT_IMAGE_ARG(4);
+	PRINT_IMAGE_ARG(5);
+	PRINT_IMAGE_ARG(6);
+	PRINT_IMAGE_ARG(7);
+#endif
+#undef PRINT_IMAGE_ARG
+}
diff --git a/common/desc_image_load.c b/common/desc_image_load.c
new file mode 100644
index 0000000..e68e69c
--- /dev/null
+++ b/common/desc_image_load.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <desc_image_load.h>
+
+
+extern bl_mem_params_node_t *bl_mem_params_desc_ptr;
+extern unsigned int bl_mem_params_desc_num;
+
+static bl_load_info_t bl_load_info;
+static bl_params_t next_bl_params;
+
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void flush_bl_params_desc(void)
+{
+	flush_dcache_range((uintptr_t)bl_mem_params_desc_ptr,
+			sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num);
+
+	flush_dcache_range((uintptr_t)&next_bl_params,
+			sizeof(next_bl_params));
+}
+
+/*******************************************************************************
+ * This function returns the index of the given image_id within the
+ * image descriptor array provided by bl_mem_params_desc_ptr. If the
+ * image is not found, it returns -1.
+ ******************************************************************************/
+int get_bl_params_node_index(unsigned int image_id)
+{
+	int index;
+	assert(image_id != INVALID_IMAGE_ID);
+
+	for (index = 0; index < bl_mem_params_desc_num; index++) {
+		if (bl_mem_params_desc_ptr[index].image_id == image_id)
+			return index;
+	}
+
+	return -1;
+}
+
+/*******************************************************************************
+ * This function returns the pointer to `bl_mem_params_node_t` object for
+ * the given image_id within the image descriptor array provided by
+ * bl_mem_params_desc_ptr. If the image is not found, it returns NULL.
+ ******************************************************************************/
+bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id)
+{
+	int index;
+	assert(image_id != INVALID_IMAGE_ID);
+
+	index = get_bl_params_node_index(image_id);
+	if (index >= 0)
+		return &bl_mem_params_desc_ptr[index];
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * This function creates the list of loadable images, by populating and
+ * linking each `bl_load_info_node_t` type node, using the internal array
+ * of image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns `bl_load_info_t` type structure that contains head of the list
+ * of loadable images.
+ ******************************************************************************/
+bl_load_info_t *get_bl_load_info_from_mem_params_desc(void)
+{
+	int index = 0;
+
+	/* If there is no image to start with, return NULL */
+	if (!bl_mem_params_desc_num)
+		return NULL;
+
+	/* Assign initial data structures */
+	bl_load_info_node_t *bl_node_info =
+		&bl_mem_params_desc_ptr[index].load_node_mem;
+	bl_load_info.head = bl_node_info;
+	SET_PARAM_HEAD(&bl_load_info, PARAM_BL_LOAD_INFO, VERSION_2, 0);
+
+	/* Go through the image descriptor array and create the list */
+	for (; index < bl_mem_params_desc_num; index++) {
+
+		/* Populate the image information */
+		bl_node_info->image_id = bl_mem_params_desc_ptr[index].image_id;
+		bl_node_info->image_info = &bl_mem_params_desc_ptr[index].image_info;
+
+		/* Link next image if present */
+		if ((index + 1) < bl_mem_params_desc_num) {
+			/* Get the memory and link the next node */
+			bl_node_info->next_load_info =
+				&bl_mem_params_desc_ptr[index + 1].load_node_mem;
+			bl_node_info = bl_node_info->next_load_info;
+		}
+	}
+
+	return &bl_load_info;
+}
+
+/*******************************************************************************
+ * This function creates the list of executable images, by populating and
+ * linking each `bl_params_node_t` type node, using the internal array of
+ * image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns `bl_params_t` type structure that contains head of the list
+ * of executable images.
+ ******************************************************************************/
+bl_params_t *get_next_bl_params_from_mem_params_desc(void)
+{
+	int count;
+	unsigned int img_id = 0;
+	int link_index = 0;
+	bl_params_node_t *bl_current_exec_node = NULL;
+	bl_params_node_t *bl_last_exec_node = NULL;
+	bl_mem_params_node_t *desc_ptr;
+
+	/* If there is no image to start with, return NULL */
+	if (!bl_mem_params_desc_num)
+		return NULL;
+
+	/* Get the list HEAD */
+	for (count = 0; count < bl_mem_params_desc_num; count++) {
+
+		desc_ptr = &bl_mem_params_desc_ptr[count];
+
+		if ((EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE) &&
+			(EP_GET_FIRST_EXE(desc_ptr->ep_info.h.attr) == EP_FIRST_EXE)) {
+			next_bl_params.head = &desc_ptr->params_node_mem;
+			link_index = count;
+			break;
+		}
+	}
+
+	/* Make sure we have a HEAD node */
+	assert(next_bl_params.head != NULL);
+
+	/* Populate the HEAD information */
+	SET_PARAM_HEAD(&next_bl_params, PARAM_BL_PARAMS, VERSION_2, 0);
+
+	/*
+	 * Go through the image descriptor array and create the list.
+	 * This bounded loop is to make sure that we are not looping forever.
+	 */
+	for (count = 0 ; count < bl_mem_params_desc_num; count++) {
+
+		desc_ptr = &bl_mem_params_desc_ptr[link_index];
+
+		/* Make sure the image is executable */
+		assert(EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE);
+
+		/* Get the memory for current node */
+		bl_current_exec_node = &desc_ptr->params_node_mem;
+
+		/* Populate the image information */
+		bl_current_exec_node->image_id = desc_ptr->image_id;
+		bl_current_exec_node->image_info = &desc_ptr->image_info;
+		bl_current_exec_node->ep_info = &desc_ptr->ep_info;
+
+		if (bl_last_exec_node) {
+			/* Assert if loop detected */
+			assert(bl_last_exec_node->next_params_info == NULL);
+
+			/* Link the previous node to the current one */
+			bl_last_exec_node->next_params_info = bl_current_exec_node;
+		}
+
+		/* Update the last node */
+		bl_last_exec_node = bl_current_exec_node;
+
+		/* If no next hand-off image then break out */
+		img_id = desc_ptr->next_handoff_image_id;
+		if (img_id == INVALID_IMAGE_ID)
+			break;
+
+		/* Get the index for the next hand-off image */
+		link_index = get_bl_params_node_index(img_id);
+		assert((link_index > 0) &&
+			(link_index < bl_mem_params_desc_num));
+	}
+
+	/* Invalid image is expected to terminate the loop */
+	assert(img_id == INVALID_IMAGE_ID);
+
+	return &next_bl_params;
+}
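
Both builders above only walk a platform-supplied table: the platform exports the descriptor array and its element count through bl_mem_params_desc_ptr and bl_mem_params_desc_num. A stripped-down, hypothetical example showing just the fields this file consumes (real platform ports populate ep_info/image_info through helper macros, and the image IDs below are illustrative):

/* Hypothetical two-entry table: BL31 is the first executable image and
 * hands off to BL33, which terminates the chain. */
static bl_mem_params_node_t bl2_mem_params_descs[] = {
	{
		.image_id = BL31_IMAGE_ID,
		/* ep_info.h.attr would carry EXECUTABLE | EP_FIRST_EXE */
		.next_handoff_image_id = BL33_IMAGE_ID,
	},
	{
		.image_id = BL33_IMAGE_ID,
		/* ep_info.h.attr would carry EXECUTABLE */
		.next_handoff_image_id = INVALID_IMAGE_ID,	/* end of chain */
	},
};

bl_mem_params_node_t *bl_mem_params_desc_ptr = bl2_mem_params_descs;
unsigned int bl_mem_params_desc_num =
	sizeof(bl2_mem_params_descs) / sizeof(bl2_mem_params_descs[0]);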
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
new file mode 100644
index 0000000..0ea4cd0
--- /dev/null
+++ b/common/runtime_svc.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <runtime_svc.h>
+#include <string.h>
+
+/*******************************************************************************
+ * The 'rt_svc_descs' array holds the runtime service descriptors exported by
+ * services by placing them in the 'rt_svc_descs' linker section.
+ * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
+ * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
+ * type[31] bit in the function id are combined to get an index into the
+ * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
+ * 'rt_svc_descs' array which contains the SMC handler.
+ ******************************************************************************/
+#define RT_SVC_DESCS_START	((uintptr_t) (&__RT_SVC_DESCS_START__))
+#define RT_SVC_DESCS_END	((uintptr_t) (&__RT_SVC_DESCS_END__))
+uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
+static rt_svc_desc_t *rt_svc_descs;
+
+#define RT_SVC_DECS_NUM		((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
+					/ sizeof(rt_svc_desc_t))
+
+/*******************************************************************************
+ * Function to invoke the registered `handle` corresponding to the smc_fid.
+ ******************************************************************************/
+uintptr_t handle_runtime_svc(uint32_t smc_fid,
+			     void *cookie,
+			     void *handle,
+			     unsigned int flags)
+{
+	u_register_t x1, x2, x3, x4;
+	int index;
+	unsigned int idx;
+	const rt_svc_desc_t *rt_svc_descs;
+
+	assert(handle);
+	idx = get_unique_oen_from_smc_fid(smc_fid);
+	assert(idx < MAX_RT_SVCS);
+
+	index = rt_svc_descs_indices[idx];
+	if (index < 0 || index >= (int)RT_SVC_DECS_NUM)
+		SMC_RET1(handle, SMC_UNK);
+
+	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+
+	get_smc_params_from_ctx(handle, x1, x2, x3, x4);
+
+	return rt_svc_descs[index].handle(smc_fid, x1, x2, x3, x4, cookie,
+						handle, flags);
+}
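
The index used above comes straight from the SMC function ID layout described in the file header: the owning entity number lives in bits [29:24] and the fast/yielding call type in bit [31]. A sketch of that derivation with hypothetical macro names (the real get_unique_oen_from_smc_fid() helper lives in the runtime services headers):

#include <stdint.h>

#define FID_OEN(fid)	(((fid) >> 24) & 0x3fU)	/* OEN[29:24] */
#define FID_TYPE(fid)	(((fid) >> 31) & 0x1U)	/* call type[31] */

/* Illustrative equivalent of get_unique_oen_from_smc_fid(): 64 OENs per
 * call type, so fast and yielding calls land in two separate banks of
 * rt_svc_descs_indices. */
static inline unsigned int example_unique_oen(uint32_t smc_fid)
{
	return (FID_TYPE(smc_fid) << 6) | FID_OEN(smc_fid);
}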
+
+/*******************************************************************************
+ * Simple routine to sanity check a runtime service descriptor before using it
+ ******************************************************************************/
+static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
+{
+	if (desc == NULL)
+		return -EINVAL;
+
+	if (desc->start_oen > desc->end_oen)
+		return -EINVAL;
+
+	if (desc->end_oen >= OEN_LIMIT)
+		return -EINVAL;
+
+	if (desc->call_type != SMC_TYPE_FAST &&
+			desc->call_type != SMC_TYPE_YIELD)
+		return -EINVAL;
+
+	/* A runtime service having no init or handle function doesn't make sense */
+	if (desc->init == NULL && desc->handle == NULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function calls the initialisation routine in the descriptor exported by
+ * a runtime service. Once a descriptor has been validated, its start & end
+ * owning entity numbers and the call type are combined to form a unique oen.
+ * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
+ * The index of the runtime service descriptor is stored at this index.
+ ******************************************************************************/
+void runtime_svc_init(void)
+{
+	int rc = 0;
+	unsigned int index, start_idx, end_idx;
+
+	/* Assert that the number of detected descriptors is below the index limit */
+	assert((RT_SVC_DESCS_END >= RT_SVC_DESCS_START) &&
+			(RT_SVC_DECS_NUM < MAX_RT_SVCS));
+
+	/* If no runtime services are implemented then simply bail out */
+	if (RT_SVC_DECS_NUM == 0)
+		return;
+
+	/* Initialise internal variables to invalid state */
+	memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
+
+	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+	for (index = 0; index < RT_SVC_DECS_NUM; index++) {
+		rt_svc_desc_t *service = &rt_svc_descs[index];
+
+		/*
+		 * An invalid descriptor is an error condition since it is
+		 * difficult to predict the system behaviour in the absence
+		 * of this service.
+		 */
+		rc = validate_rt_svc_desc(service);
+		if (rc) {
+			ERROR("Invalid runtime service descriptor %p\n",
+				(void *) service);
+			panic();
+		}
+
+		/*
+		 * The runtime service may have separate rt_svc_desc_t
+		 * descriptors for its fast and yielding SMCs. Since the
+		 * service itself needs to be initialised only once, only one
+		 * of them will have an initialisation routine defined. Call
+		 * the initialisation routine for this runtime service, if it
+		 * is defined.
+		 */
+		if (service->init) {
+			rc = service->init();
+			if (rc) {
+				ERROR("Error initializing runtime service %s\n",
+						service->name);
+				continue;
+			}
+		}
+
+		/*
+		 * Fill the indices corresponding to the start and end
+		 * owning entity numbers with the index of the
+		 * descriptor which will handle the SMCs for this owning
+		 * entity range.
+		 */
+		start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
+				service->call_type);
+		assert(start_idx < MAX_RT_SVCS);
+		end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
+				service->call_type);
+		assert(end_idx < MAX_RT_SVCS);
+		for (; start_idx <= end_idx; start_idx++)
+			rt_svc_descs_indices[start_idx] = index;
+	}
+}
diff --git a/common/tf_log.c b/common/tf_log.c
new file mode 100644
index 0000000..54c0a43
--- /dev/null
+++ b/common/tf_log.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+
+/* Set the default maximum log level to the `LOG_LEVEL` build flag */
+static unsigned int max_log_level = LOG_LEVEL;
+
+/*
+ * The common log function which is invoked by ARM Trusted Firmware code.
+ * This function should not be directly invoked and is meant to be
+ * only used by the log macros defined in debug.h. The function
+ * expects the first character in the format string to be one of the
+ * LOG_MARKER_* macros defined in debug.h.
+ */
+void tf_log(const char *fmt, ...)
+{
+	unsigned int log_level;
+	va_list args;
+	const char *prefix_str;
+
+	/* We expect the LOG_MARKER_* macro as the first character */
+	log_level = fmt[0];
+
+	/* Verify that log_level is one of the LOG_MARKER_* macros defined in debug.h */
+	assert(log_level && log_level <= LOG_LEVEL_VERBOSE);
+	assert(log_level % 10 == 0);
+
+	if (log_level > max_log_level)
+		return;
+
+	prefix_str = plat_log_get_prefix(log_level);
+
+	if (prefix_str != NULL)
+		tf_string_print(prefix_str);
+
+	va_start(args, fmt);
+	tf_vprintf(fmt+1, args);
+	va_end(args);
+}
+
+/*
+ * Helper function that allows the platform to set the log level dynamically.
+ * The maximum log level is determined by the `LOG_LEVEL` build flag at
+ * compile time; this helper can only lower the log level below that
+ * compile-time maximum.
+ */
+void tf_log_set_max_level(unsigned int log_level)
+{
+	assert(log_level <= LOG_LEVEL_VERBOSE);
+	assert((log_level % 10) == 0);
+
+	/* Cap log_level to the compile time maximum. */
+	if (log_level < LOG_LEVEL)
+		max_log_level = log_level;
+
+}
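+
+/*
+ * Illustrative usage: callers are expected to use the log macros from
+ * debug.h, e.g. INFO("x = %d\n", x), which prepend the matching LOG_MARKER_*
+ * prefix before calling tf_log(). A platform may then call
+ * tf_log_set_max_level(LOG_LEVEL_NOTICE) to silence INFO and VERBOSE output
+ * at runtime.
+ */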
diff --git a/common/tf_printf.c b/common/tf_printf.c
index 02461c0..f73842a 100644
--- a/common/tf_printf.c
+++ b/common/tf_printf.c
@@ -1,44 +1,41 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
 #include <debug.h>
+#include <limits.h>
 #include <stdarg.h>
 #include <stdint.h>
 
 /***********************************************************
  * The tf_printf implementation for all BL stages
  ***********************************************************/
-static void unsigned_num_print(unsigned long int unum, unsigned int radix)
+
+#define get_num_va_args(args, lcount) \
+	(((lcount) > 1) ? va_arg(args, long long int) :	\
+	((lcount) ? va_arg(args, long int) : va_arg(args, int)))
+
+#define get_unum_va_args(args, lcount) \
+	(((lcount) > 1) ? va_arg(args, unsigned long long int) :	\
+	((lcount) ? va_arg(args, unsigned long int) : va_arg(args, unsigned int)))
+
+void tf_string_print(const char *str)
+{
+	assert(str);
+
+	while (*str)
+		putchar(*str++);
+}
+
+static void unsigned_num_print(unsigned long long int unum, unsigned int radix)
 {
 	/* Just need enough space to store 64 bit decimal integer */
 	unsigned char num_buf[20];
-	int i = 0 , rem;
+	int i = 0, rem;
 
 	do {
 		rem = unum % radix;
@@ -52,36 +49,32 @@
 		putchar(num_buf[i]);
 }
 
-static void string_print(const char *str)
-{
-	while (*str)
-		putchar(*str++);
-}
-
 /*******************************************************************
  * Reduced format print for Trusted firmware.
- * The following formats are supported by this print
- * %x - 32 bit hexadecimal format
- * %llx and %lx -64 bit hexadecimal format
+ * The following type specifiers are supported by this print
+ * %x - hexadecimal format
  * %s - string format
- * %d or %i - signed 32 bit decimal format
- * %u - unsigned 32 bit decimal format
- * %ld and %lld - signed 64 bit decimal format
- * %lu and %llu - unsigned 64 bit decimal format
- * Exits on all other formats.
+ * %d or %i - signed decimal format
+ * %u - unsigned decimal format
+ * %p - pointer format
+ *
+ * The following length specifiers are supported by this print
+ * %l - long int (64-bit on AArch64)
+ * %ll - long long int (64-bit on AArch64)
+ * %z - size_t sized integer formats (64-bit on AArch64)
+ *
+ * The print exits on any format specifier other than valid
+ * combinations of the above specifiers.
  *******************************************************************/
-
-void tf_printf(const char *fmt, ...)
+void tf_vprintf(const char *fmt, va_list args)
 {
-	va_list args;
-	int bit64;
-	int64_t num;
-	uint64_t unum;
+	int l_count;
+	long long int num;
+	unsigned long long int unum;
 	char *str;
 
-	va_start(args, fmt);
 	while (*fmt) {
-		bit64 = 0;
+		l_count = 0;
 
 		if (*fmt == '%') {
 			fmt++;
@@ -90,52 +83,60 @@
 			switch (*fmt) {
 			case 'i': /* Fall through to next one */
 			case 'd':
-				if (bit64)
-					num = va_arg(args, int64_t);
-				else
-					num = va_arg(args, int32_t);
-
+				num = get_num_va_args(args, l_count);
 				if (num < 0) {
 					putchar('-');
-					unum = (unsigned long int)-num;
+					unum = (unsigned long long int)-num;
 				} else
-					unum = (unsigned long int)num;
+					unum = (unsigned long long int)num;
 
 				unsigned_num_print(unum, 10);
 				break;
 			case 's':
 				str = va_arg(args, char *);
-				string_print(str);
+				tf_string_print(str);
 				break;
-			case 'x':
-				if (bit64)
-					unum = va_arg(args, uint64_t);
-				else
-					unum = va_arg(args, uint32_t);
+			case 'p':
+				unum = (uintptr_t)va_arg(args, void *);
+				if (unum)
+					tf_string_print("0x");
 
 				unsigned_num_print(unum, 16);
 				break;
+			case 'x':
+				unum = get_unum_va_args(args, l_count);
+				unsigned_num_print(unum, 16);
+				break;
+			case 'z':
+				if (sizeof(size_t) == 8)
+					l_count = 2;
+
+				fmt++;
+				goto loop;
 			case 'l':
-				bit64 = 1;
+				l_count++;
 				fmt++;
 				goto loop;
 			case 'u':
-				if (bit64)
-					unum = va_arg(args, uint64_t);
-				else
-					unum = va_arg(args, uint32_t);
-
+				unum = get_unum_va_args(args, l_count);
 				unsigned_num_print(unum, 10);
 				break;
 			default:
 				/* Exit on any other format specifier */
-				goto exit;
+				return;
 			}
 			fmt++;
 			continue;
 		}
 		putchar(*fmt++);
 	}
-exit:
-	va_end(args);
+}
+
+void tf_printf(const char *fmt, ...)
+{
+	va_list va;
+
+	va_start(va, fmt);
+	tf_vprintf(fmt, va);
+	va_end(va);
 }
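+
+/*
+ * Illustrative usage (hypothetical variables): the reduced specifier set
+ * above allows calls such as
+ *   tf_printf("entry point = %p, size = %zu, id = 0x%llx\n",
+ *             ep, size, (unsigned long long)id);
+ * where %p prints a pointer, %zu a size_t and %llx a 64-bit hex value.
+ */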
diff --git a/common/tf_snprintf.c b/common/tf_snprintf.c
new file mode 100644
index 0000000..a99ab7a
--- /dev/null
+++ b/common/tf_snprintf.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform.h>
+#include <stdarg.h>
+
+static void unsigned_dec_print(char **s, size_t n, size_t *chars_printed,
+			       unsigned int unum)
+{
+	/* Enough for a 32-bit unsigned decimal integer (4294967295). */
+	unsigned char num_buf[10];
+	int i = 0, rem;
+
+	do {
+		rem = unum % 10;
+		num_buf[i++] = '0' + rem;
+	} while (unum /= 10);
+
+	while (--i >= 0) {
+		if (*chars_printed < n)
+			*(*s)++ = num_buf[i];
+		(*chars_printed)++;
+	}
+}
+
+/*******************************************************************
+ * Reduced snprintf to be used for Trusted firmware.
+ * The following type specifiers are supported:
+ *
+ * %d or %i - signed decimal format
+ * %u - unsigned decimal format
+ *
+ * The function panics on all other format specifiers.
+ *
+ * It returns the number of characters that would be written if the
+ * buffer was big enough. If it returns a value lower than n, the
+ * whole string has been written.
+ *******************************************************************/
+int tf_snprintf(char *s, size_t n, const char *fmt, ...)
+{
+	va_list args;
+	int num;
+	unsigned int unum;
+	size_t chars_printed = 0;
+
+	if (n == 1) {
+		/* Buffer is too small to actually write anything else. */
+		*s = '\0';
+		n = 0;
+	} else if (n >= 2) {
+		/* Reserve space for the terminator character. */
+		n--;
+	}
+
+	va_start(args, fmt);
+	while (*fmt) {
+
+		if (*fmt == '%') {
+			fmt++;
+			/* Check the format specifier. */
+			switch (*fmt) {
+			case 'i':
+			case 'd':
+				num = va_arg(args, int);
+
+				if (num < 0) {
+					if (chars_printed < n)
+						*s++ = '-';
+					chars_printed++;
+
+					unum = (unsigned int)-num;
+				} else {
+					unum = (unsigned int)num;
+				}
+
+				unsigned_dec_print(&s, n, &chars_printed, unum);
+				break;
+			case 'u':
+				unum = va_arg(args, unsigned int);
+				unsigned_dec_print(&s, n, &chars_printed, unum);
+				break;
+			default:
+				/* Panic on any other format specifier. */
+				ERROR("tf_snprintf: specifier with ASCII code '%d' not supported.",
+				      *fmt);
+				plat_panic_handler();
+			}
+			fmt++;
+			continue;
+		}
+
+		if (chars_printed < n)
+			*s++ = *fmt;
+		fmt++;
+		chars_printed++;
+	}
+
+	va_end(args);
+
+	if (n > 0)
+		*s = '\0';
+
+	return chars_printed;
+}
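+
+/*
+ * Illustrative usage: with a 4-byte buffer, tf_snprintf(buf, 4, "%d", 12345)
+ * stores "123" plus the terminator and returns 5, so a return value >= n
+ * signals truncation, in line with the standard snprintf contract.
+ */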
diff --git a/contributing.md b/contributing.md
deleted file mode 100644
index 6b24fb5..0000000
--- a/contributing.md
+++ /dev/null
@@ -1,121 +0,0 @@
-Contributing to ARM Trusted Firmware
-====================================
-
-Before you start contributing to this project you must sign the ARM
-Contributor License Agreement (CLA).
-
-Individuals who want to contribute their own work must sign and return an
-Individual CLA. Companies that want to contribute must sign and return a
-Corporate CLA if their employees' intellectual property has been assigned to
-the employer. Copies of the CLAs are available from the [contributing page] of
-the ARM website.
-
-For this project, ARM also requires the GitHub account name(s) associated with
-each individual contributor or the designated employees of corporate
-contributors. Only contributions originating from these accounts will be
-considered covered by the CLA. To avoid delay, you should provide the Github
-account name(s) at the same time as the signed CLA.
-
-ARM reserves the right to not accept a contribution. This may be for technical,
-commercial or legal reasons.
-
-
-Getting Started
----------------
-
-*   Make sure you have a [GitHub account].
-*   Create an [issue] for your work if one does not already exist. This gives
-    everyone visibility of whether others are working on something similar. ARM
-    licensees may contact ARM directly via their partner managers instead if
-    they prefer.
-    *   Note that the [issue] tracker for this project is in a separate
-        [issue tracking repository]. Please follow the guidelines in that
-        repository.
-    *   If you intend to include Third Party IP in your contribution, please
-        raise a separate [issue] for this and ensure that the changes that
-        include Third Party IP are made on a separate topic branch.
-*   [Fork][] [arm-trusted-firmware][] on GitHub.
-*   Clone the fork to your own machine.
-*   Create a local topic branch based on the [arm-trusted-firmware][] `master`
-    branch.
-
-
-Making Changes
---------------
-
-*   Make commits of logical units. See these general [Git guidelines] for
-    contributing to a project.
-*   Follow the [Linux coding style]; this style is enforced for the ARM Trusted
-    Firmware project (style errors only, not warnings).
-    *   Use the checkpatch.pl script provided with the Linux source tree. A
-        Makefile target is provided for convenience (see section 2 in the
-        [User Guide]).
-*   Keep the commits on topic. If you need to fix another bug or make another
-    enhancement, please create a separate [issue] and address it on a separate
-    topic branch.
-*   Avoid long commit series. If you do have a long series, consider whether
-    some commits should be squashed together or addressed in a separate topic.
-*   Make sure your commit messages are in the proper format. If a commit fixes
-    a GitHub [issue], include a reference (e.g.
-    "fixes arm-software/tf-issues#45"); this ensures the [issue] is
-    [automatically closed] when merged into the [arm-trusted-firmware] `master`
-    branch.
-*   Where appropriate, please update the documentation.
-    *   Consider whether the [User Guide], [Porting Guide], [Firmware Design] or
-        other in-source documentation needs updating.
-    *   If this is your first contribution, you may add your name or your
-        company name to the [Acknowledgements] file.
-    *   For topics with multiple commits, you should make all documentation
-        changes (and nothing else) in the last commit of the series. Otherwise,
-        include the documentation changes within the single commit.
-*   Please test your changes. As a minimum, ensure UEFI boots to the shell on
-    the Foundation FVP. See the "[Running the software]" section of the
-    [User Guide] for more information.
-
-
-Submitting Changes
-------------------
-
-*   Ensure we have your signed CLA.
-*   Push your local changes to your fork of the repository.
-*   Submit a [pull request] to the [arm-trusted-firmware] `integration` branch.
-    *   The changes in the [pull request] will then undergo further review and
-        testing. Any review comments will be made as comments on the [pull
-        request]. This may require you to do some rework.
-*   When the changes are accepted, ARM will integrate them.
-    *   Typically, ARM will merge the [pull request] into the `integration`
-        branch within the GitHub UI, creating a merge commit.
-    *   Please avoid creating merge commits in the [pull request] itself.
-    *   If the [pull request] is not based on a recent commit, ARM may rebase
-        it onto the `master` branch first, or ask you to do this.
-    *   If the [pull request] cannot be automatically merged, ARM will ask you
-        to rebase it onto the `master` branch.
-    *   After final integration testing, ARM will push your merge commit to the
-        `master` branch. If a problem is found at this stage, the merge commit
-        will be removed from the `integration` branch and ARM will ask you to
-        create a new pull request to resolve the problem.
-    *   Please do not delete your topic branch until it is safely merged into
-        the `master` branch.
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved._
-
-
-[User Guide]:                           ./docs/user-guide.md
-[Running the software]:                 ./docs/user-guide.md#6--running-the-software
-[Porting Guide]:                        ./docs/porting-guide.md
-[Firmware Design]:                      ./docs/firmware-design.md
-[Acknowledgements]:                     ./acknowledgements.md "Contributor acknowledgements"
-
-[contributing page]:            http://www.arm.com/community/open-source-contributing.php
-[GitHub account]:               https://github.com/signup/free
-[Fork]:                         https://help.github.com/articles/fork-a-repo
-[issue tracking repository]:    https://github.com/ARM-software/tf-issues
-[issue]:                        https://github.com/ARM-software/tf-issues/issues
-[pull request]:                 https://help.github.com/articles/using-pull-requests
-[automatically closed]:         https://help.github.com/articles/closing-issues-via-commit-messages
-[Git guidelines]:               http://git-scm.com/book/ch5-2.html
-[Linux coding style]:           https://www.kernel.org/doc/Documentation/CodingStyle
-[arm-trusted-firmware]:         https://github.com/ARM-software/arm-trusted-firmware
diff --git a/contributing.rst b/contributing.rst
new file mode 100644
index 0000000..cdf0620
--- /dev/null
+++ b/contributing.rst
@@ -0,0 +1,129 @@
+Contributing to ARM Trusted Firmware
+====================================
+
+Getting Started
+---------------
+
+-  Make sure you have a `GitHub account`_.
+-  Create an `issue`_ for your work if one does not already exist. This gives
+   everyone visibility of whether others are working on something similar. ARM
+   licensees may contact ARM directly via their partner managers instead if
+   they prefer.
+
+   -  Note that the `issue`_ tracker for this project is in a separate
+      `issue tracking repository`_. Please follow the guidelines in that
+      repository.
+   -  If you intend to include Third Party IP in your contribution, please
+      raise a separate `issue`_ for this and ensure that the changes that
+      include Third Party IP are made on a separate topic branch.
+
+-  `Fork`_ `arm-trusted-firmware`_ on GitHub.
+-  Clone the fork to your own machine.
+-  Create a local topic branch based on the `arm-trusted-firmware`_ ``master``
+   branch.
+
+Making Changes
+--------------
+
+-  Make commits of logical units. See these general `Git guidelines`_ for
+   contributing to a project.
+-  Follow the `Linux coding style`_; this style is enforced for the ARM Trusted
+   Firmware project (style errors only, not warnings).
+
+   -  Use the checkpatch.pl script provided with the Linux source tree. A
+      Makefile target is provided for convenience (see section 2 in the
+      `User Guide`_).
+
+-  Keep the commits on topic. If you need to fix another bug or make another
+   enhancement, please create a separate `issue`_ and address it on a separate
+   topic branch.
+-  Avoid long commit series. If you do have a long series, consider whether
+   some commits should be squashed together or addressed in a separate topic.
+-  Make sure your commit messages are in the proper format. If a commit fixes
+   a GitHub `issue`_, include a reference (e.g.
+   "fixes arm-software/tf-issues#45"); this ensures the `issue`_ is
+   `automatically closed`_ when merged into the `arm-trusted-firmware`_ ``master``
+   branch.
+-  Where appropriate, please update the documentation.
+
+   -  Consider whether the `User Guide`_, `Porting Guide`_, `Firmware Design`_ or
+      other in-source documentation needs updating.
+   -  Ensure that each changed file has the correct copyright and license
+      information. Files that entirely consist of contributions to this
+      project should have the copyright notice and BSD-3-Clause SPDX license
+      identifier as shown in `license.rst`_. Files that contain
+      changes to imported Third Party IP should contain a notice as follows,
+      with the original copyright and license text retained:
+
+      ::
+
+          Portions copyright (c) [XXXX-]YYYY, ARM Limited and Contributors. All rights reserved.
+
+      where XXXX is the year of first contribution (if different to YYYY) and
+      YYYY is the year of most recent contribution.
+   -  If not done previously, you may add your name or your company name to
+      the `Acknowledgements`_ file.
+   -  If you are submitting new files that you intend to be the technical
+      sub-maintainer for (for example, a new platform port), then also update
+      the `Maintainers`_ file.
+   -  For topics with multiple commits, you should make all documentation
+      changes (and nothing else) in the last commit of the series. Otherwise,
+      include the documentation changes within the single commit.
+
+-  Please test your changes. As a minimum, ensure UEFI boots to the shell on
+   the Foundation FVP. See `Running the software on FVP`_ for more information.
+
+Submitting Changes
+------------------
+
+-  Ensure that each commit in the series has at least one ``Signed-off-by:``
+   line, using your real name and email address. The names in the
+   ``Signed-off-by:`` and ``Author:`` lines must match. If anyone else contributes
+   to the commit, they must also add their own ``Signed-off-by:`` line.
+   By adding this line the contributor certifies the contribution is made under
+   the terms of the `Developer Certificate of Origin (DCO)`_.
+-  Push your local changes to your fork of the repository.
+-  Submit a `pull request`_ to the `arm-trusted-firmware`_ ``integration`` branch.
+
+   -  The changes in the `pull request`_ will then undergo further review and
+      testing by the `Maintainers`_. Any review comments will be made as
+      comments on the `pull request`_. This may require you to do some rework.
+
+-  When the changes are accepted, the `Maintainers`_ will integrate them.
+
+   -  Typically, the `Maintainers`_ will merge the `pull request`_ into the
+      ``integration`` branch within the GitHub UI, creating a merge commit.
+   -  Please avoid creating merge commits in the `pull request`_ itself.
+   -  If the `pull request`_ is not based on a recent commit, the `Maintainers`_
+      may rebase it onto the ``master`` branch first, or ask you to do this.
+   -  If the `pull request`_ cannot be automatically merged, the `Maintainers`_
+      will ask you to rebase it onto the ``master`` branch.
+   -  After final integration testing, the `Maintainers`_ will push your merge
+      commit to the ``master`` branch. If a problem is found during integration,
+      the merge commit will be removed from the ``integration`` branch and the
+      `Maintainers`_ will ask you to create a new pull request to resolve the
+      problem.
+   -  Please do not delete your topic branch until it is safely merged into
+      the ``master`` branch.
+
+--------------
+
+*Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _GitHub account: https://github.com/signup/free
+.. _issue: https://github.com/ARM-software/tf-issues/issues
+.. _issue tracking repository: https://github.com/ARM-software/tf-issues
+.. _Fork: https://help.github.com/articles/fork-a-repo
+.. _arm-trusted-firmware: https://github.com/ARM-software/arm-trusted-firmware
+.. _Git guidelines: http://git-scm.com/book/ch5-2.html
+.. _Linux coding style: https://www.kernel.org/doc/Documentation/CodingStyle
+.. _User Guide: ./docs/user-guide.rst
+.. _automatically closed: https://help.github.com/articles/closing-issues-via-commit-messages
+.. _Porting Guide: ./docs/porting-guide.rst
+.. _Firmware Design: ./docs/firmware-design.rst
+.. _license.rst: ./license.rst
+.. _Acknowledgements: ./acknowledgements.rst
+.. _Maintainers: ./maintainers.rst
+.. _Running the software on FVP: ./docs/user-guide.rst#user-content-running-the-software-on-fvp
+.. _Developer Certificate of Origin (DCO): ./dco.txt
+.. _pull request: https://help.github.com/articles/using-pull-requests
diff --git a/dco.txt b/dco.txt
new file mode 100644
index 0000000..8201f99
--- /dev/null
+++ b/dco.txt
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff --git a/docs/arm-sip-service.rst b/docs/arm-sip-service.rst
new file mode 100644
index 0000000..6d456c7
--- /dev/null
+++ b/docs/arm-sip-service.rst
@@ -0,0 +1,96 @@
+ARM SiP Service
+===============
+
+This document enumerates and describes the ARM SiP (Silicon Provider) services.
+
+SiP services are non-standard, platform-specific services offered by the silicon
+implementer or platform provider. They are accessed via the ``SMC`` instruction
+("SMC calls") executed from Exception Levels below EL3. SMC calls for SiP
+services:
+
+-  Follow `SMC Calling Convention`_;
+-  Use SMC function IDs that fall in the SiP range, which are ``0xc2000000`` -
+   ``0xc200ffff`` for 64-bit calls, and ``0x82000000`` - ``0x8200ffff`` for 32-bit
+   calls.
+
+The ARM SiP implementation offers the following services:
+
+-  Performance Measurement Framework (PMF)
+-  Execution State Switching service
+
+Source definitions for the ARM SiP services are located in the ``arm_sip_svc.h`` header
+file.
+
+Performance Measurement Framework (PMF)
+---------------------------------------
+
+The `Performance Measurement Framework`_
+allows callers to retrieve timestamps captured at various paths in ARM Trusted
+Firmware execution. It is described in detail in the `Firmware Design document`_.
+
+Execution State Switching service
+---------------------------------
+
+The Execution State Switching service provides a mechanism for a non-secure lower
+Exception Level (either EL2, or NS EL1 if EL2 is not implemented) to request a
+switch of its execution state (a.k.a. Register Width), either from AArch64 to
+AArch32 or from AArch32 to AArch64, for the calling CPU. This service is only
+available when ARM Trusted Firmware is built for AArch64 (i.e. when build option
+``ARCH`` is set to ``aarch64``).
+
+``ARM_SIP_SVC_EXE_STATE_SWITCH``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t Function ID
+        uint32_t PC hi
+        uint32_t PC lo
+        uint32_t Cookie hi
+        uint32_t Cookie lo
+
+    Return:
+        uint32_t
+
+The function ID parameter must be ``0x82000020``. It uniquely identifies the
+Execution State Switching service being requested.
+
+The parameters *PC hi* and *PC lo* define the upper and lower words, respectively,
+of the entry point (physical address) at which execution should start, after
+Execution State has been switched. When calling from AArch64, *PC hi* must be 0.
+
+When execution starts at the supplied entry point after Execution State has been
+switched, the parameters *Cookie hi* and *Cookie lo* are passed in CPU registers
+0 and 1, respectively. When calling from AArch64, *Cookie hi* must be 0.
+
+This call can only be made on the primary CPU, before any secondaries are
+brought up with the ``CPU_ON`` PSCI call. Otherwise, the call will always fail.
+
+The effect of switching execution state is as if the Exception Level were
+entered for the first time, following power on. This means that CPU registers
+which have an architecturally-defined reset value will assume that value. Other
+registers should not be expected to retain the values they held before the call
+was made.
+CPU endianness, however, is preserved from the previous execution state. Note
+that this switches the execution state of the calling CPU only. This is not a
+substitute for PSCI ``SYSTEM_RESET``.
+
+The service may return the following error codes:
+
+-  ``STATE_SW_E_PARAM``: If any of the parameters were deemed invalid for
+   a specific request.
+-  ``STATE_SW_E_DENIED``: If the call is not successful, or when ARM Trusted
+   Firmware is built for AArch32.
+
+If the call is successful, the caller will not observe the SMC returning.
+Instead, execution starts at the supplied entry point, with the CPU registers 0
+and 1 populated with the supplied *Cookie hi* and *Cookie lo* values,
+respectively.
+
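+The sketch below is illustrative only: the ``smc_call`` helper, the GCC-style
+inline assembly and the parameter names are assumptions made for the example,
+while the ``0x82000020`` function ID is the one described above.
+
+.. code:: c
+
+    #include <stdint.h>
+
+    /* Hypothetical helper issuing an SMC with the SiP function ID in w0. */
+    static uint32_t smc_call(uint32_t fid, uint32_t a1, uint32_t a2,
+                             uint32_t a3, uint32_t a4)
+    {
+        register uint64_t x0 __asm__("x0") = fid;
+        register uint64_t x1 __asm__("x1") = a1;
+        register uint64_t x2 __asm__("x2") = a2;
+        register uint64_t x3 __asm__("x3") = a3;
+        register uint64_t x4 __asm__("x4") = a4;
+
+        __asm__ volatile("smc #0"
+                         : "+r"(x0)
+                         : "r"(x1), "r"(x2), "r"(x3), "r"(x4)
+                         : "memory");
+        return (uint32_t)x0;
+    }
+
+    int request_aarch32_switch(uint32_t entry_lo, uint32_t cookie_lo)
+    {
+        /*
+         * Calling from AArch64, so PC hi and Cookie hi must be 0. On success
+         * the SMC does not return and execution restarts at entry_lo in
+         * AArch32; reaching the return below means the request was denied.
+         */
+        return (int)smc_call(0x82000020 /* ARM_SIP_SVC_EXE_STATE_SWITCH */,
+                             0, entry_lo, 0, cookie_lo);
+    }
+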
+--------------
+
+*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _SMC Calling Convention: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+.. _Performance Measurement Framework: ./firmware-design.rst#user-content-performance-measurement-framework
+.. _Firmware Design document: ./firmware-design.rst
diff --git a/docs/auth-framework.rst b/docs/auth-framework.rst
new file mode 100644
index 0000000..3a054c7
--- /dev/null
+++ b/docs/auth-framework.rst
@@ -0,0 +1,940 @@
+Abstracting a Chain of Trust
+============================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+The aim of this document is to describe the authentication framework implemented
+in the Trusted Firmware. This framework fulfills the following requirements:
+
+#. It should be possible for a platform port to specify the Chain of Trust in
+   terms of certificate hierarchy and the mechanisms used to verify a
+   particular image/certificate.
+
+#. The framework should distinguish between:
+
+   -  The mechanism used to encode and transport information, e.g. DER encoded
+      X.509v3 certificates to ferry Subject Public Keys, hashes and non-volatile
+      counters.
+
+   -  The mechanism used to verify the transported information i.e. the
+      cryptographic libraries.
+
+The framework has been designed following a modular approach illustrated in the
+next diagram:
+
+::
+
+        +---------------+---------------+------------+
+        | Trusted       | Trusted       | Trusted    |
+        | Firmware      | Firmware      | Firmware   |
+        | Generic       | IO Framework  | Platform   |
+        | Code i.e.     | (IO)          | Port       |
+        | BL1/BL2 (GEN) |               | (PP)       |
+        +---------------+---------------+------------+
+               ^               ^               ^
+               |               |               |
+               v               v               v
+         +-----------+   +-----------+   +-----------+
+         |           |   |           |   | Image     |
+         | Crypto    |   | Auth      |   | Parser    |
+         | Module    |<->| Module    |<->| Module    |
+         | (CM)      |   | (AM)      |   | (IPM)     |
+         |           |   |           |   |           |
+         +-----------+   +-----------+   +-----------+
+               ^                               ^
+               |                               |
+               v                               v
+        +----------------+             +-----------------+
+        | Cryptographic  |             | Image Parser    |
+        | Libraries (CL) |             | Libraries (IPL) |
+        +----------------+             +-----------------+
+                      |                |
+                      |                |
+                      |                |
+                      v                v
+                     +-----------------+
+                     | Misc. Libs e.g. |
+                     | ASN.1 decoder   |
+                     |                 |
+                     +-----------------+
+
+        DIAGRAM 1.
+
+This document describes the inner details of the authentication framework and
+the abstraction mechanisms available to specify a Chain of Trust.
+
+Framework design
+----------------
+
+This section describes some aspects of the framework design and the rationale
+behind them. These aspects are key to verifying a Chain of Trust.
+
+Chain of Trust
+~~~~~~~~~~~~~~
+
+A CoT is basically a sequence of authentication images which usually starts with
+a root of trust and culminates in a single data image. The following diagram
+illustrates how this maps to a CoT for the BL31 image described in the
+TBBR-Client specification.
+
+::
+
+        +------------------+       +-------------------+
+        | ROTPK/ROTPK Hash |------>| Trusted Key       |
+        +------------------+       | Certificate       |
+                                   | (Auth Image)      |
+                                  /+-------------------+
+                                 /            |
+                                /             |
+                               /              |
+                              /               |
+                             L                v
+        +------------------+       +-------------------+
+        | Trusted World    |------>| BL31 Key          |
+        | Public Key       |       | Certificate       |
+        +------------------+       | (Auth Image)      |
+                                   +-------------------+
+                                  /           |
+                                 /            |
+                                /             |
+                               /              |
+                              /               v
+        +------------------+ L     +-------------------+
+        | BL31 Content     |------>| BL31 Content      |
+        | Certificate PK   |       | Certificate       |
+        +------------------+       | (Auth Image)      |
+                                   +-------------------+
+                                  /           |
+                                 /            |
+                                /             |
+                               /              |
+                              /               v
+        +------------------+ L     +-------------------+
+        | BL31 Hash        |------>| BL31 Image        |
+        |                  |       | (Data Image)      |
+        +------------------+       |                   |
+                                   +-------------------+
+
+        DIAGRAM 2.
+
+The root of trust is usually a public key (ROTPK) that has been burnt in the
+platform and cannot be modified.
+
+Image types
+~~~~~~~~~~~
+
+Images in a CoT are categorised as authentication and data images. An
+authentication image contains information to authenticate a data image or
+another authentication image. A data image is usually a boot loader binary, but
+it could be any other data that requires authentication.
+
+Component responsibilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For every image in a Chain of Trust, the following high level operations are
+performed to verify it:
+
+#. Allocate memory for the image either statically or at runtime.
+
+#. Identify the image and load it in the allocated memory.
+
+#. Check the integrity of the image as per its type.
+
+#. Authenticate the image as per the cryptographic algorithms used.
+
+#. If the image is an authentication image, extract the information that will
+   be used to authenticate the next image in the CoT.
+
+In Diagram 1, each component is responsible for one or more of these operations.
+The responsibilities are briefly described below.
+
+TF Generic code and IO framework (GEN/IO)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These components are responsible for initiating the authentication process for a
+particular image in BL1 or BL2. For each BL image that requires authentication,
+the Generic code recursively asks the Authentication module for the parent
+image until either an authenticated image or the ROT is reached. The Generic
+code then calls the IO framework to load the image and calls the Authentication
+module to authenticate it, following the CoT from the ROT to the image.
+
+TF Platform Port (PP)
+^^^^^^^^^^^^^^^^^^^^^
+
+The platform is responsible for:
+
+#. Specifying the CoT for each image that needs to be authenticated. Details of
+   how a CoT can be specified by the platform are explained later. The platform
+   also specifies the authentication methods and the parsing method used for
+   each image.
+
+#. Statically allocating memory for each parameter in each image which is
+   used for verifying the CoT, e.g. memory for public keys, hashes etc.
+
+#. Providing the ROTPK or a hash of it.
+
+#. Providing additional information to the IPM to enable it to identify and
+   extract authentication parameters contained in an image, e.g. if the
+   parameters are stored as X509v3 extensions, the corresponding OID must be
+   provided.
+
+#. Fulfilling any other memory requirements of the IPM and the CM (not currently
+   described in this document).
+
+#. Exporting functions to verify an image which uses an authentication method that
+   cannot be interpreted by the CM, e.g. if an image has to be verified using a
+   NV counter, then the value of the counter to compare with can only be
+   provided by the platform.
+
+#. Exporting a custom IPM if a proprietary image format is being used (described
+   later).
+
+Authentication Module (AM)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is responsible for:
+
+#. Providing the necessary abstraction mechanisms to describe a CoT. Amongst
+   other things, the authentication and image parsing methods must be specified
+   by the PP in the CoT.
+
+#. Verifying the CoT passed by GEN by utilising functionality exported by the
+   PP, IPM and CM.
+
+#. Tracking which images have been verified. In case an image is a part of
+   multiple CoTs then it should be verified only once, e.g. the Trusted World
+   Key Certificate in the TBBR-Client spec contains information to verify
+   SCP\_BL2, BL31 and BL32, each of which has a separate CoT. (This
+   responsibility has not been described in this document but should be
+   trivial to implement).
+
+#. Reusing memory meant for a data image to verify authentication images e.g.
+   in the CoT described in Diagram 2, each certificate can be loaded and
+   verified in the memory reserved by the platform for the BL31 image. By the
+   time BL31 (the data image) is loaded, all information to authenticate it
+   will have been extracted from the parent image i.e. BL31 content
+   certificate. It is assumed that the size of an authentication image will
+   never exceed the size of a data image. It should be possible to verify this
+   at build time using asserts.
+
+Cryptographic Module (CM)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The CM is responsible for providing an API to:
+
+#. Verify a digital signature.
+#. Verify a hash.
+
+The CM does not include any cryptography related code, but it relies on an
+external library to perform the cryptographic operations. A Crypto-Library (CL)
+linking the CM and the external library must be implemented. The following
+functions must be provided by the CL:
+
+.. code:: c
+
+    void (*init)(void);
+    int (*verify_signature)(void *data_ptr, unsigned int data_len,
+                            void *sig_ptr, unsigned int sig_len,
+                            void *sig_alg, unsigned int sig_alg_len,
+                            void *pk_ptr, unsigned int pk_len);
+    int (*verify_hash)(void *data_ptr, unsigned int data_len,
+                       void *digest_info_ptr, unsigned int digest_info_len);
+
+These functions are registered in the CM using the macro:
+
+.. code:: c
+
+    REGISTER_CRYPTO_LIB(_name, _init, _verify_signature, _verify_hash);
+
+``_name`` must be a string containing the name of the CL. This name is used for
+debugging purposes.
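+
+As an illustration only (the ``demo`` name and the empty function bodies below
+are placeholders, not an existing library binding), a CL wrapping an external
+crypto library could be registered as follows:
+
+.. code:: c
+
+    static void demo_init(void)
+    {
+        /* Initialise the underlying crypto library here. */
+    }
+
+    static int demo_verify_signature(void *data_ptr, unsigned int data_len,
+                                     void *sig_ptr, unsigned int sig_len,
+                                     void *sig_alg, unsigned int sig_alg_len,
+                                     void *pk_ptr, unsigned int pk_len)
+    {
+        /* Call into the external library; return 0 on success. */
+        return 0;
+    }
+
+    static int demo_verify_hash(void *data_ptr, unsigned int data_len,
+                                void *digest_info_ptr,
+                                unsigned int digest_info_len)
+    {
+        /* Hash data_ptr and compare it with the DER-encoded DigestInfo. */
+        return 0;
+    }
+
+    REGISTER_CRYPTO_LIB("demo", demo_init, demo_verify_signature,
+                        demo_verify_hash);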
+
+Image Parser Module (IPM)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The IPM is responsible for:
+
+#. Checking the integrity of each image loaded by the IO framework.
+#. Extracting parameters used for authenticating an image based upon a
+   description provided by the platform in the CoT descriptor.
+
+Images may have different formats (for example, authentication images could be
+x509v3 certificates, signed ELF files or any other platform specific format).
+The IPM allows an Image Parser Library (IPL) to be registered for every image format
+used in the CoT. This library must implement the specific methods to parse the
+image. The IPM obtains the image format from the CoT and calls the right IPL to
+check the image integrity and extract the authentication parameters.
+
+See Section "Describing the image parsing methods" for more details about the
+mechanism the IPM provides to define and register IPLs.
+
+Authentication methods
+~~~~~~~~~~~~~~~~~~~~~~
+
+The AM supports the following authentication methods:
+
+#. Hash
+#. Digital signature
+
+The platform may specify these methods in the CoT in case it decides to define
+a custom CoT instead of reusing a predefined one.
+
+If a data image uses multiple methods, then all the methods must be a part of
+the same CoT. The number and type of parameters are method specific. These
+parameters should be obtained from the parent image using the IPM.
+
+#. Hash
+
+   Parameters:
+
+   #. A pointer to data to hash
+   #. Length of the data
+   #. A pointer to the hash
+   #. Length of the hash
+
+   The hash will be represented by the DER encoding of the following ASN.1
+   type:
+
+   ::
+
+       DigestInfo ::= SEQUENCE {
+           digestAlgorithm  DigestAlgorithmIdentifier,
+           digest           Digest
+       }
+
+   This ASN.1 structure makes it possible to remove any assumption about the
+   type of hash algorithm used as this information accompanies the hash. This
+   should allow the Cryptography Library (CL) to support multiple hash
+   algorithm implementations.
+
+#. Digital Signature
+
+   Parameters:
+
+   #. A pointer to data to sign
+   #. Length of the data
+   #. Public Key Algorithm
+   #. Public Key value
+   #. Digital Signature Algorithm
+   #. Digital Signature value
+
+   The Public Key parameters will be represented by the DER encoding of the
+   following ASN.1 type:
+
+   ::
+
+       SubjectPublicKeyInfo  ::=  SEQUENCE  {
+           algorithm         AlgorithmIdentifier{PUBLIC-KEY,{PublicKeyAlgorithms}},
+           subjectPublicKey  BIT STRING  }
+
+   The Digital Signature Algorithm will be represented by the DER encoding of
+   the following ASN.1 types.
+
+   ::
+
+       AlgorithmIdentifier {ALGORITHM:IOSet } ::= SEQUENCE {
+           algorithm         ALGORITHM.&id({IOSet}),
+           parameters        ALGORITHM.&Type({IOSet}{@algorithm}) OPTIONAL
+       }
+
+   The digital signature will be represented by:
+
+   ::
+
+       signature  ::=  BIT STRING
+
+The authentication framework will use the image descriptor to extract all the
+information related to authentication.
+
+Specifying a Chain of Trust
+---------------------------
+
+A CoT can be described as a set of image descriptors linked together in a
+particular order. The order dictates the sequence in which they must be
+verified. Each image has a set of properties which allow the AM to verify it.
+These properties are described below.
+
+The PP is responsible for defining a single or multiple CoTs for a data image.
+Unless otherwise specified, the data structures described in the following
+sections are populated by the PP statically.
+
+Describing the image parsing methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The parsing method refers to the format of a particular image. For example, an
+authentication image that represents a certificate could be in the X.509v3
+format. A data image that represents a boot loader stage could be in raw binary
+or ELF format. The IPM supports three parsing methods. An image has to use one
+of the three methods described below. An IPL is responsible for interpreting a
+single parsing method. There has to be one IPL for every method used by the
+platform.
+
+#. Raw format: This format is effectively a nop as an image using this method
+   is treated as being in raw binary format e.g. boot loader images used by ARM
+   TF. This method should only be used by data images.
+
+#. X509V3 method: This method uses industry standards like X.509 to represent
+   PKI certificates (authentication images). It is expected that open source
+   libraries will be available which can be used to parse an image represented
+   by this method. Such libraries can be used to write the corresponding IPL
+   e.g. the X.509 parsing library code in mbed TLS.
+
+#. Platform defined method: This method caters for platform specific
+   proprietary standards to represent authentication or data images. For
+   example, the signature of a data image could be appended to the data image
+   raw binary. A header could be prepended to the combined blob to specify the
+   extents of each component. The platform will have to implement the
+   corresponding IPL to interpret such a format.
+
+The following enum can be used to define these three methods.
+
+.. code:: c
+
+    typedef enum img_type_enum {
+        IMG_RAW,            /* Binary image */
+        IMG_PLAT,           /* Platform specific format */
+        IMG_CERT,           /* X509v3 certificate */
+        IMG_MAX_TYPES,
+    } img_type_t;
+
+An IPL must provide functions with the following prototypes:
+
+.. code:: c
+
+    void init(void);
+    int check_integrity(void *img, unsigned int img_len);
+    int get_auth_param(const auth_param_type_desc_t *type_desc,
+                          void *img, unsigned int img_len,
+                          void **param, unsigned int *param_len);
+
+An IPL for each type must be registered using the following macro:
+
+::
+
+    REGISTER_IMG_PARSER_LIB(_type, _name, _init, _check_int, _get_param)
+
+-  ``_type``: one of the types described above.
+-  ``_name``: a string containing the IPL name for debugging purposes.
+-  ``_init``: initialization function pointer.
+-  ``_check_int``: check image integrity function pointer.
+-  ``_get_param``: extract authentication parameter function pointer.
+
+The ``init()`` function will be used to initialize the IPL.
+
+The ``check_integrity()`` function is passed a pointer to the memory where the
+image has been loaded by the IO framework and the image length. It should ensure
+that the image is in the format corresponding to the parsing method and has not
+been tampered with. For example, RFC-2459 describes a validation sequence for an
+X.509 certificate.
+
+The ``get_auth_param()`` function is passed a parameter descriptor containing
+information about the parameter (``type_desc`` and ``cookie``) to identify and
+extract the data corresponding to that parameter from an image. This data will
+be used to verify either the current or the next image in the CoT sequence.
+
+Each image in the CoT will specify the parsing method it uses. This information
+will be used by the IPM to find the right parser descriptor for the image.
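+
+For illustration, a platform-defined IPL could be registered as sketched below;
+the ``plat_`` function names and their trivial bodies are placeholders rather
+than an existing parser:
+
+.. code:: c
+
+    static void plat_parser_init(void)
+    {
+        /* One-time parser initialisation, if any. */
+    }
+
+    static int plat_check_integrity(void *img, unsigned int img_len)
+    {
+        /* Validate the proprietary header of the image here. */
+        return 0;
+    }
+
+    static int plat_get_auth_param(const auth_param_type_desc_t *type_desc,
+                                   void *img, unsigned int img_len,
+                                   void **param, unsigned int *param_len)
+    {
+        /* Locate the requested parameter inside the image and return it. */
+        return 0;
+    }
+
+    REGISTER_IMG_PARSER_LIB(IMG_PLAT, "platform parser", plat_parser_init,
+                            plat_check_integrity, plat_get_auth_param);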
+
+Describing the authentication method(s)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As part of the CoT, each image has to specify one or more authentication methods
+which will be used to verify it. As described in the Section "Authentication
+methods", there are three methods supported by the AM.
+
+.. code:: c
+
+    typedef enum {
+        AUTH_METHOD_NONE,
+        AUTH_METHOD_HASH,
+        AUTH_METHOD_SIG,
+        AUTH_METHOD_NUM
+    } auth_method_type_t;
+
+The AM defines the type of each parameter used by an authentication method. It
+uses this information to:
+
+#. Specify to the ``get_auth_param()`` function exported by the IPM, which
+   parameter should be extracted from an image.
+
+#. Correctly marshall the parameters while calling the verification function
+   exported by the CM and PP.
+
+#. Extract authentication parameters from a parent image in order to verify a
+   child image e.g. to verify the certificate image, the public key has to be
+   obtained from the parent image.
+
+.. code:: c
+
+    typedef enum {
+        AUTH_PARAM_NONE,
+        AUTH_PARAM_RAW_DATA,        /* Raw image data */
+        AUTH_PARAM_SIG,         /* The image signature */
+        AUTH_PARAM_SIG_ALG,     /* The image signature algorithm */
+        AUTH_PARAM_HASH,        /* A hash (including the algorithm) */
+        AUTH_PARAM_PUB_KEY,     /* A public key */
+    } auth_param_type_t;
+
+The AM defines the following structure to identify an authentication parameter
+required to verify an image.
+
+.. code:: c
+
+    typedef struct auth_param_type_desc_s {
+        auth_param_type_t type;
+        void *cookie;
+    } auth_param_type_desc_t;
+
+``cookie`` is used by the platform to specify additional information to the IPM
+which enables it to uniquely identify the parameter that should be extracted
+from an image. For example, the hash of a BL3x image in its corresponding
+content certificate is stored in an X509v3 custom extension field. An extension
+field can only be identified using an OID. In this case, the ``cookie`` could
+contain the pointer to the OID defined by the platform for the hash extension
+field while the ``type`` field could be set to ``AUTH_PARAM_HASH``. A value of 0 for
+the ``cookie`` field means that it is not used.
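+
+As an illustrative sketch (the OID string and the variable names are
+hypothetical, not the values a real platform would define), a hash stored in a
+custom X509v3 extension could be described as:
+
+.. code:: c
+
+    /* OID chosen by the platform for its hash extension (placeholder value). */
+    static const char bl3x_hash_oid[] = "1.2.3.4.5";
+
+    static auth_param_type_desc_t bl3x_hash = {
+        .type = AUTH_PARAM_HASH,
+        .cookie = (void *)bl3x_hash_oid,
+    };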
+
+For each method, the AM defines a structure with the parameters required to
+verify the image.
+
+.. code:: c
+
+    /*
+     * Parameters for authentication by hash matching
+     */
+    typedef struct auth_method_param_hash_s {
+        auth_param_type_desc_t *data;   /* Data to hash */
+        auth_param_type_desc_t *hash;   /* Hash to match with */
+    } auth_method_param_hash_t;
+
+    /*
+     * Parameters for authentication by signature
+     */
+    typedef struct auth_method_param_sig_s {
+        auth_param_type_desc_t *pk; /* Public key */
+        auth_param_type_desc_t *sig;    /* Signature to check */
+        auth_param_type_desc_t *alg;    /* Signature algorithm */
+        auth_param_type_desc_t *tbs;    /* Data signed */
+    } auth_method_param_sig_t;
+
+The AM defines the following structure to describe an authentication method for
+verifying an image
+
+.. code:: c
+
+    /*
+     * Authentication method descriptor
+     */
+    typedef struct auth_method_desc_s {
+        auth_method_type_t type;
+        union {
+            auth_method_param_hash_t hash;
+            auth_method_param_sig_t sig;
+        } param;
+    } auth_method_desc_t;
+
+Using the method type specified in the ``type`` field, the AM finds out which
+field to access within the ``param`` union.
+
+Storing Authentication parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A parameter described by ``auth_param_type_desc_t`` to verify an image could be
+obtained from either the image itself or its parent image. The memory allocated
+for loading the parent image will be reused for loading the child image. Hence
+parameters which are obtained from the parent for verifying a child image need
+to have memory allocated for them separately where they can be stored. This
+memory must be statically allocated by the platform port.
+
+The AM defines the following structure to store the data corresponding to an
+authentication parameter.
+
+.. code:: c
+
+    typedef struct auth_param_data_desc_s {
+        void *auth_param_ptr;
+        unsigned int auth_param_len;
+    } auth_param_data_desc_t;
+
+The ``auth_param_ptr`` field is initialized by the platform. The ``auth_param_len``
+field is used to specify the length of the data in the memory.
+
+For parameters that can be obtained from the child image itself, the IPM is
+responsible for populating the ``auth_param_ptr`` and ``auth_param_len`` fields
+while executing the ``img_get_auth_param()`` function.
+
+The AM defines the following structure to enable an image to describe the
+parameters that should be extracted from it and used to verify the next image
+(child) in a CoT.
+
+.. code:: c
+
+    typedef struct auth_param_desc_s {
+        auth_param_type_desc_t type_desc;
+        auth_param_data_desc_t data;
+    } auth_param_desc_t;
+
+Describing an image in a CoT
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An image in a CoT is a consolidation of the following aspects of a CoT described
+above.
+
+#. A unique identifier specified by the platform which allows the IO framework
+   to locate the image in a FIP and load it in the memory reserved for the data
+   image in the CoT.
+
+#. A parsing method which is used by the AM to find the appropriate IPM.
+
+#. Authentication methods and their parameters as described in the previous
+   section. These are used to verify the current image.
+
+#. Parameters which are used to verify the next image in the current CoT. These
+   parameters are specified only by authentication images and can be extracted
+   from the current image once it has been verified.
+
+The following data structure describes an image in a CoT.
+
+.. code:: c
+
+    typedef struct auth_img_desc_s {
+        unsigned int img_id;
+        const struct auth_img_desc_s *parent;
+        img_type_t img_type;
+        auth_method_desc_t img_auth_methods[AUTH_METHOD_NUM];
+        auth_param_desc_t authenticated_data[COT_MAX_VERIFIED_PARAMS];
+    } auth_img_desc_t;
+
+A CoT is defined as an array of ``auth_img_desc_t`` structures linked together
+by the ``parent`` field. Those nodes with no parent must be authenticated using
+the ROTPK stored in the platform.
+
+Implementation example
+----------------------
+
+This section is a detailed guide explaining a trusted boot implementation using
+the authentication framework. This example corresponds to the Applicative
+Functional Mode (AFM) as specified in the TBBR-Client document. It is
+recommended to read this guide along with the source code.
+
+The TBBR CoT
+~~~~~~~~~~~~
+
+The CoT can be found in ``drivers/auth/tbbr/tbbr_cot.c``. This CoT consists of an
+array of image descriptors and it is registered in the framework using the macro
+``REGISTER_COT(cot_desc)``, where 'cot\_desc' must be the name of the array
+(passing a pointer or any other type of indirection will cause the registration
+process to fail).
+
+The number of images participating in the boot process depends on the CoT. There
+is, however, a minimum set of images that are mandatory in the Trusted Firmware
+and thus must be present in all CoTs:
+
+-  ``BL2``
+-  ``SCP_BL2`` (platform specific)
+-  ``BL31``
+-  ``BL32`` (optional)
+-  ``BL33``
+
+The TBBR specifies the additional certificates that must accompany these images
+for proper authentication. Details about the TBBR CoT may be found in the
+`Trusted Board Boot`_ document.
+
+Following the `Platform Porting Guide`_, a platform must provide unique
+identifiers for all the images and certificates that will be loaded during the
+boot process. If a platform is using the TBBR as a reference for trusted boot,
+these identifiers can be obtained from ``include/common/tbbr/tbbr_img_def.h``.
+ARM platforms include this file in ``include/plat/arm/common/arm_def.h``. Other
+platforms may also include this file or provide their own identifiers.
+
+**Important**: the authentication module uses these identifiers to index the
+CoT array, so each descriptor's location in the array must match its identifier.
+
+Each image descriptor must specify:
+
+-  ``img_id``: the corresponding image unique identifier defined by the platform.
+-  ``img_type``: the image parser module uses the image type to call the proper
+   parsing library to check the image integrity and extract the required
+   authentication parameters. Three types of images are currently supported:
+
+   -  ``IMG_RAW``: image is a raw binary. No parsing functions are available,
+      other than reading the whole image.
+   -  ``IMG_PLAT``: image format is platform specific. The platform may use this
+      type for custom images not directly supported by the authentication
+      framework.
+   -  ``IMG_CERT``: image is an x509v3 certificate.
+
+-  ``parent``: pointer to the parent image descriptor. The parent will contain
+   the information required to authenticate the current image. If the parent
+   is NULL, the authentication parameters will be obtained from the platform
+   (i.e. the BL2 and Trusted Key certificates are signed with the ROT private
+   key, whose public part is stored in the platform).
+-  ``img_auth_methods``: this array defines the authentication methods that must
+   be checked to consider an image authenticated. Each method consists of a
+   type and a list of parameter descriptors. A parameter descriptor consists of
+   a type and a cookie which will point to specific information required to
+   extract that parameter from the image (i.e. if the parameter is stored in an
+   x509v3 extension, the cookie will point to the extension OID). Depending on
+   the method type, a different number of parameters must be specified.
+   Supported methods are:
+
+   -  ``AUTH_METHOD_HASH``: the hash of the image must match the hash extracted
+      from the parent image. The following parameter descriptors must be
+      specified:
+
+      -  ``data``: data to be hashed (obtained from current image)
+      -  ``hash``: reference hash (obtained from parent image)
+
+   -  ``AUTH_METHOD_SIG``: the image (usually a certificate) must be signed with
+      the private key whose public part is extracted from the parent image (or
+      the platform if the parent is NULL). The following parameter descriptors
+      must be specified:
+
+      -  ``pk``: the public key (obtained from parent image)
+      -  ``sig``: the digital signature (obtained from current image)
+      -  ``alg``: the signature algorithm used (obtained from current image)
+      -  ``data``: the data to be signed (obtained from current image)
+
+-  ``authenticated_data``: this array indicates what authentication parameters
+   must be extracted from an image once it has been authenticated. Each
+   parameter consists of a parameter descriptor and the buffer address/size
+   to store the parameter. The CoT is responsible for allocating the required
+   memory to store the parameters.
+
+In the ``tbbr_cot.c`` file, a set of buffers is allocated to store the parameters
+extracted from the certificates. In the case of the TBBR CoT, these parameters
+are hashes and public keys. In DER format, an RSA-2048 public key requires 294
+bytes, and a hash requires 51 bytes. Depending on the CoT and the authentication
+process, some of the buffers may be reused at different stages during the boot.
+
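+A minimal sketch of how such buffers might be declared, using the buffer and
+length names that appear in the example below (the storage class and exact set
+of buffers shown here are illustrative):
+
+.. code:: c
+
+    /* Buffers that hold the parameters extracted from the certificates. */
+    static unsigned char trusted_world_pk_buf[PK_DER_LEN];
+    static unsigned char non_trusted_world_pk_buf[PK_DER_LEN];
+    static unsigned char content_pk_buf[PK_DER_LEN];
+    static unsigned char soc_fw_hash_buf[HASH_DER_LEN];
+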
+Next in that file, the parameter descriptors are defined. These descriptors will
+be used to extract the parameter data from the corresponding image.
+
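+A hedged sketch of how a few of these descriptors might be defined, assuming an
+``AUTH_PARAM_TYPE_DESC()`` helper macro and parameter types such as
+``AUTH_PARAM_SIG_ALG`` in addition to those listed earlier (the OID variable
+name is illustrative):
+
+.. code:: c
+
+    /* OID string used as the cookie for an x509v3 extension lookup. */
+    static const char trusted_world_pk_oid[] = TRUSTED_WORLD_PK_OID;
+
+    /* NULL cookie: the parameter is obtained from the image as a whole. */
+    static auth_param_type_desc_t subject_pk =
+        AUTH_PARAM_TYPE_DESC(AUTH_PARAM_PUB_KEY, NULL);
+    static auth_param_type_desc_t sig =
+        AUTH_PARAM_TYPE_DESC(AUTH_PARAM_SIG, NULL);
+    static auth_param_type_desc_t sig_alg =
+        AUTH_PARAM_TYPE_DESC(AUTH_PARAM_SIG_ALG, NULL);
+    static auth_param_type_desc_t raw_data =
+        AUTH_PARAM_TYPE_DESC(AUTH_PARAM_RAW_DATA, NULL);
+
+    /* OID cookie: the parameter is extracted from an x509v3 extension. */
+    static auth_param_type_desc_t trusted_world_pk =
+        AUTH_PARAM_TYPE_DESC(AUTH_PARAM_PUB_KEY, (void *)&trusted_world_pk_oid);
+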
+Example: the BL31 Chain of Trust
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Four image descriptors form the BL31 Chain of Trust:
+
+.. code:: c
+
+    [TRUSTED_KEY_CERT_ID] = {
+        .img_id = TRUSTED_KEY_CERT_ID,
+        .img_type = IMG_CERT,
+        .parent = NULL,
+        .img_auth_methods = {
+            [0] = {
+                .type = AUTH_METHOD_SIG,
+                .param.sig = {
+                    .pk = &subject_pk,
+                    .sig = &sig,
+                    .alg = &sig_alg,
+                    .data = &raw_data,
+                }
+            }
+        },
+        .authenticated_data = {
+            [0] = {
+                .type_desc = &trusted_world_pk,
+                .data = {
+                    .ptr = (void *)trusted_world_pk_buf,
+                    .len = (unsigned int)PK_DER_LEN
+                }
+            },
+            [1] = {
+                .type_desc = &non_trusted_world_pk,
+                .data = {
+                    .ptr = (void *)non_trusted_world_pk_buf,
+                    .len = (unsigned int)PK_DER_LEN
+                }
+            }
+        }
+    },
+    [SOC_FW_KEY_CERT_ID] = {
+        .img_id = SOC_FW_KEY_CERT_ID,
+        .img_type = IMG_CERT,
+        .parent = &cot_desc[TRUSTED_KEY_CERT_ID],
+        .img_auth_methods = {
+            [0] = {
+                .type = AUTH_METHOD_SIG,
+                .param.sig = {
+                    .pk = &trusted_world_pk,
+                    .sig = &sig,
+                    .alg = &sig_alg,
+                    .data = &raw_data,
+                }
+            }
+        },
+        .authenticated_data = {
+            [0] = {
+                .type_desc = &soc_fw_content_pk,
+                .data = {
+                    .ptr = (void *)content_pk_buf,
+                    .len = (unsigned int)PK_DER_LEN
+                }
+            }
+        }
+    },
+    [SOC_FW_CONTENT_CERT_ID] = {
+        .img_id = SOC_FW_CONTENT_CERT_ID,
+        .img_type = IMG_CERT,
+        .parent = &cot_desc[SOC_FW_KEY_CERT_ID],
+        .img_auth_methods = {
+            [0] = {
+                .type = AUTH_METHOD_SIG,
+                .param.sig = {
+                    .pk = &soc_fw_content_pk,
+                    .sig = &sig,
+                    .alg = &sig_alg,
+                    .data = &raw_data,
+                }
+            }
+        },
+        .authenticated_data = {
+            [0] = {
+                .type_desc = &soc_fw_hash,
+                .data = {
+                    .ptr = (void *)soc_fw_hash_buf,
+                    .len = (unsigned int)HASH_DER_LEN
+                }
+            }
+        }
+    },
+    [BL31_IMAGE_ID] = {
+        .img_id = BL31_IMAGE_ID,
+        .img_type = IMG_RAW,
+        .parent = &cot_desc[SOC_FW_CONTENT_CERT_ID],
+        .img_auth_methods = {
+            [0] = {
+                .type = AUTH_METHOD_HASH,
+                .param.hash = {
+                    .data = &raw_data,
+                    .hash = &soc_fw_hash,
+                }
+            }
+        }
+    }
+
+The **Trusted Key certificate** is signed with the ROT private key and contains
+the Trusted World public key and the Non-Trusted World public key as x509v3
+extensions. This must be specified in the image descriptor using the
+``img_auth_methods`` and ``authenticated_data`` arrays, respectively.
+
+The Trusted Key certificate is authenticated by checking its digital signature
+using the ROTPK. Four parameters are required to check a signature: the public
+key, the algorithm, the signature and the data that has been signed. Therefore,
+four parameter descriptors must be specified with the authentication method:
+
+-  ``subject_pk``: parameter descriptor of type ``AUTH_PARAM_PUB_KEY``. This type
+   is used to extract a public key from the parent image. If the cookie is an
+   OID, the key is extracted from the corresponding x509v3 extension. If the
+   cookie is NULL, the subject public key is retrieved. In this case, because
+   the parent image is NULL, the public key is obtained from the platform
+   (this key will be the ROTPK).
+-  ``sig``: parameter descriptor of type ``AUTH_PARAM_SIG``. It is used to extract
+   the signature from the certificate.
+-  ``sig_alg``: parameter descriptor of type ``AUTH_PARAM_SIG_ALG``. It is used to
+   extract the signature algorithm from the certificate.
+-  ``raw_data``: parameter descriptor of type ``AUTH_PARAM_RAW_DATA``. It is used
+   to extract the data to be signed from the certificate.
+
+Once the signature has been checked and the certificate authenticated, the
+Trusted World public key needs to be extracted from the certificate. A new entry
+is created in the ``authenticated_data`` array for that purpose. In that entry,
+the corresponding parameter descriptor must be specified along with the buffer
+address to store the parameter value. In this case, the ``trusted_world_pk``
+descriptor is used to extract the public key from an x509v3 extension with OID
+``TRUSTED_WORLD_PK_OID``. The SoC Firmware Key certificate will use this
+descriptor as parameter in the signature authentication method. The key is
+stored in the ``trusted_world_pk_buf`` buffer.
+
+The **SoC Firmware Key certificate** (the BL31 key certificate) is authenticated
+by checking its digital signature using the Trusted World public key obtained
+previously from the Trusted Key certificate. In the image descriptor, we specify
+a single authentication method by signature whose public key is
+``trusted_world_pk``. Once this certificate has been authenticated, we have to
+extract the SoC firmware content public key, stored in the extension specified
+by ``soc_fw_content_pk``. This key will be copied to the ``content_pk_buf``
+buffer.
+
+The **SoC Firmware Content certificate** (the BL31 content certificate) is
+authenticated by checking its digital signature using the public key obtained
+previously from the SoC Firmware Key certificate. We specify the authentication
+method using ``soc_fw_content_pk`` as public key. After authentication, we need
+to extract the BL31 hash, stored in the extension specified by ``soc_fw_hash``.
+This hash will be copied to the ``soc_fw_hash_buf`` buffer.
+
+The **BL31 image** is authenticated by calculating its hash and matching it
+with the hash obtained from the SoC Firmware Content certificate. The image
+descriptor contains a single authentication method by hash. The parameters to
+the hash method are the reference hash, ``soc_fw_hash``, and the data to be
+hashed. In this case, it is the whole image, so we specify ``raw_data``.
+
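+To make the CoT available to the authentication module, the image descriptors
+are collected in the ``cot_desc`` array referenced by the ``parent`` pointers
+and registered with the framework. A hedged sketch, assuming an
+``auth_img_desc_t`` descriptor type and a ``REGISTER_COT()`` registration macro
+(both names are assumptions, analogous to the registration macros described in
+the following sections):
+
+.. code:: c
+
+    /* The CoT array is indexed by the image and certificate identifiers. */
+    static const auth_img_desc_t cot_desc[] = {
+        [TRUSTED_KEY_CERT_ID] = { /* ... as shown above ... */ },
+        [SOC_FW_KEY_CERT_ID] = { /* ... */ },
+        [SOC_FW_CONTENT_CERT_ID] = { /* ... */ },
+        [BL31_IMAGE_ID] = { /* ... */ },
+    };
+
+    /* Make the CoT visible to the authentication framework. */
+    REGISTER_COT(cot_desc);
+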
+The image parser library
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The image parser module relies on libraries to check the image integrity and
+extract the authentication parameters. The number and type of parser libraries
+depend on the images used in the CoT. Raw images do not need a library, so
+only an x509v3 library is required for the TBBR CoT.
+
+ARM platforms will use an x509v3 library based on mbed TLS. This library may be
+found in ``drivers/auth/mbedtls/mbedtls_x509_parser.c``. It exports three
+functions:
+
+.. code:: c
+
+    void init(void);
+    int check_integrity(void *img, unsigned int img_len);
+    int get_auth_param(const auth_param_type_desc_t *type_desc,
+                       void *img, unsigned int img_len,
+                       void **param, unsigned int *param_len);
+
+The library is registered in the framework using the macro
+``REGISTER_IMG_PARSER_LIB()``. Each time the image parser module needs to access
+an image of type ``IMG_CERT``, it will call the corresponding function exported
+in this file.
+
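+For illustration, a hedged sketch of how such a registration might look,
+assuming the macro takes the image type, a library name and the three exported
+functions (the exact argument list is an assumption):
+
+.. code:: c
+
+    /* Register this parser library for certificate images (IMG_CERT). */
+    REGISTER_IMG_PARSER_LIB(IMG_CERT, "mbedTLS X509v3", init,
+                            check_integrity, get_auth_param);
+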
+The build system must be updated to include the corresponding library and
+mbed TLS sources. ARM platforms use the ``arm_common.mk`` file to pull the
+sources.
+
+The cryptographic library
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cryptographic module relies on a library to perform the required operations,
+i.e. verify a hash or a digital signature. ARM platforms will use a library
+based on mbed TLS, which can be found in
+``drivers/auth/mbedtls/mbedtls_crypto.c``. This library is registered in the
+authentication framework using the macro ``REGISTER_CRYPTO_LIB()`` and exports
+three functions:
+
+.. code:: c
+
+    void init(void);
+    int verify_signature(void *data_ptr, unsigned int data_len,
+                         void *sig_ptr, unsigned int sig_len,
+                         void *sig_alg, unsigned int sig_alg_len,
+                         void *pk_ptr, unsigned int pk_len);
+    int verify_hash(void *data_ptr, unsigned int data_len,
+                    void *digest_info_ptr, unsigned int digest_info_len);
+
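+As an illustration, a hedged sketch of the registration, assuming the macro
+takes a library name followed by the three exported functions (the exact
+argument list is an assumption):
+
+.. code:: c
+
+    /* Register the crypto library with the authentication framework. */
+    REGISTER_CRYPTO_LIB("mbed TLS", init, verify_signature, verify_hash);
+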
+The mbed TLS library algorithm support is configured by the
+``TF_MBEDTLS_KEY_ALG`` variable, which can take one of 3 values: ``rsa``,
+``ecdsa`` or ``rsa+ecdsa``. This variable allows the Makefile to include the
+corresponding sources in the build for the various algorithms. Setting the
+variable to ``rsa+ecdsa`` enables support for both the RSA and ECDSA algorithms
+in the mbed TLS library.
+
+Note: If code size is a concern, the build option ``MBEDTLS_SHA256_SMALLER`` can
+be defined in the platform Makefile. It will make mbed TLS use an implementation
+of SHA-256 with a smaller memory footprint (~1.5 KB less) at the cost of being
+~30% slower.
+
+--------------
+
+*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _Trusted Board Boot: ./trusted-board-boot.rst
+.. _Platform Porting Guide: ./porting-guide.rst
diff --git a/docs/change-log.md b/docs/change-log.md
deleted file mode 100644
index c7b5508..0000000
--- a/docs/change-log.md
+++ /dev/null
@@ -1,710 +0,0 @@
-ARM Trusted Firmware - version 1.1
-==================================
-
-New features
-------------
-
-*   A prototype implementation of Trusted Board Boot has been added. Boot
-    loader images are verified by BL1 and BL2 during the cold boot path. BL1 and
-    BL2 use the PolarSSL SSL library to verify certificates and images. The
-    OpenSSL library is used to create the X.509 certificates. Support has been
-    added to `fip_create` tool to package the certificates in a FIP.
-
-*   Support for calling CPU and platform specific reset handlers upon entry into
-    BL3-1 during the cold and warm boot paths has been added. This happens after
-    another Boot ROM `reset_handler()` has already run. This enables a developer
-    to perform additional actions or undo actions already performed during the
-    first call of the reset handlers e.g. apply additional errata workarounds.
-
-*   Support has been added to demonstrate routing of IRQs to EL3 instead of
-    S-EL1 when execution is in secure world.
-
-*   The PSCI implementation now conforms to version 1.0 of the PSCI
-    specification. All the mandatory APIs and selected optional APIs are
-    supported. In particular, support for the `PSCI_FEATURES` API has been
-    added. A capability variable is constructed during initialization by
-    examining the `plat_pm_ops` and `spd_pm_ops` exported by the platform and
-    the Secure Payload Dispatcher.  This is used by the PSCI FEATURES function
-    to determine which PSCI APIs are supported by the platform.
-
-*   Improvements have been made to the PSCI code as follows.
-
-    *   The code has been refactored to remove redundant parameters from
-        internal functions.
-
-    *   Changes have been made to the code for PSCI `CPU_SUSPEND`, `CPU_ON` and
-        `CPU_OFF` calls to facilitate an early return to the caller in case a
-        failure condition is detected. For example, a PSCI `CPU_SUSPEND` call
-        returns `SUCCESS` to the caller if a pending interrupt is detected early
-        in the code path.
-
-    *   Optional platform APIs have been added to validate the `power_state` and
-        `entrypoint` parameters early in PSCI `CPU_ON` and `CPU_SUSPEND` code
-        paths.
-
-    *   PSCI migrate APIs have been reworked to invoke the SPD hook to determine
-        the type of Trusted OS and the CPU it is resident on (if
-        applicable). Also, during a PSCI `MIGRATE` call, the SPD hook to migrate
-        the Trusted OS is invoked.
-
-*   It is now possible to build Trusted Firmware without marking at least an
-    extra page of memory as coherent. The build flag `USE_COHERENT_MEM` can be
-    used to choose between the two implementations. This has been made possible
-    through these changes.
-
-    *   An implementation of Bakery locks, where the locks are not allocated in
-        coherent memory has been added.
-
-    *   Memory which was previously marked as coherent is now kept coherent
-        through the use of software cache maintenance operations.
-
-    Approximately, 4K worth of memory is saved for each boot loader stage when
-    `USE_COHERENT_MEM=0`. Enabling this option increases the latencies
-    associated with acquire and release of locks. It also requires changes to
-    the platform ports.
-
-*   It is now possible to specify the name of the FIP at build time by defining
-    the `FIP_NAME` variable.
-
-*   Issues with depedencies on the 'fiptool' makefile target have been
-    rectified. The `fip_create` tool is now rebuilt whenever its source files
-    change.
-
-*   The BL3-1 runtime console is now also used as the crash console. The crash
-    console is changed to SoC UART0 (UART2) from the previous FPGA UART0 (UART0)
-    on Juno. In FVP, it is changed from UART0 to UART1.
-
-*   CPU errata workarounds are applied only when the revision and part number
-    match. This behaviour has been made consistent across the debug and release
-    builds. The debug build additionally prints a warning if a mismatch is
-    detected.
-
-*   It is now possible to issue cache maintenance operations by set/way for a
-    particular level of data cache. Levels 1-3 are currently supported.
-
-*   The following improvements have been made to the FVP port.
-
-    *   The build option `FVP_SHARED_DATA_LOCATION` which allowed relocation of
-        shared data into the Trusted DRAM has been deprecated. Shared data is
-        now always located at the base of Trusted SRAM.
-
-    *   BL2 Translation tables have been updated to map only the region of
-        DRAM which is accessible to normal world. This is the region of the 2GB
-        DDR-DRAM memory at 0x80000000 excluding the top 16MB. The top 16MB is
-        accessible to only the secure world.
-
-    *   BL3-2 can now reside in the top 16MB of DRAM which is accessible only to
-        the secure world. This can be done by setting the build flag
-        `FVP_TSP_RAM_LOCATION` to the value `dram`.
-
-*   Separate transation tables are created for each boot loader image. The
-    `IMAGE_BLx` build options are used to do this.  This allows each stage to
-    create mappings only for areas in the memory map that it needs.
-
-*   A Secure Payload Dispatcher (OPTEED) for the OP-TEE Trusted OS has been
-    added.  Details of using it with ARM Trusted Firmware can be found in
-    [OP-TEE Dispatcher]
-
-
-
-Issues resolved since last release
-----------------------------------
-
-*   The Juno port has been aligned with the FVP port as follows.
-
-    *   Support for reclaiming all BL1 RW memory and BL2 memory by overlaying
-        the BL3-1/BL3-2 NOBITS sections on top of them has been added to the
-        Juno port.
-
-    *   The top 16MB of the 2GB DDR-DRAM memory at 0x80000000 is configured
-        using the TZC-400 controller to be accessible only to the secure world.
-
-    *   The ARM GIC driver is used to configure the GIC-400 instead of using a
-        GIC driver private to the Juno port.
-
-    *   PSCI `CPU_SUSPEND` calls that target a standby state are now supported.
-
-    *   The TZC-400 driver is used to configure the controller instead of direct
-        accesses to the registers.
-
-*   The Linux kernel version referred to in the user guide has DVFS and HMP
-    support enabled.
-
-*   DS-5 v5.19 did not detect Version 5.8 of the Cortex-A57-A53 Base FVPs in
-    CADI server mode. This issue is not seen with DS-5 v5.20 and Version 6.2 of
-    the Cortex-A57-A53 Base FVPs.
-
-
-Known issues
-------------
-
-*   The Trusted Board Boot implementation is a prototype. There are issues with
-    the modularity and scalability of the design. Support for a Trusted
-    Watchdog, firmware update mechanism, recovery images and Trusted debug is
-    absent. These issues will be addressed in future releases.
-
-*   The FVP and Juno ports do not use the hash of the ROTPK stored in the
-    Trusted Key Storage registers to verify the ROTPK in the
-    `plat_match_rotpk()` function. This prevents the correct establishment of
-    the Chain of Trust at the first step in the Trusted Board Boot process.
-
-*   The version of the AEMv8 Base FVP used in this release resets the model
-    instead of terminating its execution in response to a shutdown request using
-    the PSCI `SYSTEM_OFF` API. This issue will be fixed in a future version of
-    the model.
-
-*   GICv3 support is experimental. There are known issues with GICv3
-    initialization in the ARM Trusted Firmware.
-
-*   While this version greatly reduces the on-chip RAM requirements, there are
-    further RAM usage enhancements that could be made.
-
-*   The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
-    its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
-
-*   The Juno-specific firmware design documentation is incomplete.
-
-
-ARM Trusted Firmware - version 1.0
-==================================
-
-New features
-------------
-
-*   It is now possible to map higher physical addresses using non-flat virtual
-    to physical address mappings in the MMU setup.
-
-*   Wider use is now made of the per-CPU data cache in BL3-1 to store:
-
-    *   Pointers to the non-secure and secure security state contexts.
-
-    *   A pointer to the CPU-specific operations.
-
-    *   A pointer to PSCI specific information (for example the current power
-        state).
-
-    *   A crash reporting buffer.
-
-*   The following RAM usage improvements result in a BL3-1 RAM usage reduction
-    from 96KB to 56KB (for FVP with TSPD), and a total RAM usage reduction
-    across all images from 208KB to 88KB, compared to the previous release.
-
-    *   Removed the separate `early_exception` vectors from BL3-1 (2KB code size
-        saving).
-
-    *   Removed NSRAM from the FVP memory map, allowing the removal of one
-        (4KB) translation table.
-
-    *   Eliminated the internal `psci_suspend_context` array, saving 2KB.
-
-    *   Correctly dimensioned the PSCI `aff_map_node` array, saving 1.5KB in the
-        FVP port.
-
-    *   Removed calling CPU mpidr from the bakery lock API, saving 160 bytes.
-
-    *   Removed current CPU mpidr from PSCI common code, saving 160 bytes.
-
-    *   Inlined the mmio accessor functions, saving 360 bytes.
-
-    *   Fully reclaimed all BL1 RW memory and BL2 memory on the FVP port by
-        overlaying the BL3-1/BL3-2 NOBITS sections on top of these at runtime.
-
-    *   Made storing the FP register context optional, saving 0.5KB per context
-        (8KB on the FVP port, with TSPD enabled and running on 8 CPUs).
-
-    *   Implemented a leaner `tf_printf()` function, allowing the stack to be
-        greatly reduced.
-
-    *   Removed coherent stacks from the codebase. Stacks allocated in normal
-        memory are now used before and after the MMU is enabled. This saves 768
-        bytes per CPU in BL3-1.
-
-    *   Reworked the crash reporting in BL3-1 to use less stack.
-
-    *   Optimized the EL3 register state stored in the `cpu_context` structure
-        so that registers that do not change during normal execution are
-        re-initialized each time during cold/warm boot, rather than restored
-        from memory. This saves about 1.2KB.
-
-    *   As a result of some of the above, reduced the runtime stack size in all
-        BL images. For BL3-1, this saves 1KB per CPU.
-
-*   PSCI SMC handler improvements to correctly handle calls from secure states
-    and from AArch32.
-
-*   CPU contexts are now initialized from the `entry_point_info`. BL3-1 fully
-    determines the exception level to use for the non-trusted firmware (BL3-3)
-    based on the SPSR value provided by the BL2 platform code (or otherwise
-    provided to BL3-1). This allows platform code to directly run non-trusted
-    firmware payloads at either EL2 or EL1 without requiring an EL2 stub or OS
-    loader.
-
-*   Code refactoring improvements:
-
-    *   Refactored `fvp_config` into a common platform header.
-
-    *   Refactored the fvp gic code to be a generic driver that no longer has an
-        explicit dependency on platform code.
-
-    *   Refactored the CCI-400 driver to not have dependency on platform code.
-
-    *   Simplified the IO driver so it's no longer necessary to call `io_init()`
-        and moved all the IO storage framework code to one place.
-
-    *   Simplified the interface the the TZC-400 driver.
-
-    *   Clarified the platform porting interface to the TSP.
-
-    *   Reworked the TSPD setup code to support the alternate BL3-2
-        intialization flow where BL3-1 generic code hands control to BL3-2,
-        rather than expecting the TSPD to hand control directly to BL3-2.
-
-    *   Considerable rework to PSCI generic code to support CPU specific
-        operations.
-
-*   Improved console log output, by:
-
-    *   Adding the concept of debug log levels.
-
-    *   Rationalizing the existing debug messages and adding new ones.
-
-    *   Printing out the version of each BL stage at runtime.
-
-    *   Adding support for printing console output from assembler code,
-        including when a crash occurs before the C runtime is initialized.
-
-*   Moved up to the latest versions of the FVPs, toolchain, EDK2, kernel, Linaro
-    file system and DS-5.
-
-*   On the FVP port, made the use of the Trusted DRAM region optional at build
-    time (off by default). Normal platforms will not have such a "ready-to-use"
-    DRAM area so it is not a good example to use it.
-
-*   Added support for PSCI `SYSTEM_OFF` and `SYSTEM_RESET` APIs.
-
-*   Added support for CPU specific reset sequences, power down sequences and
-    register dumping during crash reporting. The CPU specific reset sequences
-    include support for errata workarounds.
-
-*   Merged the Juno port into the master branch. Added support for CPU hotplug
-    and CPU idle. Updated the user guide to describe how to build and run on the
-    Juno platform.
-
-
-Issues resolved since last release
-----------------------------------
-
-*   Removed the concept of top/bottom image loading. The image loader now
-    automatically detects the position of the image inside the current memory
-    layout and updates the layout to minimize fragementation. This resolves the
-    image loader limitations of previously releases. There are currently no
-    plans to support dynamic image loading.
-
-*   CPU idle now works on the publicized version of the Foundation FVP.
-
-*   All known issues relating to the compiler version used have now been
-    resolved. This TF version uses Linaro toolchain 14.07 (based on GCC 4.9).
-
-
-Known issues
-------------
-
-*   GICv3 support is experimental. The Linux kernel patches to support this are
-    not widely available. There are known issues with GICv3 initialization in
-    the ARM Trusted Firmware.
-
-*   While this version greatly reduces the on-chip RAM requirements, there are
-    further RAM usage enhancements that could be made.
-
-*   The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
-    its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
-
-*   The Juno-specific firmware design documentation is incomplete.
-
-*   Some recent enhancements to the FVP port have not yet been translated into
-    the Juno port. These will be tracked via the tf-issues project.
-
-*   The Linux kernel version referred to in the user guide has DVFS and HMP
-    support disabled due to some known instabilities at the time of this
-    release. A future kernel version will re-enable these features.
-
-*   DS-5 v5.19 does not detect Version 5.8 of the Cortex-A57-A53 Base FVPs in
-    CADI server mode. This is because the `<SimName>` reported by the FVP in
-    this version has changed. For example, for the Cortex-A57x4-A53x4 Base FVP,
-    the `<SimName>` reported by the FVP is `FVP_Base_Cortex_A57x4_A53x4`, while
-    DS-5 expects it to be `FVP_Base_A57x4_A53x4`.
-
-    The temporary fix to this problem is to change the name of the FVP in
-    `sw/debugger/configdb/Boards/ARM FVP/Base_A57x4_A53x4/cadi_config.xml`.
-    Change the following line:
-
-        <SimName>System Generator:FVP_Base_A57x4_A53x4</SimName>
-    to
-        <SimName>System Generator:FVP_Base_Cortex-A57x4_A53x4</SimName>
-
-    A similar change can be made to the other Cortex-A57-A53 Base FVP variants.
-
-
-ARM Trusted Firmware - version 0.4
-==================================
-
-New features
-------------
-
-*   Makefile improvements:
-
-    *   Improved dependency checking when building.
-
-    *   Removed `dump` target (build now always produces dump files).
-
-    *   Enabled platform ports to optionally make use of parts of the Trusted
-        Firmware (e.g. BL3-1 only), rather than being forced to use all parts.
-        Also made the `fip` target optional.
-
-    *   Specified the full path to source files and removed use of the `vpath`
-        keyword.
-
-*   Provided translation table library code for potential re-use by platforms
-    other than the FVPs.
-
-*   Moved architectural timer setup to platform-specific code.
-
-*   Added standby state support to PSCI cpu_suspend implementation.
-
-*   SRAM usage improvements:
-
-    *   Started using the `-ffunction-sections`, `-fdata-sections` and
-        `--gc-sections` compiler/linker options to remove unused code and data
-        from the images. Previously, all common functions were being built into
-        all binary images, whether or not they were actually used.
-
-    *   Placed all assembler functions in their own section to allow more unused
-        functions to be removed from images.
-
-    *   Updated BL1 and BL2 to use a single coherent stack each, rather than one
-        per CPU.
-
-    *   Changed variables that were unnecessarily declared and initialized as
-        non-const (i.e. in the .data section) so they are either uninitialized
-        (zero init) or const.
-
-*   Moved the Test Secure-EL1 Payload (BL3-2) to execute in Trusted SRAM by
-    default. The option for it to run in Trusted DRAM remains.
-
-*   Implemented a TrustZone Address Space Controller (TZC-400) driver. A
-    default configuration is provided for the Base FVPs. This means the model
-    parameter `-C bp.secure_memory=1` is now supported.
-
-*   Started saving the PSCI cpu_suspend 'power_state' parameter prior to
-    suspending a CPU. This allows platforms that implement multiple power-down
-    states at the same affinity level to identify a specific state.
-
-*   Refactored the entire codebase to reduce the amount of nesting in header
-    files and to make the use of system/user includes more consistent. Also
-    split platform.h to separate out the platform porting declarations from the
-    required platform porting definitions and the definitions/declarations
-    specific to the platform port.
-
-*   Optimized the data cache clean/invalidate operations.
-
-*   Improved the BL3-1 unhandled exception handling and reporting. Unhandled
-    exceptions now result in a dump of registers to the console.
-
-*   Major rework to the handover interface between BL stages, in particular the
-    interface to BL3-1. The interface now conforms to a specification and is
-    more future proof.
-
-*   Added support for optionally making the BL3-1 entrypoint a reset handler
-    (instead of BL1). This allows platforms with an alternative image loading
-    architecture to re-use BL3-1 with fewer modifications to generic code.
-
-*   Reserved some DDR DRAM for secure use on FVP platforms to avoid future
-    compatibility problems with non-secure software.
-
-*   Added support for secure interrupts targeting the Secure-EL1 Payload (SP)
-    (using GICv2 routing only). Demonstrated this working by adding an interrupt
-    target and supporting test code to the TSP. Also demonstrated non-secure
-    interrupt handling during TSP processing.
-
-
-Issues resolved since last release
-----------------------------------
-
-*   Now support use of the model parameter `-C bp.secure_memory=1` in the Base
-    FVPs (see **New features**).
-
-*   Support for secure world interrupt handling now available (see **New
-    features**).
-
-*   Made enough SRAM savings (see **New features**) to enable the Test Secure-EL1
-    Payload (BL3-2) to execute in Trusted SRAM by default.
-
-*   The tested filesystem used for this release (Linaro AArch64 OpenEmbedded
-    14.04) now correctly reports progress in the console.
-
-*   Improved the Makefile structure to make it easier to separate out parts of
-    the Trusted Firmware for re-use in platform ports. Also, improved target
-    dependency checking.
-
-
-Known issues
-------------
-
-*   GICv3 support is experimental. The Linux kernel patches to support this are
-    not widely available. There are known issues with GICv3 initialization in
-    the ARM Trusted Firmware.
-
-*   Dynamic image loading is not available yet. The current image loader
-    implementation (used to load BL2 and all subsequent images) has some
-    limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
-    to loading errors, even if the images should theoretically fit in memory.
-
-*   The ARM Trusted Firmware still uses too much on-chip Trusted SRAM. A number
-    of RAM usage enhancements have been identified to rectify this situation.
-
-*   CPU idle does not work on the advertised version of the Foundation FVP.
-    Some FVP fixes are required that are not available externally at the time
-    of writing. This can be worked around by disabling CPU idle in the Linux
-    kernel.
-
-*   Various bugs in ARM Trusted Firmware, UEFI and the Linux kernel have been
-    observed when using Linaro toolchain versions later than 13.11. Although
-    most of these have been fixed, some remain at the time of writing. These
-    mainly seem to relate to a subtle change in the way the compiler converts
-    between 64-bit and 32-bit values (e.g. during casting operations), which
-    reveals previously hidden bugs in client code.
-
-*   The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
-    its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
-
-
-ARM Trusted Firmware - version 0.3
-==================================
-
-New features
-------------
-
-*   Support for Foundation FVP Version 2.0 added.
-    The documented UEFI configuration disables some devices that are unavailable
-    in the Foundation FVP, including MMC and CLCD. The resultant UEFI binary can
-    be used on the AEMv8 and Cortex-A57-A53 Base FVPs, as well as the Foundation
-    FVP.
-
-    NOTE: The software will not work on Version 1.0 of the Foundation FVP.
-
-*   Enabled third party contributions. Added a new contributing.md containing
-    instructions for how to contribute and updated copyright text in all files
-    to acknowledge contributors.
-
-*   The PSCI CPU_SUSPEND API has been stabilised to the extent where it can be
-    used for entry into power down states with the following restrictions:
-    -   Entry into standby states is not supported.
-    -   The API is only supported on the AEMv8 and Cortex-A57-A53 Base FVPs.
-
-*   The PSCI AFFINITY_INFO api has undergone limited testing on the Base FVPs to
-    allow experimental use.
-
-*   Required C library and runtime header files are now included locally in ARM
-    Trusted Firmware instead of depending on the toolchain standard include
-    paths. The local implementation has been cleaned up and reduced in scope.
-
-*   Added I/O abstraction framework, primarily to allow generic code to load
-    images in a platform-independent way. The existing image loading code has
-    been reworked to use the new framework. Semi-hosting and NOR flash I/O
-    drivers are provided.
-
-*   Introduced Firmware Image Package (FIP) handling code and tools. A FIP
-    combines multiple firmware images with a Table of Contents (ToC) into a
-    single binary image. The new FIP driver is another type of I/O driver. The
-    Makefile builds a FIP by default and the FVP platform code expect to load a
-    FIP from NOR flash, although some support for image loading using semi-
-    hosting is retained.
-
-    NOTE: Building a FIP by default is a non-backwards-compatible change.
-
-    NOTE: Generic BL2 code now loads a BL3-3 (non-trusted firmware) image into
-    DRAM instead of expecting this to be pre-loaded at known location. This is
-    also a non-backwards-compatible change.
-
-    NOTE: Some non-trusted firmware (e.g. UEFI) will need to be rebuilt so that
-    it knows the new location to execute from and no longer needs to copy
-    particular code modules to DRAM itself.
-
-*   Reworked BL2 to BL3-1 handover interface. A new composite structure
-    (bl31_args) holds the superset of information that needs to be passed from
-    BL2 to BL3-1, including information on how handover execution control to
-    BL3-2 (if present) and BL3-3 (non-trusted firmware).
-
-*   Added library support for CPU context management, allowing the saving and
-    restoring of
-    -   Shared system registers between Secure-EL1 and EL1.
-    -   VFP registers.
-    -   Essential EL3 system registers.
-
-*   Added a framework for implementing EL3 runtime services. Reworked the PSCI
-    implementation to be one such runtime service.
-
-*   Reworked the exception handling logic, making use of both SP_EL0 and SP_EL3
-    stack pointers for determining the type of exception, managing general
-    purpose and system register context on exception entry/exit, and handling
-    SMCs. SMCs are directed to the correct EL3 runtime service.
-
-*   Added support for a Test Secure-EL1 Payload (TSP) and a corresponding
-    Dispatcher (TSPD), which is loaded as an EL3 runtime service. The TSPD
-    implements Secure Monitor functionality such as world switching and
-    EL1 context management, and is responsible for communication with the TSP.
-    NOTE: The TSPD does not yet contain support for secure world interrupts.
-    NOTE: The TSP/TSPD is not built by default.
-
-
-Issues resolved since last release
-----------------------------------
-
-*   Support has been added for switching context between secure and normal
-    worlds in EL3.
-
-*   PSCI API calls `AFFINITY_INFO` & `PSCI_VERSION` have now been tested (to
-    a limited extent).
-
-*   The ARM Trusted Firmware build artifacts are now placed in the `./build`
-    directory and sub-directories instead of being placed in the root of the
-    project.
-
-*   The ARM Trusted Firmware is now free from build warnings. Build warnings
-    are now treated as errors.
-
-*   The ARM Trusted Firmware now provides C library support locally within the
-    project to maintain compatibility between toolchains/systems.
-
-*   The PSCI locking code has been reworked so it no longer takes locks in an
-    incorrect sequence.
-
-*   The RAM-disk method of loading a Linux file-system has been confirmed to
-    work with the ARM Trusted Firmware and Linux kernel version (based on
-    version 3.13) used in this release, for both Foundation and Base FVPs.
-
-
-Known issues
-------------
-
-The following is a list of issues which are expected to be fixed in the future
-releases of the ARM Trusted Firmware.
-
-*   The TrustZone Address Space Controller (TZC-400) is not being programmed
-    yet. Use of model parameter `-C bp.secure_memory=1` is not supported.
-
-*   No support yet for secure world interrupt handling.
-
-*   GICv3 support is experimental. The Linux kernel patches to support this are
-    not widely available. There are known issues with GICv3 initialization in
-    the ARM Trusted Firmware.
-
-*   Dynamic image loading is not available yet. The current image loader
-    implementation (used to load BL2 and all subsequent images) has some
-    limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
-    to loading errors, even if the images should theoretically fit in memory.
-
-*   The ARM Trusted Firmware uses too much on-chip Trusted SRAM. Currently the
-    Test Secure-EL1 Payload (BL3-2) executes in Trusted DRAM since there is not
-    enough SRAM. A number of RAM usage enhancements have been identified to
-    rectify this situation.
-
-*   CPU idle does not work on the advertised version of the Foundation FVP.
-    Some FVP fixes are required that are not available externally at the time
-    of writing.
-
-*   Various bugs in ARM Trusted Firmware, UEFI and the Linux kernel have been
-    observed when using Linaro toolchain versions later than 13.11. Although
-    most of these have been fixed, some remain at the time of writing. These
-    mainly seem to relate to a subtle change in the way the compiler converts
-    between 64-bit and 32-bit values (e.g. during casting operations), which
-    reveals previously hidden bugs in client code.
-
-*   The tested filesystem used for this release (Linaro AArch64 OpenEmbedded
-    14.01) does not report progress correctly in the console. It only seems to
-    produce error output, not standard output. It otherwise appears to function
-    correctly. Other filesystem versions on the same software stack do not
-    exhibit the problem.
-
-*   The Makefile structure doesn't make it easy to separate out parts of the
-    Trusted Firmware for re-use in platform ports, for example if only BL3-1 is
-    required in a platform port. Also, dependency checking in the Makefile is
-    flawed.
-
-*   The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
-    its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
-
-
-ARM Trusted Firmware - version 0.2
-==================================
-
-New features
-------------
-
-*   First source release.
-
-*   Code for the PSCI suspend feature is supplied, although this is not enabled
-    by default since there are known issues (see below).
-
-
-Issues resolved since last release
-----------------------------------
-
-*   The "psci" nodes in the FDTs provided in this release now fully comply
-    with the recommendations made in the PSCI specification.
-
-
-Known issues
-------------
-
-The following is a list of issues which are expected to be fixed in the future
-releases of the ARM Trusted Firmware.
-
-*   The TrustZone Address Space Controller (TZC-400) is not being programmed
-    yet. Use of model parameter `-C bp.secure_memory=1` is not supported.
-
-*   No support yet for secure world interrupt handling or for switching context
-    between secure and normal worlds in EL3.
-
-*   GICv3 support is experimental. The Linux kernel patches to support this are
-    not widely available. There are known issues with GICv3 initialization in
-    the ARM Trusted Firmware.
-
-*   Dynamic image loading is not available yet. The current image loader
-    implementation (used to load BL2 and all subsequent images) has some
-    limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
-    to loading errors, even if the images should theoretically fit in memory.
-
-*   Although support for PSCI `CPU_SUSPEND` is present, it is not yet stable
-    and ready for use.
-
-*   PSCI API calls `AFFINITY_INFO` & `PSCI_VERSION` are implemented but have not
-    been tested.
-
-*   The ARM Trusted Firmware make files result in all build artifacts being
-    placed in the root of the project. These should be placed in appropriate
-    sub-directories.
-
-*   The compilation of ARM Trusted Firmware is not free from compilation
-    warnings. Some of these warnings have not been investigated yet so they
-    could mask real bugs.
-
-*   The ARM Trusted Firmware currently uses toolchain/system include files like
-    stdio.h. It should provide versions of these within the project to maintain
-    compatibility between toolchains/systems.
-
-*   The PSCI code takes some locks in an incorrect sequence. This may cause
-    problems with suspend and hotplug in certain conditions.
-
-*   The Linux kernel used in this release is based on version 3.12-rc4. Using
-    this kernel with the ARM Trusted Firmware fails to start the file-system as
-    a RAM-disk. It fails to execute user-space `init` from the RAM-disk. As an
-    alternative, the VirtioBlock mechanism can be used to provide a file-system
-    to the kernel.
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved._
-
-[OP-TEE Dispatcher]:       ./optee-dispatcher.md
diff --git a/docs/change-log.rst b/docs/change-log.rst
new file mode 100644
index 0000000..f5ad562
--- /dev/null
+++ b/docs/change-log.rst
@@ -0,0 +1,1382 @@
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+ARM Trusted Firmware - version 1.4
+==================================
+
+New features
+------------
+
+-  Enabled support for platforms with hardware assisted coherency.
+
+   A new build option HW_ASSISTED_COHERENCY allows platforms to take advantage
+   of the following optimisations:
+
+   -  Skip performing cache maintenance during power-up and power-down.
+
+   -  Use spin-locks instead of bakery locks.
+
+   -  Enable data caches early on warm-booted CPUs.
+
+-  Added support for Cortex-A75 and Cortex-A55 processors.
+
+   Both Cortex-A75 and Cortex-A55 processors use the ARM DynamIQ Shared Unit
+   (DSU). The power-down and power-up sequences are therefore mostly managed in
+   hardware, reducing complexity of the software operations.
+
+-  Introduced ARM GIC-600 driver.
+
+   The ARM GIC-600 IP complies with the ARM GICv3 architecture. For FVP
+   platforms, the GIC-600 driver is chosen when FVP_USE_GIC_DRIVER is set to
+   FVP_GIC600.
+
+-  Updated GICv3 support:
+
+   -  Introduced power management APIs for GICv3 Redistributor. These APIs
+      allow platforms to power down the Redistributor during CPU power on/off.
+      Requires the GICv3 implementations to have power management operations.
+
+      Implemented the power management APIs for FVP.
+
+   -  GIC driver data is flushed by the primary CPU so that secondary CPUs do
+      not read stale GIC data.
+
+-  Added support for ARM System Control and Management Interface v1.0 (SCMI).
+
+   The SCMI driver implements the power domain management and system power
+   management protocol of the SCMI specification (ARM DEN 0056A) for
+   communicating with any compliant power controller.
+
+   Support is added for the Juno platform. The driver can be found in the
+   plat/arm/css/drivers folder.
+
+-  Added support to enable pre-integration of TBB with the ARM TrustZone
+   CryptoCell product, to take advantage of its hardware Root of Trust and
+   crypto acceleration services.
+
+-  Enabled Statistical Profiling Extensions for lower ELs.
+
+   The firmware support is limited to the use of SPE in the Non-secure state
+   and accesses to the SPE specific registers from S-EL1 will trap to EL3.
+
+   SPE is architecturally specified for AArch64 only.
+
+-  Code hygiene changes aligned with MISRA guidelines:
+
+   -  Fixed signed / unsigned comparison warnings in the translation table
+      library.
+
+   -  Added U(_x) macro and together with the existing ULL(_x) macro fixed
+      some of the signed-ness defects flagged by the MISRA scanner.
+
+-  Enhancements to Firmware Update feature:
+
+   -  The FWU logic now checks for overlapping images to prevent execution of
+      unauthenticated arbitrary code.
+
+   -  Introduced new FWU_SMC_IMAGE_RESET SMC that changes the image loading
+      state machine to go from COPYING, COPIED or AUTHENTICATED states to
+      RESET state. Previously, this was only possible when the authentication
+      of an image failed or when the execution of the image finished.
+
+   -  Fixed integer overflow which addressed TFV-1: Malformed Firmware Update
+      SMC can result in copy of unexpectedly large data into secure memory.
+
+-  Introduced support for ARM Compiler 6 and LLVM (clang).
+
+   ARM TF can now also be built with the ARM Compiler 6 or the clang compilers.
+   The assembler and linker must be provided by the GNU toolchain.
+
+   Tested with ARM CC 6.7 and clang 3.9.x and 4.0.x.
+
+-  Memory footprint improvements:
+
+   -  Introduced `tf_snprintf`, a reduced version of `snprintf` which has
+      support for a limited set of formats.
+
+      The mbedtls driver is updated to optionally use `tf_snprintf` instead of
+      `snprintf`.
+
+   -  The `assert()` is updated to no longer print the function name, and
+      additional logging options are supported via an optional platform define
+      `PLAT_LOG_LEVEL_ASSERT`, which controls how verbose the assert output is.
+
+-  Enhancements to Trusted Firmware support when running in AArch32 execution
+   state:
+
+   -  Support booting SP_MIN and BL33 in AArch32 execution mode on Juno. Due to
+      hardware limitations, BL1 and BL2 boot in AArch64 state and there is
+      additional trampoline code to warm reset into SP_MIN in AArch32 execution
+      state.
+
+   -  Added support for ARM Cortex-A53/57/72 MPCore processors including the
+      errata workarounds that are already implemented for AArch64 execution
+      state.
+
+   -  For FVP platforms, added AArch32 Trusted Board Boot support, including the
+      Firmware Update feature.
+
+-  Introduced ARM SiP service for use by ARM standard platforms.
+
+   -  Added new ARM SiP Service SMCs to enable the Non-secure world to read PMF
+      timestamps.
+
+      Added PMF instrumentation points in ARM TF in order to quantify the
+      overall time spent in the PSCI software implementation.
+
+   -  Added new ARM SiP service SMC to switch execution state.
+
+      This allows the lower exception level to change its execution state from
+      AArch64 to AArch32, or vice versa, via a request to EL3.
+
+-  Migrated to use SPDX[0] license identifiers to make software license
+   auditing simpler.
+
+   *NOTE:* Files that have been imported from FreeBSD have not been modified.
+
+   [0]: https://spdx.org/
+
+-  Enhancements to the translation table library:
+
+   -  Added version 2 of translation table library that allows different
+      translation tables to be modified by using different 'contexts'. Version 1
+      of the translation table library only allows the current EL's translation
+      tables to be modified.
+
+      Version 2 of the translation table also added support for dynamic
+      regions; regions that can be added and removed dynamically whilst the
+      MMU is enabled. Static regions can only be added or removed before the
+      MMU is enabled.
+
+      The dynamic mapping functionality is enabled or disabled when compiling
+      by setting the build option PLAT_XLAT_TABLES_DYNAMIC to 1 or 0. This can
+      be done per-image.
+
+   -  Added support for translation regimes with two virtual address spaces
+      such as the one shared by EL1 and EL0.
+
+      The library does not support initializing translation tables for EL0
+      software.
+
+   -  Added support to mark the translation tables as non-cacheable using an
+      additional build option `XLAT_TABLE_NC`.
+
+-  Added support for GCC stack protection. A new build option
+   ENABLE_STACK_PROTECTOR was introduced that enables compilation of all BL
+   images with one of the GCC -fstack-protector-* options.
+
+   A new platform function plat_get_stack_protector_canary() was introduced
+   that returns a value used to initialize the canary for stack corruption
+   detection. For increased effectiveness of protection platforms must provide
+   an implementation that returns a random value.
+
+-  Enhanced support for ARM platforms:
+
+   -  Added support for multi-threading CPUs, indicated by the `MT` field in
+      MPIDR.
+      A new build flag `ARM_PLAT_MT` is added, and when enabled, the functions
+      accessing MPIDR assume that the `MT` bit is set for the platform and
+      access the bit fields accordingly.
+
+      Also, a new API `plat_arm_get_cpu_pe_count` is added when `ARM_PLAT_MT` is
+      enabled, returning the Processing Element count within the physical CPU
+      corresponding to `mpidr`.
+
+   -  The ARM platforms migrated to use version 2 of the translation tables.
+
+   -  Introduced a new ARM platform layer API `plat_arm_psci_override_pm_ops`
+      which allows ARM platforms to modify `plat_arm_psci_pm_ops` and therefore
+      dynamically define PSCI capability.
+
+   -  The ARM platforms migrated to use IMAGE_LOAD_V2 by default.
+
+-  Enhanced reporting of errata workaround status with the following policy:
+
+   -  If an errata workaround is enabled:
+
+      -  If it applies (i.e. the CPU is affected by the errata), an INFO message
+         is printed, confirming that the errata workaround has been applied.
+
+      -  If it does not apply, a VERBOSE message is printed, confirming that the
+         errata workaround has been skipped.
+
+   -  If an errata workaround is not enabled, but would have applied had it
+      been, a WARN message is printed, alerting that errata workaround is
+      missing.
+
+-  Added build options ARM_ARCH_MAJOR and ARM_ARCH_MINOR to choose the
+   architecture version that ARM TF targets.
+
+-  Updated the spin lock implementation to use the more efficient CAS (Compare
+   And Swap) instruction when available. This instruction was introduced in
+   ARMv8.1-A.
+
+-  Applied errata workaround for ARM Cortex-A53: 855873.
+
+-  Applied errata workaround for ARM-Cortex-A57: 813419.
+
+-  Enabled all A53 and A57 errata workarounds for Juno, both in AArch64 and
+   AArch32 execution states.
+
+-  Added support for Socionext UniPhier SoC platform.
+
+-  Added support for Hikey960 and Hikey platforms.
+
+-  Added support for Rockchip RK3328 platform.
+
+-  Added support for NVidia Tegra T186 platform.
+
+-  Added support for Designware emmc driver.
+
+-  Imported libfdt v1.4.2 that addresses buffer overflow in fdt_offset_ptr().
+
+-  Enhanced the CPU operations framework to allow power handlers to be
+   registered on per-level basis. This enables support for future CPUs that
+   have multiple threads which might need powering down individually.
+
+-  Updated register initialisation to prevent unexpected behaviour:
+
+   -  Debug registers MDCR-EL3/SDCR and MDCR_EL2/HDCR are initialised to avoid
+      unexpected traps into the higher exception levels and disable secure
+      self-hosted debug. Additionally, secure privileged external debug on
+      Juno is disabled by programming the appropriate Juno SoC registers.
+
+   -  EL2 and EL3 configurable controls are initialised to avoid unexpected
+      traps in the higher exception levels.
+
+   -  Essential control registers are fully initialised on EL3 start-up, when
+      initialising the non-secure and secure context structures and when
+      preparing to leave EL3 for a lower EL. This gives better alignment with
+      the ARM ARM which states that software must initialise RES0 and RES1
+      fields with 0 / 1.
+
+-  Enhanced PSCI support:
+
+   -  Introduced new platform interfaces that decouple PSCI stat residency
+      calculation from PMF, enabling platforms to use alternative methods of
+      capturing timestamps.
+
+   -  PSCI stat accounting performed for retention/standby states when
+      requested at multiple power levels.
+
+-  Simplified fiptool to have a single linked list of image descriptors.
+
+-  For the TSP, resolved corruption of pre-empted secure context by aborting any
+   pre-empted SMC during PSCI power management requests.
+
+Issues resolved since last release
+----------------------------------
+
+-  ARM TF can be built with the latest mbed TLS version (v2.4.2). The earlier
+   version 2.3.0 cannot be used due to build warnings that the ARM TF build
+   system interprets as errors.
+
+-  TBBR, including the Firmware Update feature, is now supported on FVP
+   platforms when running Trusted Firmware in AArch32 state.
+
+-  The version of the AEMv8 Base FVP used in this release has resolved the issue
+   of the model executing a reset instead of terminating in response to a
+   shutdown request using the PSCI SYSTEM_OFF API.
+
+Known Issues
+------------
+
+-  Building TF with compiler optimisations disabled (-O0) fails.
+
+-  Trusted Board Boot currently does not work on Juno when running Trusted
+   Firmware in AArch32 execution state due to an error when loading sp_min to
+   memory because of a lack of free space. See `tf-issue#501`_ for more
+   details.
+
+-  The errata workaround for A53 errata 843419 is only available from binutils
+   2.26 and is not present in GCC 4.9. If this errata is applicable to the
+   platform, please use a GCC compiler version of at least 5.0. See `PR#1002`_
+   for more details.
+   more details.
+
+ARM Trusted Firmware - version 1.3
+==================================
+
+
+New features
+------------
+
+-  Added support for running Trusted Firmware in AArch32 execution state.
+
+   The PSCI library has been refactored to allow integration with **EL3 Runtime
+   Software**. This is software that is executing at the highest secure
+   privilege which is EL3 in AArch64 or Secure SVC/Monitor mode in AArch32. See
+   `PSCI Integration Guide`_.
+
+   Included is a minimal AArch32 Secure Payload, **SP-MIN**, that illustrates
+   the usage and integration of the PSCI library with EL3 Runtime Software
+   running in AArch32 state.
+
+   Booting to the BL1/BL2 images as well as booting straight to the Secure
+   Payload is supported.
+
+-  Improvements to the initialization framework for the PSCI service and ARM
+   Standard Services in general.
+
+   The PSCI service is now initialized as part of ARM Standard Service
+   initialization. This consolidates the initializations of any ARM Standard
+   Service that may be added in the future.
+
+   A new function ``get_arm_std_svc_args()`` is introduced to get arguments
+   corresponding to each standard service and must be implemented by the EL3
+   Runtime Software.
+
+   For PSCI, a new versioned structure ``psci_lib_args_t`` is introduced to
+   initialize the PSCI Library. **Note** this is a compatibility break due to
+   the change in the prototype of ``psci_setup()``.
+
+-  To support AArch32 builds of BL1 and BL2, implemented a new, alternative
+   firmware image loading mechanism that adds flexibility.
+
+   The current mechanism has a hard-coded set of images and execution order
+   (BL31, BL32, etc). The new mechanism is data-driven by a list of image
+   descriptors provided by the platform code.
+
+   ARM platforms have been updated to support the new loading mechanism.
+
+   The new mechanism is enabled by a build flag (``LOAD_IMAGE_V2``) which is
+   currently off by default for the AArch64 build.
+
+   **Note** ``TRUSTED_BOARD_BOOT`` is currently not supported when
+   ``LOAD_IMAGE_V2`` is enabled.
+
+-  Updated requirements for making contributions to ARM TF.
+
+   Commits now must have a 'Signed-off-by:' field to certify that the
+   contribution has been made under the terms of the
+   `Developer Certificate of Origin`_.
+
+   A signed CLA is no longer required.
+
+   The `Contribution Guide`_ has been updated to reflect this change.
+
+-  Introduced Performance Measurement Framework (PMF) which provides support
+   for capturing, storing, dumping and retrieving time-stamps to measure the
+   execution time of critical paths in the firmware. This relies on defining
+   fixed sample points at key places in the code.
+
+-  To support the QEMU platform port, imported libfdt v1.4.1 from
+   https://git.kernel.org/cgit/utils/dtc/dtc.git
+
+-  Updated PSCI support:
+
+   -  Added support for PSCI NODE\_HW\_STATE API for ARM platforms.
+
+   -  New optional platform hook, ``pwr_domain_pwr_down_wfi()``, in
+      ``plat_psci_ops`` to enable platforms to perform platform-specific actions
+      needed to enter powerdown, including the 'wfi' invocation.
+
+   -  PSCI STAT residency and count functions have been added on ARM platforms
+      by using PMF.
+
+-  Enhancements to the translation table library:
+
+   -  Limited memory mapping support for overlapping regions: regions may only
+      overlap if they are identity mapped or have the same virtual to physical
+      address offset, and the overlap must be complete (one region fully
+      contained within the other) without both covering exactly the same area.
+
+      This limitation will enable future enhancements without having to
+      support complex edge cases that may not be necessary.
+
+   -  The initial translation lookup level is now inferred from the virtual
+      address space size. Previously, it was hard-coded.
+
+   -  Added support for mapping Normal, Inner Non-cacheable, Outer
+      Non-cacheable memory in the translation table library.
+
+      This can be useful to map a non-cacheable memory region, such as a DMA
+      buffer.
+
+   -  Introduced the MT\_EXECUTE/MT\_EXECUTE\_NEVER memory mapping attributes to
+      specify the access permissions for instruction execution of a memory
+      region.
+
+-  Enabled support to isolate code and read-only data on separate memory pages,
+   allowing independent access control to be applied to each.
+
+-  Enabled SCR\_EL3.SIF (Secure Instruction Fetch) bit in BL1 and BL31 common
+   architectural setup code, preventing fetching instructions from non-secure
+   memory when in secure state.
+
+-  Enhancements to FIP support:
+
+   -  Replaced ``fip_create`` with ``fiptool`` which provides a more consistent
+      and intuitive interface as well as additional support to remove an image
+      from a FIP file.
+
+   -  Enabled printing the SHA256 digest with the ``info`` command, allowing
+      quick verification of an image within a FIP without having to extract
+      the image and run ``sha256sum`` on it.
+
+   -  Added support for unpacking the contents of an existing FIP file into
+      the working directory.
+
+   -  Aligned the command line options for specifying images with the naming
+      convention specified by TBBR and already used in the cert\_create tool.
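+
+   As an illustrative sketch (assuming a FIP named ``fip.bin`` and the
+   TBBR-style ``--tb-fw`` option for the BL2 image), typical invocations might
+   look like:
+
+   ::
+
+       fiptool info fip.bin             # list images and their SHA256 digests
+       fiptool unpack fip.bin           # extract images into the working directory
+       fiptool remove --tb-fw fip.bin   # remove the BL2 image from the FIP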
+
+-  Refactored the TZC-400 driver to also support memory controllers that
+   integrate TZC functionality, for example ARM CoreLink DMC-500. Also added
+   DMC-500 specific support.
+
+-  Implemented generic delay timer based on the system generic counter and
+   migrated all platforms to use it.
+
+-  Enhanced support for ARM platforms:
+
+   -  Updated image loading support to make SCP images (SCP\_BL2 and SCP\_BL2U)
+      optional.
+
+   -  Enhanced topology description support to allow multi-cluster topology
+      definitions.
+
+   -  Added interconnect abstraction layer to help platform ports select the
+      right interconnect driver, CCI or CCN, for the platform.
+
+   -  Added support to allow loading BL31 in the TZC-secured DRAM instead of
+      the default secure SRAM.
+
+   -  Added support to use a System Security Control (SSC) Registers Unit
+      enabling ARM TF to be compiled to support multiple ARM platforms and
+      then select one at runtime.
+
+   -  Restricted mapping of Trusted ROM in BL1 to what is actually needed by
+      BL1 rather than entire Trusted ROM region.
+
+   -  Flash is now mapped as execute-never by default. This increases security
+      by restricting the executable region to what is strictly needed.
+
+-  Applied the following errata workarounds for Cortex-A57: 833471, 826977,
+   829520, 828024 and 826974.
+
+-  Added support for Mediatek MT6795 platform.
+
+-  Added support for QEMU virtualization ARMv8-A target.
+
+-  Added support for Rockchip RK3368 and RK3399 platforms.
+
+-  Added support for Xilinx Zynq UltraScale+ MPSoC platform.
+
+-  Added support for ARM Cortex-A73 MPCore Processor.
+
+-  Added support for ARM Cortex-A72 processor.
+
+-  Added support for ARM Cortex-A35 processor.
+
+-  Added support for ARM Cortex-A32 MPCore Processor.
+
+-  Enabled preloaded BL33 alternative boot flow, in which BL2 does not load
+   BL33 from non-volatile storage and BL31 hands execution over to a preloaded
+   BL33. The User Guide has been updated with an example of how to use this
+   option with a bootwrapped kernel.
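+
+   A minimal sketch, assuming the ``PRELOADED_BL33_BASE`` build option with a
+   purely illustrative load address:
+
+   ::
+
+       make PLAT=fvp PRELOADED_BL33_BASE=0x80000000 all fip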
+
+-  Added support to build ARM TF on a Windows-based host machine.
+
+-  Updated Trusted Board Boot prototype implementation:
+
+   -  Enabled the ability for a production ROM with TBBR enabled to boot test
+      software before a real ROTPK is deployed (e.g. manufacturing mode).
+      Added support to use the ROTPK in the certificate without verifying it
+      against the platform value when the ``ROTPK_NOT_DEPLOYED`` bit is set.
+
+   -  Added support for non-volatile counter authentication to the
+      Authentication Module to protect against roll-back.
+
+-  Updated GICv3 support:
+
+   -  Enabled processor power-down and automatic power-on using GICv3.
+
+   -  Enabled G1S or G0 interrupts to be configured independently.
+
+   -  Changed FVP default interrupt driver to be the GICv3-only driver.
+      **Note** the default build of Trusted Firmware will not be able to boot
+      Linux kernel with GICv2 FDT blob.
+
+   -  Enabled wake-up from CPU\_SUSPEND to stand-by by temporarily re-routing
+      interrupts and then restoring them after resume.
+
+Issues resolved since last release
+----------------------------------
+
+Known issues
+------------
+
+-  The version of the AEMv8 Base FVP used in this release resets the model
+   instead of terminating its execution in response to a shutdown request using
+   the PSCI ``SYSTEM_OFF`` API. This issue will be fixed in a future version of
+   the model.
+
+-  Building TF with compiler optimisations disabled (``-O0``) fails.
+
+-  ARM TF cannot be built with mbed TLS version v2.3.0 due to build warnings
+   that the ARM TF build system interprets as errors.
+
+-  TBBR is not currently supported when running Trusted Firmware in AArch32
+   state.
+
+ARM Trusted Firmware - version 1.2
+==================================
+
+New features
+------------
+
+-  The Trusted Board Boot implementation on ARM platforms now conforms to the
+   mandatory requirements of the TBBR specification.
+
+   In particular, the boot process is now guarded by a Trusted Watchdog, which
+   will reset the system in case of an authentication or loading error. On ARM
+   platforms, a secure instance of ARM SP805 is used as the Trusted Watchdog.
+
+   Also, a firmware update process has been implemented. It enables
+   authenticated firmware to update firmware images from external interfaces to
+   SoC Non-Volatile memories. This feature functions even when the current
+   firmware in the system is corrupt or missing; it therefore may be used as
+   a recovery mode.
+
+-  Improvements have been made to the Certificate Generation Tool
+   (``cert_create``) as follows.
+
+   -  Added support for the Firmware Update process by extending the Chain
+      of Trust definition in the tool to include the Firmware Update
+      certificate and the required extensions.
+
+   -  Introduced a new API that allows one to specify command line options in
+      the Chain of Trust description. This makes the declaration of the tool's
+      arguments more flexible and easier to extend.
+
+   -  The tool has been reworked to follow a data driven approach, which
+      makes it easier to maintain and extend.
+
+-  Extended the FIP tool (``fip_create``) to support the new set of images
+   involved in the Firmware Update process.
+
+-  Various memory footprint improvements. In particular:
+
+   -  The bakery lock structure for coherent memory has been optimised.
+
+   -  The mbed TLS SHA1 functions are not needed, as SHA256 is used to
+      generate the certificate signature. Therefore, they have been compiled
+      out, reducing the memory footprint of BL1 and BL2 by approximately
+      6 KB.
+
+   -  On ARM development platforms, each BL stage now individually defines
+      the number of regions that it needs to map in the MMU.
+
+-  Added the following new design documents:
+
+   -  `Authentication framework`_
+   -  `Firmware Update`_
+   -  `TF Reset Design`_
+   -  `Power Domain Topology Design`_
+
+-  Applied the new image terminology to the code base and documentation, as
+   described on the `TF wiki on GitHub`_.
+
+-  The build system has been reworked to improve readability and facilitate
+   adding future extensions.
+
+-  On ARM standard platforms, BL31 uses the boot console during cold boot
+   but switches to the runtime console for any later logs at runtime. The TSP
+   uses the runtime console for all output.
+
+-  Implemented a basic NOR flash driver for ARM platforms. It programs the
+   device using CFI (Common Flash Interface) standard commands.
+
+-  Implemented support for booting EL3 payloads on ARM platforms, which
+   reduces the complexity of developing EL3 baremetal code by doing essential
+   baremetal initialization.
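+
+   A minimal sketch of such a build; the ``EL3_PAYLOAD_BASE`` option name and
+   the address here are assumptions for illustration:
+
+   ::
+
+       make PLAT=fvp EL3_PAYLOAD_BASE=0x80000000 all fip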
+
+-  Provided separate drivers for GICv3 and GICv2. These expect the entire
+   software stack to use either GICv2 or GICv3; hybrid GIC software systems
+   are no longer supported and the legacy ARM GIC driver has been deprecated.
+
+-  Added support for Juno r1 and r2. A single set of Juno TF binaries can run
+   on Juno r0, r1 and r2 boards. Note that this TF version depends on a Linaro
+   release that does *not* contain Juno r2 support.
+
+-  Added support for MediaTek mt8173 platform.
+
+-  Implemented a generic driver for ARM CCN IP.
+
+-  Major rework of the PSCI implementation.
+
+   -  Added framework to handle composite power states.
+
+   -  Decoupled the notions of affinity instances (which describes the
+      hierarchical arrangement of cores) and of power domain topology, instead
+      of assuming a one-to-one mapping.
+
+   -  Better alignment with version 1.0 of the PSCI specification.
+
+-  Added support for the SYSTEM\_SUSPEND PSCI API on ARM platforms. When invoked
+   on the last running core on a supported platform, this puts the system
+   into a low power mode with memory retention.
+
+-  Unified the reset handling code as much as possible across BL stages.
+   Also introduced some build options to enable optimization of the reset path
+   on platforms that support it.
+
+-  Added a simple delay timer API, as well as an SP804 timer driver, which is
+   enabled on FVP.
+
+-  Added support for NVidia Tegra T210 and T132 SoCs.
+
+-  Reorganised ARM platforms ports to greatly improve code shareability and
+   facilitate the reuse of some of this code by other platforms.
+
+-  Added support for ARM Cortex-A72 processor in the CPU specific framework.
+
+-  Provided better error handling. Platform ports can now define their own
+   error handling, for example to perform platform specific bookkeeping or
+   post-error actions.
+
+-  Implemented a unified driver for ARM Cache Coherent Interconnects used for
+   both CCI-400 & CCI-500 IPs. ARM platforms ports have been migrated to this
+   common driver. The standalone CCI-400 driver has been deprecated.
+
+Issues resolved since last release
+----------------------------------
+
+-  The Trusted Board Boot implementation has been redesigned to provide greater
+   modularity and scalability. See the `Authentication Framework`_ document.
+   All missing mandatory features are now implemented.
+
+-  The FVP and Juno ports may now use the hash of the ROTPK stored in the
+   Trusted Key Storage registers to verify the ROTPK. Alternatively, a
+   development public key hash embedded in the BL1 and BL2 binaries might be
+   used instead. The location of the ROTPK is chosen at build-time using the
+   ``ARM_ROTPK_LOCATION`` build option.
+
+-  GICv3 is now fully supported and stable.
+
+Known issues
+------------
+
+-  The version of the AEMv8 Base FVP used in this release resets the model
+   instead of terminating its execution in response to a shutdown request using
+   the PSCI ``SYSTEM_OFF`` API. This issue will be fixed in a future version of
+   the model.
+
+-  While this version has low on-chip RAM requirements, there are further
+   RAM usage enhancements that could be made.
+
+-  The upstream documentation could be improved for structural consistency,
+   clarity and completeness. In particular, the design documentation is
+   incomplete for PSCI, the TSP(D) and the Juno platform.
+
+-  Building TF with compiler optimisations disabled (``-O0``) fails.
+
+ARM Trusted Firmware - version 1.1
+==================================
+
+New features
+------------
+
+-  A prototype implementation of Trusted Board Boot has been added. Boot
+   loader images are verified by BL1 and BL2 during the cold boot path. BL1 and
+   BL2 use the PolarSSL SSL library to verify certificates and images. The
+   OpenSSL library is used to create the X.509 certificates. Support has been
+   added to ``fip_create`` tool to package the certificates in a FIP.
+
+-  Support for calling CPU and platform specific reset handlers upon entry into
+   BL3-1 during the cold and warm boot paths has been added. This happens after
+   another Boot ROM ``reset_handler()`` has already run. This enables a developer
+   to perform additional actions or undo actions already performed during the
+   first call of the reset handlers, e.g. apply additional errata workarounds.
+
+-  Support has been added to demonstrate routing of IRQs to EL3 instead of
+   S-EL1 when execution is in secure world.
+
+-  The PSCI implementation now conforms to version 1.0 of the PSCI
+   specification. All the mandatory APIs and selected optional APIs are
+   supported. In particular, support for the ``PSCI_FEATURES`` API has been
+   added. A capability variable is constructed during initialization by
+   examining the ``plat_pm_ops`` and ``spd_pm_ops`` exported by the platform and
+   the Secure Payload Dispatcher. This is used by the PSCI FEATURES function
+   to determine which PSCI APIs are supported by the platform.
+
+-  Improvements have been made to the PSCI code as follows.
+
+   -  The code has been refactored to remove redundant parameters from
+      internal functions.
+
+   -  Changes have been made to the code for PSCI ``CPU_SUSPEND``, ``CPU_ON`` and
+      ``CPU_OFF`` calls to facilitate an early return to the caller in case a
+      failure condition is detected. For example, a PSCI ``CPU_SUSPEND`` call
+      returns ``SUCCESS`` to the caller if a pending interrupt is detected early
+      in the code path.
+
+   -  Optional platform APIs have been added to validate the ``power_state`` and
+      ``entrypoint`` parameters early in PSCI ``CPU_ON`` and ``CPU_SUSPEND`` code
+      paths.
+
+   -  PSCI migrate APIs have been reworked to invoke the SPD hook to determine
+      the type of Trusted OS and the CPU it is resident on (if
+      applicable). Also, during a PSCI ``MIGRATE`` call, the SPD hook to migrate
+      the Trusted OS is invoked.
+
+-  It is now possible to build Trusted Firmware without marking at least an
+   extra page of memory as coherent. The build flag ``USE_COHERENT_MEM`` can be
+   used to choose between the two implementations. This has been made possible
+   through these changes.
+
+   -  An implementation of Bakery locks, where the locks are not allocated in
+      coherent memory has been added.
+
+   -  Memory which was previously marked as coherent is now kept coherent
+      through the use of software cache maintenance operations.
+
+   Approximately 4KB of memory is saved for each boot loader stage when
+   ``USE_COHERENT_MEM=0``. Building with this option increases the latencies
+   associated with acquiring and releasing locks, and it requires changes to
+   the platform ports.
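+
+   A minimal sketch, assuming an FVP build (the platform and BL33 path are
+   illustrative only):
+
+   ::
+
+       make PLAT=fvp USE_COHERENT_MEM=0 BL33=<path-to-bl33.bin> all fip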
+
+-  It is now possible to specify the name of the FIP at build time by defining
+   the ``FIP_NAME`` variable.
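+
+   For example (the FIP name below is purely illustrative):
+
+   ::
+
+       make PLAT=fvp BL33=<path-to-bl33.bin> FIP_NAME=firmware.bin all fip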
+
+-  Issues with dependencies on the 'fiptool' makefile target have been
+   rectified. The ``fip_create`` tool is now rebuilt whenever its source files
+   change.
+
+-  The BL3-1 runtime console is now also used as the crash console. The crash
+   console is changed to SoC UART0 (UART2) from the previous FPGA UART0 (UART0)
+   on Juno. In FVP, it is changed from UART0 to UART1.
+
+-  CPU errata workarounds are applied only when the revision and part number
+   match. This behaviour has been made consistent across the debug and release
+   builds. The debug build additionally prints a warning if a mismatch is
+   detected.
+
+-  It is now possible to issue cache maintenance operations by set/way for a
+   particular level of data cache. Levels 1-3 are currently supported.
+
+-  The following improvements have been made to the FVP port.
+
+   -  The build option ``FVP_SHARED_DATA_LOCATION`` which allowed relocation of
+      shared data into the Trusted DRAM has been deprecated. Shared data is
+      now always located at the base of Trusted SRAM.
+
+   -  BL2 translation tables have been updated to map only the region of
+      DRAM which is accessible to the normal world. This is the region of the
+      2GB DDR-DRAM memory at 0x80000000 excluding the top 16MB, which is
+      accessible only to the secure world.
+
+   -  BL3-2 can now reside in the top 16MB of DRAM which is accessible only to
+      the secure world. This can be done by setting the build flag
+      ``FVP_TSP_RAM_LOCATION`` to the value ``dram``.
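+
+      A minimal sketch of such a build; the ``SPD=tspd`` option is an
+      assumption here, used to include the TSP:
+
+      ::
+
+          make PLAT=fvp SPD=tspd FVP_TSP_RAM_LOCATION=dram all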
+
+-  Separate translation tables are created for each boot loader image. The
+   ``IMAGE_BLx`` build options are used to do this. This allows each stage to
+   create mappings only for areas in the memory map that it needs.
+
+-  A Secure Payload Dispatcher (OPTEED) for the OP-TEE Trusted OS has been
+   added. Details of using it with ARM Trusted Firmware can be found in
+   `OP-TEE Dispatcher`_.
+
+Issues resolved since last release
+----------------------------------
+
+-  The Juno port has been aligned with the FVP port as follows.
+
+   -  Support for reclaiming all BL1 RW memory and BL2 memory by overlaying
+      the BL3-1/BL3-2 NOBITS sections on top of them has been added to the
+      Juno port.
+
+   -  The top 16MB of the 2GB DDR-DRAM memory at 0x80000000 is configured
+      using the TZC-400 controller to be accessible only to the secure world.
+
+   -  The ARM GIC driver is used to configure the GIC-400 instead of using a
+      GIC driver private to the Juno port.
+
+   -  PSCI ``CPU_SUSPEND`` calls that target a standby state are now supported.
+
+   -  The TZC-400 driver is used to configure the controller instead of direct
+      accesses to the registers.
+
+-  The Linux kernel version referred to in the user guide has DVFS and HMP
+   support enabled.
+
+-  DS-5 v5.19 did not detect Version 5.8 of the Cortex-A57-A53 Base FVPs in
+   CADI server mode. This issue is not seen with DS-5 v5.20 and Version 6.2 of
+   the Cortex-A57-A53 Base FVPs.
+
+Known issues
+------------
+
+-  The Trusted Board Boot implementation is a prototype. There are issues with
+   the modularity and scalability of the design. Support for a Trusted
+   Watchdog, firmware update mechanism, recovery images and Trusted debug is
+   absent. These issues will be addressed in future releases.
+
+-  The FVP and Juno ports do not use the hash of the ROTPK stored in the
+   Trusted Key Storage registers to verify the ROTPK in the
+   ``plat_match_rotpk()`` function. This prevents the correct establishment of
+   the Chain of Trust at the first step in the Trusted Board Boot process.
+
+-  The version of the AEMv8 Base FVP used in this release resets the model
+   instead of terminating its execution in response to a shutdown request using
+   the PSCI ``SYSTEM_OFF`` API. This issue will be fixed in a future version of
+   the model.
+
+-  GICv3 support is experimental. There are known issues with GICv3
+   initialization in the ARM Trusted Firmware.
+
+-  While this version greatly reduces the on-chip RAM requirements, there are
+   further RAM usage enhancements that could be made.
+
+-  The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
+   its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
+
+-  The Juno-specific firmware design documentation is incomplete.
+
+ARM Trusted Firmware - version 1.0
+==================================
+
+New features
+------------
+
+-  It is now possible to map higher physical addresses using non-flat virtual
+   to physical address mappings in the MMU setup.
+
+-  Wider use is now made of the per-CPU data cache in BL3-1 to store:
+
+   -  Pointers to the non-secure and secure security state contexts.
+
+   -  A pointer to the CPU-specific operations.
+
+   -  A pointer to PSCI specific information (for example the current power
+      state).
+
+   -  A crash reporting buffer.
+
+-  The following RAM usage improvements result in a BL3-1 RAM usage reduction
+   from 96KB to 56KB (for FVP with TSPD), and a total RAM usage reduction
+   across all images from 208KB to 88KB, compared to the previous release.
+
+   -  Removed the separate ``early_exception`` vectors from BL3-1 (2KB code size
+      saving).
+
+   -  Removed NSRAM from the FVP memory map, allowing the removal of one
+      (4KB) translation table.
+
+   -  Eliminated the internal ``psci_suspend_context`` array, saving 2KB.
+
+   -  Correctly dimensioned the PSCI ``aff_map_node`` array, saving 1.5KB in the
+      FVP port.
+
+   -  Removed calling CPU mpidr from the bakery lock API, saving 160 bytes.
+
+   -  Removed current CPU mpidr from PSCI common code, saving 160 bytes.
+
+   -  Inlined the mmio accessor functions, saving 360 bytes.
+
+   -  Fully reclaimed all BL1 RW memory and BL2 memory on the FVP port by
+      overlaying the BL3-1/BL3-2 NOBITS sections on top of these at runtime.
+
+   -  Made storing the FP register context optional, saving 0.5KB per context
+      (8KB on the FVP port, with TSPD enabled and running on 8 CPUs).
+
+   -  Implemented a leaner ``tf_printf()`` function, allowing the stack to be
+      greatly reduced.
+
+   -  Removed coherent stacks from the codebase. Stacks allocated in normal
+      memory are now used before and after the MMU is enabled. This saves 768
+      bytes per CPU in BL3-1.
+
+   -  Reworked the crash reporting in BL3-1 to use less stack.
+
+   -  Optimized the EL3 register state stored in the ``cpu_context`` structure
+      so that registers that do not change during normal execution are
+      re-initialized each time during cold/warm boot, rather than restored
+      from memory. This saves about 1.2KB.
+
+   -  As a result of some of the above, reduced the runtime stack size in all
+      BL images. For BL3-1, this saves 1KB per CPU.
+
+-  PSCI SMC handler improvements to correctly handle calls from secure states
+   and from AArch32.
+
+-  CPU contexts are now initialized from the ``entry_point_info``. BL3-1 fully
+   determines the exception level to use for the non-trusted firmware (BL3-3)
+   based on the SPSR value provided by the BL2 platform code (or otherwise
+   provided to BL3-1). This allows platform code to directly run non-trusted
+   firmware payloads at either EL2 or EL1 without requiring an EL2 stub or OS
+   loader.
+
+-  Code refactoring improvements:
+
+   -  Refactored ``fvp_config`` into a common platform header.
+
+   -  Refactored the fvp gic code to be a generic driver that no longer has an
+      explicit dependency on platform code.
+
+   -  Refactored the CCI-400 driver to not have dependency on platform code.
+
+   -  Simplified the IO driver so it's no longer necessary to call ``io_init()``
+      and moved all the IO storage framework code to one place.
+
+   -  Simplified the interface to the TZC-400 driver.
+
+   -  Clarified the platform porting interface to the TSP.
+
+   -  Reworked the TSPD setup code to support the alternate BL3-2
+      initialization flow where BL3-1 generic code hands control to BL3-2,
+      rather than expecting the TSPD to hand control directly to BL3-2.
+
+   -  Considerable rework to PSCI generic code to support CPU specific
+      operations.
+
+-  Improved console log output, by:
+
+   -  Adding the concept of debug log levels.
+
+   -  Rationalizing the existing debug messages and adding new ones.
+
+   -  Printing out the version of each BL stage at runtime.
+
+   -  Adding support for printing console output from assembler code,
+      including when a crash occurs before the C runtime is initialized.
+
+-  Moved up to the latest versions of the FVPs, toolchain, EDK2, kernel, Linaro
+   file system and DS-5.
+
+-  On the FVP port, made the use of the Trusted DRAM region optional at build
+   time (off by default). Normal platforms will not have such a "ready-to-use"
+   DRAM area, so relying on it is not a good example to follow.
+
+-  Added support for PSCI ``SYSTEM_OFF`` and ``SYSTEM_RESET`` APIs.
+
+-  Added support for CPU specific reset sequences, power down sequences and
+   register dumping during crash reporting. The CPU specific reset sequences
+   include support for errata workarounds.
+
+-  Merged the Juno port into the master branch. Added support for CPU hotplug
+   and CPU idle. Updated the user guide to describe how to build and run on the
+   Juno platform.
+
+Issues resolved since last release
+----------------------------------
+
+-  Removed the concept of top/bottom image loading. The image loader now
+   automatically detects the position of the image inside the current memory
+   layout and updates the layout to minimize fragmentation. This resolves the
+   image loader limitations of previous releases. There are currently no
+   plans to support dynamic image loading.
+
+-  CPU idle now works on the publicized version of the Foundation FVP.
+
+-  All known issues relating to the compiler version used have now been
+   resolved. This TF version uses Linaro toolchain 14.07 (based on GCC 4.9).
+
+Known issues
+------------
+
+-  GICv3 support is experimental. The Linux kernel patches to support this are
+   not widely available. There are known issues with GICv3 initialization in
+   the ARM Trusted Firmware.
+
+-  While this version greatly reduces the on-chip RAM requirements, there are
+   further RAM usage enhancements that could be made.
+
+-  The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
+   its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
+
+-  The Juno-specific firmware design documentation is incomplete.
+
+-  Some recent enhancements to the FVP port have not yet been translated into
+   the Juno port. These will be tracked via the tf-issues project.
+
+-  The Linux kernel version referred to in the user guide has DVFS and HMP
+   support disabled due to some known instabilities at the time of this
+   release. A future kernel version will re-enable these features.
+
+-  DS-5 v5.19 does not detect Version 5.8 of the Cortex-A57-A53 Base FVPs in
+   CADI server mode. This is because the ``<SimName>`` reported by the FVP in
+   this version has changed. For example, for the Cortex-A57x4-A53x4 Base FVP,
+   the ``<SimName>`` reported by the FVP is ``FVP_Base_Cortex_A57x4_A53x4``, while
+   DS-5 expects it to be ``FVP_Base_A57x4_A53x4``.
+
+   The temporary fix to this problem is to change the name of the FVP in
+   ``sw/debugger/configdb/Boards/ARM FVP/Base_A57x4_A53x4/cadi_config.xml``.
+   Change the following line:
+
+   ::
+
+       <SimName>System Generator:FVP_Base_A57x4_A53x4</SimName>
+
+   to:
+
+   ::
+
+       System Generator:FVP_Base_Cortex-A57x4_A53x4
+
+   A similar change can be made to the other Cortex-A57-A53 Base FVP variants.
+
+ARM Trusted Firmware - version 0.4
+==================================
+
+New features
+------------
+
+-  Makefile improvements:
+
+   -  Improved dependency checking when building.
+
+   -  Removed ``dump`` target (build now always produces dump files).
+
+   -  Enabled platform ports to optionally make use of parts of the Trusted
+      Firmware (e.g. BL3-1 only), rather than being forced to use all parts.
+      Also made the ``fip`` target optional.
+
+   -  Specified the full path to source files and removed use of the ``vpath``
+      keyword.
+
+-  Provided translation table library code for potential re-use by platforms
+   other than the FVPs.
+
+-  Moved architectural timer setup to platform-specific code.
+
+-  Added standby state support to PSCI cpu\_suspend implementation.
+
+-  SRAM usage improvements:
+
+   -  Started using the ``-ffunction-sections``, ``-fdata-sections`` and
+      ``--gc-sections`` compiler/linker options to remove unused code and data
+      from the images. Previously, all common functions were being built into
+      all binary images, whether or not they were actually used.
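+
+      Conceptually, this amounts to build options along these lines (the
+      variable names here are illustrative, not the actual TF makefile
+      variables):
+
+      ::
+
+          CFLAGS  += -ffunction-sections -fdata-sections
+          LDFLAGS += --gc-sections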
+
+   -  Placed all assembler functions in their own section to allow more unused
+      functions to be removed from images.
+
+   -  Updated BL1 and BL2 to use a single coherent stack each, rather than one
+      per CPU.
+
+   -  Changed variables that were unnecessarily declared and initialized as
+      non-const (i.e. in the .data section) so they are either uninitialized
+      (zero init) or const.
+
+-  Moved the Test Secure-EL1 Payload (BL3-2) to execute in Trusted SRAM by
+   default. The option for it to run in Trusted DRAM remains.
+
+-  Implemented a TrustZone Address Space Controller (TZC-400) driver. A
+   default configuration is provided for the Base FVPs. This means the model
+   parameter ``-C bp.secure_memory=1`` is now supported.
+
+-  Started saving the PSCI cpu\_suspend 'power\_state' parameter prior to
+   suspending a CPU. This allows platforms that implement multiple power-down
+   states at the same affinity level to identify a specific state.
+
+-  Refactored the entire codebase to reduce the amount of nesting in header
+   files and to make the use of system/user includes more consistent. Also
+   split platform.h to separate out the platform porting declarations from the
+   required platform porting definitions and the definitions/declarations
+   specific to the platform port.
+
+-  Optimized the data cache clean/invalidate operations.
+
+-  Improved the BL3-1 unhandled exception handling and reporting. Unhandled
+   exceptions now result in a dump of registers to the console.
+
+-  Major rework to the handover interface between BL stages, in particular the
+   interface to BL3-1. The interface now conforms to a specification and is
+   more future proof.
+
+-  Added support for optionally making the BL3-1 entrypoint a reset handler
+   (instead of BL1). This allows platforms with an alternative image loading
+   architecture to re-use BL3-1 with fewer modifications to generic code.
+
+-  Reserved some DDR DRAM for secure use on FVP platforms to avoid future
+   compatibility problems with non-secure software.
+
+-  Added support for secure interrupts targeting the Secure-EL1 Payload (SP)
+   (using GICv2 routing only). Demonstrated this working by adding an interrupt
+   target and supporting test code to the TSP. Also demonstrated non-secure
+   interrupt handling during TSP processing.
+
+Issues resolved since last release
+----------------------------------
+
+-  Now support use of the model parameter ``-C bp.secure_memory=1`` in the Base
+   FVPs (see **New features**).
+
+-  Support for secure world interrupt handling now available (see **New
+   features**).
+
+-  Made enough SRAM savings (see **New features**) to enable the Test Secure-EL1
+   Payload (BL3-2) to execute in Trusted SRAM by default.
+
+-  The tested filesystem used for this release (Linaro AArch64 OpenEmbedded
+   14.04) now correctly reports progress in the console.
+
+-  Improved the Makefile structure to make it easier to separate out parts of
+   the Trusted Firmware for re-use in platform ports. Also, improved target
+   dependency checking.
+
+Known issues
+------------
+
+-  GICv3 support is experimental. The Linux kernel patches to support this are
+   not widely available. There are known issues with GICv3 initialization in
+   the ARM Trusted Firmware.
+
+-  Dynamic image loading is not available yet. The current image loader
+   implementation (used to load BL2 and all subsequent images) has some
+   limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
+   to loading errors, even if the images should theoretically fit in memory.
+
+-  The ARM Trusted Firmware still uses too much on-chip Trusted SRAM. A number
+   of RAM usage enhancements have been identified to rectify this situation.
+
+-  CPU idle does not work on the advertised version of the Foundation FVP.
+   Some FVP fixes are required that are not available externally at the time
+   of writing. This can be worked around by disabling CPU idle in the Linux
+   kernel.
+
+-  Various bugs in ARM Trusted Firmware, UEFI and the Linux kernel have been
+   observed when using Linaro toolchain versions later than 13.11. Although
+   most of these have been fixed, some remain at the time of writing. These
+   mainly seem to relate to a subtle change in the way the compiler converts
+   between 64-bit and 32-bit values (e.g. during casting operations), which
+   reveals previously hidden bugs in client code.
+
+-  The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
+   its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
+
+ARM Trusted Firmware - version 0.3
+==================================
+
+New features
+------------
+
+-  Support for Foundation FVP Version 2.0 added.
+   The documented UEFI configuration disables some devices that are unavailable
+   in the Foundation FVP, including MMC and CLCD. The resultant UEFI binary can
+   be used on the AEMv8 and Cortex-A57-A53 Base FVPs, as well as the Foundation
+   FVP.
+
+   NOTE: The software will not work on Version 1.0 of the Foundation FVP.
+
+-  Enabled third party contributions. Added a new contributing.md containing
+   instructions for how to contribute and updated copyright text in all files
+   to acknowledge contributors.
+
+-  The PSCI CPU\_SUSPEND API has been stabilised to the extent where it can be
+   used for entry into power down states with the following restrictions:
+
+   -  Entry into standby states is not supported.
+   -  The API is only supported on the AEMv8 and Cortex-A57-A53 Base FVPs.
+
+-  The PSCI AFFINITY\_INFO API has undergone limited testing on the Base FVPs to
+   allow experimental use.
+
+-  Required C library and runtime header files are now included locally in ARM
+   Trusted Firmware instead of depending on the toolchain standard include
+   paths. The local implementation has been cleaned up and reduced in scope.
+
+-  Added I/O abstraction framework, primarily to allow generic code to load
+   images in a platform-independent way. The existing image loading code has
+   been reworked to use the new framework. Semi-hosting and NOR flash I/O
+   drivers are provided.
+
+-  Introduced Firmware Image Package (FIP) handling code and tools. A FIP
+   combines multiple firmware images with a Table of Contents (ToC) into a
+   single binary image. The new FIP driver is another type of I/O driver. The
+   Makefile builds a FIP by default and the FVP platform code expects to load a
+   FIP from NOR flash, although some support for image loading using semi-
+   hosting is retained.
+
+   NOTE: Building a FIP by default is a non-backwards-compatible change.
+
+   NOTE: Generic BL2 code now loads a BL3-3 (non-trusted firmware) image into
+   DRAM instead of expecting this to be pre-loaded at a known location. This is
+   also a non-backwards-compatible change.
+
+   NOTE: Some non-trusted firmware (e.g. UEFI) will need to be rebuilt so that
+   it knows the new location to execute from and no longer needs to copy
+   particular code modules to DRAM itself.
+
+-  Reworked BL2 to BL3-1 handover interface. A new composite structure
+   (bl31\_args) holds the superset of information that needs to be passed from
+   BL2 to BL3-1, including information on how to hand over execution control to
+   BL3-2 (if present) and BL3-3 (non-trusted firmware).
+
+-  Added library support for CPU context management, allowing the saving and
+   restoring of:
+
+   -  Shared system registers between Secure-EL1 and EL1.
+   -  VFP registers.
+   -  Essential EL3 system registers.
+
+-  Added a framework for implementing EL3 runtime services. Reworked the PSCI
+   implementation to be one such runtime service.
+
+-  Reworked the exception handling logic, making use of both SP\_EL0 and SP\_EL3
+   stack pointers for determining the type of exception, managing general
+   purpose and system register context on exception entry/exit, and handling
+   SMCs. SMCs are directed to the correct EL3 runtime service.
+
+-  Added support for a Test Secure-EL1 Payload (TSP) and a corresponding
+   Dispatcher (TSPD), which is loaded as an EL3 runtime service. The TSPD
+   implements Secure Monitor functionality such as world switching and
+   EL1 context management, and is responsible for communication with the TSP.
+   NOTE: The TSPD does not yet contain support for secure world interrupts.
+   NOTE: The TSP/TSPD is not built by default.
+
+Issues resolved since last release
+----------------------------------
+
+-  Support has been added for switching context between secure and normal
+   worlds in EL3.
+
+-  PSCI API calls ``AFFINITY_INFO`` & ``PSCI_VERSION`` have now been tested (to
+   a limited extent).
+
+-  The ARM Trusted Firmware build artifacts are now placed in the ``./build``
+   directory and sub-directories instead of being placed in the root of the
+   project.
+
+-  The ARM Trusted Firmware is now free from build warnings. Build warnings
+   are now treated as errors.
+
+-  The ARM Trusted Firmware now provides C library support locally within the
+   project to maintain compatibility between toolchains/systems.
+
+-  The PSCI locking code has been reworked so it no longer takes locks in an
+   incorrect sequence.
+
+-  The RAM-disk method of loading a Linux file-system has been confirmed to
+   work with the ARM Trusted Firmware and Linux kernel version (based on
+   version 3.13) used in this release, for both Foundation and Base FVPs.
+
+Known issues
+------------
+
+The following is a list of issues which are expected to be fixed in the future
+releases of the ARM Trusted Firmware.
+
+-  The TrustZone Address Space Controller (TZC-400) is not being programmed
+   yet. Use of model parameter ``-C bp.secure_memory=1`` is not supported.
+
+-  No support yet for secure world interrupt handling.
+
+-  GICv3 support is experimental. The Linux kernel patches to support this are
+   not widely available. There are known issues with GICv3 initialization in
+   the ARM Trusted Firmware.
+
+-  Dynamic image loading is not available yet. The current image loader
+   implementation (used to load BL2 and all subsequent images) has some
+   limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
+   to loading errors, even if the images should theoretically fit in memory.
+
+-  The ARM Trusted Firmware uses too much on-chip Trusted SRAM. Currently the
+   Test Secure-EL1 Payload (BL3-2) executes in Trusted DRAM since there is not
+   enough SRAM. A number of RAM usage enhancements have been identified to
+   rectify this situation.
+
+-  CPU idle does not work on the advertised version of the Foundation FVP.
+   Some FVP fixes are required that are not available externally at the time
+   of writing.
+
+-  Various bugs in ARM Trusted Firmware, UEFI and the Linux kernel have been
+   observed when using Linaro toolchain versions later than 13.11. Although
+   most of these have been fixed, some remain at the time of writing. These
+   mainly seem to relate to a subtle change in the way the compiler converts
+   between 64-bit and 32-bit values (e.g. during casting operations), which
+   reveals previously hidden bugs in client code.
+
+-  The tested filesystem used for this release (Linaro AArch64 OpenEmbedded
+   14.01) does not report progress correctly in the console. It only seems to
+   produce error output, not standard output. It otherwise appears to function
+   correctly. Other filesystem versions on the same software stack do not
+   exhibit the problem.
+
+-  The Makefile structure doesn't make it easy to separate out parts of the
+   Trusted Firmware for re-use in platform ports, for example if only BL3-1 is
+   required in a platform port. Also, dependency checking in the Makefile is
+   flawed.
+
+-  The firmware design documentation for the Test Secure-EL1 Payload (TSP) and
+   its dispatcher (TSPD) is incomplete. Similarly for the PSCI section.
+
+ARM Trusted Firmware - version 0.2
+==================================
+
+New features
+------------
+
+-  First source release.
+
+-  Code for the PSCI suspend feature is supplied, although this is not enabled
+   by default since there are known issues (see below).
+
+Issues resolved since last release
+----------------------------------
+
+-  The "psci" nodes in the FDTs provided in this release now fully comply
+   with the recommendations made in the PSCI specification.
+
+Known issues
+------------
+
+The following is a list of issues which are expected to be fixed in the future
+releases of the ARM Trusted Firmware.
+
+-  The TrustZone Address Space Controller (TZC-400) is not being programmed
+   yet. Use of model parameter ``-C bp.secure_memory=1`` is not supported.
+
+-  No support yet for secure world interrupt handling or for switching context
+   between secure and normal worlds in EL3.
+
+-  GICv3 support is experimental. The Linux kernel patches to support this are
+   not widely available. There are known issues with GICv3 initialization in
+   the ARM Trusted Firmware.
+
+-  Dynamic image loading is not available yet. The current image loader
+   implementation (used to load BL2 and all subsequent images) has some
+   limitations. Changing BL2 or BL3-1 load addresses in certain ways can lead
+   to loading errors, even if the images should theoretically fit in memory.
+
+-  Although support for PSCI ``CPU_SUSPEND`` is present, it is not yet stable
+   and ready for use.
+
+-  PSCI API calls ``AFFINITY_INFO`` & ``PSCI_VERSION`` are implemented but have not
+   been tested.
+
+-  The ARM Trusted Firmware make files result in all build artifacts being
+   placed in the root of the project. These should be placed in appropriate
+   sub-directories.
+
+-  The compilation of ARM Trusted Firmware is not free from compilation
+   warnings. Some of these warnings have not been investigated yet so they
+   could mask real bugs.
+
+-  The ARM Trusted Firmware currently uses toolchain/system include files like
+   stdio.h. It should provide versions of these within the project to maintain
+   compatibility between toolchains/systems.
+
+-  The PSCI code takes some locks in an incorrect sequence. This may cause
+   problems with suspend and hotplug in certain conditions.
+
+-  The Linux kernel used in this release is based on version 3.12-rc4. Using
+   this kernel with the ARM Trusted Firmware fails to start the file-system as
+   a RAM-disk. It fails to execute user-space ``init`` from the RAM-disk. As an
+   alternative, the VirtioBlock mechanism can be used to provide a file-system
+   to the kernel.
+
+--------------
+
+*Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.*
+
+.. _PSCI Integration Guide: psci-lib-integration-guide.rst
+.. _Developer Certificate of Origin: ../dco.txt
+.. _Contribution Guide: ../contributing.rst
+.. _Authentication framework: auth-framework.rst
+.. _Firmware Update: firmware-update.rst
+.. _TF Reset Design: reset-design.rst
+.. _Power Domain Topology Design: psci-pd-tree.rst
+.. _TF wiki on GitHub: https://github.com/ARM-software/arm-trusted-firmware/wiki/ARM-Trusted-Firmware-Image-Terminology
+.. _Authentication Framework: auth-framework.rst
+.. _OP-TEE Dispatcher: optee-dispatcher.rst
+.. _tf-issue#501: https://github.com/ARM-software/tf-issues/issues/501
+.. _PR#1002: https://github.com/ARM-software/arm-trusted-firmware/pull/1002#issuecomment-312650193
diff --git a/docs/cpu-specific-build-macros.md b/docs/cpu-specific-build-macros.md
deleted file mode 100644
index 2368fd2..0000000
--- a/docs/cpu-specific-build-macros.md
+++ /dev/null
@@ -1,70 +0,0 @@
-ARM CPU Specific Build Macros
-=============================
-
-Contents
---------
-
-1.  [Introduction](#1--introduction)
-2.  [CPU Errata Workarounds](#2--cpu-errata-workarounds)
-3.  [CPU Specific optimizations](#3--cpu-specific-optimizations)
-
-
-1.  Introduction
-----------------
-
-This document describes the various build options present in the CPU specific
-operations framework to enable errata workarounds and to enable optimizations
-for a specific CPU on a platform.
-
-2.  CPU Errata Workarounds
---------------------------
-
-ARM Trusted Firmware exports a series of build flags which control the
-errata workarounds that are applied to each CPU by the reset handler. The
-errata details can be found in the CPU specific errata documents published
-by ARM. The errata workarounds are implemented for a particular revision
-or a set of processor revisions. This is checked by reset handler at runtime.
-Each errata workaround is identified by its `ID` as specified in the processor's
-errata notice document. The format of the define used to enable/disable the
-errata is `ERRATA_<Processor name>_<ID>` where the `Processor name`
-is either `A57` for the `Cortex_A57` CPU or `A53` for `Cortex_A53` CPU.
-
-All workarounds are disabled by default. The platform is reponsible for
-enabling these workarounds according to its requirement by defining the
-errata workaround build flags in the platform specific makefile. In case
-these workarounds are enabled for the wrong CPU revision then the errata
-workaround is not applied. In the DEBUG build, this is indicated by
-printing a warning to the crash console.
-
-In the current implementation, a platform which has more than 1 variant
-with different revisions of a processor has no runtime mechanism available
-for it to specify which errata workarounds should be enabled or not.
-
-The value of the build flags are 0 by default, that is, disabled. Any other
-value will enable it.
-
-For Cortex-A57, following errata build flags are defined :
-
-*   `ERRATA_A57_806969`: This applies errata 806969 workaround to Cortex-A57
-     CPU. This needs to be enabled only for revision r0p0 of the CPU.
-
-*   `ERRATA_A57_813420`: This applies errata 813420 workaround to Cortex-A57
-     CPU. This needs to be enabled only for revision r0p0 of the CPU.
-
-3.  CPU Specific optimizations
-------------------------------
-
-This section describes some of the optimizations allowed by the CPU micro
-architecture that can be enabled by the platform as desired.
-
-*    `SKIP_A57_L1_FLUSH_PWR_DWN`: This flag enables an optimization in the
-     Cortex-A57 cluster power down sequence by not flushing the Level 1 data
-     cache. The L1 data cache and the L2 unified cache are inclusive. A flush
-     of the L2 by set/way flushes any dirty lines from the L1 as well. This
-     is a known safe deviation from the Cortex-A57 TRM defined power down
-     sequence. Each Cortex-A57 based platform must make its own decision on
-     whether to use the optimization.
-
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2014, ARM Limited and Contributors. All rights reserved._
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
new file mode 100644
index 0000000..f74b459
--- /dev/null
+++ b/docs/cpu-specific-build-macros.rst
@@ -0,0 +1,149 @@
+ARM CPU Specific Build Macros
+=============================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This document describes the various build options present in the CPU specific
+operations framework to enable errata workarounds and to enable optimizations
+for a specific CPU on a platform.
+
+CPU Errata Workarounds
+----------------------
+
+ARM Trusted Firmware exports a series of build flags which control the
+errata workarounds that are applied to each CPU by the reset handler. The
+errata details can be found in the CPU specific errata documents published
+by ARM:
+
+-  `Cortex-A53 MPCore Software Developers Errata Notice`_
+-  `Cortex-A57 MPCore Software Developers Errata Notice`_
+-  `Cortex-A72 MPCore Software Developers Errata Notice`_
+
+The errata workarounds are implemented for a particular revision or a set of
+processor revisions. This is checked by the reset handler at runtime. Each
+errata workaround is identified by its ``ID`` as specified in the processor's
+errata notice document. The format of the define used to enable/disable the
+errata workaround is ``ERRATA_<Processor name>_<ID>``, where the ``Processor name``
+is for example ``A57`` for the ``Cortex_A57`` CPU.
+
+Refer to the section *CPU errata status reporting* in
+`Firmware Design guide`_ for information on how to write errata workaround
+functions.
+
+All workarounds are disabled by default. The platform is responsible for
+enabling these workarounds according to its requirements by defining the
+errata workaround build flags in the platform specific makefile. If a
+workaround is enabled for the wrong CPU revision, it is not applied. In the
+DEBUG build, this is indicated by printing a warning to the crash console.
+
+In the current implementation, a platform which has more than one variant
+with different revisions of a processor has no runtime mechanism available
+to specify which errata workarounds should be enabled.
+
+The value of each build flag is 0 by default, that is, disabled. Any other
+value will enable the workaround.
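+
+A minimal sketch of how a platform-specific makefile might enable some of
+these workarounds (the flags chosen below are examples only; each platform
+must select the flags relevant to its CPU revisions):
+
+::
+
+    ERRATA_A53_826319 := 1
+    ERRATA_A57_806969 := 1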
+
+For Cortex-A53, the following errata build flags are defined:
+
+-  ``ERRATA_A53_826319``: This applies errata 826319 workaround to Cortex-A53
+   CPU. This needs to be enabled only for revision <= r0p2 of the CPU.
+
+-  ``ERRATA_A53_835769``: This applies erratum 835769 workaround at compile and
+   link time to Cortex-A53 CPU. This needs to be enabled for some variants of
+   revision <= r0p4. This workaround can lead the linker to create ``*.stub``
+   sections.
+
+-  ``ERRATA_A53_836870``: This applies errata 836870 workaround to Cortex-A53
+   CPU. This needs to be enabled only for revision <= r0p3 of the CPU. From
+   r0p4 and onwards, this errata is enabled by default in hardware.
+
+-  ``ERRATA_A53_843419``: This applies erratum 843419 workaround at link time
+   to Cortex-A53 CPU.  This needs to be enabled for some variants of revision
+   <= r0p4. This workaround can lead the linker to emit ``*.stub`` sections
+   which are 4kB aligned.
+
+-  ``ERRATA_A53_855873``: This applies errata 855873 workaround to Cortex-A53
+   CPUs. Though the erratum is present in every revision of the CPU,
+   this workaround is only applied to CPUs from r0p3 onwards, which feature
+   a chicken bit in CPUACTLR\_EL1 to enable a hardware workaround.
+   Earlier revisions of the CPU have other errata which require the same
+   workaround in software, so they should be covered anyway.
+
+For Cortex-A57, the following errata build flags are defined:
+
+-  ``ERRATA_A57_806969``: This applies errata 806969 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+-  ``ERRATA_A57_813419``: This applies errata 813419 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+-  ``ERRATA_A57_813420``: This applies errata 813420 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision r0p0 of the CPU.
+
+-  ``ERRATA_A57_826974``: This applies errata 826974 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p1 of the CPU.
+
+-  ``ERRATA_A57_826977``: This applies errata 826977 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p1 of the CPU.
+
+-  ``ERRATA_A57_828024``: This applies errata 828024 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p1 of the CPU.
+
+-  ``ERRATA_A57_829520``: This applies errata 829520 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p2 of the CPU.
+
+-  ``ERRATA_A57_833471``: This applies errata 833471 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p2 of the CPU.
+
+-  ``ERRATA_A57_859972``: This applies errata 859972 workaround to Cortex-A57
+   CPU. This needs to be enabled only for revision <= r1p3 of the CPU.
+
+For Cortex-A72, the following errata build flags are defined:
+
+-  ``ERRATA_A72_859971``: This applies errata 859971 workaround to Cortex-A72
+   CPU. This needs to be enabled only for revision <= r0p3 of the CPU.
+
+CPU Specific optimizations
+--------------------------
+
+This section describes some of the optimizations allowed by the CPU micro
+architecture that can be enabled by the platform as desired.
+
+-  ``SKIP_A57_L1_FLUSH_PWR_DWN``: This flag enables an optimization in the
+   Cortex-A57 cluster power down sequence by not flushing the Level 1 data
+   cache. The L1 data cache and the L2 unified cache are inclusive. A flush
+   of the L2 by set/way flushes any dirty lines from the L1 as well. This
+   is a known safe deviation from the Cortex-A57 TRM defined power down
+   sequence. Each Cortex-A57 based platform must make its own decision on
+   whether to use the optimization.
+
+-  ``A53_DISABLE_NON_TEMPORAL_HINT``: This flag disables the cache non-temporal
+   hint. The LDNP/STNP instructions as implemented on Cortex-A53 do not behave
+   in a way most programmers expect, and will most probably result in a
+   significant speed degradation to any code that employs them. The ARMv8-A
+   architecture (see ARM DDI 0487A.h, section D3.4.3) allows cores to ignore
+   the non-temporal hint and treat LDNP/STNP as LDP/STP instead. Enabling this
+   flag enforces this behaviour. This needs to be enabled only for revisions
+   <= r0p3 of the CPU and is enabled by default.
+
+-  ``A57_DISABLE_NON_TEMPORAL_HINT``: This flag has the same behaviour as
+   ``A53_DISABLE_NON_TEMPORAL_HINT`` but for Cortex-A57. This needs to be
+   enabled only for revisions <= r1p2 of the CPU and is enabled by default,
+   as recommended in section "4.7 Non-Temporal Loads/Stores" of the
+   `Cortex-A57 Software Optimization Guide`_.
+
+--------------
+
+*Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
+
+.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
+.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
+.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
+.. _Firmware Design guide: firmware-design.rst
+.. _Cortex-A57 Software Optimization Guide: http://infocenter.arm.com/help/topic/com.arm.doc.uan0015b/Cortex_A57_Software_Optimization_Guide_external.pdf
diff --git a/docs/diagrams/Makefile b/docs/diagrams/Makefile
new file mode 100644
index 0000000..de7d8f3
--- /dev/null
+++ b/docs/diagrams/Makefile
@@ -0,0 +1,74 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+# This Makefile generates the image files used in the ARM Trusted Firmware
+# document from the dia file.
+#
+# The PNG files in the present directory have been generated using Dia version
+# 0.97.2, which can be obtained from https://wiki.gnome.org/Apps/Dia/Download
+#
+
+# generate_image uses the dia tool to generate an image from a dia file
+#    $(1) = layers
+#    $(2) = image file name
+#    $(3) = image file format
+#    $(4) = additional options
+#    $(5) = dia source file
+define generate_image
+	dia --show-layers=$(1) --filter=$(3) --export=$(2) $(4) $(5)
+endef
+
+RESET_DIA							= reset_code_flow.dia
+RESET_PNGS							=		\
+		default_reset_code.png 				\
+		reset_code_no_cpu_check.png			\
+		reset_code_no_boot_type_check.png 	\
+		reset_code_no_checks.png			\
+
+# The $(RESET_DIA) file is organized in several layers.
+# Each image is generated by combining and exporting the appropriate set of
+# layers.
+default_reset_code_layers			= "Frontground,Background,cpu_type_check,boot_type_check"
+reset_code_no_cpu_check_layers		= "Frontground,Background,no_cpu_type_check,boot_type_check"
+reset_code_no_boot_type_check_layers= "Frontground,Background,cpu_type_check,no_boot_type_check"
+reset_code_no_checks_layers			= "Frontground,Background,no_cpu_type_check,no_boot_type_check"
+
+default_reset_code_opts          	=
+reset_code_no_cpu_check_opts     	=
+reset_code_no_boot_type_check_opts	=
+reset_code_no_checks_opts			=
+
+INT_DIA								= int_handling.dia
+INT_PNGS							=		\
+		sec-int-handling.png				\
+		non-sec-int-handling.png
+
+# The $(INT_DIA) file is organized in several layers.
+# Each image is generated by combining and exporting the appropriate set of
+# layers.
+non-sec-int-handling_layers			= "non_sec_int_bg,legend,non_sec_int_note,non_sec_int_handling"
+sec-int-handling_layers				= "sec_int_bg,legend,sec_int_note,sec_int_handling"
+
+non-sec-int-handling_opts			= --size=1692x
+sec-int-handling_opts				= --size=1570x
+
+XLAT_DIA 							= xlat_align.dia
+XLAT_PNG 							= xlat_align.png
+
+xlat_align_layers					= "bg,translations"
+xlat_align_opts						=
+
+all:$(RESET_PNGS) $(INT_PNGS) $(XLAT_PNG)
+
+$(RESET_PNGS):$(RESET_DIA)
+	$(call generate_image,$($(patsubst %.png,%_layers,$@)),$@,png,$($(patsubst %.png,%_opts,$@)),$<)
+
+$(INT_PNGS):$(INT_DIA)
+	$(call generate_image,$($(patsubst %.png,%_layers,$@)),$@,png,$($(patsubst %.png,%_opts,$@)),$<)
+
+$(XLAT_PNG):$(XLAT_DIA)
+	$(call generate_image,$($(patsubst %.png,%_layers,$@)),$(patsubst %.png,%.svg,$@),svg,$($(patsubst %.png,%_opts,$@)),$<)
+	inkscape -z $(patsubst %.png,%.svg,$@) -e $@ -d 45
diff --git a/docs/diagrams/default_reset_code.png b/docs/diagrams/default_reset_code.png
new file mode 100644
index 0000000..d8675e4
--- /dev/null
+++ b/docs/diagrams/default_reset_code.png
Binary files differ
diff --git a/docs/diagrams/fwu_flow.png b/docs/diagrams/fwu_flow.png
new file mode 100644
index 0000000..534095f
--- /dev/null
+++ b/docs/diagrams/fwu_flow.png
Binary files differ
diff --git a/docs/diagrams/fwu_states.png b/docs/diagrams/fwu_states.png
new file mode 100644
index 0000000..fda4d8f
--- /dev/null
+++ b/docs/diagrams/fwu_states.png
Binary files differ
diff --git a/docs/diagrams/int_handling.dia b/docs/diagrams/int_handling.dia
new file mode 100644
index 0000000..12aa186
--- /dev/null
+++ b/docs/diagrams/int_handling.dia
Binary files differ
diff --git a/docs/diagrams/non-sec-int-handling.png b/docs/diagrams/non-sec-int-handling.png
index 1a5f629..64082c9 100644
--- a/docs/diagrams/non-sec-int-handling.png
+++ b/docs/diagrams/non-sec-int-handling.png
Binary files differ
diff --git a/docs/diagrams/psci-suspend-sequence.png b/docs/diagrams/psci-suspend-sequence.png
new file mode 100644
index 0000000..1703ea6
--- /dev/null
+++ b/docs/diagrams/psci-suspend-sequence.png
Binary files differ
diff --git a/docs/diagrams/reset_code_flow.dia b/docs/diagrams/reset_code_flow.dia
new file mode 100644
index 0000000..133c9cf
--- /dev/null
+++ b/docs/diagrams/reset_code_flow.dia
Binary files differ
diff --git a/docs/diagrams/reset_code_no_boot_type_check.png b/docs/diagrams/reset_code_no_boot_type_check.png
new file mode 100644
index 0000000..23e865f
--- /dev/null
+++ b/docs/diagrams/reset_code_no_boot_type_check.png
Binary files differ
diff --git a/docs/diagrams/reset_code_no_checks.png b/docs/diagrams/reset_code_no_checks.png
new file mode 100644
index 0000000..26a179b
--- /dev/null
+++ b/docs/diagrams/reset_code_no_checks.png
Binary files differ
diff --git a/docs/diagrams/reset_code_no_cpu_check.png b/docs/diagrams/reset_code_no_cpu_check.png
new file mode 100644
index 0000000..4150dbe
--- /dev/null
+++ b/docs/diagrams/reset_code_no_cpu_check.png
Binary files differ
diff --git a/docs/diagrams/sec-int-handling.png b/docs/diagrams/sec-int-handling.png
index 2ebbca4..fa5c340 100644
--- a/docs/diagrams/sec-int-handling.png
+++ b/docs/diagrams/sec-int-handling.png
Binary files differ
diff --git a/docs/diagrams/xlat_align.dia b/docs/diagrams/xlat_align.dia
new file mode 100644
index 0000000..bd88c0c
--- /dev/null
+++ b/docs/diagrams/xlat_align.dia
Binary files differ
diff --git a/docs/diagrams/xlat_align.png b/docs/diagrams/xlat_align.png
new file mode 100644
index 0000000..cffd3c1
--- /dev/null
+++ b/docs/diagrams/xlat_align.png
Binary files differ
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
deleted file mode 100644
index 72525bd..0000000
--- a/docs/firmware-design.md
+++ /dev/null
@@ -1,1807 +0,0 @@
-ARM Trusted Firmware Design
-===========================
-
-Contents :
-
-1.  [Introduction](#1--introduction)
-2.  [Cold boot](#2--cold-boot)
-3.  [EL3 runtime services framework](#3--el3-runtime-services-framework)
-4.  [Power State Coordination Interface](#4--power-state-coordination-interface)
-5.  [Secure-EL1 Payloads and Dispatchers](#5--secure-el1-payloads-and-dispatchers)
-6.  [Crash Reporting in BL3-1](#6--crash-reporting-in-bl3-1)
-7.  [Guidelines for Reset Handlers](#7--guidelines-for-reset-handlers)
-8.  [CPU specific operations framework](#8--cpu-specific-operations-framework)
-9.  [Memory layout of BL images](#9-memory-layout-of-bl-images)
-10. [Firmware Image Package (FIP)](#10--firmware-image-package-fip)
-11. [Use of coherent memory in Trusted Firmware](#11--use-of-coherent-memory-in-trusted-firmware)
-12. [Code Structure](#12--code-structure)
-13. [References](#13--references)
-
-
-1.  Introduction
-----------------
-
-The ARM Trusted Firmware implements a subset of the Trusted Board Boot
-Requirements (TBBR) Platform Design Document (PDD) [1] for ARM reference
-platforms. The TBB sequence starts when the platform is powered on and runs up
-to the stage where it hands-off control to firmware running in the normal
-world in DRAM. This is the cold boot path.
-
-The ARM Trusted Firmware also implements the Power State Coordination Interface
-([PSCI]) PDD [2] as a runtime service. PSCI is the interface from normal world
-software to firmware implementing power management use-cases (for example,
-secondary CPU boot, hotplug and idle). Normal world software can access ARM
-Trusted Firmware runtime services via the ARM SMC (Secure Monitor Call)
-instruction. The SMC instruction must be used as mandated by the [SMC Calling
-Convention PDD][SMCCC] [3].
-
-The ARM Trusted Firmware implements a framework for configuring and managing
-interrupts generated in either security state. The details of the interrupt
-management framework and its design can be found in [ARM Trusted
-Firmware Interrupt Management Design guide][INTRG] [4].
-
-2.  Cold boot
--------------
-
-The cold boot path starts when the platform is physically turned on. One of
-the CPUs released from reset is chosen as the primary CPU, and the remaining
-CPUs are considered secondary CPUs. The primary CPU is chosen through
-platform-specific means. The cold boot path is mainly executed by the primary
-CPU, other than essential CPU initialization executed by all CPUs. The
-secondary CPUs are kept in a safe platform-specific state until the primary
-CPU has performed enough initialization to boot them.
-
-The cold boot path in this implementation of the ARM Trusted Firmware is divided
-into five steps (in order of execution):
-
-*   Boot Loader stage 1 (BL1) _AP Trusted ROM_
-*   Boot Loader stage 2 (BL2) _Trusted Boot Firmware_
-*   Boot Loader stage 3-1 (BL3-1) _EL3 Runtime Firmware_
-*   Boot Loader stage 3-2 (BL3-2) _Secure-EL1 Payload_ (optional)
-*   Boot Loader stage 3-3 (BL3-3) _Non-trusted Firmware_
-
-ARM development platforms (Fixed Virtual Platforms (FVPs) and Juno) implement a
-combination of the following types of memory regions. Each bootloader stage uses
-one or more of these memory regions.
-
-*   Regions accessible from both non-secure and secure states. For example,
-    non-trusted SRAM, ROM and DRAM.
-*   Regions accessible from only the secure state. For example, trusted SRAM and
-    ROM. The FVPs also implement the trusted DRAM which is statically
-    configured. Additionally, the Base FVPs and Juno development platform
-    configure the TrustZone Controller (TZC) to create a region in the DRAM
-    which is accessible only from the secure state.
-
-
-The sections below provide the following details:
-
-*   initialization and execution of the first three stages during cold boot
-*   specification of the BL3-1 entrypoint requirements for use by alternative
-    Trusted Boot Firmware in place of the provided BL1 and BL2
-*   changes in BL3-1 behavior when using the `RESET_TO_BL31` option which
-    allows BL3-1 to run without BL1 and BL2
-
-
-### BL1
-
-This stage begins execution from the platform's reset vector at EL3. The reset
-address is platform dependent but it is usually located in a Trusted ROM area.
-The BL1 data section is copied to trusted SRAM at runtime.
-
-On the ARM FVP port, BL1 code starts execution from the reset vector at address
-`0x00000000` (trusted ROM). The BL1 data section is copied to the start of
-trusted SRAM at address `0x04000000`.
-
-On the Juno ARM development platform port, BL1 code starts execution at
-`0x0BEC0000` (FLASH). The BL1 data section is copied to trusted SRAM at address
-`0x04001000`.
-
-The functionality implemented by this stage is as follows.
-
-#### Determination of boot path
-
-Whenever a CPU is released from reset, BL1 needs to distinguish between a warm
-boot and a cold boot. This is done using platform-specific mechanisms (see the
-`platform_get_entrypoint()` function in the [Porting Guide]). In the case of a
-warm boot, a CPU is expected to continue execution from a separate
-entrypoint. In the case of a cold boot, the secondary CPUs are placed in a safe
-platform-specific state (see the `plat_secondary_cold_boot_setup()` function in
-the [Porting Guide]) while the primary CPU executes the remaining cold boot path
-as described in the following sections.
-
-#### Architectural initialization
-
-BL1 performs minimal architectural initialization as follows.
-
-*   Exception vectors
-
-    BL1 sets up simple exception vectors for both synchronous and asynchronous
-    exceptions. The default behavior upon receiving an exception is to populate
-    a status code in the general purpose register `X0` and call the
-    `plat_report_exception()` function (see the [Porting Guide]). The status
-    code is one of:
-
-        0x0 : Synchronous exception from Current EL with SP_EL0
-        0x1 : IRQ exception from Current EL with SP_EL0
-        0x2 : FIQ exception from Current EL with SP_EL0
-        0x3 : System Error exception from Current EL with SP_EL0
-        0x4 : Synchronous exception from Current EL with SP_ELx
-        0x5 : IRQ exception from Current EL with SP_ELx
-        0x6 : FIQ exception from Current EL with SP_ELx
-        0x7 : System Error exception from Current EL with SP_ELx
-        0x8 : Synchronous exception from Lower EL using aarch64
-        0x9 : IRQ exception from Lower EL using aarch64
-        0xa : FIQ exception from Lower EL using aarch64
-        0xb : System Error exception from Lower EL using aarch64
-        0xc : Synchronous exception from Lower EL using aarch32
-        0xd : IRQ exception from Lower EL using aarch32
-        0xe : FIQ exception from Lower EL using aarch32
-        0xf : System Error exception from Lower EL using aarch32
-
-    The `plat_report_exception()` implementation on the ARM FVP port programs
-    the Versatile Express System LED register in the following format to
-    indicate the occurrence of an unexpected exception:
-
-        SYS_LED[0]   - Security state (Secure=0/Non-Secure=1)
-        SYS_LED[2:1] - Exception Level (EL3=0x3, EL2=0x2, EL1=0x1, EL0=0x0)
-        SYS_LED[7:3] - Exception Class (Sync/Async & origin). This is the value
-                       of the status code
-
-    A write to the LED register reflects in the System LEDs (S6LED0..7) in the
-    CLCD window of the FVP.
-
-    BL1 does not expect to receive any exceptions other than the SMC exception.
-    For the latter, BL1 installs a simple stub. The stub expects to receive
-    only a single type of SMC (determined by its function ID in the general
-    purpose register `X0`). This SMC is raised by BL2 to make BL1 pass control
-    to BL3-1 (loaded by BL2) at EL3. Any other SMC leads to an assertion
-    failure.
-
-*   CPU initialization
-
-    BL1 calls the `reset_handler()` function which in turn calls the CPU
-    specific reset handler function (see the section: "CPU specific operations
-    framework").
-
-*   MMU setup
-
-    BL1 sets up EL3 memory translation by creating page tables to cover the
-    first 4GB of physical address space. This covers all the memories and
-    peripherals needed by BL1.
-
-*   Control register setup
-    -   `SCTLR_EL3`. Instruction cache is enabled by setting the `SCTLR_EL3.I`
-        bit. Alignment and stack alignment checking is enabled by setting the
-        `SCTLR_EL3.A` and `SCTLR_EL3.SA` bits. Exception endianness is set to
-        little-endian by clearing the `SCTLR_EL3.EE` bit.
-
-    -   `SCR_EL3`. The register width of the next lower exception level is set to
-        AArch64 by setting the `SCR.RW` bit.
-
-    -   `CPTR_EL3`. Accesses to the `CPACR_EL1` register from EL1 or EL2, or the
-        `CPTR_EL2` register from EL2 are configured to not trap to EL3 by
-        clearing the `CPTR_EL3.TCPAC` bit. Access to the trace functionality is
-        configured not to trap to EL3 by clearing the `CPTR_EL3.TTA` bit.
-        Instructions that access the registers associated with Floating Point
-        and Advanced SIMD execution are configured to not trap to EL3 by
-        clearing the `CPTR_EL3.TFP` bit.
-
-#### Platform initialization
-
-BL1 enables issuing of snoop and DVM (Distributed Virtual Memory) requests from
-the CCI-400 slave interface corresponding to the cluster that includes the
-primary CPU. BL1 also initializes UART0 (PL011 console), which enables access to
-the `printf` family of functions in BL1.
-
-#### BL2 image load and execution
-
-BL1 execution continues as follows:
-
-1.  BL1 determines the amount of free trusted SRAM memory available by
-    calculating the extent of its own data section, which also resides in
-    trusted SRAM. BL1 loads a BL2 raw binary image from platform storage, at a
-    platform-specific base address. If the BL2 image file is not present or if
-    there is not enough free trusted SRAM the following error message is
-    printed:
-
-        "Failed to load boot loader stage 2 (BL2) firmware."
-
-    If the load is successful, BL1 updates the limits of the remaining free
-    trusted SRAM. It also populates information about the amount of trusted
-    SRAM used by the BL2 image. The exact load location of the image is
-    provided as a base address in the platform header. Further description of
-    the memory layout can be found later in this document.
-
-2.  BL1 prints the following string from the primary CPU to indicate successful
-    execution of the BL1 stage:
-
-        "Booting trusted firmware boot loader stage 1"
-
-3.  BL1 passes control to the BL2 image at Secure EL1, starting from its load
-    address.
-
-4.  BL1 also passes information about the amount of trusted SRAM used and
-    available for use. This information is populated at a platform-specific
-    memory address.
-
-
-### BL2
-
-BL1 loads and passes control to BL2 at Secure-EL1. BL2 is linked against and
-loaded at a platform-specific base address (more information can be found later
-in this document). The functionality implemented by BL2 is as follows.
-
-#### Architectural initialization
-
-BL2 performs minimal architectural initialization required for subsequent
-stages of the ARM Trusted Firmware and normal world software. It sets up
-Secure EL1 memory translation by creating page tables to address the first 4GB
-of the physical address space in a similar way to BL1. EL1 and EL0 are given
-access to Floating Point & Advanced SIMD registers by setting the `CPACR.FPEN`
-bits.
-
-#### Platform initialization
-
-BL2 copies the information regarding the trusted SRAM populated by BL1 using a
-platform-specific mechanism. It calculates the limits of DRAM (main memory)
-to determine whether there is enough space to load the BL3-3 image. A platform
-defined base address is used to specify the load address for the BL3-1 image.
-It also defines the extents of memory available for use by the BL3-2 image.
-BL2 also initializes UART0 (PL011 console), which enables access to the
-`printf` family of functions in BL2. Platform security is initialized to allow
-access to controlled components. The storage abstraction layer, which is used
-to load further bootloader images, is initialized.
-
-#### BL3-0 (System Control Processor Firmware) image load
-
-Some systems have a separate System Control Processor (SCP) for power, clock,
-reset and system control. BL2 loads the optional BL3-0 image from platform
-storage into a platform-specific region of secure memory. The subsequent
-handling of BL3-0 is platform specific. For example, on the Juno ARM development
-platform port the image is transferred into SCP memory using the SCPI protocol
-after being loaded in the trusted SRAM memory at address `0x04009000`. The SCP
-executes BL3-0 and signals to the Application Processor (AP) for BL2 execution
-to continue.
-
-#### BL3-1 (EL3 Runtime Firmware) image load
-
-BL2 loads the BL3-1 image from platform storage into a platform-specific address
-in trusted SRAM. If there is not enough memory to load the image, or the image
-is missing, this leads to an assertion failure. If the BL3-1 image loads successfully,
-BL2 updates the amount of trusted SRAM used and available for use by BL3-1.
-This information is populated at a platform-specific memory address.
-
-#### BL3-2 (Secure-EL1 Payload) image load
-
-BL2 loads the optional BL3-2 image from platform storage into a platform-
-specific region of secure memory. The image executes in the secure world. BL2
-relies on BL3-1 to pass control to the BL3-2 image, if present. Hence, BL2
-populates a platform-specific area of memory with the entrypoint/load-address
-of the BL3-2 image. The value of the Saved Program Status Register (`SPSR`)
-for entry into BL3-2 is not determined by BL2; it is initialized by the
-Secure-EL1 Payload Dispatcher (see later) within BL3-1, which is responsible for
-managing interaction with BL3-2. This information is passed to BL3-1.
-
-#### BL3-3 (Non-trusted Firmware) image load
-
-BL2 loads the BL3-3 image (e.g. UEFI or other test or boot software) from
-platform storage into non-secure memory as defined by the platform.
-
-BL2 relies on BL3-1 to pass control to BL3-3 once secure state initialization is
-complete. Hence, BL2 populates a platform-specific area of memory with the
-entrypoint and Saved Program Status Register (`SPSR`) of the normal world
-software image. The entrypoint is the load address of the BL3-3 image. The
-`SPSR` is determined as specified in Section 5.13 of the [PSCI PDD] [PSCI]. This
-information is passed to BL3-1.
-
-#### BL3-1 (EL3 Runtime Firmware) execution
-
-BL2 execution continues as follows:
-
-1.  BL2 passes control back to BL1 by raising an SMC, providing BL1 with the
-    BL3-1 entrypoint. The exception is handled by the SMC exception handler
-    installed by BL1.
-
-2.  BL1 turns off the MMU and flushes the caches. It clears the
-    `SCTLR_EL3.M/I/C` bits, flushes the data cache to the point of coherency
-    and invalidates the TLBs.
-
-3.  BL1 passes control to BL3-1 at the specified entrypoint at EL3.
-
-
-### BL3-1
-
-The image for this stage is loaded by BL2 and BL1 passes control to BL3-1 at
-EL3. BL3-1 executes solely in trusted SRAM. BL3-1 is linked against and
-loaded at a platform-specific base address (more information can be found later
-in this document). The functionality implemented by BL3-1 is as follows.
-
-#### Architectural initialization
-
-Currently, BL3-1 performs a similar architectural initialization to BL1 as
-far as system register settings are concerned. Since BL1 code resides in ROM,
-architectural initialization in BL3-1 allows override of any previous
-initialization done by BL1. BL3-1 creates page tables to address the first
-4GB of physical address space and initializes the MMU accordingly. It initializes
-a buffer of frequently used pointers, called the per-CPU pointer cache, in memory
-for faster access. Currently the per-CPU pointer cache contains only the pointer
-to the crash stack. It then replaces the exception vectors populated by BL1 with its
-own. BL3-1 exception vectors implement more elaborate support for
-handling SMCs since this is the only mechanism to access the runtime services
-implemented by BL3-1 (PSCI for example). BL3-1 checks each SMC for validity as
-specified by the [SMC calling convention PDD][SMCCC] before passing control to
-the required SMC handler routine. BL3-1 programs the `CNTFRQ_EL0` register with
-the clock frequency of the system counter, which is provided by the platform.
-
-#### Platform initialization
-
-BL3-1 performs detailed platform initialization, which enables normal world
-software to function correctly. It also retrieves entrypoint information for
-the BL3-3 image loaded by BL2 from the platform defined memory address populated
-by BL2. BL3-1 also initializes UART0 (PL011 console), which enables
-access to the `printf` family of functions in BL3-1.  It enables the system
-level implementation of the generic timer through the memory mapped interface.
-
-*   GICv2 initialization:
-
-    -   Enable group0 interrupts in the GIC CPU interface.
-    -   Configure group0 interrupts to be asserted as FIQs.
-    -   Disable the legacy interrupt bypass mechanism.
-    -   Configure the priority mask register to allow interrupts of all
-        priorities to be signaled to the CPU interface.
-    -   Mark SGIs 8-15, the secure physical timer interrupt (#29) and the
-        trusted watchdog interrupt (#56) as group0 (secure).
-    -   Target the trusted watchdog interrupt to CPU0.
-    -   Enable these group0 interrupts in the GIC distributor.
-    -   Configure all other interrupts as group1 (non-secure).
-    -   Enable signaling of group0 interrupts in the GIC distributor.
-
-*   GICv3 initialization:
-
-    If a GICv3 implementation is available in the platform, BL3-1 initializes
-    the GICv3 in GICv2 emulation mode with settings as described for GICv2
-    above.
-
-*   Power management initialization:
-
-    BL3-1 implements a state machine to track CPU and cluster state. The state
-    can be one of `OFF`, `ON_PENDING`, `SUSPEND` or `ON`. All secondary CPUs are
-    initially in the `OFF` state. The cluster that the primary CPU belongs to is
-    `ON`; any other cluster is `OFF`. BL3-1 initializes the data structures that
-    implement the state machine, including the locks that protect them. BL3-1
-    accesses the state of a CPU or cluster immediately after reset and before
-    the data cache is enabled in the warm boot path. It is not currently
-    possible to use 'exclusive' based spinlocks, therefore BL3-1 uses locks
-    based on Lamport's Bakery algorithm instead. BL3-1 allocates these locks in
-    device memory by default.
-
-*   Runtime services initialization:
-
-    The runtime service framework and its initialization is described in the
-    "EL3 runtime services framework" section below.
-
-    Details about the PSCI service are provided in the "Power State Coordination
-    Interface" section below.
-
-*   BL3-2 (Secure-EL1 Payload) image initialization
-
-    If a BL3-2 image is present then there must be a matching Secure-EL1 Payload
-    Dispatcher (SPD) service (see later for details). During initialization
-    that service  must register a function to carry out initialization of BL3-2
-    once the runtime services are fully initialized. BL3-1 invokes such a
-    registered function to initialize BL3-2 before running BL3-3.
-
-    Details on BL3-2 initialization and the SPD's role are described in the
-    "Secure-EL1 Payloads and Dispatchers" section below.
-
-*   BL3-3 (Non-trusted Firmware) execution
-
-    BL3-1 initializes the EL2 or EL1 processor context for normal-world cold
-    boot, ensuring that no secure state information finds its way into the
-    non-secure execution state. BL3-1 uses the entrypoint information provided
-    by BL2 to jump to the Non-trusted firmware image (BL3-3) at the highest
-    available Exception Level (EL2 if available, otherwise EL1).
-
-
-### Using alternative Trusted Boot Firmware in place of BL1 and BL2
-
-Some platforms have existing implementations of Trusted Boot Firmware that
-would like to use ARM Trusted Firmware BL3-1 for the EL3 Runtime Firmware. To
-enable this firmware architecture it is important to provide a fully documented
-and stable interface between the Trusted Boot Firmware and BL3-1.
-
-Future changes to the BL3-1 interface will be done in a backwards compatible
-way, and this enables these firmware components to be independently enhanced/
-updated to develop and exploit new functionality.
-
-#### Required CPU state when calling `bl31_entrypoint()` during cold boot
-
-This function must only be called by the primary CPU; if it is called by any
-other CPU the firmware will abort.
-
-On entry to this function the calling primary CPU must be executing in AArch64
-EL3, with little-endian data access and all interrupt sources masked:
-
-    PSTATE.EL = 3
-    PSTATE.RW = 1
-    PSTATE.DAIF = 0xf
-    SCTLR_EL3.EE = 0
-
-X0 and X1 can be used to pass information from the Trusted Boot Firmware to the
-platform code in BL3-1:
-
-    X0 : Reserved for common Trusted Firmware information
-    X1 : Platform specific information
-
-BL3-1 zero-init sections (e.g. `.bss`) should not contain valid data on entry;
-they will be zero-filled prior to invoking platform setup code.
-
-##### Use of the X0 and X1 parameters
-
-The parameters are platform specific and passed from `bl31_entrypoint()` to
-`bl31_early_platform_setup()`. The value of these parameters is never directly
-used by the common BL3-1 code.
-
-The convention is that `X0` conveys information regarding the BL3-1, BL3-2 and
-BL3-3 images from the Trusted Boot firmware and `X1` can be used for other
-platform specific purposes. This convention allows platforms which use ARM
-Trusted Firmware's BL1 and BL2 images to transfer additional platform specific
-information from Secure Boot without conflicting with future evolution of the
-Trusted Firmware using `X0` to pass a `bl31_params` structure.
-
-BL3-1 common and SPD initialization code depends on image and entrypoint
-information about BL3-3 and BL3-2, which is provided via BL3-1 platform APIs.
-This information is required until the start of execution of BL3-3. This
-information can be provided in a platform defined manner, e.g. compiled into
-the platform code in BL3-1, or provided in a platform defined memory location
-by the Trusted Boot firmware, or passed from the Trusted Boot Firmware via the
-Cold boot Initialization parameters. This data may need to be cleaned out of
-the CPU caches if it is provided by an earlier boot stage and then accessed by
-BL3-1 platform code before the caches are enabled.
-
-ARM Trusted Firmware's BL2 implementation passes a `bl31_params` structure in
-`X0` and the FVP port interprets this in the BL3-1 platform code.
-
-##### MMU, Data caches & Coherency
-
-BL3-1 does not depend on the enabled state of the MMU, data caches or
-interconnect coherency on entry to `bl31_entrypoint()`. If these are disabled
-on entry, these should be enabled during `bl31_plat_arch_setup()`.
-
-##### Data structures used in the BL3-1 cold boot interface
-
-These structures are designed to support compatibility and independent
-evolution of the structures and the firmware images. For example, a version of
-BL3-1 that can interpret the BL3-x image information from different versions of
-BL2, a platform that uses an extended entry_point_info structure to convey
-additional register information to BL3-1, or an ELF image loader that can convey
-more details about the firmware images.
-
-To support these scenarios the structures are versioned and sized, which enables
-BL3-1 to detect which information is present and respond appropriately. The
-`param_header` is defined to capture this information:
-
-    typedef struct param_header {
-        uint8_t type;       /* type of the structure */
-        uint8_t version;    /* version of this structure */
-        uint16_t size;      /* size of this structure in bytes */
-        uint32_t attr;      /* attributes: unused bits SBZ */
-    } param_header_t;
-
-The structures using this format are `entry_point_info`, `image_info` and
-`bl31_params`. The code that allocates and populates these structures must set
-the header fields appropriately, and the `SET_PARA_HEAD()` macro is defined
-to simplify this action.
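-
-As an illustration (a minimal sketch only; the `h` field name and the
-`PARAM_BL31` and `VERSION_1` constants are assumptions made here, not defined
-in this document), code allocating a `bl31_params` structure might fill in its
-embedded header like this:
-
-    struct bl31_params params;
-
-    params.h.type    = PARAM_BL31;     /* identifies the structure type */
-    params.h.version = VERSION_1;      /* version of the structure layout */
-    params.h.size    = sizeof(params); /* total size in bytes */
-    params.h.attr    = 0;              /* unused attribute bits SBZ */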
-
-#### Required CPU state for BL3-1 Warm boot initialization
-
-When requesting a CPU power-on, or suspending a running CPU, ARM Trusted
-Firmware provides the platform power management code with a Warm boot
-initialization entry-point, to be invoked by the CPU immediately after the
-reset handler. On entry to the Warm boot initialization function the calling
-CPU must be in AArch64 EL3, with little-endian data access and all interrupt
-sources masked:
-
-    PSTATE.EL = 3
-    PSTATE.RW = 1
-    PSTATE.DAIF = 0xf
-    SCTLR_EL3.EE = 0
-
-The PSCI implementation will initialize the processor state and ensure that the
-platform power management code is then invoked as required to initialize all
-necessary system, cluster and CPU resources.
-
-
-### Using BL3-1 as the CPU reset vector
-
-On some platforms the runtime firmware (BL3-x images) for the application
-processors is loaded by trusted firmware running on a secure system processor
-on the SoC, rather than by BL1 and BL2 running on the primary application
-processor. For this type of SoC it is desirable for the application processor
-to always reset to BL3-1 which eliminates the need for BL1 and BL2.
-
-ARM Trusted Firmware provides a build-time option `RESET_TO_BL31` that includes
-some additional logic in the BL3-1 entrypoint to support this use case.
-
-In this configuration, the platform's Trusted Boot Firmware must ensure that
-BL3-1 is loaded to its runtime address, which must match the CPU's RVBAR reset
-vector address, before the application processor is powered on. Additionally,
-platform software is responsible for loading the other BL3-x images required and
-providing entry point information for them to BL3-1. Loading these images might
-be done by the Trusted Boot Firmware or by platform code in BL3-1.
-
-The ARM FVP port supports the `RESET_TO_BL31` configuration, in which case the
-`bl31.bin` image must be loaded to its run address in Trusted SRAM and all CPU
-reset vectors be changed from the default `0x0` to this run address. See the
-[User Guide] for details of running the FVP models in this way.
-
-This configuration requires some additions and changes in the BL3-1
-functionality:
-
-#### Determination of boot path
-
-In this configuration, BL3-1 uses the same reset framework and code as the one
-described for BL1 above. On a warm boot a CPU is directed to the PSCI
-implementation via a platform defined mechanism. On a cold boot, the platform
-must place any secondary CPUs into a safe state while the primary CPU executes
-a modified BL3-1 initialization, as described below.
-
-#### Architectural initialization
-
-As the first image to execute in this configuration BL3-1 must ensure that
-interconnect coherency is enabled (if required) before enabling the MMU.
-
-#### Platform initialization
-
-In this configuration, when the CPU resets to BL3-1 there are no parameters
-that can be passed in registers by previous boot stages. Instead, the platform
-code in BL3-1 needs to know, or be able to determine, the location of the BL3-2
-(if required) and BL3-3 images and provide this information in response to the
-`bl31_plat_get_next_image_ep_info()` function.
-
-As the first image to execute in this configuration BL3-1 must also ensure that
-any security initialisation, for example programming a TrustZone address space
-controller, is carried out during early platform initialisation.
-
-
-3.  EL3 runtime services framework
-----------------------------------
-
-Software executing in the non-secure state and in the secure state at exception
-levels lower than EL3 will request runtime services using the Secure Monitor
-Call (SMC) instruction. These requests will follow the convention described in
-the SMC Calling Convention PDD ([SMCCC]). The [SMCCC] assigns function
-identifiers to each SMC request and describes how arguments are passed and
-returned.
-
-The EL3 runtime services framework enables the development of services by
-different providers that can be easily integrated into final product firmware.
-The following sections describe the framework which facilitates the
-registration, initialization and use of runtime services in EL3 Runtime
-Firmware (BL3-1).
-
-The design of the runtime services depends heavily on the concepts and
-definitions described in the [SMCCC], in particular SMC Function IDs, Owning
-Entity Numbers (OEN), Fast and Standard calls, and the SMC32 and SMC64 calling
-conventions. Please refer to that document for more detailed explanation of
-these terms.
-
-The following runtime services are expected to be implemented first. They have
-not all been instantiated in the current implementation.
-
-1.  Standard service calls
-
-    This service is for management of the entire system. The Power State
-    Coordination Interface ([PSCI]) is the first set of standard service calls
-    defined by ARM (see PSCI section later).
-
-    NOTE: Currently this service is called PSCI since there are no other
-    defined standard service calls.
-
-2.  Secure-EL1 Payload Dispatcher service
-
-    If a system runs a Trusted OS or other Secure-EL1 Payload (SP) then
-    it also requires a _Secure Monitor_ at EL3 to switch the EL1 processor
-    context between the normal world (EL1/EL2) and trusted world (Secure-EL1).
-    The Secure Monitor will make these world switches in response to SMCs. The
-    [SMCCC] provides for such SMCs with the Trusted OS Call and Trusted
-    Application Call OEN ranges.
-
-    The interface between the EL3 Runtime Firmware and the Secure-EL1 Payload is
-    not defined by the [SMCCC] or any other standard. As a result, each
-    Secure-EL1 Payload requires a specific Secure Monitor that runs as a runtime
-    service - within ARM Trusted Firmware this service is referred to as the
-    Secure-EL1 Payload Dispatcher (SPD).
-
-    ARM Trusted Firmware provides a Test Secure-EL1 Payload (TSP) and its
-    associated Dispatcher (TSPD). Details of SPD design and TSP/TSPD operation
-    are described in the "Secure-EL1 Payloads and Dispatchers" section below.
-
-3.  CPU implementation service
-
-    This service will provide an interface to CPU implementation specific
-    services for a given platform e.g. access to processor errata workarounds.
-    This service is currently unimplemented.
-
-Additional services for ARM Architecture, SiP and OEM calls can be implemented.
-Each implemented service handles a range of SMC function identifiers as
-described in the [SMCCC].
-
-
-### Registration
-
-A runtime service is registered using the `DECLARE_RT_SVC()` macro, specifying
-the name of the service, the range of OENs covered, the type of service and
-initialization and call handler functions. This macro instantiates a `const
-struct rt_svc_desc` for the service with these details (see `runtime_svc.h`).
-This structure is allocated in a special ELF section `rt_svc_descs`, enabling
-the framework to find all service descriptors included into BL3-1.
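-
-As a minimal sketch (the identifiers used below are illustrative assumptions,
-not mandated by this document), a standard service might be declared as:
-
-    DECLARE_RT_SVC(
-        std_svc,                /* name of the service */
-        OEN_STD_START,          /* start of the OEN range covered */
-        OEN_STD_END,            /* end of the OEN range covered */
-        SMC_TYPE_FAST,          /* type of service */
-        std_svc_setup,          /* initialization function */
-        std_svc_smc_handler     /* call handler function */
-    );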
-
-The specific service for an SMC Function is selected based on the OEN and call
-type of the Function ID, and the framework uses that information in the service
-descriptor to identify the handler for the SMC Call.
-
-The service descriptors do not include information to identify the precise set
-of SMC function identifiers supported by this service implementation, the
-security state from which such calls are valid, or the capability to support
-64-bit and/or 32-bit callers (using SMC32 or SMC64). Responding appropriately
-to these aspects of an SMC call is the responsibility of the service
-implementation; the framework is focused on integration of services from
-different providers and on minimizing the time taken by the framework before
-the service handler is invoked.
-
-Details of the parameters, requirements and behavior of the initialization and
-call handling functions are provided in the following sections.
-
-
-### Initialization
-
-`runtime_svc_init()` in `runtime_svc.c` initializes the runtime services
-framework running on the primary CPU during cold boot as part of the BL3-1
-initialization. This happens prior to initializing a Trusted OS and running
-Normal world boot firmware that might in turn use these services.
-Initialization involves validating each of the declared runtime service
-descriptors, calling the service initialization function and populating the
-index used for runtime lookup of the service.
-
-The BL3-1 linker script collects all of the declared service descriptors into a
-single array and defines symbols that allow the framework to locate and traverse
-the array, and determine its size.
-
-The framework does basic validation of each descriptor to halt firmware
-initialization if service declaration errors are detected. The framework does
-not check descriptors for the following error conditions, and may behave in an
-unpredictable manner under such scenarios:
-
-1.  Overlapping OEN ranges
-2.  Multiple descriptors for the same range of OENs and `call_type`
-3.  Incorrect range of owning entity numbers for a given `call_type`
-
-Once validated, the service `init()` callback is invoked. This function carries
-out any essential EL3 initialization before servicing requests. The `init()`
-function is only invoked on the primary CPU during cold boot. If the service
-uses per-CPU data this must either be initialized for all CPUs during this call,
-or be done lazily when a CPU first issues an SMC call to that service. If
-`init()` returns anything other than `0`, this is treated as an initialization
-error and the service is ignored: this does not cause the firmware to halt.
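-
-A minimal `init()` sketch consistent with the behaviour described above (the
-function name is an illustrative assumption):
-
-    static int32_t my_svc_setup(void)
-    {
-        /* Carry out any essential EL3 initialization here. */
-
-        /* Return 0 on success; any other value disables the service. */
-        return 0;
-    }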
-
-The OEN and call type fields present in the SMC Function ID cover a total of
-128 distinct services, but in practice a single descriptor can cover a range of
-OENs, e.g. SMCs to call a Trusted OS function. To optimize the lookup of a
-service handler, the framework uses an array of 128 indices that map every
-distinct OEN/call-type combination either to one of the declared services or to
-indicate the service is not handled. This `rt_svc_descs_indices[]` array is
-populated for all of the OENs covered by a service after the service `init()`
-function has reported success. So a service that fails to initialize will never
-have its `handle()` function invoked.
-
-The following figure shows how the `rt_svc_descs_indices[]` index maps the SMC
-Function ID call type and OEN onto a specific service handler in the
-`rt_svc_descs[]` array.
-
-![Image 1](diagrams/rt-svc-descs-layout.png?raw=true)
-
-
-### Handling an SMC
-
-When the EL3 runtime services framework receives a Secure Monitor Call, the SMC
-Function ID is passed in W0 from the lower exception level (as per the
-[SMCCC]). If the calling register width is AArch32, it is invalid to invoke an
-SMC Function which indicates the SMC64 calling convention: such calls are
-ignored and return the Unknown SMC Function Identifier result code `0xFFFFFFFF`
-in R0/X0.
-
-Bit[31] (fast/standard call) and bits[29:24] (owning entity number) of the SMC
-Function ID are combined to index into the `rt_svc_descs_indices[]` array. The
-resulting value might indicate a service that has no handler; in this case the
-framework will also report an Unknown SMC Function ID. Otherwise, the value is
-used as a further index into the `rt_svc_descs[]` array to locate the required
-service and handler.
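-
-A sketch of this first lookup step (the exact packing of the two fields into
-the index is an assumption made for illustration):
-
-    /* Map an SMC Function ID to its 0..127 lookup index. */
-    static unsigned int smc_fid_to_index(uint32_t smc_fid)
-    {
-        unsigned int oen  = (smc_fid >> 24) & 0x3f;  /* bits[29:24] */
-        unsigned int fast = (smc_fid >> 31) & 0x1;   /* bit[31] */
-
-        return (fast << 6) | oen;                    /* one of 128 slots */
-    }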
-
-The service's `handle()` callback is provided with five of the SMC parameters
-directly; the others are saved into memory for retrieval (if needed) by the
-handler. The handler is also provided with an opaque `handle` for use with the
-supporting library for parameter retrieval, setting return values and context
-manipulation, and with `flags` indicating the security state of the caller. The
-framework finally sets up the execution stack for the handler, and invokes the
-service's `handle()` function.
-
-On return from the handler the result registers are populated in X0-X3 before
-restoring the stack and CPU state and returning from the original SMC.
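-
-The shape of a `handle()` callback implied by the description above is roughly
-as follows (a sketch only; the exact parameter layout and the `SMC_RET1` and
-`SMC_UNK` helpers are assumptions here):
-
-    uint64_t my_svc_smc_handler(uint32_t smc_fid,
-                                uint64_t x1, uint64_t x2,
-                                uint64_t x3, uint64_t x4,
-                                void *cookie, void *handle,
-                                uint64_t flags)
-    {
-        /* 'flags' indicates the security state of the caller; 'handle' is
-           the opaque context used to set return values. */
-        switch (smc_fid) {
-        default:
-            /* Unknown SMC Function Identifier (0xFFFFFFFF). */
-            SMC_RET1(handle, SMC_UNK);
-        }
-    }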
-
-
-4.  Power State Coordination Interface
---------------------------------------
-
-TODO: Provide design walkthrough of PSCI implementation.
-
-The PSCI v1.0 specification categorizes APIs as optional and mandatory. All the
-mandatory APIs in PSCI v1.0 and all the APIs in the PSCI v0.2 draft specification
-[Power State Coordination Interface PDD] [PSCI] are implemented. The table below
-lists the PSCI v1.0 APIs and their support in generic code.
-
-An API implementation might have a dependency on platform code e.g. CPU_SUSPEND
-requires the platform to export a part of the implementation. Hence the level
-of support of the mandatory APIs depends upon the support exported by the
-platform port as well. The Juno and FVP (all variants) platforms export all the
-required support.
-
-| PSCI v1.0 API         |Supported| Comments                                  |
-|:----------------------|:--------|:------------------------------------------|
-|`PSCI_VERSION`         | Yes     | The version returned is 1.0               |
-|`CPU_SUSPEND`          | Yes*    | The original `power_state` format is used |
-|`CPU_OFF`              | Yes*    |                                           |
-|`CPU_ON`               | Yes*    |                                           |
-|`AFFINITY_INFO`        | Yes     |                                           |
-|`MIGRATE`              | Yes**   |                                           |
-|`MIGRATE_INFO_TYPE`    | Yes**   |                                           |
-|`MIGRATE_INFO_CPU`     | Yes**   |                                           |
-|`SYSTEM_OFF`           | Yes*    |                                           |
-|`SYSTEM_RESET`         | Yes*    |                                           |
-|`PSCI_FEATURES`        | Yes     |                                           |
-|`CPU_FREEZE`           | No      |                                           |
-|`CPU_DEFAULT_SUSPEND`  | No      |                                           |
-|`CPU_HW_STATE`         | No      |                                           |
-|`SYSTEM_SUSPEND`       | Yes*    |                                           |
-|`PSCI_SET_SUSPEND_MODE`| No      |                                           |
-|`PSCI_STAT_RESIDENCY`  | No      |                                           |
-|`PSCI_STAT_COUNT`      | No      |                                           |
-
-*Note : These PSCI APIs require platform power management hooks to be
-registered with the generic PSCI code to be supported.
-
-**Note : These PSCI APIs require appropriate Secure Payload Dispatcher
-hooks to be registered with the generic PSCI code to be supported.
-
-
-5.  Secure-EL1 Payloads and Dispatchers
----------------------------------------
-
-On a production system that includes a Trusted OS running in Secure-EL1/EL0,
-the Trusted OS is coupled with a companion runtime service in the BL3-1
-firmware. This service is responsible for the initialisation of the Trusted
-OS and all communications with it. The Trusted OS is the BL3-2 stage of the
-boot flow in ARM Trusted Firmware. The firmware will attempt to locate, load
-and execute a BL3-2 image.
-
-ARM Trusted Firmware uses a more general term for the BL3-2 software that runs
-at Secure-EL1 - the _Secure-EL1 Payload_ - as it is not always a Trusted OS.
-
-The ARM Trusted Firmware provides a Test Secure-EL1 Payload (TSP) and a Test
-Secure-EL1 Payload Dispatcher (TSPD) service as an example of how a Trusted OS
-is supported on a production system using the Runtime Services Framework. On
-such a system, the Test BL3-2 image and service are replaced by the Trusted OS
-and its dispatcher service. The ARM Trusted Firmware build system expects that
-the dispatcher will define the build flag `NEED_BL32` to enable it to include
-BL3-2 in the build, either as a binary or compiled from source, depending on
-whether the `BL32` build option is specified.
-
-The TSP runs in Secure-EL1. It is designed to demonstrate synchronous
-communication with the normal-world software running in EL1/EL2. Communication
-is initiated by the normal-world software
-
-*   either directly through a Fast SMC (as defined in the [SMCCC])
-
-*   or indirectly through a [PSCI] SMC. The [PSCI] implementation in turn
-    informs the TSPD about the requested power management operation. This allows
-    the TSP to prepare for or respond to the power state change
-
-The TSPD service is responsible for:
-
-*   Initializing the TSP
-
-*   Routing requests and responses between the secure and the non-secure
-    states during the two types of communications just described
-
-### Initializing a BL3-2 Image
-
-The Secure-EL1 Payload Dispatcher (SPD) service is responsible for initializing
-the BL3-2 image. It needs access to the information passed by BL2 to BL3-1 to do
-so. This is provided by:
-
-    entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t);
-
-which returns a reference to the `entry_point_info` structure corresponding to
-the image which will be run in the specified security state. The SPD uses this
-API to get entry point information for the SECURE image, BL3-2.
-
-In the absence of a BL3-2 image, BL3-1 passes control to the normal world
-bootloader image (BL3-3). When the BL3-2 image is present, it is typical
-that the SPD wants control to be passed to BL3-2 first and then later to BL3-3.
-
-To do this the SPD has to register a BL3-2 initialization function during
-initialization of the SPD service. The BL3-2 initialization function has this
-prototype:
-
-    int32_t init();
-
-and is registered using the `bl31_register_bl32_init()` function.
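-
-Putting these two APIs together, an SPD setup function might look roughly as
-follows (a sketch only; everything except the two documented functions is an
-illustrative assumption, including the `SECURE` constant):
-
-    static int32_t my_spd_init(void);   /* runs the BL3-2 initialization */
-
-    int32_t my_spd_setup(void)
-    {
-        entry_point_info_t *bl32_ep;
-
-        /* Retrieve the entry point information for the SECURE image. */
-        bl32_ep = bl31_plat_get_next_image_ep_info(SECURE);
-        if (bl32_ep == NULL)
-            return 1;       /* no BL3-2 image to initialize */
-
-        /* Defer BL3-2 initialization until runtime services are ready. */
-        bl31_register_bl32_init(&my_spd_init);
-        return 0;
-    }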
-
-Trusted Firmware supports two approaches for the SPD to pass control to BL3-2
-before returning through EL3 and running the non-trusted firmware (BL3-3):
-
-1.  In the BL3-2 setup function, use `bl31_set_next_image_type()` to
-    request that the exit from `bl31_main()` is to the BL3-2 entrypoint in
-    Secure-EL1. BL3-1 will exit to BL3-2 using the asynchronous method by
-    calling bl31_prepare_next_image_entry() and el3_exit().
-
-    When the BL3-2 has completed initialization at Secure-EL1, it returns to
-    BL3-1 by issuing an SMC, using a Function ID allocated to the SPD. On
-    receipt of this SMC, the SPD service handler should switch the CPU context
-    from trusted to normal world and use the `bl31_set_next_image_type()` and
-    `bl31_prepare_next_image_entry()` functions to set up the initial return to
-    the normal world firmware BL3-3. On return from the handler the framework
-    will exit to EL2 and run BL3-3.
-
-2.  The BL3-2 setup function registers an initialization function using
-    `bl31_register_bl32_init()` which provides an SPD-defined mechanism to
-    invoke a 'world-switch synchronous call' to Secure-EL1 to run the BL3-2
-    entrypoint.
-    NOTE: The Test SPD service included with the Trusted Firmware provides one
-    implementation of such a mechanism.
-
-    On completion BL3-2 returns control to BL3-1 via an SMC, and on receipt the
-    SPD service handler invokes the synchronous call return mechanism to return
-    to the BL3-2 initialization function. On return from this function,
-    `bl31_main()` will set up the return to the normal world firmware BL3-3 and
-    continue the boot process in the normal world.
-
-
-6.  Crash Reporting in BL3-1
-----------------------------
-
-BL3-1 implements a scheme for reporting the processor state when an unhandled
-exception is encountered. The reporting mechanism attempts to preserve all the
-register contents and report them via the default serial output. The general purpose
-registers, EL3, Secure EL1 and some EL2 state registers are reported.
-
-A dedicated per-CPU crash stack is maintained by BL3-1 and this is retrieved via
-the per-CPU pointer cache. The implementation attempts to minimise the memory
-required for this feature. The file `crash_reporting.S` contains the
-implementation for crash reporting.
-
-The sample crash output is shown below.
-
-    x0	:0x000000004F00007C
-    x1	:0x0000000007FFFFFF
-    x2	:0x0000000004014D50
-    x3	:0x0000000000000000
-    x4	:0x0000000088007998
-    x5	:0x00000000001343AC
-    x6	:0x0000000000000016
-    x7	:0x00000000000B8A38
-    x8	:0x00000000001343AC
-    x9	:0x00000000000101A8
-    x10	:0x0000000000000002
-    x11	:0x000000000000011C
-    x12	:0x00000000FEFDC644
-    x13	:0x00000000FED93FFC
-    x14	:0x0000000000247950
-    x15	:0x00000000000007A2
-    x16	:0x00000000000007A4
-    x17	:0x0000000000247950
-    x18	:0x0000000000000000
-    x19	:0x00000000FFFFFFFF
-    x20	:0x0000000004014D50
-    x21	:0x000000000400A38C
-    x22	:0x0000000000247950
-    x23	:0x0000000000000010
-    x24	:0x0000000000000024
-    x25	:0x00000000FEFDC868
-    x26	:0x00000000FEFDC86A
-    x27	:0x00000000019EDEDC
-    x28	:0x000000000A7CFDAA
-    x29	:0x0000000004010780
-    x30	:0x000000000400F004
-    scr_el3	:0x0000000000000D3D
-    sctlr_el3	:0x0000000000C8181F
-    cptr_el3	:0x0000000000000000
-    tcr_el3	:0x0000000080803520
-    daif	:0x00000000000003C0
-    mair_el3	:0x00000000000004FF
-    spsr_el3	:0x00000000800003CC
-    elr_el3	:0x000000000400C0CC
-    ttbr0_el3	:0x00000000040172A0
-    esr_el3	:0x0000000096000210
-    sp_el3	:0x0000000004014D50
-    far_el3	:0x000000004F00007C
-    spsr_el1	:0x0000000000000000
-    elr_el1	:0x0000000000000000
-    spsr_abt	:0x0000000000000000
-    spsr_und	:0x0000000000000000
-    spsr_irq	:0x0000000000000000
-    spsr_fiq	:0x0000000000000000
-    sctlr_el1	:0x0000000030C81807
-    actlr_el1	:0x0000000000000000
-    cpacr_el1	:0x0000000000300000
-    csselr_el1	:0x0000000000000002
-    sp_el1	:0x0000000004028800
-    esr_el1	:0x0000000000000000
-    ttbr0_el1	:0x000000000402C200
-    ttbr1_el1	:0x0000000000000000
-    mair_el1	:0x00000000000004FF
-    amair_el1	:0x0000000000000000
-    tcr_el1	:0x0000000000003520
-    tpidr_el1	:0x0000000000000000
-    tpidr_el0	:0x0000000000000000
-    tpidrro_el0	:0x0000000000000000
-    dacr32_el2	:0x0000000000000000
-    ifsr32_el2	:0x0000000000000000
-    par_el1	:0x0000000000000000
-    far_el1	:0x0000000000000000
-    afsr0_el1	:0x0000000000000000
-    afsr1_el1	:0x0000000000000000
-    contextidr_el1	:0x0000000000000000
-    vbar_el1	:0x0000000004027000
-    cntp_ctl_el0	:0x0000000000000000
-    cntp_cval_el0	:0x0000000000000000
-    cntv_ctl_el0	:0x0000000000000000
-    cntv_cval_el0	:0x0000000000000000
-    cntkctl_el1	:0x0000000000000000
-    fpexc32_el2	:0x0000000004000700
-    sp_el0	:0x0000000004010780
-
-7.  Guidelines for Reset Handlers
----------------------------------
-
-Trusted Firmware implements a framework that allows CPU and platform ports to
-perform actions immediately after a CPU is released from reset in both the cold
-and warm boot paths. This is done by calling the `reset_handler()` function in
-both the BL1 and BL3-1 images. It in turn calls the platform and CPU specific
-reset handling functions.
-
-Details for implementing a CPU specific reset handler can be found in
-Section 8. Details for implementing a platform specific reset handler can be
-found in the [Porting Guide] (see the `plat_reset_handler()` function).
-
-When adding functionality to a reset handler, the following points should be
-kept in mind.
-
-1.   The first reset handler in the system exists either in a ROM image
-     (e.g. BL1), or BL3-1 if `RESET_TO_BL31` is true. This may be detected at
-     compile time using the constant `FIRST_RESET_HANDLER_CALL`.
-
-2.   When considering ROM images, it's important to consider non-TF-based ROMs
-     and ROMs based on previous versions of the TF code.
-
-3.   If the functionality should be applied to a ROM and there is no possibility
-     of a ROM being used that does not apply the functionality (or equivalent),
-     then the functionality should be applied within a `#if
-     FIRST_RESET_HANDLER_CALL` block (see the sketch after this list).
-
-4.   If the functionality should execute in BL3-1 in order to override or
-     supplement a ROM version of the functionality, then the functionality
-     should be applied in the `#else` part of a `#if FIRST_RESET_HANDLER_CALL`
-     block.
-
-5.   If the functionality should be applied to a ROM but there is a possibility
-     of ROMs being used that do not apply the functionality, then the
-     functionality should be applied outside of a `FIRST_RESET_HANDLER_CALL`
-     block, so that BL3-1 has an opportunity to apply the functionality instead.
-     In this case, additional code may be needed to cope with different ROMs
-     that do or do not apply the functionality.
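-
-A sketch of the pattern described in points 3 and 4 above (the functionality
-itself is represented by placeholder comments):
-
-    #if FIRST_RESET_HANDLER_CALL
-        /* Functionality that must be applied by the first image to run
-           after reset (e.g. the ROM image). */
-    #else
-        /* BL3-1 version of the functionality, overriding or supplementing
-           whatever the ROM may have done. */
-    #endif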
-
-
-8.  CPU specific operations framework
--------------------------------------
-
-Certain aspects of the ARMv8 architecture are implementation defined,
-that is, certain behaviours are not architecturally defined, but must be defined
-and documented by individual processor implementations. The ARM Trusted
-Firmware implements a framework which categorises the common implementation
-defined behaviours and allows a processor to export its implementation of that
-behaviour. The categories are:
-
-1.  Processor specific reset sequence.
-
-2.  Processor specific power down sequences.
-
-3.  Processor specific register dumping as a part of crash reporting.
-
-Each of the above categories fulfils a different requirement.
-
-1.  allows any processor specific initialization before the caches and MMU
-    are turned on, like implementation of errata workarounds, entry into
-    the intra-cluster coherency domain etc.
-
-2.  allows each processor to implement the power down sequence mandated in
-    its Technical Reference Manual (TRM).
-
-3.  allows a processor to provide additional information to the developer
-    in the event of a crash, for example Cortex-A53 has registers which
-    can expose the data cache contents.
-
-Please note that only 2. is mandated by the TRM.
-
-The CPU specific operations framework scales to accommodate a large number of
-different CPUs during power down and reset handling. The platform can specify
-any CPU optimization it wants to enable for each CPU. It can also specify
-the CPU errata workarounds to be applied for each CPU type during reset
-handling by defining CPU errata compile time macros. Details on these macros
-can be found in the [cpu-specific-build-macros.md][CPUBM] file.
-
-The CPU specific operations framework depends on the `cpu_ops` structure which
-needs to be exported for each type of CPU in the platform. It is defined in
-`include/lib/cpus/aarch64/cpu_macros.S` and has the following fields : `midr`,
-`reset_func()`, `core_pwr_dwn()`, `cluster_pwr_dwn()` and `cpu_reg_dump()`.
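-
-Conceptually (the actual definition is provided by assembly macros in
-`cpu_macros.S`, so the C rendering below is only an illustration), the
-structure looks like:
-
-    typedef struct cpu_ops {
-        unsigned long midr;            /* part number/implementer to match */
-        void (*reset_func)(void);      /* CPU specific reset handling */
-        void (*core_pwr_dwn)(void);    /* power down the core */
-        void (*cluster_pwr_dwn)(void); /* power down the cluster */
-        void (*cpu_reg_dump)(void);    /* registers to dump on a crash */
-    } cpu_ops_t;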
-
-The CPU specific files in `lib/cpus` export a `cpu_ops` data structure with
-suitable handlers for that CPU.  For example, `lib/cpus/cortex_a53.S` exports
-the `cpu_ops` for Cortex-A53 CPU. According to the platform configuration,
-these CPU specific files must be included in the build by the platform
-makefile. The generic CPU specific operations framework code exists in
-`lib/cpus/aarch64/cpu_helpers.S`.
-
-### CPU specific Reset Handling
-
-After a reset, the state of the CPU when it calls the generic reset handler is:
-MMU turned off, both instruction and data caches turned off and not part
-of any coherency domain.
-
-The BL entrypoint code first invokes the `plat_reset_handler()` to allow
-the platform to perform any system initialization required and any system
-errata workarounds that need to be applied. The `get_cpu_ops_ptr()` function reads
-the current CPU midr, finds the matching `cpu_ops` entry in the `cpu_ops`
-array and returns it. Note that only the part number and implementer fields
-in midr are used to find the matching `cpu_ops` entry. The `reset_func()` in
-the returned `cpu_ops` is then invoked which executes the required reset
-handling for that CPU and also any errata workarounds enabled by the platform.
-This function must preserve the values of general purpose registers x20 to x29.
-
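-As an illustration of the matching rule, only the implementer field (bits
-[31:24]) and the part number field (bits [15:4]) of the midr take part in the
-comparison. A C sketch of this check (the actual implementation is in
-assembler in `cpu_helpers.S`) could look as follows:
-
-    #define MIDR_MATCH_MASK  ((0xffUL << 24) | (0xfffUL << 4))
-
-    /* Sketch: variant and revision fields are deliberately ignored. */
-    static int cpu_ops_matches(unsigned long midr, unsigned long ops_midr)
-    {
-        return (midr & MIDR_MATCH_MASK) == (ops_midr & MIDR_MATCH_MASK);
-    }
-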
-Refer to Section "Guidelines for Reset Handlers" for general guidelines
-regarding placement of code in a reset handler.
-
-### CPU specific power down sequence
-
-During the BL3-1 initialization sequence, the pointer to the matching `cpu_ops`
-entry is stored in per-CPU data by `init_cpu_ops()` so that it can be quickly
-retrieved during power down sequences.
-
-The PSCI service, upon receiving a power down request, determines the highest
-affinity level at which to execute power down sequence for a particular CPU and
-invokes the corresponding 'prepare' power down handler in the CPU specific
-operations framework. For example, when a CPU executes a power down for affinity
-level 0, the `prepare_core_pwr_dwn()` retrieves the `cpu_ops` pointer from the
-per-CPU data and the corresponding `core_pwr_dwn()` is invoked. Similarly when
-a CPU executes power down at affinity level 1, the `prepare_cluster_pwr_dwn()`
-retrieves the `cpu_ops` pointer and the corresponding `cluster_pwr_dwn()` is
-invoked.
-
-At runtime the platform hooks for power down are invoked by the PSCI service to
-perform platform specific operations during a power down sequence, for example
-turning off CCI coherency during a cluster power down.
-
-### CPU specific register reporting during crash
-
-If the crash reporting is enabled in BL3-1, when a crash occurs, the crash
-reporting framework calls `do_cpu_reg_dump` which retrieves the matching
-`cpu_ops` using the `get_cpu_ops_ptr()` function. The `cpu_reg_dump()` in
-`cpu_ops` is invoked, which then returns the CPU specific register values to
-be reported and a pointer to the ASCII list of register names in a format
-expected by the crash reporting framework.
-
-
-9. Memory layout of BL images
------------------------------
-
-Each bootloader image can be divided in 2 parts:
-
- *    the static contents of the image. These are data actually stored in the
-      binary on the disk. In the ELF terminology, they are called `PROGBITS`
-      sections;
-
- *    the run-time contents of the image. These are data that don't occupy any
-      space in the binary on the disk. The ELF binary just contains some
-      metadata indicating where these data will be stored at run-time and the
-      corresponding sections need to be allocated and initialized at run-time.
-      In the ELF terminology, they are called `NOBITS` sections.
-
-All PROGBITS sections are grouped together at the beginning of the image,
-followed by all NOBITS sections. This is true for all Trusted Firmware images
-and it is governed by the linker scripts. This ensures that the raw binary
-images are as small as possible. If a NOBITS section would sneak in between
-PROGBITS sections then the resulting binary file would contain a bunch of zero
-bytes at the location of this NOBITS section, making the image unnecessarily
-bigger. Smaller images allow faster loading from the FIP to the main memory.
-
-### Linker scripts and symbols
-
-Each bootloader stage image layout is described by its own linker script. The
-linker scripts export some symbols into the program symbol table. Their values
-correspond to particular addresses. The trusted firmware code can refer to these
-symbols to figure out the image memory layout.
-
-Linker symbols follow the following naming convention in the trusted firmware.
-
-*   `__<SECTION>_START__`
-
-    Start address of a given section named `<SECTION>`.
-
-*   `__<SECTION>_END__`
-
-    End address of a given section named `<SECTION>`. If there is an alignment
-    constraint on the section's end address then `__<SECTION>_END__` corresponds
-    to the end address of the section's actual contents, rounded up to the right
-    boundary. Refer to the value of `__<SECTION>_UNALIGNED_END__`  to know the
-    actual end address of the section's contents.
-
-*   `__<SECTION>_UNALIGNED_END__`
-
-    End address of a given section named `<SECTION>` without any padding or
-    rounding up due to some alignment constraint.
-
-*   `__<SECTION>_SIZE__`
-
-    Size (in bytes) of a given section named `<SECTION>`. If there is an
-    alignment constraint on the section's end address then `__<SECTION>_SIZE__`
-    corresponds to the size of the section's actual contents, rounded up to the
-    right boundary. In other words, `__<SECTION>_SIZE__ = __<SECTION>_END__ -
-__<SECTION>_START__`. Refer to the value of `__<SECTION>_UNALIGNED_SIZE__`
-    to know the actual size of the section's contents.
-
-*   `__<SECTION>_UNALIGNED_SIZE__`
-
-    Size (in bytes) of a given section named `<SECTION>` without any padding or
-    rounding up due to some alignment constraint. In other words,
-    `__<SECTION>_UNALIGNED_SIZE__ = __<SECTION>_UNALIGNED_END__ -
-    __<SECTION>_START__`.
-
-Some of the linker symbols are mandatory as the trusted firmware code relies on
-them to be defined. They are listed in the following subsections. Some of them
-must be provided for each bootloader stage and some are specific to a given
-bootloader stage.
-
-The linker scripts define some extra, optional symbols. They are not actually
-used by any code but they help in understanding the bootloader images' memory
-layout as they are easy to spot in the link map files.
-
-#### Common linker symbols
-
-Early setup code needs to know the extents of the BSS section to zero-initialise
-it before executing any C code. The following linker symbols are defined for
-this purpose:
-
-* `__BSS_START__` This address must be aligned on a 16-byte boundary.
-* `__BSS_SIZE__`
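-
-For illustration, C code could consume these symbols as sketched below. In the
-Trusted Firmware the BSS is actually cleared by early assembler code before any
-C code runs, so treat this purely as a reading aid:
-
-    #include <stddef.h>
-    #include <stdint.h>
-    #include <string.h>
-
-    /* The symbols' *addresses* carry the information exported by the linker. */
-    extern char __BSS_START__[];
-    extern char __BSS_SIZE__[];
-
-    static void zero_bss(void)
-    {
-        memset(__BSS_START__, 0, (size_t)(uintptr_t)__BSS_SIZE__);
-    }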
-
-Similarly, the coherent memory section (if enabled) must be zero-initialised.
-Also, the MMU setup code needs to know the extents of this section to set the
-right memory attributes for it. The following linker symbols are defined for
-this purpose:
-
-* `__COHERENT_RAM_START__` This address must be aligned on a page-size boundary.
-* `__COHERENT_RAM_END__` This address must be aligned on a page-size boundary.
-* `__COHERENT_RAM_UNALIGNED_SIZE__`
-
-#### BL1's linker symbols
-
-BL1's early setup code needs to know the extents of the .data section to
-relocate it from ROM to RAM before executing any C code. The following linker
-symbols are defined for this purpose:
-
-* `__DATA_ROM_START__` This address must be aligned on a 16-byte boundary.
-* `__DATA_RAM_START__` This address must be aligned on a 16-byte boundary.
-* `__DATA_SIZE__`
-
-BL1's platform setup code needs to know the extents of its read-write data
-region to figure out its memory layout. The following linker symbols are defined
-for this purpose:
-
-* `__BL1_RAM_START__` This is the start address of BL1 RW data.
-* `__BL1_RAM_END__` This is the end address of BL1 RW data.
-
-#### BL2's, BL3-1's and TSP's linker symbols
-
-BL2, BL3-1 and TSP need to know the extents of their read-only section to set
-the right memory attributes for this memory region in their MMU setup code. The
-following linker symbols are defined for this purpose:
-
-* `__RO_START__`
-* `__RO_END__`
-
-### How to choose the right base addresses for each bootloader stage image
-
-There is currently no support for dynamic image loading in the Trusted Firmware.
-This means that all bootloader images need to be linked against their ultimate
-runtime locations and the base addresses of each image must be chosen carefully
-such that images don't overlap each other in an undesired way. As the code
-grows, the base addresses might need adjustments to cope with the new memory
-layout.
-
-The memory layout is completely specific to the platform and so there is no
-general recipe for choosing the right base addresses for each bootloader image.
-However, there are tools to aid in understanding the memory layout. These are
-the link map files: `build/<platform>/<build-type>/bl<x>/bl<x>.map`, with `<x>`
-being the bootloader stage. They provide a detailed view of the memory usage of
-each image. Among other useful information, they provide the end address of
-each image.
-
-* `bl1.map` link map file provides `__BL1_RAM_END__` address.
-* `bl2.map` link map file provides `__BL2_END__` address.
-* `bl31.map` link map file provides `__BL31_END__` address.
-* `bl32.map` link map file provides `__BL32_END__` address.
-
-For each bootloader image, the platform code must provide its start address
-as well as a limit address that it must not overstep. The latter is used in the
-linker scripts to check that the image doesn't grow past that address. If that
-happens, the linker will issue a message similar to the following:
-
-    aarch64-none-elf-ld: BLx has exceeded its limit.
-
-Additionally, if the platform memory layout implies some image overlaying like
-on FVP, BL3-1 and TSP need to know the limit address that their PROGBITS
-sections must not overstep. The platform code must provide those.
-
-
-####  Memory layout on ARM FVPs
-
-The following list describes the memory layout on the FVP:
-
-*   A 4KB page of shared memory is used to store the entrypoint mailboxes
-    and the parameters passed between bootloaders. The shared memory is located
-    at the base of the Trusted SRAM. The amount of Trusted SRAM available to
-    load the bootloader images will be reduced by the size of the shared memory.
-
-*   BL1 is originally sitting in the Trusted ROM at address `0x0`. Its
-    read-write data are relocated at the top of the Trusted SRAM at runtime.
-
-*   BL3-1 is loaded at the top of the Trusted SRAM, such that its NOBITS
-    sections will overwrite BL1 R/W data.
-
-*   BL2 is loaded below BL3-1.
-
-*   BL3-2 can be loaded in one of the following locations:
-
-    *   Trusted SRAM
-    *   Trusted DRAM
-    *   Secure region of DRAM (top 16MB of DRAM configured by the TrustZone
-        controller)
-
-When BL3-2 is loaded into Trusted SRAM, its NOBITS sections are allowed to
-overlay BL2. This memory layout is designed to give the BL3-2 image as much
-memory as possible when it is loaded into Trusted SRAM.
-
-The location of the BL3-2 image will result in different memory maps. This is
-illustrated in the following diagrams using the TSP as an example.
-
-**TSP in Trusted SRAM (default option):**
-
-               Trusted SRAM
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL3-1 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL3-1 PROGBITS |
-               |----------|                 ------------------
-               |   BL2    |  <<<<<<<<<<<<<  |  BL3-2 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL3-2 PROGBITS |
-    0x04001000 +----------+                 ------------------
-               |  Shared  |
-    0x04000000 +----------+
-
-               Trusted ROM
-    0x04000000 +----------+
-               | BL1 (ro) |
-    0x00000000 +----------+
-
-
-**TSP in Trusted DRAM:**
-
-               Trusted DRAM
-    0x08000000 +----------+
-               |  BL3-2   |
-    0x06000000 +----------+
-
-               Trusted SRAM
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL3-1 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL3-1 PROGBITS |
-               |----------|                 ------------------
-               |   BL2    |
-               |----------|
-               |          |
-    0x04001000 +----------+
-               |  Shared  |
-    0x04000000 +----------+
-
-               Trusted ROM
-    0x04000000 +----------+
-               | BL1 (ro) |
-    0x00000000 +----------+
-
-**TSP in the TZC-Secured DRAM:**
-
-                   DRAM
-    0xffffffff +----------+
-               |  BL3-2   |  (secure)
-    0xff000000 +----------+
-               |          |
-               :          :  (non-secure)
-               |          |
-    0x80000000 +----------+
-
-               Trusted SRAM
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL3-1 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL3-1 PROGBITS |
-               |----------|                 ------------------
-               |   BL2    |
-               |----------|
-               |          |
-    0x04001000 +----------+
-               |  Shared  |
-    0x04000000 +----------+
-
-               Trusted ROM
-    0x04000000 +----------+
-               | BL1 (ro) |
-    0x00000000 +----------+
-
-Moving the TSP image out of the Trusted SRAM doesn't change the memory layout
-of the other boot loader images in Trusted SRAM.
-
-
-####  Memory layout on Juno ARM development platform
-
-The following list describes the memory layout on Juno:
-
-*   Trusted SRAM at 0x04000000 contains the MHU page, BL1 r/w section, BL2
-    image, BL3-1 image and, optionally, the BL3-2 image.
-
-*   The MHU 4 KB page is used as a communication channel between the SCP and AP. It
-    also contains the entrypoint mailboxes for the AP. Mailboxes are stored in
-    the first 128 bytes of the MHU page.
-
-*   BL1 resides in flash memory at address `0x0BEC0000`. Its read-write data
-    section is relocated to the top of the Trusted SRAM at runtime.
-
-*   BL3-1 is loaded at the top of the Trusted SRAM, such that its NOBITS
-    sections will overwrite BL1 R/W data. This implies that BL1 global variables
-    will remain valid only until execution reaches the BL3-1 entry point during
-    a cold boot.
-
-*   BL2 is loaded below BL3-1.
-
-*   BL3-0 is loaded temporarily into the BL3-1 memory region and transferred to
-    the SCP before being overwritten by BL3-1.
-
-*   The BL3-2 image is optional and can be loaded into one of these two
-    locations: Trusted SRAM (right after the MHU page) or DRAM (14 MB starting
-    at 0xFF000000 and secured by the TrustZone controller). When loaded into
-    Trusted SRAM, its NOBITS sections are allowed to overlap BL2.
-
-Depending on the location of the BL3-2 image, it will result in different memory
-maps, illustrated by the following diagrams.
-
-**BL3-2 in Trusted SRAM (default option):**
-
-                  Flash0
-    0x0C000000 +----------+
-               :          :
-    0x0BED0000 |----------|
-               | BL1 (ro) |
-    0x0BEC0000 |----------|
-               :          :
-    0x08000000 +----------+                  BL3-1 is loaded
-                                             after BL3-0 has
-               Trusted SRAM                  been sent to SCP
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL3-1 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |  BL3-0   |  <<<<<<<<<<<<<  | BL3-1 PROGBITS |
-               |----------|                 ------------------
-               |   BL2    |  <<<<<<<<<<<<<  |  BL3-2 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |          |  <<<<<<<<<<<<<  | BL3-2 PROGBITS |
-    0x04001000 +----------+                 ------------------
-               |   MHU    |
-    0x04000000 +----------+
-
-
-**BL3-2 in the secure region of DRAM:**
-
-                   DRAM
-    0xFFE00000 +----------+
-               |  BL3-2   |  (secure)
-    0xFF000000 |----------|
-               |          |
-               :          :  (non-secure)
-               |          |
-    0x80000000 +----------+
-
-                  Flash0
-    0x0C000000 +----------+
-               :          :
-    0x0BED0000 |----------|
-               | BL1 (ro) |
-    0x0BEC0000 |----------|
-               :          :
-    0x08000000 +----------+                  BL3-1 is loaded
-                                             after BL3-0 has
-               Trusted SRAM                  been sent to SCP
-    0x04040000 +----------+  loaded by BL2  ------------------
-               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL3-1 NOBITS  |
-               |----------|  <<<<<<<<<<<<<  |----------------|
-               |  BL3-0   |  <<<<<<<<<<<<<  | BL3-1 PROGBITS |
-               |----------|                 ------------------
-               |   BL2    |
-               |----------|
-               |          |
-    0x04001000 +----------+
-               |   MHU    |
-    0x04000000 +----------+
-
-Loading the BL3-2 image in DRAM doesn't change the memory layout of the other
-images in Trusted SRAM.
-
-
-10.  Firmware Image Package (FIP)
----------------------------------
-
-Using a Firmware Image Package (FIP) allows for packing bootloader images (and
-potentially other payloads) into a single archive that can be loaded by the ARM
-Trusted Firmware from non-volatile platform storage. A driver to load images
-from a FIP has been added to the storage layer and allows a package to be read
-from supported platform storage. A tool to create Firmware Image Packages is
-also provided and described below.
-
-### Firmware Image Package layout
-
-The FIP layout consists of a table of contents (ToC) followed by payload data.
-The ToC itself has a header followed by one or more table entries. The ToC is
-terminated by an end marker entry. All ToC entries describe some payload data
-that has been appended to the end of the binary package. With the information
-provided in the ToC entry the corresponding payload data can be retrieved.
-
-    ------------------
-    | ToC Header     |
-    |----------------|
-    | ToC Entry 0    |
-    |----------------|
-    | ToC Entry 1    |
-    |----------------|
-    | ToC End Marker |
-    |----------------|
-    |                |
-    |     Data 0     |
-    |                |
-    |----------------|
-    |                |
-    |     Data 1     |
-    |                |
-    ------------------
-
-The ToC header and entry formats are described in the header file
-`include/firmware_image_package.h`. This file is used by both the tool and the
-ARM Trusted firmware.
-
-The ToC header has the following fields:
-
-*   `name`: The name of the ToC. This is currently used to validate the header.
-*   `serial_number`: A non-zero number provided by the creation tool.
-*   `flags`: Flags associated with this data. None are yet defined.
-
-A ToC entry has the following fields:
-
-*   `uuid`: All files are referred to by a pre-defined Universally Unique
-    IDentifier [UUID]. The UUIDs are defined in
-    `include/firmware_image_package.h`. The platform translates the requested
-    image name into the corresponding UUID when accessing the package.
-*   `offset_address`: The offset address at which the corresponding payload data
-    can be found. The offset is calculated from the ToC base address.
-*   `size`: The size of the corresponding payload data in bytes.
-*   `flags`: Flags associated with this entry. None are yet defined.
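-
-As a reading aid, the ToC header and a ToC entry can be pictured as the C
-structures sketched below. Field widths are illustrative only; the
-authoritative definitions are in `include/firmware_image_package.h`:
-
-    #include <stdint.h>
-
-    /* Illustrative layout only; see include/firmware_image_package.h. */
-    typedef struct fip_toc_header {
-        uint32_t name;              /* used to validate the header            */
-        uint32_t serial_number;     /* non-zero number from the creation tool */
-        uint64_t flags;             /* no flags defined yet                   */
-    } fip_toc_header_t;
-
-    typedef struct fip_toc_entry {
-        uint8_t  uuid[16];          /* identifies the payload image           */
-        uint64_t offset_address;    /* payload offset from the ToC base       */
-        uint64_t size;              /* payload size in bytes                  */
-        uint64_t flags;             /* no flags defined yet                   */
-    } fip_toc_entry_t;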
-
-### Firmware Image Package creation tool
-
-The FIP creation tool can be used to pack specified images into a binary package
-that can be loaded by the ARM Trusted Firmware from platform storage. The tool
-currently only supports packing bootloader images. Additional image definitions
-can be added to the tool as required.
-
-The tool can be found in `tools/fip_create`.
-
-### Loading from a Firmware Image Package (FIP)
-
-The Firmware Image Package (FIP) driver can load images from a binary package on
-non-volatile platform storage. For the FVPs this is currently NOR FLASH.
-
-Bootloader images are loaded according to the platform policy as specified in
-`plat/<platform>/plat_io_storage.c`. For the FVPs this means the platform will
-attempt to load images from a Firmware Image Package located at the start of NOR
-FLASH0.
-
-Currently the FVP's policy only allows loading of a known set of images. The
-platform policy can be modified to allow additional images.
-
-
-11. Use of coherent memory in Trusted Firmware
-----------------------------------------------
-
-There might be loss of coherency when physical memory with mismatched
-shareability, cacheability and memory attributes is accessed by multiple CPUs
-(refer to section B2.9 of [ARM ARM] for more details). This possibility occurs
-in Trusted Firmware during power up/down sequences when coherency, MMU and
-caches are turned on/off incrementally.
-
-Trusted Firmware defines coherent memory as a region of memory with Device
-nGnRE attributes in the translation tables. The translation granule size in
-Trusted Firmware is 4KB. This is the smallest possible size of the coherent
-memory region.
-
-By default, all data structures which are susceptible to accesses with
-mismatched attributes from various CPUs are allocated in a coherent memory
-region (refer to section 2.1 of [Porting Guide]). The coherent memory region
-accesses are Outer Shareable, non-cacheable and they can be accessed
-with the Device nGnRE attributes when the MMU is turned on. Hence, at the
-expense of at least an extra page of memory, Trusted Firmware is able to work
-around coherency issues due to mismatched memory attributes.
-
-The alternative to the above approach is to allocate the susceptible data
-structures in Normal WriteBack WriteAllocate Inner shareable memory. This
-approach requires the data structures to be designed so that it is possible to
-work around the issue of mismatched memory attributes by performing software
-cache maintenance on them.
-
-### Disabling the use of coherent memory in Trusted Firmware
-
-It might be desirable to avoid the cost of allocating coherent memory on
-platforms which are memory constrained. Trusted Firmware enables inclusion of
-coherent memory in firmware images through the build flag `USE_COHERENT_MEM`.
-This flag is enabled by default. It can be disabled to choose the second
-approach described above.
-
-The below sections analyze the data structures allocated in the coherent memory
-region and the changes required to allocate them in normal memory.
-
-### PSCI Affinity map nodes
-
-The `psci_aff_map` data structure stores the hierarchical node information for
-each affinity level in the system including the PSCI states associated with them.
-By default, this data structure is allocated in the coherent memory region in
-the Trusted Firmware because it can be accessed by multiple CPUs, either with
-their caches enabled or disabled.
-
-	typedef struct aff_map_node {
-		unsigned long mpidr;
-		unsigned char ref_count;
-		unsigned char state;
-		unsigned char level;
-	#if USE_COHERENT_MEM
-		bakery_lock_t lock;
-	#else
-		unsigned char aff_map_index;
-	#endif
-	} aff_map_node_t;
-
-In order to move this data structure to normal memory, the use of each of its
-fields must be analyzed. Fields like `mpidr` and `level` are only written once
-during cold boot. Hence removing them from coherent memory involves only doing
-a clean and invalidate of the cache lines after these fields are written.
-
-The fields `state` and `ref_count` can be concurrently accessed by multiple
-CPUs in different cache states. A Lamport's Bakery lock is used to ensure mutual
-exclusion to these fields. As a result, it is possible to move these fields out
-of coherent memory by performing software cache maintenance on them. The field
-`lock` is the bakery lock data structure when `USE_COHERENT_MEM` is enabled.
-The `aff_map_index` is used to identify the bakery lock when `USE_COHERENT_MEM`
-is disabled.
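-
-For example, the write-once fields could be published to CPUs that read with
-their caches disabled along the lines of the sketch below, assuming a
-`flush_dcache_range()`-style cache helper:
-
-    /* Sketch: make the write-once fields visible to cache-disabled readers. */
-    static void aff_map_node_init(aff_map_node_t *node, unsigned long mpidr,
-                                  unsigned char level)
-    {
-        node->mpidr = mpidr;
-        node->level = level;
-
-        /* Clean and invalidate so the values reach main memory. */
-        flush_dcache_range((uintptr_t)node, sizeof(*node));
-    }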
-
-### Bakery lock data
-
-The bakery lock data structure `bakery_lock_t` is allocated in coherent memory
-and is accessed by multiple CPUs with mismatched attributes. `bakery_lock_t` is
-defined as follows:
-
-    typedef struct bakery_lock {
-        int owner;
-        volatile char entering[BAKERY_LOCK_MAX_CPUS];
-        volatile unsigned number[BAKERY_LOCK_MAX_CPUS];
-    } bakery_lock_t;
-
-It is a characteristic of Lamport's Bakery algorithm that the volatile per-CPU
-fields can be read by all CPUs but only written to by the owning CPU.
-
-Depending upon the data cache line size, the per-CPU fields of the
-`bakery_lock_t` structure for multiple CPUs may exist on a single cache line.
-These per-CPU fields can be read and written during lock contention by multiple
-CPUs with mismatched memory attributes. Since these fields are a part of the
-lock implementation, they do not have access to any other locking primitive to
-safeguard against the resulting coherency issues. As a result, simple software
-cache maintenance is not enough to allocate them in normal memory. Consider
-the following example.
-
-CPU0 updates its per-CPU field with data cache enabled. This write updates a
-local cache line which contains a copy of the fields for other CPUs as well. Now
-CPU1 updates its per-CPU field of the `bakery_lock_t` structure with data cache
-disabled. CPU1 then issues a DCIVAC operation to invalidate any stale copies of
-its field in any other cache line in the system. This operation will invalidate
-the update made by CPU0 as well.
-
-To use bakery locks when `USE_COHERENT_MEM` is disabled, the lock data structure
-has been redesigned. The changes utilise the characteristic of Lamport's Bakery
-algorithm mentioned earlier. The per-CPU fields of the new lock structure are
-aligned such that they are allocated on separate cache lines. The per-CPU data
-framework in Trusted Firmware is used to achieve this. This enables software to
-perform software cache maintenance on the lock data structure without running
-into coherency issues associated with mismatched attributes.
-
-The per-CPU data framework enables consolidation of data structures on the
-fewest cache lines possible. This saves memory as compared to the scenario where
-each data structure is separately aligned to the cache line boundary to achieve
-the same effect.
-
-The bakery lock data structure `bakery_info_t` is defined for use when
-`USE_COHERENT_MEM` is disabled as follows:
-
-    typedef struct bakery_info {
-        /*
-         * The lock_data is a bit-field of 2 members:
-         * Bit[0]       : choosing. This field is set when the CPU is
-         *                choosing its bakery number.
-         * Bits[1 - 15] : number. This is the bakery number allocated.
-         */
-         volatile uint16_t lock_data;
-    } bakery_info_t;
-
-The `bakery_info_t` represents a single per-CPU field of one lock and
-the combination of corresponding `bakery_info_t` structures for all CPUs in the
-system represents the complete bakery lock. It is embedded in the per-CPU
-data framework `cpu_data` as shown below:
-
-      CPU0 cpu_data
-    ------------------
-    | ....           |
-    |----------------|
-    | `bakery_info_t`| <-- Lock_0 per-CPU field
-    |    Lock_0      |     for CPU0
-    |----------------|
-    | `bakery_info_t`| <-- Lock_1 per-CPU field
-    |    Lock_1      |     for CPU0
-    |----------------|
-    | ....           |
-    |----------------|
-    | `bakery_info_t`| <-- Lock_N per-CPU field
-    |    Lock_N      |     for CPU0
-    ------------------
-
-
-      CPU1 cpu_data
-    ------------------
-    | ....           |
-    |----------------|
-    | `bakery_info_t`| <-- Lock_0 per-CPU field
-    |    Lock_0      |     for CPU1
-    |----------------|
-    | `bakery_info_t`| <-- Lock_1 per-CPU field
-    |    Lock_1      |     for CPU1
-    |----------------|
-    | ....           |
-    |----------------|
-    | `bakery_info_t`| <-- Lock_N per-CPU field
-    |    Lock_N      |     for CPU1
-    ------------------
-
-Consider a system of 2 CPUs with 'N' bakery locks as shown above.  For an
-operation on Lock_N, the corresponding `bakery_info_t` in both CPU0 and CPU1
-`cpu_data` need to be fetched and appropriate cache operations need to be
-performed for each access.
-
-For multiple bakery locks, an array of `bakery_info_t` is declared in `cpu_data`
-and each lock is given an `id` to identify it in the array.
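-
-Conceptually, a lock operation locates one per-CPU slot per participating CPU.
-The sketch below illustrates this with hypothetical names (`sketch_percpu_data`
-and `percpu_bakery_info` stand in for the real per-CPU data framework):
-
-    #define SKETCH_MAX_CPUS   8
-    #define SKETCH_MAX_LOCKS  4
-
-    /* Hypothetical stand-in for the cpu_data array and its embedded locks. */
-    static struct {
-        bakery_info_t percpu_bakery_info[SKETCH_MAX_LOCKS];
-        /* ... other per-CPU data ... */
-    } sketch_percpu_data[SKETCH_MAX_CPUS];
-
-    static bakery_info_t *get_bakery_info(unsigned int cpu, unsigned int lock_id)
-    {
-        return &sketch_percpu_data[cpu].percpu_bakery_info[lock_id];
-    }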
-
-### Non Functional Impact of removing coherent memory
-
-Removal of the coherent memory region leads to the additional software overhead
-of performing cache maintenance for the affected data structures. However, since
-the memory where the data structures are allocated is cacheable, this overhead
-is largely offset by the performance gained from cached accesses.
-
-There is however a performance impact for bakery locks, due to:
-*   Additional cache maintenance operations, and
-*   Multiple cache line reads for each lock operation, since the bakery locks
-    for each CPU are distributed across different cache lines.
-
-The implementation has been optimized to minimize this additional overhead.
-Measurements indicate that when bakery locks are allocated in Normal memory, the
-minimum latency of acquiring a lock is on average 3-4 microseconds, whereas in
-Device memory it is 2 microseconds. The measurements were done on the Juno ARM
-development platform.
-
-As mentioned earlier, almost a page of memory can be saved by disabling
-`USE_COHERENT_MEM`. Each platform needs to consider these trade-offs to decide
-whether coherent memory should be used. If a platform disables
-`USE_COHERENT_MEM` and needs to use bakery locks in the porting layer, it should
-reserve memory in `cpu_data` by defining the macro `PLAT_PCPU_DATA_SIZE` (see
-the [Porting Guide]). Refer to the reference platform code for examples.
-
-
-12.  Code Structure
--------------------
-
-Trusted Firmware code is logically divided between the three boot loader
-stages mentioned in the previous sections. The code is also divided into the
-following categories (present as directories in the source code):
-
-*   **Architecture specific.** This could be AArch32 or AArch64.
-*   **Platform specific.** Choice of architecture specific code depends upon
-    the platform.
-*   **Common code.** This is platform and architecture agnostic code.
-*   **Library code.** This code comprises functionality commonly used by all
-    other code.
-*   **Stage specific.** Code specific to a boot stage.
-*   **Drivers.**
-*   **Services.** EL3 runtime services, e.g. PSCI or SPD. Specific SPD services
-    reside in the `services/spd` directory (e.g. `services/spd/tspd`).
-
-Each boot loader stage uses code from one or more of the above mentioned
-categories. Based upon the above, the code layout looks like this:
-
-    Directory    Used by BL1?    Used by BL2?    Used by BL3-1?
-    bl1          Yes             No              No
-    bl2          No              Yes             No
-    bl31         No              No              Yes
-    arch         Yes             Yes             Yes
-    plat         Yes             Yes             Yes
-    drivers      Yes             No              Yes
-    common       Yes             Yes             Yes
-    lib          Yes             Yes             Yes
-    services     No              No              Yes
-
-The build system provides a non-configurable build option `IMAGE_BLx` for each
-boot loader stage (where x = BL stage). For example, for BL1, `IMAGE_BL1` will
-be defined by the build system. This enables the Trusted Firmware to compile
-certain code only for specific boot loader stages.
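-
-A minimal sketch of how such a definition is typically consumed:
-
-    #ifdef IMAGE_BL1
-    /* This code is compiled into the BL1 image only. */
-    #endif
-
-    #ifdef IMAGE_BL31
-    /* This code is compiled into the BL3-1 image only. */
-    #endif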
-
-All assembler files have the `.S` extension. The linker source files for each
-boot stage have the extension `.ld.S`. These are processed by GCC to create the
-linker scripts which have the extension `.ld`.
-
-FDTs provide a description of the hardware platform and are used by the Linux
-kernel at boot time. These can be found in the `fdts` directory.
-
-
-13.  References
----------------
-
-1.  Trusted Board Boot Requirements CLIENT PDD (ARM DEN 0006B-5). Available
-    under NDA through your ARM account representative.
-
-2.  [Power State Coordination Interface PDD (ARM DEN 0022B.b)][PSCI].
-
-3.  [SMC Calling Convention PDD (ARM DEN 0028A)][SMCCC].
-
-4.  [ARM Trusted Firmware Interrupt Management Design guide][INTRG].
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved._
-
-[ARM ARM]:          http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0487a.e/index.html "ARMv8-A Reference Manual (ARM DDI0487A.E)"
-[PSCI]:             http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
-[SMCCC]:            http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
-[UUID]:             https://tools.ietf.org/rfc/rfc4122.txt "A Universally Unique IDentifier (UUID) URN Namespace"
-[User Guide]:       ./user-guide.md
-[Porting Guide]:    ./porting-guide.md
-[INTRG]:            ./interrupt-framework-design.md
-[CPUBM]:            ./cpu-specific-build-macros.md
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
new file mode 100644
index 0000000..7cc1970
--- /dev/null
+++ b/docs/firmware-design.rst
@@ -0,0 +1,2599 @@
+ARM Trusted Firmware Design
+===========================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+The ARM Trusted Firmware implements a subset of the Trusted Board Boot
+Requirements (TBBR) Platform Design Document (PDD) [1]_ for ARM reference
+platforms. The TBB sequence starts when the platform is powered on and runs up
+to the stage where it hands-off control to firmware running in the normal
+world in DRAM. This is the cold boot path.
+
+The ARM Trusted Firmware also implements the Power State Coordination Interface
+PDD [2]_ as a runtime service. PSCI is the interface from normal world software
+to firmware implementing power management use-cases (for example, secondary CPU
+boot, hotplug and idle). Normal world software can access ARM Trusted Firmware
+runtime services via the ARM SMC (Secure Monitor Call) instruction. The SMC
+instruction must be used as mandated by the SMC Calling Convention [3]_.
+
+The ARM Trusted Firmware implements a framework for configuring and managing
+interrupts generated in either security state. The details of the interrupt
+management framework and its design can be found in ARM Trusted Firmware
+Interrupt Management Design guide [4]_.
+
+The ARM Trusted Firmware also implements a library for setting up and managing
+the translation tables. The details of this library can be found in
+`Xlat_tables design`_.
+
+The ARM Trusted Firmware can be built to support either AArch64 or AArch32
+execution state.
+
+Cold boot
+---------
+
+The cold boot path starts when the platform is physically turned on. If
+``COLD_BOOT_SINGLE_CPU=0``, one of the CPUs released from reset is chosen as the
+primary CPU, and the remaining CPUs are considered secondary CPUs. The primary
+CPU is chosen through platform-specific means. The cold boot path is mainly
+executed by the primary CPU, other than essential CPU initialization executed by
+all CPUs. The secondary CPUs are kept in a safe platform-specific state until
+the primary CPU has performed enough initialization to boot them.
+
+Refer to the `Reset Design`_ for more information on the effect of the
+``COLD_BOOT_SINGLE_CPU`` platform build option.
+
+The cold boot path in this implementation of the ARM Trusted Firmware
+depends on the execution state.
+For AArch64, it is divided into five steps (in order of execution):
+
+-  Boot Loader stage 1 (BL1) *AP Trusted ROM*
+-  Boot Loader stage 2 (BL2) *Trusted Boot Firmware*
+-  Boot Loader stage 3-1 (BL31) *EL3 Runtime Software*
+-  Boot Loader stage 3-2 (BL32) *Secure-EL1 Payload* (optional)
+-  Boot Loader stage 3-3 (BL33) *Non-trusted Firmware*
+
+For AArch32, it is divided into four steps (in order of execution):
+
+-  Boot Loader stage 1 (BL1) *AP Trusted ROM*
+-  Boot Loader stage 2 (BL2) *Trusted Boot Firmware*
+-  Boot Loader stage 3-2 (BL32) *EL3 Runtime Software*
+-  Boot Loader stage 3-3 (BL33) *Non-trusted Firmware*
+
+ARM development platforms (Fixed Virtual Platforms (FVPs) and Juno) implement a
+combination of the following types of memory regions. Each bootloader stage uses
+one or more of these memory regions.
+
+-  Regions accessible from both non-secure and secure states. For example,
+   non-trusted SRAM, ROM and DRAM.
+-  Regions accessible from only the secure state. For example, trusted SRAM and
+   ROM. The FVPs also implement the trusted DRAM which is statically
+   configured. Additionally, the Base FVPs and Juno development platform
+   configure the TrustZone Controller (TZC) to create a region in the DRAM
+   which is accessible only from the secure state.
+
+The sections below provide the following details:
+
+-  initialization and execution of the first three stages during cold boot
+-  specification of the EL3 Runtime Software (BL31 for AArch64 and BL32 for
+   AArch32) entrypoint requirements for use by alternative Trusted Boot
+   Firmware in place of the provided BL1 and BL2
+
+BL1
+~~~
+
+This stage begins execution from the platform's reset vector at EL3. The reset
+address is platform dependent but it is usually located in a Trusted ROM area.
+The BL1 data section is copied to trusted SRAM at runtime.
+
+On the ARM development platforms, BL1 code starts execution from the reset
+vector defined by the constant ``BL1_RO_BASE``. The BL1 data section is copied
+to the top of trusted SRAM as defined by the constant ``BL1_RW_BASE``.
+
+The functionality implemented by this stage is as follows.
+
+Determination of boot path
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Whenever a CPU is released from reset, BL1 needs to distinguish between a warm
+boot and a cold boot. This is done using platform-specific mechanisms (see the
+``plat_get_my_entrypoint()`` function in the `Porting Guide`_). In the case of a
+warm boot, a CPU is expected to continue execution from a separate
+entrypoint. In the case of a cold boot, the secondary CPUs are placed in a safe
+platform-specific state (see the ``plat_secondary_cold_boot_setup()`` function in
+the `Porting Guide`_) while the primary CPU executes the remaining cold boot path
+as described in the following sections.
+
+This step only applies when ``PROGRAMMABLE_RESET_ADDRESS=0``. Refer to the
+`Reset Design`_ for more information on the effect of the
+``PROGRAMMABLE_RESET_ADDRESS`` platform build option.
+
+Architectural initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL1 performs minimal architectural initialization as follows.
+
+-  Exception vectors
+
+   BL1 sets up simple exception vectors for both synchronous and asynchronous
+   exceptions. The default behavior upon receiving an exception is to populate
+   a status code in the general purpose register ``X0/R0`` and call the
+   ``plat_report_exception()`` function (see the `Porting Guide`_). The status
+   code is one of:
+
+   For AArch64:
+
+   ::
+
+       0x0 : Synchronous exception from Current EL with SP_EL0
+       0x1 : IRQ exception from Current EL with SP_EL0
+       0x2 : FIQ exception from Current EL with SP_EL0
+       0x3 : System Error exception from Current EL with SP_EL0
+       0x4 : Synchronous exception from Current EL with SP_ELx
+       0x5 : IRQ exception from Current EL with SP_ELx
+       0x6 : FIQ exception from Current EL with SP_ELx
+       0x7 : System Error exception from Current EL with SP_ELx
+       0x8 : Synchronous exception from Lower EL using aarch64
+       0x9 : IRQ exception from Lower EL using aarch64
+       0xa : FIQ exception from Lower EL using aarch64
+       0xb : System Error exception from Lower EL using aarch64
+       0xc : Synchronous exception from Lower EL using aarch32
+       0xd : IRQ exception from Lower EL using aarch32
+       0xe : FIQ exception from Lower EL using aarch32
+       0xf : System Error exception from Lower EL using aarch32
+
+   For AArch32:
+
+   ::
+
+       0x10 : User mode
+       0x11 : FIQ mode
+       0x12 : IRQ mode
+       0x13 : SVC mode
+       0x16 : Monitor mode
+       0x17 : Abort mode
+       0x1a : Hypervisor mode
+       0x1b : Undefined mode
+       0x1f : System mode
+
+   The ``plat_report_exception()`` implementation on the ARM FVP port programs
+   the Versatile Express System LED register in the following format to
+   indicate the occurrence of an unexpected exception:
+
+   ::
+
+       SYS_LED[0]   - Security state (Secure=0/Non-Secure=1)
+       SYS_LED[2:1] - Exception Level (EL3=0x3, EL2=0x2, EL1=0x1, EL0=0x0)
+                      For AArch32 it is always 0x0
+       SYS_LED[7:3] - Exception Class (Sync/Async & origin). This is the value
+                      of the status code
+
+   A write to the LED register reflects in the System LEDs (S6LED0..7) in the
+   CLCD window of the FVP.
+
+   BL1 does not expect to receive any exceptions other than the SMC exception.
+   For the latter, BL1 installs a simple stub. The stub expects to receive a
+   limited set of SMC types (determined by their function IDs in the general
+   purpose register ``X0/R0``):
+
+   -  ``BL1_SMC_RUN_IMAGE``: This SMC is raised by BL2 to make BL1 pass control
+      to EL3 Runtime Software.
+   -  All SMCs listed in section "BL1 SMC Interface" in the `Firmware Update`_
+      Design Guide are supported for AArch64 only. These SMCs are currently
+      not supported when BL1 is built for AArch32.
+
+   Any other SMC leads to an assertion failure.
+
+-  CPU initialization
+
+   BL1 calls the ``reset_handler()`` function which in turn calls the CPU
+   specific reset handler function (see the section: "CPU specific operations
+   framework").
+
+-  Control register setup (for AArch64)
+
+   -  ``SCTLR_EL3``. Instruction cache is enabled by setting the ``SCTLR_EL3.I``
+      bit. Alignment and stack alignment checking is enabled by setting the
+      ``SCTLR_EL3.A`` and ``SCTLR_EL3.SA`` bits. Exception endianness is set to
+      little-endian by clearing the ``SCTLR_EL3.EE`` bit.
+
+   -  ``SCR_EL3``. The register width of the next lower exception level is set
+      to AArch64 by setting the ``SCR.RW`` bit. The ``SCR.EA`` bit is set to trap
+      both External Aborts and SError Interrupts in EL3. The ``SCR.SIF`` bit is
+      also set to disable instruction fetches from Non-secure memory when in
+      secure state.
+
+   -  ``CPTR_EL3``. Accesses to the ``CPACR_EL1`` register from EL1 or EL2, or the
+      ``CPTR_EL2`` register from EL2 are configured to not trap to EL3 by
+      clearing the ``CPTR_EL3.TCPAC`` bit. Access to the trace functionality is
+      configured not to trap to EL3 by clearing the ``CPTR_EL3.TTA`` bit.
+      Instructions that access the registers associated with Floating Point
+      and Advanced SIMD execution are configured to not trap to EL3 by
+      clearing the ``CPTR_EL3.TFP`` bit.
+
+   -  ``DAIF``. The SError interrupt is enabled by clearing the SError interrupt
+      mask bit.
+
+   -  ``MDCR_EL3``. The trap controls, ``MDCR_EL3.TDOSA``, ``MDCR_EL3.TDA`` and
+      ``MDCR_EL3.TPM``, are set so that accesses to the registers they control
+      do not trap to EL3. AArch64 Secure self-hosted debug is disabled by
+      setting the ``MDCR_EL3.SDD`` bit. Also ``MDCR_EL3.SPD32`` is set to
+      disable AArch32 Secure self-hosted privileged debug from S-EL1.
+
+-  Control register setup (for AArch32)
+
+   -  ``SCTLR``. Instruction cache is enabled by setting the ``SCTLR.I`` bit.
+      Alignment checking is enabled by setting the ``SCTLR.A`` bit.
+      Exception endianness is set to little-endian by clearing the
+      ``SCTLR.EE`` bit.
+
+   -  ``SCR``. The ``SCR.SIF`` bit is set to disable instruction fetches from
+      Non-secure memory when in secure state.
+
+   -  ``CPACR``. Allow execution of Advanced SIMD instructions at PL0 and PL1,
+      by clearing the ``CPACR.ASEDIS`` bit. Access to the trace functionality
+      is configured not to trap to undefined mode by clearing the
+      ``CPACR.TRCDIS`` bit.
+
+   -  ``NSACR``. Enable non-secure access to Advanced SIMD functionality and
+      system register access to implemented trace registers.
+
+   -  ``FPEXC``. Enable access to the Advanced SIMD and floating-point
+      functionality from all Exception levels.
+
+   -  ``CPSR.A``. The Asynchronous data abort interrupt is enabled by clearing
+      the Asynchronous data abort interrupt mask bit.
+
+   -  ``SDCR``. The ``SDCR.SPD`` field is set to disable AArch32 Secure
+      self-hosted privileged debug.
+
+Platform initialization
+^^^^^^^^^^^^^^^^^^^^^^^
+
+On ARM platforms, BL1 performs the following platform initializations:
+
+-  Enable the Trusted Watchdog.
+-  Initialize the console.
+-  Configure the Interconnect to enable hardware coherency.
+-  Enable the MMU and map the memory it needs to access.
+-  Configure any required platform storage to load the next bootloader image
+   (BL2).
+
+Firmware Update detection and execution
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+After performing platform setup, BL1 common code calls
+``bl1_plat_get_next_image_id()`` to determine if `Firmware Update`_ is required or
+to proceed with the normal boot process. If the platform code returns
+``BL2_IMAGE_ID`` then the normal boot sequence is executed as described in the
+next section, else BL1 assumes that `Firmware Update`_ is required and execution
+passes to the first image in the `Firmware Update`_ process. In either case, BL1
+retrieves a descriptor of the next image by calling ``bl1_plat_get_image_desc()``.
+The image descriptor contains an ``entry_point_info_t`` structure, which BL1
+uses to initialize the execution state of the next image.
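+
+The decision can be pictured with the simplified C sketch below. It only
+mirrors the behaviour described above and is not the actual BL1
+implementation:
+
+::
+
+    /* Simplified sketch of the boot/Firmware Update decision. */
+    static void bl1_choose_next_image(void)
+    {
+        unsigned int image_id = bl1_plat_get_next_image_id();
+        image_desc_t *desc = bl1_plat_get_image_desc(image_id);
+
+        if (image_id == BL2_IMAGE_ID) {
+            /* Normal boot: load BL2 and continue the cold boot path. */
+        } else {
+            /* Firmware Update: pass control to the first FWU image. */
+        }
+
+        /* The descriptor's entry_point_info_t seeds the next image's state. */
+        (void)desc;
+    }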
+
+BL2 image load and execution
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the normal boot flow, BL1 execution continues as follows:
+
+#. BL1 prints the following string from the primary CPU to indicate successful
+   execution of the BL1 stage:
+
+   ::
+
+       "Booting Trusted Firmware"
+
+#. BL1 determines the amount of free trusted SRAM memory available by
+   calculating the extent of its own data section, which also resides in
+   trusted SRAM. BL1 loads a BL2 raw binary image from platform storage, at a
+   platform-specific base address. If the BL2 image file is not present or if
+   there is not enough free trusted SRAM the following error message is
+   printed:
+
+   ::
+
+       "Failed to load BL2 firmware."
+
+   BL1 calculates the amount of Trusted SRAM that can be used by the BL2
+   image. The exact load location of the image is provided as a base address
+   in the platform header. Further description of the memory layout can be
+   found later in this document.
+
+#. BL1 passes control to the BL2 image at Secure EL1 (for AArch64) or at
+   Secure SVC mode (for AArch32), starting from its load address.
+
+#. BL1 also passes information about the amount of trusted SRAM used and
+   available for use. This information is populated at a platform-specific
+   memory address.
+
+BL2
+~~~
+
+BL1 loads and passes control to BL2 at Secure-EL1 (for AArch64) or at Secure
+SVC mode (for AArch32). BL2 is linked against and loaded at a platform-specific
+base address (more information can be found later in this document).
+The functionality implemented by BL2 is as follows.
+
+Architectural initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For AArch64, BL2 performs the minimal architectural initialization required
+for subsequent stages of the ARM Trusted Firmware and normal world software.
+EL1 and EL0 are given access to Floating Point and Advanced SIMD registers
+by clearing the ``CPACR.FPEN`` bits.
+
+For AArch32, the minimal architectural initialization required for subsequent
+stages of the ARM Trusted Firmware and normal world software is taken care of
+in BL1 as both BL1 and BL2 execute at PL1.
+
+Platform initialization
+^^^^^^^^^^^^^^^^^^^^^^^
+
+On ARM platforms, BL2 performs the following platform initializations:
+
+-  Initialize the console.
+-  Configure any required platform storage to allow loading further bootloader
+   images.
+-  Enable the MMU and map the memory it needs to access.
+-  Perform platform security setup to allow access to controlled components.
+-  Reserve some memory for passing information to the next bootloader image
+   EL3 Runtime Software and populate it.
+-  Define the extents of memory available for loading each subsequent
+   bootloader image.
+
+Image loading in BL2
+^^^^^^^^^^^^^^^^^^^^
+
+The image loading scheme in BL2 depends on the ``LOAD_IMAGE_V2`` build option.
+If the flag is disabled, the BLxx images are loaded by calling the respective
+load\_blxx() function from BL2 generic code. If the flag is enabled, the BL2
+generic code loads the images based on the list of loadable images provided
+by the platform. BL2 passes the list of executable images provided by the
+platform to the next handover BL image. By default, this flag is disabled for
+AArch64 and the AArch32 build is supported only if this flag is enabled.
+
+SCP\_BL2 (System Control Processor Firmware) image load
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Some systems have a separate System Control Processor (SCP) for power, clock,
+reset and system control. BL2 loads the optional SCP\_BL2 image from platform
+storage into a platform-specific region of secure memory. The subsequent
+handling of SCP\_BL2 is platform specific. For example, on the Juno ARM
+development platform port the image is transferred into SCP's internal memory
+using the Boot Over MHU (BOM) protocol after being loaded in the trusted SRAM
+memory. The SCP executes SCP\_BL2 and signals to the Application Processor (AP)
+for BL2 execution to continue.
+
+EL3 Runtime Software image load
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL2 loads the EL3 Runtime Software image from platform storage into a platform-
+specific address in trusted SRAM. If there is not enough memory to load the
+image, or the image is missing, it leads to an assertion failure. If ``LOAD_IMAGE_V2``
+is disabled and if image loads successfully, BL2 updates the amount of trusted
+SRAM used and available for use by EL3 Runtime Software. This information is
+populated at a platform-specific memory address.
+
+AArch64 BL32 (Secure-EL1 Payload) image load
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL2 loads the optional BL32 image from platform storage into a platform-
+specific region of secure memory. The image executes in the secure world. BL2
+relies on BL31 to pass control to the BL32 image, if present. Hence, BL2
+populates a platform-specific area of memory with the entrypoint/load-address
+of the BL32 image. The value of the Saved Processor Status Register (``SPSR``)
+for entry into BL32 is not determined by BL2; it is initialized by the
+Secure-EL1 Payload Dispatcher (see later) within BL31, which is responsible for
+managing interaction with BL32. This information is passed to BL31.
+
+BL33 (Non-trusted Firmware) image load
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL2 loads the BL33 image (e.g. UEFI or other test or boot software) from
+platform storage into non-secure memory as defined by the platform.
+
+BL2 relies on EL3 Runtime Software to pass control to BL33 once secure state
+initialization is complete. Hence, BL2 populates a platform-specific area of
+memory with the entrypoint and Saved Program Status Register (``SPSR``) of the
+normal world software image. The entrypoint is the load address of the BL33
+image. The ``SPSR`` is determined as specified in Section 5.13 of the
+`PSCI PDD`_. This information is passed to the EL3 Runtime Software.
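+
+As an illustration, the ``SPSR`` for BL33 is typically built from the highest
+implemented non-secure Exception Level, roughly as sketched below. ``SPSR_64``
+and the ``MODE_*``/``DISABLE_ALL_EXCEPTIONS`` names follow the Trusted Firmware
+helper macros, while ``el2_is_implemented`` is a placeholder for the platform's
+EL2 detection:
+
+::
+
+    /* Sketch: enter BL33 at EL2 if implemented, otherwise at EL1. */
+    unsigned int mode = el2_is_implemented ? MODE_EL2 : MODE_EL1;
+    uint32_t spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);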
+
+AArch64 BL31 (EL3 Runtime Software) execution
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL2 execution continues as follows:
+
+#. BL2 passes control back to BL1 by raising an SMC, providing BL1 with the
+   BL31 entrypoint. The exception is handled by the SMC exception handler
+   installed by BL1.
+
+#. BL1 turns off the MMU and flushes the caches. It clears the
+   ``SCTLR_EL3.M/I/C`` bits, flushes the data cache to the point of coherency
+   and invalidates the TLBs.
+
+#. BL1 passes control to BL31 at the specified entrypoint at EL3.
+
+AArch64 BL31
+~~~~~~~~~~~~
+
+The image for this stage is loaded by BL2 and BL1 passes control to BL31 at
+EL3. BL31 executes solely in trusted SRAM. BL31 is linked against and
+loaded at a platform-specific base address (more information can be found later
+in this document). The functionality implemented by BL31 is as follows.
+
+Architectural initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Currently, BL31 performs a similar architectural initialization to BL1 as
+far as system register settings are concerned. Since BL1 code resides in ROM,
+architectural initialization in BL31 allows override of any previous
+initialization done by BL1.
+
+BL31 initializes the per-CPU data framework, which provides a cache of
+frequently accessed per-CPU data optimised for fast, concurrent manipulation
+on different CPUs. This buffer includes pointers to per-CPU contexts, crash
+buffer, CPU reset and power down operations, PSCI data, platform data and so on.
+
+It then replaces the exception vectors populated by BL1 with its own. BL31
+exception vectors implement more elaborate support for handling SMCs since this
+is the only mechanism to access the runtime services implemented by BL31 (PSCI
+for example). BL31 checks each SMC for validity as specified by the
+`SMC calling convention PDD`_ before passing control to the required SMC
+handler routine.
+
+BL31 programs the ``CNTFRQ_EL0`` register with the clock frequency of the system
+counter, which is provided by the platform.
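+
+In outline, this amounts to a single system register write; the accessor and
+platform hook names below are indicative only:
+
+::
+
+    /* Sketch: the system counter frequency is queried from the platform. */
+    write_cntfrq_el0(plat_get_syscnt_freq());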
+
+Platform initialization
+^^^^^^^^^^^^^^^^^^^^^^^
+
+BL31 performs detailed platform initialization, which enables normal world
+software to function correctly.
+
+On ARM platforms, this consists of the following:
+
+-  Initialize the console.
+-  Configure the Interconnect to enable hardware coherency.
+-  Enable the MMU and map the memory it needs to access.
+-  Initialize the generic interrupt controller.
+-  Initialize the power controller device.
+-  Detect the system topology.
+
+Runtime services initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+BL31 is responsible for initializing the runtime services. One of them is PSCI.
+
+As part of the PSCI initializations, BL31 detects the system topology. It also
+initializes the data structures that implement the state machine used to track
+the state of power domain nodes. The state can be one of ``OFF``, ``RUN`` or
+``RETENTION``. All secondary CPUs are initially in the ``OFF`` state. The cluster
+that the primary CPU belongs to is ``ON``; any other cluster is ``OFF``. It also
+initializes the locks that protect them. BL31 accesses the state of a CPU or
+cluster immediately after reset and before the data cache is enabled in the
+warm boot path. It is not currently possible to use 'exclusive' based spinlocks;
+therefore BL31 uses locks based on Lamport's Bakery algorithm instead.
+
+The runtime service framework and its initialization is described in more
+detail in the "EL3 runtime services framework" section below.
+
+Details about the status of the PSCI implementation are provided in the
+"Power State Coordination Interface" section below.
+
+AArch64 BL32 (Secure-EL1 Payload) image initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If a BL32 image is present then there must be a matching Secure-EL1 Payload
+Dispatcher (SPD) service (see later for details). During initialization
+that service must register a function to carry out initialization of BL32
+once the runtime services are fully initialized. BL31 invokes such a
+registered function to initialize BL32 before running BL33. This initialization
+is not necessary for AArch32 SPs.
+
+Details on BL32 initialization and the SPD's role are described in the
+"Secure-EL1 Payloads and Dispatchers" section below.
+
+BL33 (Non-trusted Firmware) execution
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+EL3 Runtime Software initializes the EL2 or EL1 processor context for normal-
+world cold boot, ensuring that no secure state information finds its way into
+the non-secure execution state. EL3 Runtime Software uses the entrypoint
+information provided by BL2 to jump to the Non-trusted firmware image (BL33)
+at the highest available Exception Level (EL2 if available, otherwise EL1).
+
+Using alternative Trusted Boot Firmware in place of BL1 & BL2 (AArch64 only)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some platforms have existing implementations of Trusted Boot Firmware that
+would like to use ARM Trusted Firmware BL31 for the EL3 Runtime Software. To
+enable this firmware architecture it is important to provide a fully documented
+and stable interface between the Trusted Boot Firmware and BL31.
+
+Future changes to the BL31 interface will be done in a backwards compatible
+way, and this enables these firmware components to be independently enhanced/
+updated to develop and exploit new functionality.
+
+Required CPU state when calling ``bl31_entrypoint()`` during cold boot
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function must only be called by the primary CPU.
+
+On entry to this function the calling primary CPU must be executing in AArch64
+EL3, little-endian data access, and all interrupt sources masked:
+
+::
+
+    PSTATE.EL = 3
+    PSTATE.RW = 1
+    PSTATE.DAIF = 0xf
+    SCTLR_EL3.EE = 0
+
+X0 and X1 can be used to pass information from the Trusted Boot Firmware to the
+platform code in BL31:
+
+::
+
+    X0 : Reserved for common Trusted Firmware information
+    X1 : Platform specific information
+
+BL31 zero-init sections (e.g. ``.bss``) should not contain valid data on entry;
+they will be zero-filled prior to invoking platform setup code.
+
+Use of the X0 and X1 parameters
+'''''''''''''''''''''''''''''''
+
+The parameters are platform specific and passed from ``bl31_entrypoint()`` to
+``bl31_early_platform_setup()``. The value of these parameters is never directly
+used by the common BL31 code.
+
+The convention is that ``X0`` conveys information regarding the BL31, BL32 and
+BL33 images from the Trusted Boot firmware and ``X1`` can be used for other
+platform specific purposes. This convention allows platforms which use ARM
+Trusted Firmware's BL1 and BL2 images to transfer additional platform specific
+information from Secure Boot without conflicting with future evolution of the
+Trusted Firmware using ``X0`` to pass a ``bl31_params`` structure.
+
+BL31 common and SPD initialization code depends on image and entrypoint
+information about BL33 and BL32, which is provided via BL31 platform APIs.
+This information is required until the start of execution of BL33. This
+information can be provided in a platform defined manner, e.g. compiled into
+the platform code in BL31, or provided in a platform defined memory location
+by the Trusted Boot firmware, or passed from the Trusted Boot Firmware via the
+Cold boot Initialization parameters. This data may need to be cleaned out of
+the CPU caches if it is provided by an earlier boot stage and then accessed by
+BL31 platform code before the caches are enabled.
+
+ARM Trusted Firmware's BL2 implementation passes a ``bl31_params`` structure in
+``X0`` and the ARM development platforms interpret this in the BL31 platform
+code.
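+
+The following is a minimal sketch of how such platform code might interpret
+these parameters. It assumes the ``bl31_early_platform_setup()`` prototype used
+when LOAD\_IMAGE\_V2 is disabled, where the first argument is a
+``bl31_params_t`` pointer, and it is illustrative rather than a copy of the ARM
+platform implementation:
+
+.. code:: c
+
+    #include <assert.h>
+
+    void bl31_early_platform_setup(bl31_params_t *from_bl2,
+                                   void *plat_params_from_bl2)
+    {
+        /* X0: the bl31_params structure populated by BL2 */
+        assert(from_bl2 != NULL);
+        assert(from_bl2->h.type == PARAM_BL31);
+        assert(from_bl2->h.version >= VERSION_1);
+
+        /* X1: platform specific information, unused in this sketch */
+        (void)plat_params_from_bl2;
+    }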
+
+MMU, Data caches & Coherency
+''''''''''''''''''''''''''''
+
+BL31 does not depend on the enabled state of the MMU, data caches or
+interconnect coherency on entry to ``bl31_entrypoint()``. If these are disabled
+on entry, these should be enabled during ``bl31_plat_arch_setup()``.
+
+Data structures used in the BL31 cold boot interface
+''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+These structures are designed to support compatibility and independent
+evolution of the structures and the firmware images. For example, a version of
+BL31 that can interpret the BL3x image information from different versions of
+BL2, a platform that uses an extended ``entry_point_info`` structure to convey
+additional register information to BL31, or an ELF image loader that can convey
+more details about the firmware images.
+
+To support these scenarios the structures are versioned and sized, which enables
+BL31 to detect which information is present and respond appropriately. The
+``param_header`` is defined to capture this information:
+
+.. code:: c
+
+    typedef struct param_header {
+        uint8_t type;       /* type of the structure */
+        uint8_t version;    /* version of this structure */
+        uint16_t size;      /* size of this structure in bytes */
+        uint32_t attr;      /* attributes: unused bits SBZ */
+    } param_header_t;
+
+The structures using this format are ``entry_point_info``, ``image_info`` and
+``bl31_params``. The code that allocates and populates these structures must set
+the header fields appropriately; the ``SET_PARAM_HEAD()`` macro is defined
+to simplify this action.
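+
+For example, code populating the header of an ``entry_point_info`` structure
+could look like the following sketch (the constants are those defined in the
+Trusted Firmware headers; the variable name is illustrative):
+
+.. code:: c
+
+    entry_point_info_t bl33_ep_info;
+
+    /* Fill in the type, version, size and attributes of the structure */
+    SET_PARAM_HEAD(&bl33_ep_info, PARAM_EP, VERSION_1, 0);
+
+    /* Record the security state of the image in the attributes field */
+    SET_SECURITY_STATE(bl33_ep_info.h.attr, NON_SECURE);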
+
+Required CPU state for BL31 Warm boot initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When requesting a CPU power-on, or suspending a running CPU, ARM Trusted
+Firmware provides the platform power management code with a Warm boot
+initialization entry-point, to be invoked by the CPU immediately after the
+reset handler. On entry to the Warm boot initialization function the calling
+CPU must be in AArch64 EL3, little-endian data access and all interrupt sources
+masked:
+
+::
+
+    PSTATE.EL = 3
+    PSTATE.RW = 1
+    PSTATE.DAIF = 0xf
+    SCTLR_EL3.EE = 0
+
+The PSCI implementation will initialize the processor state and ensure that the
+platform power management code is then invoked as required to initialize all
+necessary system, cluster and CPU resources.
+
+AArch32 EL3 Runtime Software entrypoint interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To enable this firmware architecture it is important to provide a fully
+documented and stable interface between the Trusted Boot Firmware and the
+AArch32 EL3 Runtime Software.
+
+Future changes to the entrypoint interface will be done in a backwards
+compatible way, and this enables these firmware components to be independently
+enhanced/updated to develop and exploit new functionality.
+
+Required CPU state when entering during cold boot
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function must only be called by the primary CPU.
+
+On entry to this function the calling primary CPU must be executing in AArch32
+EL3, little-endian data access, and all interrupt sources masked:
+
+::
+
+    PSTATE.AIF = 0x7
+    SCTLR.EE = 0
+
+R0 and R1 are used to pass information from the Trusted Boot Firmware to the
+platform code in AArch32 EL3 Runtime Software:
+
+::
+
+    R0 : Reserved for common Trusted Firmware information
+    R1 : Platform specific information
+
+Use of the R0 and R1 parameters
+'''''''''''''''''''''''''''''''
+
+The parameters are platform specific and the convention is that ``R0`` conveys
+information regarding the BL3x images from the Trusted Boot firmware and ``R1``
+can be used for other platform specific purposes. This convention allows
+platforms which use ARM Trusted Firmware's BL1 and BL2 images to transfer
+additional platform specific information from Secure Boot without conflicting
+with future evolution of the Trusted Firmware using ``R0`` to pass a ``bl_params``
+structure.
+
+The AArch32 EL3 Runtime Software is responsible for entry into BL33. This
+information can be obtained in a platform defined manner, e.g. compiled into
+the AArch32 EL3 Runtime Software, or provided in a platform defined memory
+location by the Trusted Boot firmware, or passed from the Trusted Boot Firmware
+via the Cold boot Initialization parameters. This data may need to be cleaned
+out of the CPU caches if it is provided by an earlier boot stage and then
+accessed by AArch32 EL3 Runtime Software before the caches are enabled.
+
+When using AArch32 EL3 Runtime Software, the ARM development platforms pass a
+``bl_params`` structure in ``R0`` from BL2 to be interpreted by AArch32 EL3 Runtime
+Software platform code.
+
+MMU, Data caches & Coherency
+''''''''''''''''''''''''''''
+
+AArch32 EL3 Runtime Software must not depend on the enabled state of the MMU,
+data caches or interconnect coherency in its entrypoint. They must be explicitly
+enabled if required.
+
+Data structures used in cold boot interface
+'''''''''''''''''''''''''''''''''''''''''''
+
+The AArch32 EL3 Runtime Software cold boot interface uses ``bl_params`` instead
+of ``bl31_params``. The ``bl_params`` structure is based on the convention
+described in AArch64 BL31 cold boot interface section.
+
+Required CPU state for warm boot initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When requesting a CPU power-on, or suspending a running CPU, AArch32 EL3
+Runtime Software must ensure execution of a warm boot initialization entrypoint.
+If ARM Trusted Firmware BL1 is used and the PROGRAMMABLE\_RESET\_ADDRESS build
+flag is false, then AArch32 EL3 Runtime Software must ensure that BL1 branches
+to the warm boot entrypoint by arranging for the BL1 platform function,
+plat\_get\_my\_entrypoint(), to return a non-zero value.
+
+In this case, the warm boot entrypoint must be in AArch32 EL3, little-endian
+data access and all interrupt sources masked:
+
+::
+
+    PSTATE.AIF = 0x7
+    SCTLR.EE = 0
+
+The warm boot entrypoint may be implemented by using the ARM Trusted Firmware
+``psci_warmboot_entrypoint()`` function. In that case, the platform must fulfil
+the pre-requisites mentioned in the `PSCI Library integration guide`_.
+
+EL3 runtime services framework
+------------------------------
+
+Software executing in the non-secure state and in the secure state at exception
+levels lower than EL3 will request runtime services using the Secure Monitor
+Call (SMC) instruction. These requests will follow the convention described in
+the SMC Calling Convention PDD (`SMCCC`_). The `SMCCC`_ assigns function
+identifiers to each SMC request and describes how arguments are passed and
+returned.
+
+The EL3 runtime services framework enables the development of services by
+different providers that can be easily integrated into final product firmware.
+The following sections describe the framework which facilitates the
+registration, initialization and use of runtime services in EL3 Runtime
+Software (BL31).
+
+The design of the runtime services depends heavily on the concepts and
+definitions described in the `SMCCC`_, in particular SMC Function IDs, Owning
+Entity Numbers (OEN), Fast and Yielding calls, and the SMC32 and SMC64 calling
+conventions. Please refer to that document for more detailed explanation of
+these terms.
+
+The following runtime services are expected to be implemented first. They have
+not all been instantiated in the current implementation.
+
+#. Standard service calls
+
+   This service is for management of the entire system. The Power State
+   Coordination Interface (`PSCI`_) is the first set of standard service calls
+   defined by ARM (see PSCI section later).
+
+#. Secure-EL1 Payload Dispatcher service
+
+   If a system runs a Trusted OS or other Secure-EL1 Payload (SP) then
+   it also requires a *Secure Monitor* at EL3 to switch the EL1 processor
+   context between the normal world (EL1/EL2) and trusted world (Secure-EL1).
+   The Secure Monitor will make these world switches in response to SMCs. The
+   `SMCCC`_ provides for such SMCs with the Trusted OS Call and Trusted
+   Application Call OEN ranges.
+
+   The interface between the EL3 Runtime Software and the Secure-EL1 Payload is
+   not defined by the `SMCCC`_ or any other standard. As a result, each
+   Secure-EL1 Payload requires a specific Secure Monitor that runs as a runtime
+   service - within ARM Trusted Firmware this service is referred to as the
+   Secure-EL1 Payload Dispatcher (SPD).
+
+   ARM Trusted Firmware provides a Test Secure-EL1 Payload (TSP) and its
+   associated Dispatcher (TSPD). Details of SPD design and TSP/TSPD operation
+   are described in the "Secure-EL1 Payloads and Dispatchers" section below.
+
+#. CPU implementation service
+
+   This service will provide an interface to CPU implementation specific
+   services for a given platform e.g. access to processor errata workarounds.
+   This service is currently unimplemented.
+
+Additional services for ARM Architecture, SiP and OEM calls can be implemented.
+Each implemented service handles a range of SMC function identifiers as
+described in the `SMCCC`_.
+
+Registration
+~~~~~~~~~~~~
+
+A runtime service is registered using the ``DECLARE_RT_SVC()`` macro, specifying
+the name of the service, the range of OENs covered, the type of service and
+initialization and call handler functions. This macro instantiates a
+``const struct rt_svc_desc`` for the service with these details (see
+``runtime_svc.h``). This structure is allocated in a special ELF section,
+``rt_svc_descs``, enabling the framework to find all service descriptors
+included in BL31.
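+
+As an illustration, the standard service registers itself along the following
+lines (modelled on the registration in ``services/std_svc/std_svc_setup.c``;
+treat it as indicative rather than normative):
+
+.. code:: c
+
+    /* Register the Standard (PSCI) service for the fast call OEN range */
+    DECLARE_RT_SVC(
+        std_svc,                /* name of the service */
+        OEN_STD_START,          /* first OEN covered */
+        OEN_STD_END,            /* last OEN covered */
+        SMC_TYPE_FAST,          /* type of calls handled */
+        std_svc_setup,          /* initialization function */
+        std_svc_smc_handler     /* SMC handler function */
+    );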
+
+The specific service for a SMC Function is selected based on the OEN and call
+type of the Function ID, and the framework uses that information in the service
+descriptor to identify the handler for the SMC Call.
+
+The service descriptors do not include information to identify the precise set
+of SMC function identifiers supported by this service implementation, the
+security state from which such calls are valid nor the capability to support
+64-bit and/or 32-bit callers (using SMC32 or SMC64). Responding appropriately
+to these aspects of an SMC call is the responsibility of the service
+implementation; the framework is focused on integration of services from
+different providers and on minimizing the time taken by the framework before
+the service handler is invoked.
+
+Details of the parameters, requirements and behavior of the initialization and
+call handling functions are provided in the following sections.
+
+Initialization
+~~~~~~~~~~~~~~
+
+``runtime_svc_init()`` in ``runtime_svc.c`` initializes the runtime services
+framework running on the primary CPU during cold boot as part of the BL31
+initialization. This happens prior to initializing a Trusted OS and running
+Normal world boot firmware that might in turn use these services.
+Initialization involves validating each of the declared runtime service
+descriptors, calling the service initialization function and populating the
+index used for runtime lookup of the service.
+
+The BL31 linker script collects all of the declared service descriptors into a
+single array and defines symbols that allow the framework to locate and traverse
+the array, and determine its size.
+
+The framework does basic validation of each descriptor to halt firmware
+initialization if service declaration errors are detected. The framework does
+not check descriptors for the following error conditions, and may behave in an
+unpredictable manner under such scenarios:
+
+#. Overlapping OEN ranges
+#. Multiple descriptors for the same range of OENs and ``call_type``
+#. Incorrect range of owning entity numbers for a given ``call_type``
+
+Once validated, the service ``init()`` callback is invoked. This function carries
+out any essential EL3 initialization before servicing requests. The ``init()``
+function is only invoked on the primary CPU during cold boot. If the service
+uses per-CPU data this must either be initialized for all CPUs during this call,
+or be done lazily when a CPU first issues an SMC call to that service. If
+``init()`` returns anything other than ``0``, this is treated as an initialization
+error and the service is ignored: this does not cause the firmware to halt.
+
+The OEN and call type fields present in the SMC Function ID cover a total of
+128 distinct services, but in practice a single descriptor can cover a range of
+OENs, e.g. SMCs to call a Trusted OS function. To optimize the lookup of a
+service handler, the framework uses an array of 128 indices that map every
+distinct OEN/call-type combination either to one of the declared services or to
+indicate the service is not handled. This ``rt_svc_descs_indices[]`` array is
+populated for all of the OENs covered by a service after the service ``init()``
+function has reported success, so a service that fails to initialize will never
+have its ``handle()`` function invoked.
+
+The following figure shows how the ``rt_svc_descs_indices[]`` index maps the SMC
+Function ID call type and OEN onto a specific service handler in the
+``rt_svc_descs[]`` array.
+
+|Image 1|
+
+Handling an SMC
+~~~~~~~~~~~~~~~
+
+When the EL3 runtime services framework receives a Secure Monitor Call, the SMC
+Function ID is passed in W0 from the lower exception level (as per the
+`SMCCC`_). If the calling register width is AArch32, it is invalid to invoke an
+SMC Function which indicates the SMC64 calling convention: such calls are
+ignored and return the Unknown SMC Function Identifier result code ``0xFFFFFFFF``
+in R0/X0.
+
+Bit[31] (fast/yielding call) and bits[29:24] (owning entity number) of the SMC
+Function ID are combined to index into the ``rt_svc_descs_indices[]`` array. The
+resulting value might indicate a service that has no handler; in this case the
+framework will also report an Unknown SMC Function ID. Otherwise, the value is
+used as a further index into the ``rt_svc_descs[]`` array to locate the required
+service and handler.
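+
+The logic of this mapping can be summarised by the following sketch, which
+approximates the helper macro defined in ``runtime_svc.h``:
+
+.. code:: c
+
+    /* Combine the OEN (bits[29:24]) and the fast/yielding bit (bit[31]) of a
+     * Function ID into a unique index in the range 0 to 127. The oen and
+     * call_type arguments are assumed to have already been extracted from
+     * the Function ID. */
+    #define get_unique_oen(oen, call_type)                        \
+        (((oen) & FUNCID_OEN_MASK) |                              \
+         (((call_type) & FUNCID_TYPE_MASK) << FUNCID_OEN_WIDTH))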
+
+The service's ``handle()`` callback is provided with five of the SMC parameters
+directly; the others are saved into memory for retrieval (if needed) by the
+handler. The handler is also provided with an opaque ``handle`` for use with the
+supporting library for parameter retrieval, setting return values and context
+manipulation, and with ``flags`` indicating the security state of the caller. The
+framework finally sets up the execution stack for the handler, and invokes the
+service's ``handle()`` function.
+
+On return from the handler the result registers are populated in X0-X3 before
+restoring the stack and CPU state and returning from the original SMC.
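+
+Putting this together, a service handler has the following shape. The Function
+ID and service logic below are hypothetical; the handler prototype, the
+``is_caller_secure()`` helper and the ``SMC_RET1``/``SMC_UNK`` macros are the
+ones provided by the framework:
+
+.. code:: c
+
+    static uintptr_t example_svc_smc_handler(uint32_t smc_fid,
+                                             u_register_t x1, u_register_t x2,
+                                             u_register_t x3, u_register_t x4,
+                                             void *cookie, void *handle,
+                                             u_register_t flags)
+    {
+        /* The flags argument identifies the security state of the caller */
+        if (is_caller_secure(flags))
+            SMC_RET1(handle, SMC_UNK);
+
+        switch (smc_fid) {
+        case EXAMPLE_SVC_ADD:          /* hypothetical Function ID */
+            /* Populate X0/W0 with the result and return to the caller */
+            SMC_RET1(handle, x1 + x2);
+        default:
+            /* Unknown SMC Function Identifier */
+            SMC_RET1(handle, SMC_UNK);
+        }
+    }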
+
+Power State Coordination Interface
+----------------------------------
+
+TODO: Provide design walkthrough of PSCI implementation.
+
+The PSCI v1.1 specification categorizes APIs as optional and mandatory. All the
+mandatory APIs in PSCI v1.1, PSCI v1.0 and in PSCI v0.2 draft specification
+`Power State Coordination Interface PDD`_ are implemented. The table lists
+the PSCI v1.1 APIs and their support in generic code.
+
+An API implementation might have a dependency on platform code e.g. CPU\_SUSPEND
+requires the platform to export a part of the implementation. Hence the level
+of support of the mandatory APIs depends upon the support exported by the
+platform port as well. The Juno and FVP (all variants) platforms export all the
+required support.
+
++-----------------------------+-------------+-------------------------------+
+| PSCI v1.1 API               | Supported   | Comments                      |
++=============================+=============+===============================+
+| ``PSCI_VERSION``            | Yes         | The version returned is 1.1   |
++-----------------------------+-------------+-------------------------------+
+| ``CPU_SUSPEND``             | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``CPU_OFF``                 | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``CPU_ON``                  | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``AFFINITY_INFO``           | Yes         |                               |
++-----------------------------+-------------+-------------------------------+
+| ``MIGRATE``                 | Yes\*\*     |                               |
++-----------------------------+-------------+-------------------------------+
+| ``MIGRATE_INFO_TYPE``       | Yes\*\*     |                               |
++-----------------------------+-------------+-------------------------------+
+| ``MIGRATE_INFO_CPU``        | Yes\*\*     |                               |
++-----------------------------+-------------+-------------------------------+
+| ``SYSTEM_OFF``              | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``SYSTEM_RESET``            | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``PSCI_FEATURES``           | Yes         |                               |
++-----------------------------+-------------+-------------------------------+
+| ``CPU_FREEZE``              | No          |                               |
++-----------------------------+-------------+-------------------------------+
+| ``CPU_DEFAULT_SUSPEND``     | No          |                               |
++-----------------------------+-------------+-------------------------------+
+| ``NODE_HW_STATE``           | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``SYSTEM_SUSPEND``          | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``PSCI_SET_SUSPEND_MODE``   | No          |                               |
++-----------------------------+-------------+-------------------------------+
+| ``PSCI_STAT_RESIDENCY``     | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``PSCI_STAT_COUNT``         | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``SYSTEM_RESET2``           | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``MEM_PROTECT``             | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+| ``MEM_PROTECT_CHECK_RANGE`` | Yes\*       |                               |
++-----------------------------+-------------+-------------------------------+
+
+\*Note: These PSCI APIs require platform power management hooks to be
+registered with the generic PSCI code to be supported.
+
+\*\*Note: These PSCI APIs require appropriate Secure Payload Dispatcher
+hooks to be registered with the generic PSCI code to be supported.
+
+The PSCI implementation in ARM Trusted Firmware is a library which can be
+integrated with AArch64 or AArch32 EL3 Runtime Software for ARMv8-A systems.
+A guide to integrating PSCI library with AArch32 EL3 Runtime Software
+can be found `here`_.
+
+Secure-EL1 Payloads and Dispatchers
+-----------------------------------
+
+On a production system that includes a Trusted OS running in Secure-EL1/EL0,
+the Trusted OS is coupled with a companion runtime service in the BL31
+firmware. This service is responsible for the initialisation of the Trusted
+OS and all communications with it. The Trusted OS is the BL32 stage of the
+boot flow in ARM Trusted Firmware. The firmware will attempt to locate, load
+and execute a BL32 image.
+
+ARM Trusted Firmware uses a more general term for the BL32 software that runs
+at Secure-EL1 - the *Secure-EL1 Payload* - as it is not always a Trusted OS.
+
+The ARM Trusted Firmware provides a Test Secure-EL1 Payload (TSP) and a Test
+Secure-EL1 Payload Dispatcher (TSPD) service as an example of how a Trusted OS
+is supported on a production system using the Runtime Services Framework. On
+such a system, the Test BL32 image and service are replaced by the Trusted OS
+and its dispatcher service. The ARM Trusted Firmware build system expects that
+the dispatcher will define the build flag ``NEED_BL32`` to enable it to include
+the BL32 image in the build, either as a binary or compiled from source,
+depending on whether the ``BL32`` build option is specified.
+
+The TSP runs in Secure-EL1. It is designed to demonstrate synchronous
+communication with the normal-world software running in EL1/EL2. Communication
+is initiated by the normal-world software
+
+-  either directly through a Fast SMC (as defined in the `SMCCC`_)
+
+-  or indirectly through a `PSCI`_ SMC. The `PSCI`_ implementation in turn
+   informs the TSPD about the requested power management operation. This allows
+   the TSP to prepare for or respond to the power state change.
+
+The TSPD service is responsible for:
+
+-  Initializing the TSP
+
+-  Routing requests and responses between the secure and the non-secure
+   states during the two types of communications just described
+
+Initializing a BL32 Image
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Secure-EL1 Payload Dispatcher (SPD) service is responsible for initializing
+the BL32 image. It needs access to the information passed by BL2 to BL31 to do
+so. This is provided by:
+
+.. code:: c
+
+    entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t);
+
+which returns a reference to the ``entry_point_info`` structure corresponding to
+the image which will be run in the specified security state. The SPD uses this
+API to get entry point information for the SECURE image, BL32.
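+
+For example, an SPD setup function would typically retrieve the BL32 entry
+point information as in the following sketch (error handling simplified):
+
+.. code:: c
+
+    entry_point_info_t *bl32_ep_info;
+
+    /* Get the entry point information of the SECURE image (BL32) */
+    bl32_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+    if ((bl32_ep_info == NULL) || (bl32_ep_info->pc == 0))
+        return 1;    /* No BL32 image to initialize */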
+
+In the absence of a BL32 image, BL31 passes control to the normal world
+bootloader image (BL33). When the BL32 image is present, it is typical
+that the SPD wants control to be passed to BL32 first and then later to BL33.
+
+To do this the SPD has to register a BL32 initialization function during
+initialization of the SPD service. The BL32 initialization function has this
+prototype:
+
+.. code:: c
+
+    int32_t init(void);
+
+and is registered using the ``bl31_register_bl32_init()`` function.
+
+Trusted Firmware supports two approaches for the SPD to pass control to BL32
+before returning through EL3 and running the non-trusted firmware (BL33):
+
+#. In the BL32 setup function, use ``bl31_set_next_image_type()`` to
+   request that the exit from ``bl31_main()`` is to the BL32 entrypoint in
+   Secure-EL1. BL31 will exit to BL32 using the asynchronous method by
+   calling ``bl31_prepare_next_image_entry()`` and ``el3_exit()``. A sketch of
+   this approach is shown after this list.
+
+   When the BL32 has completed initialization at Secure-EL1, it returns to
+   BL31 by issuing an SMC, using a Function ID allocated to the SPD. On
+   receipt of this SMC, the SPD service handler should switch the CPU context
+   from trusted to normal world and use the ``bl31_set_next_image_type()`` and
+   ``bl31_prepare_next_image_entry()`` functions to set up the initial return to
+   the normal world firmware BL33. On return from the handler the framework
+   will exit to EL2 and run BL33.
+
+#. The BL32 setup function registers an initialization function using
+   ``bl31_register_bl32_init()`` which provides a SPD-defined mechanism to
+   invoke a 'world-switch synchronous call' to Secure-EL1 to run the BL32
+   entrypoint.
+   NOTE: The Test SPD service included with the Trusted Firmware provides one
+   implementation of such a mechanism.
+
+   On completion BL32 returns control to BL31 via a SMC, and on receipt the
+   SPD service handler invokes the synchronous call return mechanism to return
+   to the BL32 initialization function. On return from this function,
+   ``bl31_main()`` will set up the return to the normal world firmware BL33 and
+   continue the boot process in the normal world.
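+
+A sketch of the first approach is shown below; the SPD function name is
+hypothetical, while ``bl31_set_next_image_type()`` is the framework function
+described above:
+
+.. code:: c
+
+    static int32_t example_spd_setup(void)
+    {
+        /* ... locate and populate the BL32 entry point information ... */
+
+        /* Request that bl31_main() exits to BL32 in Secure-EL1 first,
+         * instead of exiting directly to the normal world (BL33) */
+        bl31_set_next_image_type(SECURE);
+
+        return 0;
+    }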
+
+Crash Reporting in BL31
+-----------------------
+
+BL31 implements a scheme for reporting the processor state when an unhandled
+exception is encountered. The reporting mechanism attempts to preserve all the
+register contents and report it via a dedicated UART (PL011 console). BL31
+reports the general purpose, EL3, Secure EL1 and some EL2 state registers.
+
+A dedicated per-CPU crash stack is maintained by BL31 and this is retrieved via
+the per-CPU pointer cache. The implementation attempts to minimise the memory
+required for this feature. The file ``crash_reporting.S`` contains the
+implementation for crash reporting.
+
+The sample crash output is shown below.
+
+::
+
+    x0  :0x000000004F00007C
+    x1  :0x0000000007FFFFFF
+    x2  :0x0000000004014D50
+    x3  :0x0000000000000000
+    x4  :0x0000000088007998
+    x5  :0x00000000001343AC
+    x6  :0x0000000000000016
+    x7  :0x00000000000B8A38
+    x8  :0x00000000001343AC
+    x9  :0x00000000000101A8
+    x10 :0x0000000000000002
+    x11 :0x000000000000011C
+    x12 :0x00000000FEFDC644
+    x13 :0x00000000FED93FFC
+    x14 :0x0000000000247950
+    x15 :0x00000000000007A2
+    x16 :0x00000000000007A4
+    x17 :0x0000000000247950
+    x18 :0x0000000000000000
+    x19 :0x00000000FFFFFFFF
+    x20 :0x0000000004014D50
+    x21 :0x000000000400A38C
+    x22 :0x0000000000247950
+    x23 :0x0000000000000010
+    x24 :0x0000000000000024
+    x25 :0x00000000FEFDC868
+    x26 :0x00000000FEFDC86A
+    x27 :0x00000000019EDEDC
+    x28 :0x000000000A7CFDAA
+    x29 :0x0000000004010780
+    x30 :0x000000000400F004
+    scr_el3 :0x0000000000000D3D
+    sctlr_el3   :0x0000000000C8181F
+    cptr_el3    :0x0000000000000000
+    tcr_el3 :0x0000000080803520
+    daif    :0x00000000000003C0
+    mair_el3    :0x00000000000004FF
+    spsr_el3    :0x00000000800003CC
+    elr_el3 :0x000000000400C0CC
+    ttbr0_el3   :0x00000000040172A0
+    esr_el3 :0x0000000096000210
+    sp_el3  :0x0000000004014D50
+    far_el3 :0x000000004F00007C
+    spsr_el1    :0x0000000000000000
+    elr_el1 :0x0000000000000000
+    spsr_abt    :0x0000000000000000
+    spsr_und    :0x0000000000000000
+    spsr_irq    :0x0000000000000000
+    spsr_fiq    :0x0000000000000000
+    sctlr_el1   :0x0000000030C81807
+    actlr_el1   :0x0000000000000000
+    cpacr_el1   :0x0000000000300000
+    csselr_el1  :0x0000000000000002
+    sp_el1  :0x0000000004028800
+    esr_el1 :0x0000000000000000
+    ttbr0_el1   :0x000000000402C200
+    ttbr1_el1   :0x0000000000000000
+    mair_el1    :0x00000000000004FF
+    amair_el1   :0x0000000000000000
+    tcr_el1 :0x0000000000003520
+    tpidr_el1   :0x0000000000000000
+    tpidr_el0   :0x0000000000000000
+    tpidrro_el0 :0x0000000000000000
+    dacr32_el2  :0x0000000000000000
+    ifsr32_el2  :0x0000000000000000
+    par_el1 :0x0000000000000000
+    far_el1 :0x0000000000000000
+    afsr0_el1   :0x0000000000000000
+    afsr1_el1   :0x0000000000000000
+    contextidr_el1  :0x0000000000000000
+    vbar_el1    :0x0000000004027000
+    cntp_ctl_el0    :0x0000000000000000
+    cntp_cval_el0   :0x0000000000000000
+    cntv_ctl_el0    :0x0000000000000000
+    cntv_cval_el0   :0x0000000000000000
+    cntkctl_el1 :0x0000000000000000
+    fpexc32_el2 :0x0000000004000700
+    sp_el0  :0x0000000004010780
+
+Guidelines for Reset Handlers
+-----------------------------
+
+Trusted Firmware implements a framework that allows CPU and platform ports to
+perform actions very early after a CPU is released from reset in both the cold
+and warm boot paths. This is done by calling the ``reset_handler()`` function in
+both the BL1 and BL31 images. It in turn calls the platform and CPU specific
+reset handling functions.
+
+Details for implementing a CPU specific reset handler can be found in
+Section 8. Details for implementing a platform specific reset handler can be
+found in the `Porting Guide`_ (see the ``plat_reset_handler()`` function).
+
+When adding functionality to a reset handler, keep in mind that if a different
+reset handling behavior is required between the first and the subsequent
+invocations of the reset handling code, this should be detected at runtime.
+In other words, the reset handler should be able to detect whether an action has
+already been performed and act as appropriate. Possible courses of actions are,
+e.g. skip the action the second time, or undo/redo it.
+
+Configuring secure interrupts
+-----------------------------
+
+The GIC driver is responsible for performing initial configuration of secure
+interrupts on the platform. To this end, the platform is expected to provide the
+GIC driver (either GICv2 or GICv3, as selected by the platform) with the
+interrupt configuration during the driver initialisation.
+
+There are two ways to specify secure interrupt configuration:
+
+#. Array of secure interrupt properties: In this scheme, in both GICv2 and GICv3
+   driver data structures, the ``interrupt_props`` member points to an array of
+   interrupt properties. Each element of the array specifies the interrupt
+   number and its properties, viz. priority, group and configuration. Each
+   element of the array shall be populated by the macro ``INTR_PROP_DESC()``
+   (a sketch of this scheme is shown after this list). The macro takes the
+   following arguments:
+
+   -  10-bit interrupt number,
+
+   -  8-bit interrupt priority,
+
+   -  Interrupt type (one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1``,
+      ``INTR_TYPE_NS``),
+
+   -  Interrupt configuration (either ``GIC_INTR_CFG_LEVEL`` or
+      ``GIC_INTR_CFG_EDGE``).
+
+#. Array of secure interrupts: In this scheme, the GIC driver is provided an
+   array of secure interrupt numbers. The GIC driver, at the time of
+   initialisation, iterates through the array and assigns each interrupt
+   the appropriate group.
+
+   -  For the GICv2 driver, in the ``gicv2_driver_data`` structure, the
+      ``g0_interrupt_array`` member should point to the array of interrupts to
+      be assigned to *Group 0*, and the ``g0_interrupt_num`` member should be
+      set to the number of interrupts in the array.
+
+   -  For the GICv3 driver, in the ``gicv3_driver_data`` structure:
+
+      -  The ``g0_interrupt_array`` member should point to the array of
+         interrupts to be assigned to *Group 0*, and the ``g0_interrupt_num``
+         member should be set to the number of interrupts in the array.
+
+      -  The ``g1s_interrupt_array`` member should point to the array of
+         interrupts to be assigned to *Group 1 Secure*, and the
+         ``g1s_interrupt_num`` member should be set to the number of interrupts
+         in the array.
+
+   **Note that this scheme is deprecated.**
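+
+The following sketch illustrates the first (interrupt properties) scheme. The
+interrupt numbers are hypothetical, while the macro and constants are those
+provided by the GIC driver headers; the resulting array is referenced from the
+driver data via the ``interrupt_props`` and ``interrupt_props_num`` members:
+
+.. code:: c
+
+    static const interrupt_prop_t plat_interrupt_props[] = {
+        /* Secure SGI 8, routed to Secure-EL1, edge triggered */
+        INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY, INTR_TYPE_S_EL1,
+                       GIC_INTR_CFG_EDGE),
+        /* Secure watchdog SPI 56, routed to EL3, level triggered */
+        INTR_PROP_DESC(56, GIC_HIGHEST_SEC_PRIORITY, INTR_TYPE_EL3,
+                       GIC_INTR_CFG_LEVEL),
+    };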
+
+CPU specific operations framework
+---------------------------------
+
+Certain aspects of the ARMv8 architecture are implementation defined,
+that is, certain behaviours are not architecturally defined, but must be defined
+and documented by individual processor implementations. The ARM Trusted
+Firmware implements a framework which categorises the common implementation
+defined behaviours and allows a processor to export its implementation of that
+behaviour. The categories are:
+
+#. Processor specific reset sequence.
+
+#. Processor specific power down sequences.
+
+#. Processor specific register dumping as a part of crash reporting.
+
+#. Errata status reporting.
+
+Each of the above categories fulfils a different requirement.
+
+#. allows any processor specific initialization before the caches and MMU
+   are turned on, like implementation of errata workarounds, entry into
+   the intra-cluster coherency domain etc.
+
+#. allows each processor to implement the power down sequence mandated in
+   its Technical Reference Manual (TRM).
+
+#. allows a processor to provide additional information to the developer
+   in the event of a crash, for example Cortex-A53 has registers which
+   can expose the data cache contents.
+
+#. allows a processor to define a function that inspects and reports the status
+   of all errata workarounds on that processor.
+
+Please note that only 2. is mandated by the TRM.
+
+The CPU specific operations framework scales to accommodate a large number of
+different CPUs during power down and reset handling. The platform can specify
+any CPU optimization it wants to enable for each CPU. It can also specify
+the CPU errata workarounds to be applied for each CPU type during reset
+handling by defining CPU errata compile time macros. Details on these macros
+can be found in the `cpu-specific-build-macros.rst`_ file.
+
+The CPU specific operations framework depends on the ``cpu_ops`` structure which
+needs to be exported for each type of CPU in the platform. It is defined in
+``include/lib/cpus/aarch64/cpu_macros.S`` and has the following fields: ``midr``,
+``reset_func()``, ``cpu_pwr_down_ops`` (array of power down functions) and
+``cpu_reg_dump()``.
+
+The CPU specific files in ``lib/cpus`` export a ``cpu_ops`` data structure with
+suitable handlers for that CPU. For example, ``lib/cpus/aarch64/cortex_a53.S``
+exports the ``cpu_ops`` for Cortex-A53 CPU. According to the platform
+configuration, these CPU specific files must be included in the build by
+the platform makefile. The generic CPU specific operations framework code exists
+in ``lib/cpus/aarch64/cpu_helpers.S``.
+
+CPU specific Reset Handling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After a reset, the state of the CPU when it calls generic reset handler is:
+MMU turned off, both instruction and data caches turned off and not part
+of any coherency domain.
+
+The BL entrypoint code first invokes the ``plat_reset_handler()`` to allow
+the platform to perform any system initialization required and any system
+errata workarounds that need to be applied. The ``get_cpu_ops_ptr()`` reads
+the current CPU midr, finds the matching ``cpu_ops`` entry in the ``cpu_ops``
+array and returns it. Note that only the part number and implementer fields
+in midr are used to find the matching ``cpu_ops`` entry. The ``reset_func()`` in
+the returned ``cpu_ops`` is then invoked which executes the required reset
+handling for that CPU and also any errata workarounds enabled by the platform.
+This function must preserve the values of general purpose registers x20 to x29.
+
+Refer to Section "Guidelines for Reset Handlers" for general guidelines
+regarding placement of code in a reset handler.
+
+CPU specific power down sequence
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+During the BL31 initialization sequence, the pointer to the matching ``cpu_ops``
+entry is stored in per-CPU data by ``init_cpu_ops()`` so that it can be quickly
+retrieved during power down sequences.
+
+Various CPU drivers register handlers to perform power down at certain power
+levels for that specific CPU. The PSCI service, upon receiving a power down
+request, determines the highest power level at which to execute power down
+sequence for a particular CPU. It uses the ``prepare_cpu_pwr_dwn()`` function to
+pick the right power down handler for the requested level. The function
+retrieves ``cpu_ops`` pointer member of per-CPU data, and from that, further
+retrieves ``cpu_pwr_down_ops`` array, and indexes into the required level. If the
+requested power level is higher than what a CPU driver supports, the handler
+registered for the highest level is invoked.
+
+At runtime the platform hooks for power down are invoked by the PSCI service to
+perform platform specific operations during a power down sequence, for example
+turning off CCI coherency during a cluster power down.
+
+CPU specific register reporting during crash
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the crash reporting is enabled in BL31, when a crash occurs, the crash
+reporting framework calls ``do_cpu_reg_dump`` which retrieves the matching
+``cpu_ops`` using the ``get_cpu_ops_ptr()`` function. The ``cpu_reg_dump()`` in
+``cpu_ops`` is invoked, which then returns the CPU specific register values to
+be reported and a pointer to the ASCII list of register names in a format
+expected by the crash reporting framework.
+
+CPU errata status reporting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Errata workarounds for CPUs supported in ARM Trusted Firmware are applied during
+both cold and warm boots, shortly after reset. Individual Errata workarounds are
+enabled as build options. Some errata workarounds have potential run-time
+implications; therefore some are enabled by default, others not. Platform ports
+shall override build options to enable or disable errata as appropriate. The CPU
+drivers take care of applying errata workarounds that are enabled and applicable
+to a given CPU. Refer to the section titled *CPU Errata Workarounds* in `CPUBM`_
+for more information.
+
+Functions in CPU drivers that apply errata workarounds must follow the
+conventions listed below.
+
+The errata workaround must be authored as two separate functions:
+
+-  One that checks for errata. This function must determine whether that errata
+   applies to the current CPU. Typically this involves matching the current
+   CPUs revision and variant against a value that's known to be affected by the
+   errata. If the function determines that the errata applies to this CPU, it
+   must return ``ERRATA_APPLIES``; otherwise, it must return
+   ``ERRATA_NOT_APPLIES``. The utility functions ``cpu_get_rev_var`` and
+   ``cpu_rev_var_ls`` may come in handy for this purpose.
+
+For an errata identified as ``E``, the check function must be named
+``check_errata_E``.
+
+This function will be invoked at different times, both from assembly and from
+C run time. Therefore it must follow the AAPCS and must not use the stack.
+
+-  Another one that applies the errata workaround. This function would call the
+   check function described above, and apply the errata workaround if required.
+
+CPU drivers that apply errata workarounds can optionally implement an assembly
+function that reports the status of the errata workarounds pertaining to that
+CPU. For a driver that registers the CPU, for example ``cpux``, via the
+``declare_cpu_ops`` macro, the errata reporting function, if it exists, must be
+named ``cpux_errata_report``. This function will always be called with the MMU
+enabled; it must follow the AAPCS and may use the stack.
+
+In a debug build of ARM Trusted Firmware, on a CPU that comes out of reset, both
+BL1 and the run time firmware (BL31 in AArch64, and BL32 in AArch32) will invoke
+the errata status reporting function, if one exists, for that type of CPU.
+
+To report the status of each errata workaround, the function shall use the
+assembler macro ``report_errata``, passing it:
+
+-  The build option that enables the errata;
+
+-  The name of the CPU: this must be the same identifier that CPU driver
+   registered itself with, using ``declare_cpu_ops``;
+
+-  And the errata identifier: the identifier must match what's used in the
+   errata's check function described above.
+
+The errata status reporting function will be called once per CPU type/errata
+combination during the software's active life time.
+
+It's expected that whenever an errata workaround is submitted to ARM Trusted
+Firmware, the errata reporting function is appropriately extended to report its
+status as well.
+
+Reporting the status of errata workarounds is for informational purposes only; it
+has no functional significance.
+
+Memory layout of BL images
+--------------------------
+
+Each bootloader image can be divided into 2 parts:
+
+-  the static contents of the image. These are data actually stored in the
+   binary on the disk. In the ELF terminology, they are called ``PROGBITS``
+   sections;
+
+-  the run-time contents of the image. These are data that don't occupy any
+   space in the binary on the disk. The ELF binary just contains some
+   metadata indicating where these data will be stored at run-time and the
+   corresponding sections need to be allocated and initialized at run-time.
+   In the ELF terminology, they are called ``NOBITS`` sections.
+
+All PROGBITS sections are grouped together at the beginning of the image,
+followed by all NOBITS sections. This is true for all Trusted Firmware images
+and it is governed by the linker scripts. This ensures that the raw binary
+images are as small as possible. If a NOBITS section was inserted in between
+PROGBITS sections then the resulting binary file would contain zero bytes in
+place of this NOBITS section, making the image unnecessarily bigger. Smaller
+images allow faster loading from the FIP to the main memory.
+
+Linker scripts and symbols
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each bootloader stage image layout is described by its own linker script. The
+linker scripts export some symbols into the program symbol table. Their values
+correspond to particular addresses. The trusted firmware code can refer to these
+symbols to figure out the image memory layout.
+
+Linker symbols follow the following naming convention in the trusted firmware.
+
+-  ``__<SECTION>_START__``
+
+   Start address of a given section named ``<SECTION>``.
+
+-  ``__<SECTION>_END__``
+
+   End address of a given section named ``<SECTION>``. If there is an alignment
+   constraint on the section's end address then ``__<SECTION>_END__`` corresponds
+   to the end address of the section's actual contents, rounded up to the right
+   boundary. Refer to the value of ``__<SECTION>_UNALIGNED_END__`` to know the
+   actual end address of the section's contents.
+
+-  ``__<SECTION>_UNALIGNED_END__``
+
+   End address of a given section named ``<SECTION>`` without any padding or
+   rounding up due to some alignment constraint.
+
+-  ``__<SECTION>_SIZE__``
+
+   Size (in bytes) of a given section named ``<SECTION>``. If there is an
+   alignment constraint on the section's end address then ``__<SECTION>_SIZE__``
+   corresponds to the size of the section's actual contents, rounded up to the
+   right boundary. In other words,
+   ``__<SECTION>_SIZE__ = __<SECTION>_END__ - __<SECTION>_START__``. Refer to
+   the value of ``__<SECTION>_UNALIGNED_SIZE__`` to know the actual size of the
+   section's contents.
+
+-  ``__<SECTION>_UNALIGNED_SIZE__``
+
+   Size (in bytes) of a given section named ``<SECTION>`` without any padding or
+   rounding up due to some alignment constraint. In other words,
+   ``__<SECTION>_UNALIGNED_SIZE__ = __<SECTION>_UNALIGNED_END__ - __<SECTION>_START__``.
+
+Some of the linker symbols are mandatory as the trusted firmware code relies on
+them to be defined. They are listed in the following subsections. Some of them
+must be provided for each bootloader stage and some are specific to a given
+bootloader stage.
+
+The linker scripts define some extra, optional symbols. They are not actually
+used by any code but they help in understanding the bootloader images' memory
+layout as they are easy to spot in the link map files.
+
+Common linker symbols
+^^^^^^^^^^^^^^^^^^^^^
+
+All BL images share the following requirements:
+
+-  The BSS section must be zero-initialised before executing any C code.
+-  The coherent memory section (if enabled) must be zero-initialised as well.
+-  The MMU setup code needs to know the extents of the coherent and read-only
+   memory regions to set the right memory attributes. When
+   ``SEPARATE_CODE_AND_RODATA=1``, it needs to know more specifically how the
+   read-only memory region is divided between code and data.
+
+The following linker symbols are defined for this purpose:
+
+-  ``__BSS_START__``
+-  ``__BSS_SIZE__``
+-  ``__COHERENT_RAM_START__`` Must be aligned on a page-size boundary.
+-  ``__COHERENT_RAM_END__`` Must be aligned on a page-size boundary.
+-  ``__COHERENT_RAM_UNALIGNED_SIZE__``
+-  ``__RO_START__``
+-  ``__RO_END__``
+-  ``__TEXT_START__``
+-  ``__TEXT_END__``
+-  ``__RODATA_START__``
+-  ``__RODATA_END__``
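+
+As an illustration of how these symbols are consumed, C code refers to them by
+taking their addresses, as in the sketch below. In the actual firmware the
+zero-initialisation is performed by the early assembly entrypoint code; the C
+version is shown purely for clarity:
+
+.. code:: c
+
+    #include <string.h>
+
+    /* Only the addresses of these symbols are meaningful; they are
+     * provided by the linker script. */
+    extern unsigned long __BSS_START__;
+    extern unsigned long __BSS_SIZE__;
+
+    static void zero_bss(void)
+    {
+        memset((void *)&__BSS_START__, 0, (size_t)&__BSS_SIZE__);
+    }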
+
+BL1's linker symbols
+^^^^^^^^^^^^^^^^^^^^
+
+BL1 being the ROM image, it has additional requirements. BL1 is entirely
+executed in place from ROM but it needs some read-write memory for its mutable
+data. Its ``.data`` section (i.e. its allocated read-write data) must be
+relocated from ROM to RAM before executing any C code.
+
+The following additional linker symbols are defined for BL1:
+
+-  ``__BL1_ROM_END__`` End address of BL1's ROM contents, covering its code
+   and ``.data`` section in ROM.
+-  ``__DATA_ROM_START__`` Start address of the ``.data`` section in ROM. Must be
+   aligned on a 16-byte boundary.
+-  ``__DATA_RAM_START__`` Address in RAM where the ``.data`` section should be
+   copied over. Must be aligned on a 16-byte boundary.
+-  ``__DATA_SIZE__`` Size of the ``.data`` section (in ROM or RAM).
+-  ``__BL1_RAM_START__`` Start address of BL1 read-write data.
+-  ``__BL1_RAM_END__`` End address of BL1 read-write data.
+
+How to choose the right base addresses for each bootloader stage image
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is currently no support for dynamic image loading in the Trusted Firmware.
+This means that all bootloader images need to be linked against their ultimate
+runtime locations and the base addresses of each image must be chosen carefully
+such that images don't overlap each other in an undesired way. As the code
+grows, the base addresses might need adjustments to cope with the new memory
+layout.
+
+The memory layout is completely specific to the platform and so there is no
+general recipe for choosing the right base addresses for each bootloader image.
+However, there are tools to aid in understanding the memory layout. These are
+the link map files: ``build/<platform>/<build-type>/bl<x>/bl<x>.map``, with ``<x>``
+being the bootloader stage. They provide a detailed view of the memory usage of
+each image. Among other useful information, they provide the end address of
+each image.
+
+-  ``bl1.map`` link map file provides ``__BL1_RAM_END__`` address.
+-  ``bl2.map`` link map file provides ``__BL2_END__`` address.
+-  ``bl31.map`` link map file provides ``__BL31_END__`` address.
+-  ``bl32.map`` link map file provides ``__BL32_END__`` address.
+
+For each bootloader image, the platform code must provide its start address
+as well as a limit address that it must not overstep. The latter is used in the
+linker scripts to check that the image doesn't grow past that address. If that
+happens, the linker will issue a message similar to the following:
+
+::
+
+    aarch64-none-elf-ld: BLx has exceeded its limit.
+
+Additionally, if the platform memory layout implies some image overlaying like
+on FVP, BL31 and TSP need to know the limit address that their PROGBITS
+sections must not overstep. The platform code must provide those.
+
+When LOAD\_IMAGE\_V2 is disabled, Trusted Firmware provides a mechanism to
+verify at boot time that the memory to load a new image is free to prevent
+overwriting a previously loaded image. For this mechanism to work, the platform
+must specify the memory available in the system as regions, where each region
+consists of base address, total size and the free area within it (as defined
+in the ``meminfo_t`` structure). Trusted Firmware retrieves these memory regions
+by calling the corresponding platform API:
+
+-  ``meminfo_t *bl1_plat_sec_mem_layout(void)``
+-  ``meminfo_t *bl2_plat_sec_mem_layout(void)``
+-  ``void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)``
+-  ``void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)``
+-  ``void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)``
+
+For example, in the case of BL1 loading BL2, ``bl1_plat_sec_mem_layout()`` will
+return the region defined by the platform where BL1 intends to load BL2. The
+``load_image()`` function will check that the memory where BL2 will be loaded is
+within the specified region and marked as free.
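+
+A sketch of how a platform might describe such a region is shown below. The
+``PLAT_TRUSTED_SRAM_BASE``/``PLAT_TRUSTED_SRAM_SIZE`` macros are hypothetical,
+and the ``meminfo_t`` fields shown are the ones used when LOAD\_IMAGE\_V2 is
+disabled:
+
+.. code:: c
+
+    static meminfo_t bl1_tzram_layout;
+
+    void bl1_early_platform_setup(void)
+    {
+        /* Describe the whole Trusted SRAM and initially mark it all free */
+        bl1_tzram_layout.total_base = PLAT_TRUSTED_SRAM_BASE;
+        bl1_tzram_layout.total_size = PLAT_TRUSTED_SRAM_SIZE;
+        bl1_tzram_layout.free_base  = PLAT_TRUSTED_SRAM_BASE;
+        bl1_tzram_layout.free_size  = PLAT_TRUSTED_SRAM_SIZE;
+    }
+
+    meminfo_t *bl1_plat_sec_mem_layout(void)
+    {
+        return &bl1_tzram_layout;
+    }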
+
+The actual number of regions and their base addresses and sizes is platform
+specific. The platform may return the same region or define a different one for
+each API. However, the overlap verification mechanism applies only to a single
+region. Hence, it is the platform's responsibility to guarantee that different
+regions do not overlap, or that if they do, the overlapping images are not
+accessed at the same time. This could be used, for example, to load temporary
+images (e.g. certificates) or firmware images prior to being transferred to
+their corresponding processor (e.g. the SCP BL2 image).
+
+To reduce fragmentation and simplify the tracking of free memory, all the free
+memory within a region is always located in one single buffer defined by its
+base address and size. Trusted Firmware implements a top/bottom load approach:
+after a new image is loaded, it checks how much memory remains free above and
+below the image. The smallest area is marked as unavailable, while the larger
+area becomes the new free memory buffer. Platforms should take this behaviour
+into account when defining the base address for each of the images. For example,
+if an image is loaded near the middle of the region, small changes in image size
+could cause a flip between a top load and a bottom load, which may result in an
+unexpected memory layout.
+
+The following diagram is an example of an image loaded in the bottom part of
+the memory region. The region is initially free (nothing has been loaded yet):
+
+::
+
+               Memory region
+               +----------+
+               |          |
+               |          |  <<<<<<<<<<<<<  Free
+               |          |
+               |----------|                 +------------+
+               |  image   |  <<<<<<<<<<<<<  |   image    |
+               |----------|                 +------------+
+               | xxxxxxxx |  <<<<<<<<<<<<<  Marked as unavailable
+               +----------+
+
+And the following diagram is an example of an image loaded in the top part:
+
+::
+
+               Memory region
+               +----------+
+               | xxxxxxxx |  <<<<<<<<<<<<<  Marked as unavailable
+               |----------|                 +------------+
+               |  image   |  <<<<<<<<<<<<<  |   image    |
+               |----------|                 +------------+
+               |          |
+               |          |  <<<<<<<<<<<<<  Free
+               |          |
+               +----------+
+
+When LOAD\_IMAGE\_V2 is enabled, Trusted Firmware does not provide any mechanism
+to verify at boot time that the memory to load a new image is free to prevent
+overwriting a previously loaded image. The platform must specify the memory
+available in the system for all the relevant BL images to be loaded.
+
+For example, in the case of BL1 loading BL2, ``bl1_plat_sec_mem_layout()`` will
+return the region defined by the platform where BL1 intends to load BL2. The
+``load_image()`` function performs bounds check for the image size based on the
+base and maximum image size provided by the platforms. Platforms must take
+this behaviour into account when defining the base/size for each of the images.
+
+Memory layout on ARM development platforms
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following list describes the memory layout on the ARM development platforms:
+
+-  A 4KB page of shared memory is used for communication between Trusted
+   Firmware and the platform's power controller. This is located at the base of
+   Trusted SRAM. The amount of Trusted SRAM available to load the bootloader
+   images is reduced by the size of the shared memory.
+
+   The shared memory is used to store the CPUs' entrypoint mailbox. On Juno,
+   this is also used for the MHU payload when passing messages to and from the
+   SCP.
+
+-  On FVP, BL1 is originally sitting in the Trusted ROM at address ``0x0``. On
+   Juno, BL1 resides in flash memory at address ``0x0BEC0000``. BL1 read-write
+   data are relocated to the top of Trusted SRAM at runtime.
+
+-  EL3 Runtime Software, BL31 for AArch64 and BL32 for AArch32 (e.g. SP\_MIN),
+   is loaded at the top of the Trusted SRAM, such that its NOBITS sections will
+   overwrite BL1 R/W data. This implies that BL1 global variables remain valid
+   only until execution reaches the EL3 Runtime Software entry point during a
+   cold boot.
+
+-  BL2 is loaded below EL3 Runtime Software.
+
+-  On Juno, SCP\_BL2 is loaded temporarily into the EL3 Runtime Software memory
+   region and transferred to the SCP before being overwritten by EL3 Runtime
+   Software.
+
+-  BL32 (for AArch64) can be loaded in one of the following locations:
+
+   -  Trusted SRAM
+   -  Trusted DRAM (FVP only)
+   -  Secure region of DRAM (top 16MB of DRAM configured by the TrustZone
+      controller)
+
+   When BL32 (for AArch64) is loaded into Trusted SRAM, its NOBITS sections
+   are allowed to overlay BL2. This memory layout is designed to give the
+   BL32 image as much memory as possible when it is loaded into Trusted SRAM.
+
+When LOAD\_IMAGE\_V2 is disabled the memory regions for the overlap detection
+mechanism at boot time are defined as follows (shown per API):
+
+-  ``meminfo_t *bl1_plat_sec_mem_layout(void)``
+
+   This region corresponds to the whole Trusted SRAM except for the shared
+   memory at the base. This region is initially free. At boot time, BL1 will
+   mark the BL1(rw) section within this region as occupied. The BL1(rw) section
+   is placed at the top of Trusted SRAM.
+
+-  ``meminfo_t *bl2_plat_sec_mem_layout(void)``
+
+   This region corresponds to the whole Trusted SRAM as defined by
+   ``bl1_plat_sec_mem_layout()``, but with the BL1(rw) section marked as
+   occupied. This memory region is used to check that BL2 and BL31 do not
+   overlap with each other. BL2\_BASE and BL1\_RW\_BASE are carefully chosen so
+   that the memory for BL31 is top loaded above BL2.
+
+-  ``void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)``
+
+   This region is an exact copy of the region defined by
+   ``bl2_plat_sec_mem_layout()``. Being a disconnected copy means that any
+   changes made to this region by the Trusted Firmware are not propagated back
+   to the original. This approach is valid because the SCP BL2 image is only
+   loaded temporarily while it is being transferred to the SCP, so this memory
+   is reused afterwards.
+
+-  ``void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)``
+
+   This region depends on the location of the BL32 image. Currently, ARM
+   platforms support three different locations (detailed below): Trusted SRAM,
+   Trusted DRAM and the TZC-Secured DRAM.
+
+-  ``void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)``
+
+   This region corresponds to the Non-Secure DDR-DRAM, excluding the
+   TZC-Secured area.
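+
+For reference, the region information returned by these interfaces is conveyed
+through the ``meminfo_t`` structure. The simplified sketch below shows its
+essential fields; the authoritative definition lives in the TF source
+(``bl_common.h``).
+
+.. code:: c
+
+    typedef struct meminfo {
+        uintptr_t total_base;   /* Base address of the memory region */
+        size_t    total_size;   /* Size of the memory region */
+    #if !LOAD_IMAGE_V2
+        uintptr_t free_base;    /* Base of the unused part of the region */
+        size_t    free_size;    /* Size of the unused part of the region */
+    #endif
+    } meminfo_t;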
+
+The location of the BL32 image will result in different memory maps. This is
+illustrated for both FVP and Juno in the following diagrams, using the TSP as
+an example.
+
+Note: Loading the BL32 image in TZC-secured DRAM does not change the memory
+layout of the other images in Trusted SRAM.
+
+**FVP with TSP in Trusted SRAM (default option):**
+(These diagrams only cover the AArch64 case)
+
+::
+
+               Trusted SRAM
+    0x04040000 +----------+  loaded by BL2  ------------------
+               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
+               |----------|                 ------------------
+               |   BL2    |  <<<<<<<<<<<<<  |  BL32 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  | BL32 PROGBITS  |
+    0x04001000 +----------+                 ------------------
+               |  Shared  |
+    0x04000000 +----------+
+
+               Trusted ROM
+    0x04000000 +----------+
+               | BL1 (ro) |
+    0x00000000 +----------+
+
+**FVP with TSP in Trusted DRAM:**
+
+::
+
+               Trusted DRAM
+    0x08000000 +----------+
+               |   BL32   |
+    0x06000000 +----------+
+
+               Trusted SRAM
+    0x04040000 +----------+  loaded by BL2  ------------------
+               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
+               |----------|                 ------------------
+               |   BL2    |
+               |----------|
+               |          |
+    0x04001000 +----------+
+               |  Shared  |
+    0x04000000 +----------+
+
+               Trusted ROM
+    0x04000000 +----------+
+               | BL1 (ro) |
+    0x00000000 +----------+
+
+**FVP with TSP in TZC-Secured DRAM:**
+
+::
+
+                   DRAM
+    0xffffffff +----------+
+               |   BL32   |  (secure)
+    0xff000000 +----------+
+               |          |
+               :          :  (non-secure)
+               |          |
+    0x80000000 +----------+
+
+               Trusted SRAM
+    0x04040000 +----------+  loaded by BL2  ------------------
+               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
+               |----------|                 ------------------
+               |   BL2    |
+               |----------|
+               |          |
+    0x04001000 +----------+
+               |  Shared  |
+    0x04000000 +----------+
+
+               Trusted ROM
+    0x04000000 +----------+
+               | BL1 (ro) |
+    0x00000000 +----------+
+
+**Juno with BL32 in Trusted SRAM (default option):**
+
+::
+
+                  Flash0
+    0x0C000000 +----------+
+               :          :
+    0x0BED0000 |----------|
+               | BL1 (ro) |
+    0x0BEC0000 |----------|
+               :          :
+    0x08000000 +----------+                  BL31 is loaded
+                                             after SCP_BL2 has
+               Trusted SRAM                  been sent to SCP
+    0x04040000 +----------+  loaded by BL2  ------------------
+               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               | SCP_BL2  |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
+               |----------|                 ------------------
+               |   BL2    |  <<<<<<<<<<<<<  |  BL32 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               |          |  <<<<<<<<<<<<<  | BL32 PROGBITS  |
+    0x04001000 +----------+                 ------------------
+               |   MHU    |
+    0x04000000 +----------+
+
+**Juno with BL32 in TZC-secured DRAM:**
+
+::
+
+                   DRAM
+    0xFFE00000 +----------+
+               |   BL32   |  (secure)
+    0xFF000000 |----------|
+               |          |
+               :          :  (non-secure)
+               |          |
+    0x80000000 +----------+
+
+                  Flash0
+    0x0C000000 +----------+
+               :          :
+    0x0BED0000 |----------|
+               | BL1 (ro) |
+    0x0BEC0000 |----------|
+               :          :
+    0x08000000 +----------+                  BL31 is loaded
+                                             after SCP_BL2 has
+               Trusted SRAM                  been sent to SCP
+    0x04040000 +----------+  loaded by BL2  ------------------
+               | BL1 (rw) |  <<<<<<<<<<<<<  |  BL31 NOBITS   |
+               |----------|  <<<<<<<<<<<<<  |----------------|
+               | SCP_BL2  |  <<<<<<<<<<<<<  | BL31 PROGBITS  |
+               |----------|                 ------------------
+               |   BL2    |
+               |----------|
+               |          |
+    0x04001000 +----------+
+               |   MHU    |
+    0x04000000 +----------+
+
+Firmware Image Package (FIP)
+----------------------------
+
+Using a Firmware Image Package (FIP) allows for packing bootloader images (and
+potentially other payloads) into a single archive that can be loaded by the ARM
+Trusted Firmware from non-volatile platform storage. A driver to load images
+from a FIP has been added to the storage layer and allows a package to be read
+from supported platform storage. A tool to create Firmware Image Packages is
+also provided and described below.
+
+Firmware Image Package layout
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The FIP layout consists of a table of contents (ToC) followed by payload data.
+The ToC itself has a header followed by one or more table entries. The ToC is
+terminated by an end marker entry. All ToC entries describe some payload data
+that has been appended to the end of the binary package. With the information
+provided in the ToC entry the corresponding payload data can be retrieved.
+
+::
+
+    ------------------
+    | ToC Header     |
+    |----------------|
+    | ToC Entry 0    |
+    |----------------|
+    | ToC Entry 1    |
+    |----------------|
+    | ToC End Marker |
+    |----------------|
+    |                |
+    |     Data 0     |
+    |                |
+    |----------------|
+    |                |
+    |     Data 1     |
+    |                |
+    ------------------
+
+The ToC header and entry formats are described in the header file
+``include/tools_share/firmware_image_package.h``. This file is used by both the
+tool and the ARM Trusted firmware.
+
+The ToC header has the following fields:
+
+::
+
+    `name`: The name of the ToC. This is currently used to validate the header.
+    `serial_number`: A non-zero number provided by the creation tool
+    `flags`: Flags associated with this data.
+        Bits 0-31: Reserved
+        Bits 32-47: Platform defined
+        Bits 48-63: Reserved
+
+A ToC entry has the following fields:
+
+::
+
+    `uuid`: All files are referred to by a pre-defined Universally Unique
+        IDentifier [UUID]. The UUIDs are defined in
+        `include/tools_share/firmware_image_package.h`. The platform translates
+        the requested image name into the corresponding UUID when accessing the
+        package.
+    `offset_address`: The offset address at which the corresponding payload data
+        can be found. The offset is calculated from the ToC base address.
+    `size`: The size of the corresponding payload data in bytes.
+    `flags`: Flags associated with this entry. None are yet defined.
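+
+As an illustration, the ToC header and entries described above map onto C
+structures along the following lines. This is a simplified sketch; the
+authoritative definitions are in
+``include/tools_share/firmware_image_package.h``.
+
+.. code:: c
+
+    typedef struct fip_toc_header {
+        uint32_t name;            /* Identifies the file as a FIP */
+        uint32_t serial_number;   /* Non-zero number from the creation tool */
+        uint64_t flags;           /* Reserved and platform-defined bits */
+    } fip_toc_header_t;
+
+    typedef struct fip_toc_entry {
+        uuid_t   uuid;            /* Identifies the payload */
+        uint64_t offset_address;  /* Payload offset from the ToC base address */
+        uint64_t size;            /* Payload size in bytes */
+        uint64_t flags;           /* None defined yet */
+    } fip_toc_entry_t;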
+
+Firmware Image Package creation tool
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The FIP creation tool can be used to pack specified images into a binary package
+that can be loaded by the ARM Trusted Firmware from platform storage. The tool
+currently only supports packing bootloader images. Additional image definitions
+can be added to the tool as required.
+
+The tool can be found in ``tools/fiptool``.
+
+Loading from a Firmware Image Package (FIP)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Firmware Image Package (FIP) driver can load images from a binary package on
+non-volatile platform storage. For the ARM development platforms, this is
+currently NOR FLASH.
+
+Bootloader images are loaded according to the platform policy as specified by
+the function ``plat_get_image_source()``. For the ARM development platforms, this
+means the platform will attempt to load images from a Firmware Image Package
+located at the start of NOR FLASH0.
+
+The ARM development platforms' policy is to only allow loading of a known set of
+images. The platform policy can be modified to allow additional images.
+
+Use of coherent memory in Trusted Firmware
+------------------------------------------
+
+There might be loss of coherency when physical memory with mismatched
+shareability, cacheability and memory attributes is accessed by multiple CPUs
+(refer to section B2.9 of `ARM ARM`_ for more details). This possibility occurs
+in Trusted Firmware during power up/down sequences when coherency, MMU and
+caches are turned on/off incrementally.
+
+Trusted Firmware defines coherent memory as a region of memory with Device
+nGnRE attributes in the translation tables. The translation granule size in
+Trusted Firmware is 4KB. This is the smallest possible size of the coherent
+memory region.
+
+By default, all data structures which are susceptible to accesses with
+mismatched attributes from various CPUs are allocated in a coherent memory
+region (refer to section 2.1 of `Porting Guide`_). The coherent memory region
+accesses are Outer Shareable, non-cacheable and they can be accessed
+with the Device nGnRE attributes when the MMU is turned on. Hence, at the
+expense of at least an extra page of memory, Trusted Firmware is able to work
+around coherency issues due to mismatched memory attributes.
+
+The alternative to the above approach is to allocate the susceptible data
+structures in Normal WriteBack WriteAllocate Inner shareable memory. This
+approach requires the data structures to be designed so that it is possible to
+work around the issue of mismatched memory attributes by performing software
+cache maintenance on them.
+
+Disabling the use of coherent memory in Trusted Firmware
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It might be desirable to avoid the cost of allocating coherent memory on
+platforms which are memory constrained. Trusted Firmware enables inclusion of
+coherent memory in firmware images through the build flag ``USE_COHERENT_MEM``.
+This flag is enabled by default. It can be disabled to choose the second
+approach described above.
+
+The below sections analyze the data structures allocated in the coherent memory
+region and the changes required to allocate them in normal memory.
+
+Coherent memory usage in PSCI implementation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``psci_non_cpu_pd_nodes`` data structure stores the platform's power domain
+tree information for state management of power domains. By default, this data
+structure is allocated in the coherent memory region in the Trusted Firmware
+because it can be accessed by multiple CPUs, either with caches enabled or
+disabled.
+
+.. code:: c
+
+    typedef struct non_cpu_pwr_domain_node {
+        /*
+         * Index of the first CPU power domain node level 0 which has this node
+         * as its parent.
+         */
+        unsigned int cpu_start_idx;
+
+        /*
+         * Number of CPU power domains which are siblings of the domain indexed
+         * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+         * -> cpu_start_idx + ncpus' have this node as their parent.
+         */
+        unsigned int ncpus;
+
+        /*
+         * Index of the parent power domain node.
+         * TODO: Figure out whether using a pointer is more efficient.
+         */
+        unsigned int parent_node;
+
+        plat_local_state_t local_state;
+
+        unsigned char level;
+
+        /* For indexing the psci_lock array*/
+        unsigned char lock_index;
+    } non_cpu_pd_node_t;
+
+In order to move this data structure to normal memory, the use of each of its
+fields must be analyzed. Fields like ``cpu_start_idx``, ``ncpus``, ``parent_node``,
+``level`` and ``lock_index`` are only written once, during cold boot. Hence,
+removing them from coherent memory only requires a clean and invalidate of the
+cache lines after these fields are written.
+
+The field ``local_state`` can be concurrently accessed by multiple CPUs in
+different cache states. A Lamport's Bakery lock (``psci_locks``) is used to
+ensure mutual exclusion for this field, and a clean and invalidate is needed
+after it is written.
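+
+A minimal sketch of the resulting update sequence is shown below. It is not the
+actual PSCI code; only ``flush_dcache_range()`` is a real TF helper, the rest
+is illustrative.
+
+.. code:: c
+
+    /*
+     * Illustrative only: update a power domain node's local_state when it is
+     * allocated in normal memory. Mutual exclusion is assumed to be provided
+     * by the corresponding psci_locks entry.
+     */
+    static void update_local_state(non_cpu_pd_node_t *node,
+                                   plat_local_state_t state)
+    {
+        node->local_state = state;
+
+        /* Clean and invalidate so that CPUs running with caches disabled
+         * observe the update. */
+        flush_dcache_range((uintptr_t)&node->local_state,
+                           sizeof(node->local_state));
+    }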
+
+Bakery lock data
+~~~~~~~~~~~~~~~~
+
+The bakery lock data structure ``bakery_lock_t`` is allocated in coherent memory
+and is accessed by multiple CPUs with mismatched attributes. ``bakery_lock_t`` is
+defined as follows:
+
+.. code:: c
+
+    typedef struct bakery_lock {
+        /*
+         * The lock_data is a bit-field of 2 members:
+         * Bit[0]       : choosing. This field is set when the CPU is
+         *                choosing its bakery number.
+         * Bits[1 - 15] : number. This is the bakery number allocated.
+         */
+        volatile uint16_t lock_data[BAKERY_LOCK_MAX_CPUS];
+    } bakery_lock_t;
+
+It is a characteristic of Lamport's Bakery algorithm that the volatile per-CPU
+fields can be read by all CPUs but only written to by the owning CPU.
+
+Depending upon the data cache line size, the per-CPU fields of the
+``bakery_lock_t`` structure for multiple CPUs may exist on a single cache line.
+These per-CPU fields can be read and written during lock contention by multiple
+CPUs with mismatched memory attributes. Since these fields are a part of the
+lock implementation, they do not have access to any other locking primitive to
+safeguard against the resulting coherency issues. As a result, simple software
+cache maintenance is not enough to allocate them in coherent memory. Consider
+the following example.
+
+CPU0 updates its per-CPU field with data cache enabled. This write updates a
+local cache line which contains a copy of the fields for other CPUs as well. Now
+CPU1 updates its per-CPU field of the ``bakery_lock_t`` structure with data cache
+disabled. CPU1 then issues a DCIVAC operation to invalidate any stale copies of
+its field in any other cache line in the system. This operation will invalidate
+the update made by CPU0 as well.
+
+To use bakery locks when ``USE_COHERENT_MEM`` is disabled, the lock data structure
+has been redesigned. The changes utilise the characteristic of Lamport's Bakery
+algorithm mentioned earlier. The bakery\_lock structure only allocates the
+memory for a single CPU. The macro ``DEFINE_BAKERY_LOCK`` allocates all the
+bakery locks needed for a CPU into a section called ``bakery_lock``. The linker
+allocates the memory for the other cores by taking the total size of the
+``bakery_lock`` section and multiplying it by (PLATFORM\_CORE\_COUNT - 1). This
+enables software to perform cache maintenance on the lock data structure
+without running into the coherency issues associated with mismatched
+attributes.
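+
+A minimal usage sketch follows, assuming a hypothetical lock instance;
+``DEFINE_BAKERY_LOCK()``, ``bakery_lock_get()`` and ``bakery_lock_release()``
+are the interfaces provided by ``bakery_lock.h``.
+
+.. code:: c
+
+    #include <bakery_lock.h>
+
+    /* Places the per-CPU data for this (hypothetical) lock in the
+     * `bakery_lock` section when USE_COHERENT_MEM=0. */
+    DEFINE_BAKERY_LOCK(my_driver_lock);
+
+    void my_driver_critical_section(void)
+    {
+        bakery_lock_get(&my_driver_lock);
+        /* ... access data shared by CPUs with mismatched attributes ... */
+        bakery_lock_release(&my_driver_lock);
+    }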
+
+The bakery lock data structure ``bakery_info_t`` is defined for use when
+``USE_COHERENT_MEM`` is disabled as follows:
+
+.. code:: c
+
+    typedef struct bakery_info {
+        /*
+         * The lock_data is a bit-field of 2 members:
+         * Bit[0]       : choosing. This field is set when the CPU is
+         *                choosing its bakery number.
+         * Bits[1 - 15] : number. This is the bakery number allocated.
+         */
+         volatile uint16_t lock_data;
+    } bakery_info_t;
+
+Each ``bakery_info_t`` represents a single per-CPU field of one lock, and the
+combination of the corresponding ``bakery_info_t`` structures for all CPUs in
+the system represents the complete bakery lock. The memory layout for a system
+with N bakery locks is:
+
+::
+
+    bakery_lock section start
+    |----------------|
+    | `bakery_info_t`| <-- Lock_0 per-CPU field
+    |    Lock_0      |     for CPU0
+    |----------------|
+    | `bakery_info_t`| <-- Lock_1 per-CPU field
+    |    Lock_1      |     for CPU0
+    |----------------|
+    | ....           |
+    |----------------|
+    | `bakery_info_t`| <-- Lock_N per-CPU field
+    |    Lock_N      |     for CPU0
+    ------------------
+    |    XXXXX       |
+    | Padding to     |
+    | next Cache WB  | <--- Calculate PERCPU_BAKERY_LOCK_SIZE, allocate
+    |  Granule       |       continuous memory for remaining CPUs.
+    ------------------
+    | `bakery_info_t`| <-- Lock_0 per-CPU field
+    |    Lock_0      |     for CPU1
+    |----------------|
+    | `bakery_info_t`| <-- Lock_1 per-CPU field
+    |    Lock_1      |     for CPU1
+    |----------------|
+    | ....           |
+    |----------------|
+    | `bakery_info_t`| <-- Lock_N per-CPU field
+    |    Lock_N      |     for CPU1
+    ------------------
+    |    XXXXX       |
+    | Padding to     |
+    | next Cache WB  |
+    |  Granule       |
+    ------------------
+
+Consider a system of 2 CPUs with 'N' bakery locks as shown above. For an
+operation on Lock\_N, the corresponding ``bakery_info_t`` in both the CPU0 and
+CPU1 ``bakery_lock`` sections needs to be fetched, and the appropriate cache
+maintenance operations need to be performed for each access.
+
+On ARM platforms, bakery locks are used in the PSCI implementation
+(``psci_locks``) and in the power controller driver (``arm_lock``).
+
+Non Functional Impact of removing coherent memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Removal of the coherent memory region leads to the additional software overhead
+of performing cache maintenance for the affected data structures. However, since
+the memory where the data structures are allocated is cacheable, the performance
+gained from cacheable accesses largely offsets this overhead.
+
+There is however a performance impact for bakery locks, due to:
+
+-  Additional cache maintenance operations, and
+-  Multiple cache line reads for each lock operation, since the bakery locks
+   for each CPU are distributed across different cache lines.
+
+The implementation has been optimized to minimize this additional overhead.
+Measurements on the Juno ARM development platform indicate that when bakery
+locks are allocated in Normal memory, the minimum latency of acquiring a lock
+is on average 3-4 microseconds, whereas in Device memory it is 2 microseconds.
+
+As mentioned earlier, almost a page of memory can be saved by disabling
+``USE_COHERENT_MEM``. Each platform needs to consider these trade-offs to decide
+whether coherent memory should be used. If a platform disables
+``USE_COHERENT_MEM`` and needs to use bakery locks in the porting layer, it can
+optionally define the macro ``PLAT_PERCPU_BAKERY_LOCK_SIZE`` (see the
+`Porting Guide`_). Refer to the reference platform code for examples.
+
+Isolating code and read-only data on separate memory pages
+----------------------------------------------------------
+
+In the ARMv8 VMSA, translation table entries include fields that define the
+properties of the target memory region, such as its access permissions. The
+smallest unit of memory that can be addressed by a translation table entry is
+a memory page. Therefore, if software needs to set different permissions on two
+memory regions then it needs to map them using different memory pages.
+
+The default memory layout for each BL image is as follows:
+
+::
+
+       |        ...        |
+       +-------------------+
+       |  Read-write data  |
+       +-------------------+ Page boundary
+       |     <Padding>     |
+       +-------------------+
+       | Exception vectors |
+       +-------------------+ 2 KB boundary
+       |     <Padding>     |
+       +-------------------+
+       |  Read-only data   |
+       +-------------------+
+       |       Code        |
+       +-------------------+ BLx_BASE
+
+Note: The 2KB alignment for the exception vectors is an architectural
+requirement.
+
+The read-write data start on a new memory page so that they can be mapped with
+read-write permissions, whereas the code and read-only data below are configured
+as read-only.
+
+However, the read-only data are not aligned on a page boundary; they are
+contiguous with the code. Therefore, the end of the code section and the
+beginning of the read-only data section might share a memory page. This forces
+both to be mapped with the same memory attributes. As the code needs to be
+executable, any read-only data stored on the same memory page as the code are
+executable as well. This could potentially be exploited as part of a security
+attack.
+
+TF provides the build flag ``SEPARATE_CODE_AND_RODATA`` to isolate the code and
+read-only data on separate memory pages. This in turn allows independent control
+of the access permissions for the code and read-only data. In this case,
+platform code gets a finer-grained view of the image layout and can
+appropriately map the code region as executable and the read-only data as
+execute-never.
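+
+As an illustration, a platform might describe its own image with two separate
+mappings along the following lines. This is a hedged sketch: ``MT_CODE`` and
+``MT_RO_DATA`` are the attributes offered by the translation table library,
+while the region bounds shown are stand-ins for linker-derived symbols.
+
+.. code:: c
+
+    #include <xlat_tables.h>
+
+    static const mmap_region_t plat_bl_regions[] = {
+    #if SEPARATE_CODE_AND_RODATA
+        /* Code: read-only and executable. */
+        MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+                        MT_CODE | MT_SECURE),
+        /* Read-only data: read-only and execute-never. */
+        MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE,
+                        MT_RO_DATA | MT_SECURE),
+    #else
+        /* Code and read-only data share one read-only, executable mapping. */
+        MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+                        MT_CODE | MT_SECURE),
+    #endif
+        {0} /* terminator */
+    };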
+
+This has an impact on memory footprint, as padding bytes need to be introduced
+between the code and read-only data to ensure the segregation of the two. To
+limit the memory cost, this flag also changes the memory layout such that the
+code and exception vectors are now contiguous, like so:
+
+::
+
+       |        ...        |
+       +-------------------+
+       |  Read-write data  |
+       +-------------------+ Page boundary
+       |     <Padding>     |
+       +-------------------+
+       |  Read-only data   |
+       +-------------------+ Page boundary
+       |     <Padding>     |
+       +-------------------+
+       | Exception vectors |
+       +-------------------+ 2 KB boundary
+       |     <Padding>     |
+       +-------------------+
+       |       Code        |
+       +-------------------+ BLx_BASE
+
+With this more condensed memory layout, the separation of read-only data will
+add zero or one page to the memory footprint of each BL image. Each platform
+should consider the trade-off between memory footprint and security.
+
+This build flag is disabled by default to minimise the memory footprint. On ARM
+platforms, it is enabled.
+
+Publish and Subscribe Framework
+-------------------------------
+
+The Publish and Subscribe Framework allows EL3 components to define and publish
+events, to which other EL3 components can subscribe.
+
+The following macros are provided by the framework:
+
+-  ``REGISTER_PUBSUB_EVENT(event)``: Defines an event, and takes one argument,
+   the event name, which must be a valid C identifier. All calls to
+   ``REGISTER_PUBSUB_EVENT`` macro must be placed in the file
+   ``pubsub_events.h``.
+
+-  ``PUBLISH_EVENT_ARG(event, arg)``: Publishes a defined event, by iterating
+   subscribed handlers and calling them in turn. The handlers will be passed the
+   parameter ``arg``. The expected use-case is to broadcast an event.
+
+-  ``PUBLISH_EVENT(event)``: Like ``PUBLISH_EVENT_ARG``, except that the value
+   ``NULL`` is passed to subscribed handlers.
+
+-  ``SUBSCRIBE_TO_EVENT(event, handler)``: Registers the ``handler`` to
+   subscribe to ``event``. The handler will be executed whenever the ``event``
+   is published.
+
+-  ``for_each_subscriber(event, subscriber)``: Iterates through all handlers
+   subscribed for ``event``. ``subscriber`` must be a local variable of type
+   ``pubsub_cb_t *``, and will point to each subscribed handler in turn during
+   iteration. This macro can be used for those patterns that none of the
+   ``PUBLISH_EVENT_*()`` macros cover.
+
+Publishing an event that was not defined using ``REGISTER_PUBSUB_EVENT`` will
+result in a build error; subscribing to an undefined event, however, will not.
+
+Subscribed handlers must be of type ``pubsub_cb_t``, with the following function
+signature:
+
+::
+
+   typedef void* (*pubsub_cb_t)(const void *arg);
+
+There may be an arbitrary number of handlers registered to the same event. The
+order in which subscribed handlers are notified when that event is published is
+not defined: handlers may be executed in any order and should not assume any
+relative ordering amongst themselves.
+
+Publishing an event on a PE will result in subscribed handlers executing on that
+PE only; it won't cause handlers to execute on a different PE.
+
+Note that publishing an event on a PE blocks until all the subscribed handlers
+finish executing on the PE.
+
+The ARM Trusted Firmware generic code publishes and subscribes to a number of
+events internally. Platform ports are discouraged from subscribing to these, as
+they may be withdrawn, renamed, or have their semantics altered in the future.
+Platforms may however register, publish, and subscribe to platform-specific
+events.
+
+Publish and Subscribe Example
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A publisher that wants to publish event ``foo`` would:
+
+-  Define the event ``foo`` in the ``pubsub_events.h``.
+
+   ::
+
+      REGISTER_PUBSUB_EVENT(foo);
+
+-  Depending on the nature of event, use one of ``PUBLISH_EVENT_*()`` macros to
+   publish the event at the appropriate path and time of execution.
+
+A subscriber that wants to subscribe to event ``foo`` published above would
+implement:
+
+::
+
+   void *foo_handler(const void *arg)
+   {
+        void *result;
+
+        /* Do handling ... */
+
+        return result;
+   }
+
+   SUBSCRIBE_TO_EVENT(foo, foo_handler);
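+
+For patterns that the ``PUBLISH_EVENT_*()`` macros do not cover, such as
+collecting the handlers' return values, the publisher could iterate over the
+subscribers itself. The sketch below is illustrative and not code taken from
+the TF source:
+
+::
+
+   void *publish_foo_collect(void *arg)
+   {
+        void *result = NULL;
+        pubsub_cb_t *subscriber;
+
+        for_each_subscriber(foo, subscriber) {
+             result = (*subscriber)(arg);
+        }
+
+        return result;
+   }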
+
+Performance Measurement Framework
+---------------------------------
+
+The Performance Measurement Framework (PMF) facilitates collection of
+timestamps by registered services and provides interfaces to retrieve
+them from within the ARM Trusted Firmware. A platform can choose to
+expose appropriate SMCs to retrieve these collected timestamps.
+
+By default, the global physical counter is used for the timestamp
+value and is read via ``CNTPCT_EL0``. The framework also allows timestamps
+captured by other CPUs to be retrieved.
+
+Timestamp identifier format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A PMF timestamp is uniquely identified across the system via the
+timestamp ID or ``tid``. The ``tid`` is composed as follows:
+
+::
+
+    Bits 0-7: The local timestamp identifier.
+    Bits 8-9: Reserved.
+    Bits 10-15: The service identifier.
+    Bits 16-31: Reserved.
+
+#. The service identifier. Each PMF service is identified by a
+   service name and a service identifier. Both the service name and
+   identifier are unique within the system as a whole.
+
+#. The local timestamp identifier. This identifier is unique within a given
+   service.
+
+Registering a PMF service
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To register a PMF service, the ``PMF_REGISTER_SERVICE()`` macro from ``pmf.h``
+is used. The arguments required are the service name, the service ID,
+the total number of local timestamps to be captured and a set of flags.
+
+The ``flags`` field can be specified as a bitwise-OR of the following values:
+
+::
+
+    PMF_STORE_ENABLE: The timestamp is stored in memory for later retrieval.
+    PMF_DUMP_ENABLE: The timestamp is dumped on the serial console.
+
+The ``PMF_REGISTER_SERVICE()`` macro reserves memory to store captured
+timestamps in a PMF-specific linker section at build time.
+Additionally, it defines the functions necessary to capture and
+retrieve a particular timestamp for the given service at runtime.
+
+The macro ``PMF_REGISTER_SERVICE()`` only enables capturing PMF
+timestamps from within ARM Trusted Firmware. In order to retrieve
+timestamps from outside of ARM Trusted Firmware, the
+``PMF_REGISTER_SERVICE_SMC()`` macro must be used instead. This macro
+accepts the same set of arguments as the ``PMF_REGISTER_SERVICE()``
+macro but additionally supports retrieving timestamps using SMCs.
+
+Capturing a timestamp
+~~~~~~~~~~~~~~~~~~~~~
+
+PMF timestamps are stored in a per-service timestamp region. On a
+system with multiple CPUs, each timestamp is captured and stored
+in a per-CPU cache line aligned memory region.
+
+Having registered the service, the ``PMF_CAPTURE_TIMESTAMP()`` macro can be
+used to capture a timestamp at the desired location in the code. The macro
+takes the service name, a local timestamp identifier and a flag as arguments.
+
+The ``flags`` field argument can be zero, or ``PMF_CACHE_MAINT`` which
+instructs PMF to do cache maintenance following the capture. Cache
+maintenance is required if any of the service's timestamps are captured
+with data cache disabled.
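+
+Putting the two macros together, a service might be instrumented as in the
+following sketch; the service name, identifier values and capture point are
+hypothetical.
+
+.. code:: c
+
+    #include <pmf.h>
+
+    #define MY_SVC_ID   0x1     /* hypothetical service identifier */
+    #define MY_TS_COUNT 1       /* total number of local timestamps */
+    #define MY_TS_START 0       /* local timestamp identifier */
+
+    /* Reserves storage for the timestamps and defines the capture/retrieve
+     * helpers for this service. */
+    PMF_REGISTER_SERVICE(my_svc, MY_SVC_ID, MY_TS_COUNT, PMF_STORE_ENABLE)
+
+    void my_svc_do_work(void)
+    {
+        /* PMF_CACHE_MAINT is only needed if any of this service's timestamps
+         * may be captured with the data cache disabled. */
+        PMF_CAPTURE_TIMESTAMP(my_svc, MY_TS_START, PMF_CACHE_MAINT);
+
+        /* ... work being measured ... */
+    }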
+
+To capture a timestamp in assembly code, the caller should use the
+``pmf_calc_timestamp_addr`` macro (defined in ``pmf_asm_macros.S``) to
+calculate the address at which the timestamp will be stored. The
+caller should then read the ``CNTPCT_EL0`` register to obtain the timestamp
+and store it at that address for later retrieval.
+
+Retrieving a timestamp
+~~~~~~~~~~~~~~~~~~~~~~
+
+From within ARM Trusted Firmware, timestamps for individual CPUs can
+be retrieved using either ``PMF_GET_TIMESTAMP_BY_MPIDR()`` or
+``PMF_GET_TIMESTAMP_BY_INDEX()`` macros. These macros accept the CPU's MPIDR
+value, or its ordinal position, respectively.
+
+From outside ARM Trusted Firmware, timestamps for individual CPUs can be
+retrieved by calling into ``pmf_smc_handler()``.
+
+.. code:: c
+
+    Interface : pmf_smc_handler()
+    Argument  : unsigned int smc_fid, u_register_t x1,
+                u_register_t x2, u_register_t x3,
+                u_register_t x4, void *cookie,
+                void *handle, u_register_t flags
+    Return    : uintptr_t
+
+    smc_fid: Holds the SMC identifier which is either `PMF_SMC_GET_TIMESTAMP_32`
+        when the caller of the SMC is running in AArch32 mode
+        or `PMF_SMC_GET_TIMESTAMP_64` when the caller is running in AArch64 mode.
+    x1: Timestamp identifier.
+    x2: The `mpidr` of the CPU for which the timestamp has to be retrieved.
+        This can be the `mpidr` of a different core to the one initiating
+        the SMC.  In that case, service specific cache maintenance may be
+        required to ensure the updated copy of the timestamp is returned.
+    x3: A flags value that is either 0 or `PMF_CACHE_MAINT`.  If
+        `PMF_CACHE_MAINT` is passed, then the PMF code will perform a
+        cache invalidate before reading the timestamp.  This ensures
+        an updated copy is returned.
+
+The remaining arguments, ``x4``, ``cookie``, ``handle`` and ``flags`` are unused
+in this implementation.
+
+PMF code structure
+~~~~~~~~~~~~~~~~~~
+
+#. ``pmf_main.c`` consists of core functions that implement service registration,
+   initialization, storing, dumping and retrieving timestamps.
+
+#. ``pmf_smc.c`` contains the SMC handling for registered PMF services.
+
+#. ``pmf.h`` contains the public interface to Performance Measurement Framework.
+
+#. ``pmf_asm_macros.S`` consists of macros to facilitate capturing timestamps in
+   assembly code.
+
+#. ``pmf_helpers.h`` is an internal header used by ``pmf.h``.
+
+ARMv8 Architecture Extensions
+-----------------------------
+
+ARM Trusted Firmware makes use of ARMv8 Architecture Extensions where
+applicable. This section lists the usage of Architecture Extensions, and build
+flags controlling them.
+
+In general, and unless individually mentioned, the build options
+``ARM_ARCH_MAJOR`` and ``ARM_ARCH_MINOR`` select the Architecture Extension to
+target when building ARM Trusted Firmware. Subsequent ARM Architecture
+Extensions are backward compatible with previous versions.
+
+The build system only requires that ``ARM_ARCH_MAJOR`` and ``ARM_ARCH_MINOR`` have a
+valid numeric value. These build options only control whether or not
+Architecture Extension-specific code is included in the build. Otherwise, ARM
+Trusted Firmware targets the base ARMv8.0 architecture; i.e. as if
+``ARM_ARCH_MAJOR`` == 8 and ``ARM_ARCH_MINOR`` == 0, which are also their respective
+default values.
+
+See also the *Summary of build options* in `User Guide`_.
+
+For details on the Architecture Extension and available features, please refer
+to the respective Architecture Extension Supplement.
+
+ARMv8.1
+~~~~~~~
+
+This Architecture Extension is targeted when ``ARM_ARCH_MAJOR`` >= 8, or when
+``ARM_ARCH_MAJOR`` == 8 and ``ARM_ARCH_MINOR`` >= 1.
+
+-  The Compare and Swap instruction is used to implement spinlocks. Otherwise,
+   the load-/store-exclusive instruction pair is used.
+
+ARMv8.2
+~~~~~~~
+
+This Architecture Extension is targeted when ``ARM_ARCH_MAJOR`` == 8 and
+``ARM_ARCH_MINOR`` >= 2.
+
+-  The Common not Private (CnP) bit is enabled to indicate that multiple
+   Processing Elements in the same Inner Shareable domain use the same
+   translation table entries for a given stage of translation for a particular
+   translation regime.
+
+Code Structure
+--------------
+
+Trusted Firmware code is logically divided between the three boot loader
+stages mentioned in the previous sections. The code is also divided into the
+following categories (present as directories in the source code):
+
+-  **Platform specific.** Choice of architecture specific code depends upon
+   the platform.
+-  **Common code.** This is platform and architecture agnostic code.
+-  **Library code.** This code comprises functionality commonly used by all
+   other code. The PSCI implementation and other EL3 runtime frameworks reside
+   as Library components.
+-  **Stage specific.** Code specific to a boot stage.
+-  **Drivers.**
+-  **Services.** EL3 runtime services (eg: SPD). Specific SPD services
+   reside in the ``services/spd`` directory (e.g. ``services/spd/tspd``).
+
+Each boot loader stage uses code from one or more of the above mentioned
+categories. Based upon the above, the code layout looks like this:
+
+::
+
+    Directory    Used by BL1?    Used by BL2?    Used by BL31?
+    bl1          Yes             No              No
+    bl2          No              Yes             No
+    bl31         No              No              Yes
+    plat         Yes             Yes             Yes
+    drivers      Yes             No              Yes
+    common       Yes             Yes             Yes
+    lib          Yes             Yes             Yes
+    services     No              No              Yes
+
+The build system provides a non-configurable build option IMAGE\_BLx for each
+boot loader stage (where x = BL stage), e.g. for BL1, IMAGE\_BL1 will be
+defined by the build system. This enables the Trusted Firmware to compile
+certain code only for specific boot loader stages.
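+
+For instance, common code can guard stage-specific logic as in the hedged
+example below; the two setup functions are hypothetical.
+
+.. code:: c
+
+    void common_helper(void)
+    {
+    #if IMAGE_BL1
+        /* Compiled only into the BL1 image. */
+        bl1_specific_setup();       /* hypothetical function */
+    #elif IMAGE_BL31
+        /* Compiled only into the BL31 image. */
+        bl31_specific_setup();      /* hypothetical function */
+    #endif
+    }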
+
+All assembler files have the ``.S`` extension. The linker source files for each
+boot stage have the extension ``.ld.S``. These are processed by GCC to create the
+linker scripts which have the extension ``.ld``.
+
+FDTs provide a description of the hardware platform and are used by the Linux
+kernel at boot time. These can be found in the ``fdts`` directory.
+
+References
+----------
+
+.. [#] Trusted Board Boot Requirements CLIENT PDD (ARM DEN0006C-1). Available
+       under NDA through your ARM account representative.
+.. [#] `Power State Coordination Interface PDD`_
+.. [#] `SMC Calling Convention PDD`_
+.. [#] `ARM Trusted Firmware Interrupt Management Design guide`_.
+
+--------------
+
+*Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _Reset Design: ./reset-design.rst
+.. _Porting Guide: ./porting-guide.rst
+.. _Firmware Update: ./firmware-update.rst
+.. _PSCI PDD: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
+.. _SMC calling convention PDD: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+.. _PSCI Library integration guide: ./psci-lib-integration-guide.rst
+.. _SMCCC: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
+.. _Power State Coordination Interface PDD: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
+.. _here: ./psci-lib-integration-guide.rst
+.. _cpu-specific-build-macros.rst: ./cpu-specific-build-macros.rst
+.. _CPUBM: ./cpu-specific-build-macros.rst
+.. _ARM ARM: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0487a.e/index.html
+.. _User Guide: ./user-guide.rst
+.. _SMC Calling Convention PDD: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+.. _ARM Trusted Firmware Interrupt Management Design guide: ./interrupt-framework-design.rst
+.. _Xlat_tables design: xlat-tables-lib-v2-design.rst
+
+.. |Image 1| image:: diagrams/rt-svc-descs-layout.png?raw=true
diff --git a/docs/firmware-update.rst b/docs/firmware-update.rst
new file mode 100644
index 0000000..829341d
--- /dev/null
+++ b/docs/firmware-update.rst
@@ -0,0 +1,412 @@
+ARM Trusted Firmware - Firmware Update Design Guide
+===================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+--------------
+
+Introduction
+------------
+
+This document describes the design of the Firmware Update (FWU) feature, which
+enables authenticated firmware to update firmware images from external
+interfaces such as USB, UART, SD-eMMC, NAND, NOR or Ethernet to SoC Non-Volatile
+memories such as NAND Flash, LPDDR2-NVM or any memory determined by the
+platform. This feature functions even when the current firmware in the system
+is corrupt or missing; it therefore may be used as a recovery mode. It may also
+be complemented by other, higher level firmware update software.
+
+FWU implements a specific part of the Trusted Board Boot Requirements (TBBR)
+specification, ARM DEN0006C-1. It should be used in conjunction with the
+`Trusted Board Boot`_ design document, which describes the image authentication
+parts of the Trusted Firmware (TF) TBBR implementation.
+
+Scope
+~~~~~
+
+This document describes the secure world FWU design. It is beyond its scope to
+describe how normal world FWU images should operate. To implement normal world
+FWU images, please refer to the "Non-Trusted Firmware Updater" requirements in
+the TBBR.
+
+FWU Overview
+------------
+
+The FWU boot flow is primarily mediated by BL1. Since BL1 executes in ROM, and
+it is usually desirable to minimize the amount of ROM code, the design allows
+some parts of FWU to be implemented in other secure and normal world images.
+Platform code may choose which parts are implemented in which images but the
+general expectation is:
+
+-  BL1 handles:
+
+   -  Detection and initiation of the FWU boot flow.
+   -  Copying images from non-secure to secure memory
+   -  FWU image authentication
+   -  Context switching between the normal and secure world during the FWU
+      process.
+
+-  Other secure world FWU images handle platform initialization required by
+   the FWU process.
+-  Normal world FWU images handle loading of firmware images from external
+   interfaces to non-secure memory.
+
+The primary requirements of the FWU feature are:
+
+#. Export a BL1 SMC interface to interoperate with other FWU images executing
+   at other Exception Levels.
+#. Export a platform interface to provide FWU common code with the information
+   it needs, and to enable platform specific FWU functionality. See the
+   `Porting Guide`_ for details of this interface.
+
+TF uses abbreviated image terminology for FWU images like for other TF images.
+An overview of this terminology can be found `here`_.
+
+The following diagram shows the FWU boot flow for ARM development platforms.
+ARM CSS platforms like Juno have a System Control Processor (SCP), and these
+use all defined FWU images. Other platforms may use a subset of these.
+
+|Flow Diagram|
+
+Image Identification
+--------------------
+
+Each FWU image and certificate is identified by a unique ID, defined by the
+platform, which BL1 uses to fetch an image descriptor (``image_desc_t``) via a
+call to ``bl1_plat_get_image_desc()``. The same ID is also used to prepare the
+Chain of Trust (Refer to the `Authentication Framework Design`_
+for more information).
+
+The image descriptor includes the following information:
+
+-  Executable or non-executable image. This indicates whether the normal world
+   is permitted to request execution of a secure world FWU image (after
+   authentication). Secure world certificates and non-AP images are examples
+   of non-executable images.
+-  Secure or non-secure image. This indicates whether the image is
+   authenticated/executed in secure or non-secure memory.
+-  Image base address and size.
+-  Image entry point configuration (an ``entry_point_info_t``).
+-  FWU image state.
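+
+In outline, such a descriptor might look like the sketch below. This is
+illustrative only; the authoritative ``image_desc_t`` definition in the TF
+source may contain additional bookkeeping fields.
+
+.. code:: c
+
+    typedef struct image_desc {
+        unsigned int       image_id;    /* Unique ID defined by the platform */
+        unsigned int       state;       /* FWU state, e.g. RESET or COPIED */
+        image_info_t       image_info;  /* Base address, size and security
+                                           attributes of the image */
+        entry_point_info_t ep_info;     /* Entry point configuration */
+    } image_desc_t;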
+
+BL1 uses the FWU image descriptors to:
+
+-  Validate the arguments of FWU SMCs
+-  Manage the state of the FWU process
+-  Initialize the execution state of the next FWU image.
+
+FWU State Machine
+-----------------
+
+BL1 maintains state for each FWU image during FWU execution. FWU images at lower
+Exception Levels raise SMCs to invoke FWU functionality in BL1, which causes
+BL1 to update its FWU image state. The BL1 image states and valid state
+transitions are shown in the diagram below. Note that secure images have a more
+complex state machine than non-secure images.
+
+|FWU state machine|
+
+The following is a brief description of the supported states:
+
+-  RESET: This is the initial state of every image at the start of FWU.
+   Authentication failure also leads to this state. A secure
+   image may yield to this state if it has completed execution.
+   It can also be reached by using ``FWU_SMC_IMAGE_RESET``.
+
+-  COPYING: This is the state of a secure image while BL1 is copying it
+   in blocks from non-secure to secure memory.
+
+-  COPIED: This is the state of a secure image when BL1 has completed
+   copying it to secure memory.
+
+-  AUTHENTICATED: This is the state of an image when BL1 has successfully
+   authenticated it.
+
+-  EXECUTED: This is the state of a secure, executable image when BL1 has
+   passed execution control to it.
+
+-  INTERRUPTED: This is the state of a secure, executable image after it has
+   requested BL1 to resume normal world execution.
+
+BL1 SMC Interface
+-----------------
+
+BL1\_SMC\_CALL\_COUNT
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t function ID : 0x0
+
+    Return:
+        uint32_t
+
+This SMC returns the number of SMCs supported by BL1.
+
+BL1\_SMC\_UID
+~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t function ID : 0x1
+
+    Return:
+        UUID : 32 bits in each of w0-w3 (or r0-r3 for AArch32 callers)
+
+This SMC returns the 128-bit `Universally Unique Identifier`_ for the
+BL1 SMC service.
+
+BL1\_SMC\_VERSION
+~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument:
+        uint32_t function ID : 0x3
+
+    Return:
+        uint32_t : Bits [31:16] Major Version
+                   Bits [15:0] Minor Version
+
+This SMC returns the current version of the BL1 SMC service.
+
+BL1\_SMC\_RUN\_IMAGE
+~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t           function ID : 0x4
+        entry_point_info_t *ep_info
+
+    Return:
+        void
+
+    Pre-conditions:
+        if (normal world caller) synchronous exception
+        if (ep_info not EL3) synchronous exception
+
+This SMC passes execution control to an EL3 image described by the provided
+``entry_point_info_t`` structure. In the normal TF boot flow, BL2 invokes this SMC
+for BL1 to pass execution control to BL31.
+
+FWU\_SMC\_IMAGE\_COPY
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t     function ID : 0x10
+        unsigned int image_id
+        uintptr_t    image_addr
+        unsigned int block_size
+        unsigned int image_size
+
+    Return:
+        int : 0 (Success)
+            : -ENOMEM
+            : -EPERM
+
+    Pre-conditions:
+        if (image_id is invalid) return -EPERM
+        if (image_id is non-secure image) return -EPERM
+        if (image_id state is not (RESET or COPYING)) return -EPERM
+        if (secure world caller) return -EPERM
+        if (image_addr + block_size overflows) return -ENOMEM
+        if (image destination address + image_size overflows) return -ENOMEM
+        if (source block is in secure memory) return -ENOMEM
+        if (source block is not mapped into BL1) return -ENOMEM
+        if (image_size > free secure memory) return -ENOMEM
+        if (image overlaps another image) return -EPERM
+
+This SMC copies the secure image indicated by ``image_id`` from non-secure memory
+to secure memory for later authentication. The image may be copied in a single
+block or multiple blocks. In either case, the total size of the image must be
+provided in ``image_size`` when invoking this SMC for the first time for each
+image; it is ignored in subsequent calls (if any) for the same image.
+
+The ``image_addr`` and ``block_size`` specify the source memory block to copy from.
+The destination address is provided by the platform code.
+
+If ``block_size`` is greater than the amount of remaining bytes to copy for this
+image then the former is truncated to the latter. The copy operation is then
+considered as complete and the FWU state machine transitions to the "COPIED"
+state. If there is still more to copy, the FWU state machine stays in or
+transitions to the COPYING state (depending on the previous state).
+
+When using multiple blocks, the source blocks do not necessarily need to be in
+contiguous memory.
+
+Once the SMC is handled, BL1 returns from exception to the normal world caller.
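+
+As an illustration, a normal world updater might copy an image in fixed-size
+blocks along the following lines. The ``smc()`` helper is a placeholder for
+whatever SMC invocation mechanism the updater uses; only the argument order
+comes from the interface definition above.
+
+.. code:: c
+
+    /* Illustrative normal world pseudo-code: copy `total` bytes of image
+     * `image_id` from `src` in chunks of up to `block` bytes. */
+    int fwu_copy_image(unsigned int image_id, uintptr_t src,
+                       unsigned int block, unsigned int total)
+    {
+        unsigned int copied = 0;
+
+        while (copied < total) {
+            unsigned int chunk = (total - copied < block) ?
+                                 (total - copied) : block;
+
+            /* `total` (image_size) is only honoured on the first call. */
+            int ret = smc(FWU_SMC_IMAGE_COPY, image_id,
+                          src + copied, chunk, total);
+            if (ret != 0)
+                return ret;
+            copied += chunk;
+        }
+
+        return 0;   /* the image is now in the COPIED state */
+    }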
+
+FWU\_SMC\_IMAGE\_AUTH
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t     function ID : 0x11
+        unsigned int image_id
+        uintptr_t    image_addr
+        unsigned int image_size
+
+    Return:
+        int : 0 (Success)
+            : -ENOMEM
+            : -EPERM
+            : -EAUTH
+
+    Pre-conditions:
+        if (image_id is invalid) return -EPERM
+        if (secure world caller)
+            if (image_id state is not RESET) return -EPERM
+            if (image_addr/image_size is not mapped into BL1) return -ENOMEM
+        else // normal world caller
+            if (image_id is secure image)
+                if (image_id state is not COPIED) return -EPERM
+            else // image_id is non-secure image
+                if (image_id state is not RESET) return -EPERM
+                if (image_addr/image_size is in secure memory) return -ENOMEM
+                if (image_addr/image_size not mapped into BL1) return -ENOMEM
+
+This SMC authenticates the image specified by ``image_id``. If the image is in the
+RESET state, BL1 authenticates the image in place using the provided
+``image_addr`` and ``image_size``. If the image is a secure image in the COPIED
+state, BL1 authenticates the image from the secure memory that BL1 previously
+copied the image into.
+
+BL1 returns from exception to the caller. If authentication succeeds then BL1
+sets the image state to AUTHENTICATED. If authentication fails then BL1 returns
+the -EAUTH error and sets the image state back to RESET.
+
+FWU\_SMC\_IMAGE\_EXECUTE
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t     function ID : 0x12
+        unsigned int image_id
+
+    Return:
+        int : 0 (Success)
+            : -EPERM
+
+    Pre-conditions:
+        if (image_id is invalid) return -EPERM
+        if (secure world caller) return -EPERM
+        if (image_id is non-secure image) return -EPERM
+        if (image_id is non-executable image) return -EPERM
+        if (image_id state is not AUTHENTICATED) return -EPERM
+
+This SMC initiates execution of a previously authenticated image specified by
+``image_id``, in the security world other than the caller's. The current
+implementation only supports normal world callers initiating execution of a
+secure world image.
+
+BL1 saves the normal world caller's context, sets the secure image state to
+EXECUTED, and returns from exception to the secure image.
+
+FWU\_SMC\_IMAGE\_RESUME
+~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t   function ID : 0x13
+        register_t image_param
+
+    Return:
+        register_t : image_param (Success)
+                   : -EPERM
+
+    Pre-conditions:
+        if (normal world caller and no INTERRUPTED secure image) return -EPERM
+
+This SMC resumes execution in the other security world while there is a secure
+image in the EXECUTED/INTERRUPTED state.
+
+For normal world callers, BL1 sets the previously interrupted secure image state
+to EXECUTED. For secure world callers, BL1 sets the previously executing secure
+image state to INTERRUPTED. In either case, BL1 saves the calling world's
+context, restores the resuming world's context and returns from exception into
+the resuming world. If the call is successful then the caller provided
+``image_param`` is returned to the resumed world, otherwise an error code is
+returned to the caller.
+
+FWU\_SMC\_SEC\_IMAGE\_DONE
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t function ID : 0x14
+
+    Return:
+        int : 0 (Success)
+            : -EPERM
+
+    Pre-conditions:
+        if (normal world caller) return -EPERM
+
+This SMC indicates completion of a previously executing secure image.
+
+BL1 sets the previously executing secure image state to the RESET state,
+restores the normal world context and returns from exception into the normal
+world.
+
+FWU\_SMC\_UPDATE\_DONE
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t   function ID : 0x15
+        register_t client_cookie
+
+    Return:
+        N/A
+
+This SMC completes the firmware update process. BL1 calls the platform specific
+function ``bl1_plat_fwu_done``, passing the optional argument ``client_cookie`` as
+a ``void *``. The SMC does not return.
+
+FWU\_SMC\_IMAGE\_RESET
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Arguments:
+        uint32_t     function ID : 0x16
+        unsigned int image_id
+
+    Return:
+        int : 0 (Success)
+            : -EPERM
+
+    Pre-conditions:
+        if (secure world caller) return -EPERM
+        if (image in EXECUTED) return -EPERM
+
+This SMC sets the state of an image to RESET and zeroes the memory used by it.
+
+This is only allowed if the image is not being executed.
+
+--------------
+
+*Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _Trusted Board Boot: ./trusted-board-boot.rst
+.. _Porting Guide: ./porting-guide.rst
+.. _here: https://github.com/ARM-software/arm-trusted-firmware/wiki/ARM-Trusted-Firmware-Image-Terminology
+.. _Authentication Framework Design: ./auth-framework.rst
+.. _Universally Unique Identifier: https://tools.ietf.org/rfc/rfc4122.txt
+
+.. |Flow Diagram| image:: diagrams/fwu_flow.png?raw=true
+.. |FWU state machine| image:: diagrams/fwu_states.png?raw=true
diff --git a/docs/interrupt-framework-design.md b/docs/interrupt-framework-design.md
deleted file mode 100644
index ff001b1..0000000
--- a/docs/interrupt-framework-design.md
+++ /dev/null
@@ -1,848 +0,0 @@
-ARM Trusted Firmware Interrupt Management Design guide
-======================================================
-
-Contents :
-
-1.  [Introduction](#1-introduction)
-    *   [Assumptions](#11-assumptions)
-    *   [Concepts](#12-concepts)
-        -   [Interrupt Types](#121-interrupt-types)
-        -   [Routing Model](#122-routing-model)
-        -   [Valid Routing Models](#123-valid-routing-models)
-            +   [Secure-EL1 Interrupts](#1231-secure-el1-interrupts)
-            +   [Non-secure Interrupts](#1232-non-secure-interrupts)
-        -   [Mapping of Interrupt Type to Signal](#124-mapping-of-interrupt-type-to-signal)
-
-2.  [Interrupt Management](#2-interrupt-management)
-    *   [Software Components](#21-software-components)
-    *   [Interrupt Registration](#22-interrupt-registration)
-        -   [EL3 Runtime Firmware](#221-el3-runtime-firmware)
-        -   [Secure Payload Dispatcher](#222-secure-payload-dispatcher)
-            +   [Test Secure Payload Dispatcher behavior](#2221-test-secure-payload-dispatcher-behavior)
-        -   [Secure Payload](#223-secure-payload)
-            +   [Secure Payload IHF design w.r.t Secure-EL1 interrupts](#2231-secure-payload-ihf-design-wrt-secure-el1-interrupts)
-            +   [Secure Payload IHF design w.r.t Non-secure interrupts](#2232-secure-payload-ihf-design-wrt-non-secure-interrupts)
-            +   [Test Secure Payload behavior](#2233-test-secure-payload-behavior)
-    *   [Interrupt Handling](#23-interrupt-handling)
-        -   [EL3 Runtime Firmware](#231-el3-runtime-firmware)
-        -   [Secure Payload Dispatcher](#232-secure-payload-dispatcher)
-            +   [Interrupt Entry](#2321-interrupt-entry)
-            +   [Interrupt Exit](#2322-interrupt-exit)
-            +   [Test Secure Payload Dispatcher behavior](#2323-test-secure-payload-dispatcher-behavior)
-        -   [Secure Payload](#233-secure-payload)
-            +   [Test Secure Payload behavior](#2331-test-secure-payload-behavior)
-
-
-1. Introduction
-----------------
-This document describes the design of the Interrupt management framework in ARM
-Trusted Firmware. This section briefly describes the requirements from this
-framework. It also briefly explains some concepts and assumptions. They will
-help in understanding the implementation of the framework explained in
-subsequent sections.
-
-This framework is responsible for managing interrupts routed to EL3. It also
-allows EL3 software to configure the interrupt routing behavior. Its main
-objective is to implement the following two requirements.
-
-1.  It should be possible to route interrupts meant to be handled by secure
-    software (Secure interrupts) to EL3, when execution is in non-secure state
-    (normal world). The framework should then take care of handing control of
-    the interrupt to either software in EL3 or Secure-EL1 depending upon the
-    software configuration and the GIC implementation. This requirement ensures
-    that secure interrupts are under the control of the secure software with
-    respect to their delivery and handling without the possibility of
-    intervention from non-secure software.
-
-2.  It should be possible to route interrupts meant to be handled by
-    non-secure software (Non-secure interrupts) to the last executed exception
-    level in the normal world when the execution is in secure world at
-    exception levels lower than EL3. This could be done with or without the
-    knowledge of software executing in Secure-EL1/Secure-EL0. The choice of
-    approach should be governed by the secure software. This requirement
-    ensures that non-secure software is able to execute in tandem with the
-    secure software without overriding it.
-
-### 1.1 Assumptions
-The framework makes the following assumptions to simplify its implementation.
-
-1.  All secure interrupts are handled in Secure-EL1. They can be delivered to
-    Secure-EL1 via EL3 but they cannot be handled in EL3. It will be possible
-    to extend the framework to handle secure interrupts in EL3 in the future.
-
-2.  Interrupt exceptions (`PSTATE.I` and `F` bits) are masked during execution
-    in EL3.
-
-### 1.2 Concepts
-
-#### 1.2.1 Interrupt types
-The framework categorises an interrupt to be one of the following depending upon
-the exception level(s) it is handled in.
-
-1.  Secure EL1 interrupt. This type of interrupt can be routed to EL3 or
-    Secure-EL1 depending upon the security state of the current execution
-    context. It is always handled in Secure-EL1.
-
-2.  Non-secure interrupt. This type of interrupt can be routed to EL3,
-    Secure-EL1, Non-secure EL1 or EL2 depending upon the security state of the
-    current execution context. It is always handled in either Non-secure EL1
-    or EL2.
-
-3.  EL3 interrupt. This type of interrupt can be routed to EL3 or Secure-EL1
-    depending upon the security state of the current execution context. It is
-    always handled in EL3.
-
-In the current implementation of the framework, all secure interrupts are
-treated as Secure EL1 interrupts. It will be possible for EL3 software to
-configure a secure interrupt as an EL3 interrupt in future implementations.  The
-following constants define the various interrupt types in the framework
-implementation.
-
-    #define INTR_TYPE_S_EL1      0
-    #define INTR_TYPE_EL3        1
-    #define INTR_TYPE_NS         2
-
-
-#### 1.2.2 Routing model
-A type of interrupt can be either generated as an FIQ or an IRQ. The target
-exception level of an interrupt type is configured through the FIQ and IRQ bits
-in the Secure Configuration Register at EL3 (`SCR_EL3.FIQ` and `SCR_EL3.IRQ`
-bits). When `SCR_EL3.FIQ`=1, FIQs are routed to EL3. Otherwise they are routed
-to the First Exception Level (FEL) capable of handling interrupts. When
-`SCR_EL3.IRQ`=1, IRQs are routed to EL3. Otherwise they are routed to the
-FEL. This register is configured independently by EL3 software for each security
-state prior to entry into a lower exception level in that security state.
-
-A routing model for a type of interrupt (generated as FIQ or IRQ) is defined as
-its target exception level for each security state. It is represented by a
-single bit for each security state. A value of `0` means that the interrupt
-should be routed to the FEL. A value of `1` means that the interrupt should be
-routed to EL3. A routing model is applicable only when execution is not in EL3.
-
-The default routing model for an interrupt type is to route it to the FEL in
-either security state.
-
-#### 1.2.3 Valid routing models
-The framework considers certain routing models for each type of interrupt to be
-incorrect as they conflict with the requirements mentioned in Section 1. The
-following sub-sections describe all the possible routing models and specify
-which ones are valid or invalid. Only the Secure-EL1 and Non-secure interrupt
-types are considered, as EL3 interrupts are currently unsupported (See 1.1). The
-terminology used in the following sub-sections is explained below.
-
-1.  __CSS__. Current Security State. `0` when secure and `1` when non-secure
-
-2.  __TEL3__. Target Exception Level 3. `0` when targeted to the FEL. `1` when
-    targeted to EL3.
-
-
-##### 1.2.3.1 Secure-EL1 interrupts
-
-1.  __CSS=0, TEL3=0__. Interrupt is routed to the FEL when execution is in
-    secure state. This is a valid routing model as secure software is in
-    control of handling secure interrupts.
-
-2.  __CSS=0, TEL3=1__. Interrupt is routed to EL3 when execution is in secure
-    state. This is a valid routing model as secure software in EL3 can
-    handover the interrupt to Secure-EL1 for handling.
-
-3.  __CSS=1, TEL3=0__. Interrupt is routed to the FEL when execution is in
-    non-secure state. This is an invalid routing model as a secure interrupt
-    is not visible to the secure software which violates the motivation behind
-    the ARM Security Extensions.
-
-4.  __CSS=1, TEL3=1__. Interrupt is routed to EL3 when execution is in
-    non-secure state. This is a valid routing model as secure software in EL3
-    can handover the interrupt to Secure-EL1 for handling.
-
-
-##### 1.2.3.2 Non-secure interrupts
-
-1.  __CSS=0, TEL3=0__. Interrupt is routed to the FEL when execution is in
-    secure state. This allows the secure software to trap non-secure
-    interrupts, perform its book-keeping and hand the interrupt to the
-    non-secure software through EL3. This is a valid routing model as secure
-    software is in control of how its execution is pre-empted by non-secure
-    interrupts.
-
-2.  __CSS=0, TEL3=1__. Interrupt is routed to EL3 when execution is in secure
-    state. This is a valid routing model as secure software in EL3 can save
-    the state of software in Secure-EL1/Secure-EL0 before handing the
-    interrupt to non-secure software. This model requires additional
-    coordination between Secure-EL1 and EL3 software to ensure that the
-    former's state is correctly saved by the latter.
-
-3.  __CSS=1, TEL3=0__. Interrupt is routed to the FEL when execution is in
-    non-secure state. This is a valid routing model as a non-secure interrupt
-    is handled by non-secure software.
-
-4.  __CSS=1, TEL3=1__. Interrupt is routed to EL3 when execution is in
-    non-secure state. This is an invalid routing model as there is no valid
-    reason to route the interrupt to EL3 software and then hand it back to
-    non-secure software for handling.
-
-
-#### 1.2.4 Mapping of interrupt type to signal
-The framework is meant to work with any interrupt controller implemented by a
-platform. An interrupt controller could generate a type of interrupt as either an
-FIQ or IRQ signal to the CPU depending upon the current security state. The
-mapping between the type and signal is known only to the platform. The framework
-uses this information to determine whether the IRQ or the FIQ bit should be
-programmed in `SCR_EL3` while applying the routing model for a type of
-interrupt. The platform provides this information through the
-`plat_interrupt_type_to_line()` API (described in the [Porting
-Guide]). For example, on the FVP port when the platform uses an ARM GICv2
-interrupt controller, Secure-EL1 interrupts are signalled through the FIQ signal
-while Non-secure interrupts are signalled through the IRQ signal. This applies
-when execution is in either security state.
-
-
-2. Interrupt management
------------------------
-The following sections describe how interrupts are managed by the interrupt
-handling framework. This entails:
-
-1.  Providing an interface to allow registration of a handler and specification
-    of the routing model for a type of interrupt.
-
-2.  Implementing support to hand control of an interrupt type to its registered
-    handler when the interrupt is generated.
-
-Both aspects of interrupt management involve various components in the secure
-software stack spanning from EL3 to Secure-EL1. These components are described
-in the section 2.1. The framework stores information associated with each type
-of interrupt in the following data structure.
-
-```
-typedef struct intr_type_desc {
-        interrupt_type_handler_t handler;
-        uint32_t flags;
-        uint32_t scr_el3[2];
-} intr_type_desc_t;
-```
-
-The `flags` field stores the routing model for the interrupt type in
-bits[1:0]. Bit[0] stores the routing model when execution is in the secure
-state. Bit[1] stores the routing model when execution is in the non-secure
-state. As mentioned in Section 1.2.2, a value of `0` implies that the interrupt
-should be targeted to the FEL. A value of `1` implies that it should be targeted
-to EL3. The remaining bits are reserved and SBZ. The helper macro
-`set_interrupt_rm_flag()` should be used to set the bits in the `flags`
-parameter.
-
-The `scr_el3[2]` field also stores the routing model but as a mapping of the
-model in the `flags` field to the corresponding bit in the `SCR_EL3` for each
-security state.
-
-The framework also depends upon the platform port to configure the interrupt
-controller to distinguish between secure and non-secure interrupts. The platform
-is expected to be aware of the secure devices present in the system and their
-associated interrupt numbers. It should configure the interrupt controller to
-enable the secure interrupts, ensure that their priority is always higher than
-the non-secure interrupts and target them to the primary CPU. It should also
-export the interface described in the [Porting Guide] to enable
-handling of interrupts.
-
-In the remainder of this document, for the sake of simplicity it is assumed that
-the FIQ signal is used to generate Secure-EL1 interrupts and the IRQ signal is
-used to generate non-secure interrupts in either security state.
-
-### 2.1 Software components
-Roles and responsibilities for interrupt management are sub-divided between the
-following components of software running in EL3 and Secure-EL1. Each component is
-briefly described below.
-
-1.  EL3 Runtime Firmware. This component is common to all ports of the ARM
-    Trusted Firmware.
-
-2.  Secure Payload Dispatcher (SPD) service. This service interfaces with the
-    Secure Payload (SP) software which runs in exception levels lower than EL3
-    i.e. Secure-EL1/Secure-EL0. It is responsible for switching execution
-    between software running in secure and non-secure states at exception
-    levels lower than EL3. A switch is triggered by a Secure Monitor Call from
-    either state. It uses the APIs exported by the Context management library
-    to implement this functionality. Switching execution between the two
-    security states is a requirement for interrupt management as well. This
-    results in a significant dependency on the SPD service. ARM Trusted
-    firmware implements an example Test Secure Payload Dispatcher (TSPD)
-    service.
-
-    An SPD service plugs into the EL3 runtime firmware and could be common to
-    some ports of the ARM Trusted Firmware.
-
-3.  Secure Payload (SP). On a production system, the Secure Payload corresponds
-    to a Secure OS which runs in Secure-EL1/Secure-EL0. It interfaces with the
-    SPD service to manage communication with non-secure software. ARM Trusted
-    Firmware implements an example secure payload called Test Secure Payload
-    (TSP) which runs only in Secure-EL1.
-
-    A Secure payload implementation could be common to some ports of the ARM
-    Trusted Firmware just like the SPD service.
-
-
-### 2.2 Interrupt registration
-This section describes in detail the role of each software component (see 2.1)
-during the registration of a handler for an interrupt type.
-
-
-#### 2.2.1 EL3 runtime firmware
-This component declares the following prototype for a handler of an interrupt type.
-
-        typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
-					     uint32_t flags,
-					     void *handle,
-					     void *cookie);
-
-The value of the `id` parameter depends upon the definition of the
-`IMF_READ_INTERRUPT_ID` build time flag. When the flag is defined, `id` contains
-the number of the highest priority pending interrupt of the type that this
-handler was registered for. When the flag is not defined `id` contains
-`INTR_ID_UNAVAILABLE`.
-
-The `flags` parameter contains miscellaneous information as follows.
-
-1.  Security state, bit[0]. This bit indicates the security state of the lower
-    exception level when the interrupt was generated. A value of `1` means
-    that it was in the non-secure state. A value of `0` indicates that it was
-    in the secure state. This bit can be used by the handler to ensure that
-    the interrupt was generated and routed as per the routing model specified
-    during registration.
-
-2.  Reserved, bits[31:1]. The remaining bits are reserved for future use.
-
-The `handle` parameter points to the `cpu_context` structure of the current CPU
-for the security state specified in the `flags` parameter.
-
-Once the handler routine completes, execution will return to either the secure
-or non-secure state. The handler routine should return a pointer to
-`cpu_context` structure of the current CPU for the target security state. It
-should treat all error conditions as critical errors and take appropriate action
-within its implementation e.g. use assertion failures.
-
-The runtime firmware provides the following API for registering a handler for a
-particular type of interrupt. A Secure Payload Dispatcher service should use
-this API to register a handler for Secure-EL1 and optionally for non-secure
-interrupts. This API also requires the caller to specify the routing model for
-the type of interrupt.
-
-    int32_t register_interrupt_type_handler(uint32_t type,
-					interrupt_type_handler handler,
-					uint64_t flags);
-
-
-The `type` parameter can be one of the three interrupt types listed above i.e.
-`INTR_TYPE_S_EL1`, `INTR_TYPE_NS` & `INTR_TYPE_EL3` (currently unimplemented).
-The `flags` parameter is as described in Section 2.
-
-The function will return `0` upon a successful registration. It will return
-`-EALREADY` in case a handler for the interrupt type has already been
-registered.  If the `type` is unrecognised or the `flags` or the `handler` are
-invalid it will return `-EINVAL`. It will return `-ENOTSUP` if the specified
-`type` is not supported by the framework i.e. `INTR_TYPE_EL3`.
-
-Interrupt routing is governed by the configuration of the `SCR_EL3.FIQ/IRQ` bits
-prior to entry into a lower exception level in either security state. The
-context management library maintains a copy of the `SCR_EL3` system register for
-each security state in the `cpu_context` structure of each CPU. It exports the
-following APIs to let EL3 Runtime Firmware program and retrieve the routing
-model for each security state for the current CPU. The value of `SCR_EL3` stored
-in the `cpu_context` is used by the `el3_exit()` function to program the
-`SCR_EL3` register prior to returning from the EL3 exception level.
-
-        uint32_t cm_get_scr_el3(uint32_t security_state);
-        void cm_write_scr_el3_bit(uint32_t security_state,
-                                  uint32_t bit_pos,
-                                  uint32_t value);
-
-`cm_get_scr_el3()` returns the value of the `SCR_EL3` register for the specified
-security state of the current CPU. `cm_write_scr_el3_bit()` writes a `0` or `1` to
-the bit specified by `bit_pos`. `register_interrupt_type_handler()` invokes
-`set_routing_model()` API which programs the `SCR_EL3` according to the routing
-model using the `cm_get_scr_el3()` and `cm_write_scr_el3_bit()` APIs.
-
-It is worth noting that in the current implementation of the framework, the EL3
-runtime firmware is responsible for programming the routing model. The SPD is
-responsible for ensuring that the routing model has been adhered to upon
-receiving an interrupt.
-
-#### 2.2.2 Secure payload dispatcher
-An SPD service is responsible for determining and maintaining the interrupt
-routing model supported by itself and the Secure Payload. It is also responsible
-for ferrying interrupts between secure and non-secure software depending upon
-the routing model. It could determine the routing model at build time or at
-runtime. It must use this information to register a handler for each interrupt
-type using the `register_interrupt_type_handler()` API in EL3 runtime firmware.
-
-If the routing model is not known to the SPD service at build time, then it must
-be provided by the SP as the result of its initialisation. The SPD should
-program the routing model only after SP initialisation has completed e.g. in the
-SPD initialisation function pointed to by the `bl32_init` variable.
-
-The SPD should determine the mechanism to pass control to the Secure Payload
-after receiving an interrupt from the EL3 runtime firmware. This information
-could either be provided to the SPD service at build time or by the SP at
-runtime.
-
-#### 2.2.2.1 Test secure payload dispatcher behavior
-The TSPD only handles Secure-EL1 interrupts and is provided with the following
-routing model at build time.
-
-*   Secure-EL1 interrupts are routed to EL3 when execution is in non-secure
-    state and are routed to the FEL when execution is in the secure state
-    i.e. __CSS=0, TEL3=0__ & __CSS=1, TEL3=1__ for Secure-EL1 interrupts
-
-*   The default routing model is used for non-secure interrupts i.e. they are
-    routed to the FEL in either security state i.e. __CSS=0, TEL3=0__ &
-    __CSS=1, TEL3=0__ for Non-secure interrupts
-
-It performs the following actions in the `tspd_init()` function to fulfill the
-requirements mentioned earlier.
-
-1.  It passes control to the Test Secure Payload to perform its
-    initialisation. The TSP provides the address of the vector table
-    `tsp_vectors` in the SP which also includes the handler for Secure-EL1
-    interrupts in the `fiq_entry` field. The TSPD passes control to the TSP at
-    this address when it receives a Secure-EL1 interrupt.
-
-    The handover agreement between the TSP and the TSPD requires that the TSPD
-    masks all interrupts (`PSTATE.DAIF` bits) when it calls
-    `tsp_fiq_entry()`. The TSP has to preserve the callee saved general
-    purpose, SP_EL1/Secure-EL0, LR, VFP and system registers. It can use
-    `x0-x18` to enable its C runtime.
-
-2.  The TSPD implements a handler function for Secure-EL1 interrupts. It
-    registers it with the EL3 runtime firmware using the
-    `register_interrupt_type_handler()` API as follows
-
-        /* Forward declaration */
-        interrupt_type_handler tspd_secure_el1_interrupt_handler;
-        int32_t rc, flags = 0;
-        set_interrupt_rm_flag(flags, NON_SECURE);
-        rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
-                                         tspd_secure_el1_interrupt_handler,
-                                         flags);
-        assert(rc == 0);
-
-#### 2.2.3 Secure payload
-A Secure Payload must implement an interrupt handling framework at Secure-EL1
-(Secure-EL1 IHF) to support its chosen interrupt routing model. Secure payload
-execution will alternate between the below cases.
-
-1.  In the code where IRQ, FIQ or both interrupts are enabled, if an interrupt
-    type is targeted to the FEL, then it will be routed to the Secure-EL1
-    exception vector table. This is defined as the asynchronous model of
-    handling interrupts. This mode applies to both Secure-EL1 and non-secure
-    interrupts.
-
-2.  In the code where both interrupts are disabled, if an interrupt type is
-    targeted to the FEL, then execution will eventually migrate to the
-    non-secure state. Any non-secure interrupts will be handled as described
-    in the routing model where __CSS=1 and TEL3=0__. Secure-EL1 interrupts
-    will be routed to EL3 (as per the routing model where __CSS=1 and
-    TEL3=1__) where the SPD service will hand them to the SP. This is defined
-    as the synchronous mode of handling interrupts.
-
-The interrupt handling framework implemented by the SP should support one or
-both these interrupt handling models depending upon the chosen routing model.
-
-The following list briefly describes how the choice of a valid routing model
-(See 1.2.3) effects the implementation of the Secure-EL1 IHF. If the choice of
-the interrupt routing model is not known to the SPD service at compile time,
-then the SP should pass this information to the SPD service at runtime during
-its initialisation phase.
-
-As mentioned earlier, it is assumed that the FIQ signal is used to generate
-Secure-EL1 interrupts and the IRQ signal is used to generate non-secure
-interrupts in either security state.
-
-##### 2.2.3.1 Secure payload IHF design w.r.t secure-EL1 interrupts
-1.  __CSS=0, TEL3=0__. If `PSTATE.F=0`, Secure-EL1 interrupts will be
-    triggered at one of the Secure-EL1 FIQ exception vectors. The Secure-EL1
-    IHF should implement support for handling FIQ interrupts asynchronously.
-
-    If `PSTATE.F=1` then Secure-EL1 interrupts will be handled as per the
-    synchronous interrupt handling model. The SP could implement this scenario
-    by exporting a separate entrypoint for Secure-EL1 interrupts to the SPD
-    service during the registration phase. The SPD service would also need to
-    know the state of the system, general purpose and the `PSTATE` registers
-    in which it should arrange to return execution to the SP. The SP should
-    provide this information in an implementation defined way during the
-    registration phase if it is not known to the SPD service at build time.
-
-2.  __CSS=1, TEL3=1__. Interrupts are routed to EL3 when execution is in
-    non-secure state. They should be handled through the synchronous interrupt
-    handling model as described in 1. above.
-
-3.  __CSS=0, TEL3=1__. Secure interrupts are routed to EL3 when execution is in
-    secure state. They will not be visible to the SP. The `PSTATE.F` bit in
-    Secure-EL1/Secure-EL0 will not mask FIQs. The EL3 runtime firmware will
-    call the handler registered by the SPD service for Secure-EL1
-    interrupts. The Secure-EL1 IHF should then handle all Secure-EL1 interrupts
-    through the synchronous interrupt handling model described in 1. above.
-
-
-##### 2.2.3.2 Secure payload IHF design w.r.t non-secure interrupts
-1.  __CSS=0, TEL3=0__. If `PSTATE.I=0`, non-secure interrupts will be
-    triggered at one of the Secure-EL1 IRQ exception vectors. The Secure-EL1
-    IHF should co-ordinate with the SPD service to transfer execution to the
-    non-secure state where the interrupt should be handled, e.g. the SP could
-    allocate a function identifier to issue an SMC64 or SMC32 to the SPD
-    service which indicates that the SP execution has been pre-empted by a
-    non-secure interrupt. If this function identifier is not known to the SPD
-    service at compile time then the SP could provide it during the
-    registration phase.
-
-    If `PSTATE.I=1` then the non-secure interrupt will pend until execution
-    resumes in the non-secure state.
-
-2.  __CSS=0, TEL3=1__.  Non-secure interrupts are routed to EL3. They will not
-    be visible to the SP. The `PSTATE.I` bit in Secure-EL1/Secure-EL0 will
-    have no effect. The SPD service should register a non-secure interrupt
-    handler which should save the SP state correctly and resume execution in
-    the non-secure state where the interrupt will be handled. The Secure-EL1
-    IHF does not need to take any action.
-
-3.  __CSS=1, TEL3=0__.  Non-secure interrupts are handled in the FEL in
-    non-secure state (EL1/EL2) and are not visible to the SP. This routing
-    model does not affect the SP behavior.
-
-
-A Secure Payload must also ensure that all Secure-EL1 interrupts are correctly
-configured at the interrupt controller by the platform port of the EL3 runtime
-firmware. It should configure any additional Secure-EL1 interrupts which the EL3
-runtime firmware is not aware of through its platform port.
-
-##### 2.2.3.3 Test secure payload behavior
-The routing model for Secure-EL1 and non-secure interrupts chosen by the TSP is
-described in Section 2.2.2. It is known to the TSPD service at build time.
-
-The TSP implements an entrypoint (`tsp_fiq_entry()`) for handling Secure-EL1
-interrupts taken in non-secure state and routed through the TSPD service
-(synchronous handling model). It passes the reference to this entrypoint via
-`tsp_vectors` to the TSPD service.
-
-The TSP also replaces the default exception vector table referenced through the
-`early_exceptions` variable, with a vector table capable of handling FIQ and IRQ
-exceptions taken at the same (Secure-EL1) exception level. This table is
-referenced through the `tsp_exceptions` variable and programmed into the
-VBAR_EL1. It caters for the asynchronous handling model.
-
-The TSP also programs the Secure Physical Timer in the ARM Generic Timer block
-to raise a periodic interrupt (every half a second) for the purpose of testing
-    interrupt management across all the software components listed in 2.1.
-
-
-### 2.3 Interrupt handling
-This section describes in detail the role of each software component (see
-Section 2.1) in handling an interrupt of a particular type.
-
-#### 2.3.1 EL3 runtime firmware
-The EL3 runtime firmware populates the IRQ and FIQ exception vectors referenced
-by the `runtime_exceptions` variable as follows.
-
-1.  IRQ and FIQ exceptions taken from the current exception level with
-    `SP_EL0` or `SP_EL3` are reported as irrecoverable error conditions. As
-    mentioned earlier, EL3 runtime firmware always executes with the
-    `PSTATE.I` and `PSTATE.F` bits set.
-
-2.  The following text describes how the IRQ and FIQ exceptions taken from a
-    lower exception level using AArch64 or AArch32 are handled.
-
-When an interrupt is generated, the vector for each interrupt type is
-responsible for:
-
-1.  Saving the entire general purpose register context (x0-x30) immediately
-    upon exception entry. The registers are saved in the per-cpu `cpu_context`
-    data structure referenced by the `SP_EL3` register.
-
-2.  Saving the `ELR_EL3`, `SP_EL0` and `SPSR_EL3` system registers in the
-    per-cpu `cpu_context` data structure referenced by the `SP_EL3` register.
-
-3.  Switching to the C runtime stack by restoring the `CTX_RUNTIME_SP` value
-    from the per-cpu `cpu_context` data structure in `SP_EL0` and
-    executing the `msr spsel, #0` instruction.
-
-4.  Determining the type of interrupt. Secure-EL1 interrupts will be signalled
-    at the FIQ vector. Non-secure interrupts will be signalled at the IRQ
-    vector. The platform should implement the following API to determine the
-    type of the pending interrupt.
-
-        uint32_t plat_ic_get_interrupt_type(void);
-
-    It should return either `INTR_TYPE_S_EL1` or `INTR_TYPE_NS`.
-
-5.  Determining the handler for the type of interrupt that has been generated.
-    The following API has been added for this purpose.
-
-        interrupt_type_handler get_interrupt_type_handler(uint32_t interrupt_type);
-
-    It returns the reference to the registered handler for this interrupt
-    type. The `handler` is retrieved from the `intr_type_desc_t` structure as
-    described in Section 2. `NULL` is returned if no handler has been
-    registered for this type of interrupt. This scenario is reported as an
-    irrecoverable error condition.
-
-6.  Calling the registered handler function for the interrupt type generated.
-    The firmware also determines the interrupt id if the IMF_READ_INTERRUPT_ID
-    build time flag is set. The id is set to `INTR_ID_UNAVAILABLE` if the flag
-    is not set. The id along with the current security state and a reference to
-    the `cpu_context_t` structure for the current security state are passed to
-    the handler function as its arguments.
-
-    The handler function returns a reference to the per-cpu `cpu_context_t`
-    structure for the target security state.
-
-7.  Calling `el3_exit()` to return from EL3 into a lower exception level in
-    the security state determined by the handler routine. The `el3_exit()`
-    function is responsible for restoring the register context from the
-    `cpu_context_t` data structure for the target security state.
-
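-Steps 4 to 6 above can be summarised in C with the following sketch. This is
-illustrative only: the actual vector code is written in assembly, and the value
-of `INTR_ID_UNAVAILABLE`, the `panic()` helper and the `example_dispatch()`
-wrapper are assumptions made for the sake of a self-contained example.
-
-```
-#include <stddef.h>
-#include <stdint.h>
-
-/* Interrupt type constants as defined earlier in this document. */
-#define INTR_TYPE_S_EL1         0
-#define INTR_TYPE_EL3           1
-#define INTR_TYPE_NS            2
-/* Assumed value; the framework only guarantees a reserved constant. */
-#define INTR_ID_UNAVAILABLE     0xFFFFFFFFU
-
-typedef uint64_t (*interrupt_type_handler_t)(uint32_t id, uint32_t flags,
-                                             void *handle, void *cookie);
-
-/* Declarations of the APIs quoted in the steps above. */
-uint32_t plat_ic_get_interrupt_type(void);
-interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type);
-void panic(void);
-
-/* Hypothetical C rendering of steps 4 to 6 of an interrupt vector. */
-static uint64_t example_dispatch(uint32_t flags, void *handle)
-{
-    /* Step 4: determine the type of the pending interrupt. */
-    uint32_t type = plat_ic_get_interrupt_type();
-
-    /* Step 5: a missing handler is an irrecoverable error. */
-    interrupt_type_handler_t handler = get_interrupt_type_handler(type);
-
-    if (handler == NULL)
-        panic();
-
-    /* Step 6: the id is only read when IMF_READ_INTERRUPT_ID is set. */
-    uint32_t id = INTR_ID_UNAVAILABLE;
-
-    return handler(id, flags, handle, NULL);
-}
-```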
-
-#### 2.3.2 Secure payload dispatcher
-
-##### 2.3.2.1 Interrupt entry
-The SPD service begins handling an interrupt when the EL3 runtime firmware calls
-the handler function for that type of interrupt. The SPD service is responsible
-for the following:
-
-1.  Validating the interrupt. This involves ensuring that the interrupt was
-    generated according to the interrupt routing model specified by the SPD
-    service during registration. It should use the interrupt id and the
-    security state of the exception level (passed in the `flags` parameter of
-    the handler) where the interrupt was taken from to determine this. If the
-    interrupt is not recognised then the handler should treat it as an
-    irrecoverable error condition.
-
-    An SPD service can register a handler for Secure-EL1 and/or Non-secure
-    interrupts. The following text describes further error scenarios keeping
-    this in mind:
-
-    1.  __SPD service has registered a handler for Non-secure interrupts__:
-        When an interrupt is received by the handler, it could check its id
-        to ensure it has been configured as a non-secure interrupt at the
-        interrupt controller. A secure interrupt should never be handed to
-        the non-secure interrupt handler. A non-secure interrupt should
-        never be routed to EL3 when execution is in non-secure state. The
-        handler could check the security state flag to ensure this.
-
-    2.  __SPD service has registered a handler for Secure-EL1 interrupts__:
-        When an interrupt is received by the handler, it could check its id
-        to ensure it has been configured as a secure interrupt at the
-        interrupt controller. A non-secure interrupt should never be handed
-        to the secure interrupt handler. If the routing model chosen is such
-        that Secure-EL1 interrupts are not routed to EL3 when execution is
-        in non-secure state, then a Secure-EL1 interrupt generated in the
-        secure state would be invalid. The handler could use the security
-        state flag to check this.
-
-    The SPD service should use the platform API:
-    `plat_ic_get_interrupt_type()` to determine the type of interrupt for the
-    specified id.
-
-2.  Determining whether the security state of the exception level for handling
-    the interrupt is the same as the security state of the exception level
-    where the interrupt was generated. This depends upon the routing model and
-    type of the interrupt. The SPD should use this information to determine if
-    a context switch is required. The following two cases would require a
-    context switch from secure to non-secure or vice-versa.
-
-    1.  A Secure-EL1 interrupt taken from the non-secure state should be
-        routed to the Secure Payload.
-
-    2.  A non-secure interrupt taken from the secure state should be routed
-        to the last known non-secure exception level.
-
-    The SPD service must save the system register context of the current
-    security state. It must then restore the system register context of the
-    target security state. It should use the `cm_set_next_eret_context()` API
-    to ensure that the next `cpu_context` to be restored is of the target
-    security state.
-
-    If the target state is secure then execution should be handed to the SP as
-    per the synchronous interrupt handling model it implements. A Secure-EL1
-    interrupt can be routed to EL3 while execution is in the SP. This implies
-    that SP execution can be preempted while handling an interrupt by
-    another higher priority Secure-EL1 interrupt (or an EL3 interrupt in the
-    future). The SPD service should manage secure interrupt priorities before
-    handing control to the SP to prevent this type of preemption which can
-    leave the system in an inconsistent state.
-
-3.  Setting the return value of the handler to the per-cpu `cpu_context` if
-    the interrupt has been successfully validated and is ready to be handled at a
-    lower exception level.
-
-The routing model allows non-secure interrupts to be taken to Secure-EL1 when in
-secure state. The SPD service and the SP should implement a mechanism for
-routing these interrupts to the last known exception level in the non-secure
-state. The former should save the SP context, restore the non-secure context and
-arrange for entry into the non-secure state so that the interrupt can be
-handled.
-
-##### 2.3.2.2 Interrupt exit
-When the Secure Payload has finished handling a Secure-EL1 interrupt, it could
-return control back to the SPD service through an SMC32 or SMC64. The SPD service
-should handle this secure monitor call so that execution resumes in the
-exception level and the security state from where the Secure-EL1 interrupt was
-originally taken.
-
-##### 2.3.2.3 Test secure payload dispatcher behavior
-The example TSPD service registers a handler for Secure-EL1 interrupts taken
-from the non-secure state. Its handler `tspd_secure_el1_interrupt_handler()`
-takes the following actions upon being invoked.
-
-1.  It uses the `id` parameter to query the interrupt controller to ensure
-    that the interrupt is a Secure-EL1 interrupt. It asserts if this is not
-    the case.
-
-2.  It uses the security state provided in the `flags` parameter to ensure
-    that the secure interrupt originated from the non-secure state. It asserts
-    if this is not the case.
-
-3.  It saves the system register context for the non-secure state by calling
-    `cm_el1_sysregs_context_save(NON_SECURE);`.
-
-4.  It sets the `ELR_EL3` system register to `tsp_fiq_entry` and sets the
-    `SPSR_EL3.DAIF` bits in the secure CPU context. It sets `x0` to
-    `TSP_HANDLE_FIQ_AND_RETURN`. If the TSP was in the middle of handling a
-    standard SMC, then the `ELR_EL3` and `SPSR_EL3` registers in the secure CPU
-    context are saved first.
-
-5.  It restores the system register context for the secure state by calling
-    `cm_el1_sysregs_context_restore(SECURE);`.
-
-6.  It ensures that the secure CPU context is used to program the next
-    exception return from EL3 by calling `cm_set_next_eret_context(SECURE);`.
-
-7.  It returns the per-cpu `cpu_context` to indicate that the interrupt can
-    now be handled by the SP. `x1` is written with the value of `elr_el3`
-    register for the non-secure state. This information is used by the SP for
-    debugging purposes.
-
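-The sequence above corresponds roughly to the outline below. This is a
-simplified sketch rather than the actual TSPD source: only the context
-management calls quoted in the steps are real APIs, the register updates of
-step 4 are reduced to a comment, and `example_tspd_sel1_handler()` and
-`get_cpu_context()` are hypothetical names.
-
-```
-#include <assert.h>
-#include <stdint.h>
-
-#define SECURE          0U
-#define NON_SECURE      1U
-
-/* Context management APIs quoted in the steps above. */
-void cm_el1_sysregs_context_save(uint32_t security_state);
-void cm_el1_sysregs_context_restore(uint32_t security_state);
-void cm_set_next_eret_context(uint32_t security_state);
-
-/* Hypothetical helper returning the per-cpu cpu_context of a security state. */
-void *get_cpu_context(uint32_t security_state);
-
-/* Simplified outline of the Secure-EL1 interrupt handler registered by the
- * TSPD, following steps 1 to 7 above. */
-static uint64_t example_tspd_sel1_handler(uint32_t id, uint32_t flags,
-                                          void *handle, void *cookie)
-{
-    (void)id;
-    (void)handle;
-    (void)cookie;
-
-    /* Steps 1-2: the interrupt must be a Secure-EL1 interrupt that was
-     * taken from the non-secure state. */
-    assert((flags & 1U) == NON_SECURE);
-
-    /* Step 3: save the non-secure EL1 system register context. */
-    cm_el1_sysregs_context_save(NON_SECURE);
-
-    /* Step 4 (comment only): program ELR_EL3 to tsp_fiq_entry, mask
-     * interrupts in SPSR_EL3 and set x0 to TSP_HANDLE_FIQ_AND_RETURN in
-     * the secure cpu_context. */
-
-    /* Steps 5-6: restore the secure context and select it for the next
-     * exception return from EL3. */
-    cm_el1_sysregs_context_restore(SECURE);
-    cm_set_next_eret_context(SECURE);
-
-    /* Step 7: return the secure cpu_context so that the SP handles the
-     * interrupt. */
-    return (uint64_t)(uintptr_t)get_cpu_context(SECURE);
-}
-```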
-The figure below describes how the interrupt handling is implemented by the TSPD
-when a Secure-EL1 interrupt is generated when execution is in the non-secure
-state.
-
-![Image 1](diagrams/sec-int-handling.png?raw=true)
-
-The TSP issues an SMC with `TSP_HANDLED_S_EL1_FIQ` as the function identifier to
-signal completion of interrupt handling.
-
-The TSP issues an SMC with `TSP_PREEMPTED` as the function identifier to signal
-generation of a non-secure interrupt in Secure-EL1.
-
-The TSPD service takes the following actions in the `tspd_smc_handler()` function
-upon receiving an SMC with `TSP_HANDLED_S_EL1_FIQ` or `TSP_PREEMPTED` as the
-function identifier:
-
-1.  It ensures that the call originated from the secure state otherwise
-    execution returns to the non-secure state with `SMC_UNK` in `x0`.
-
-2.  If the function identifier is `TSP_HANDLED_S_EL1_FIQ`, it restores the
-    saved `ELR_EL3` and `SPSR_EL3` system registers back to the secure CPU
-    context (see step 4 above) in case the TSP had been preempted by a
-    non-secure interrupt earlier. It does not save the secure context since the
-    TSP is expected to preserve it (see Section 2.2.2.1).
-
-3.  If the function identifier is `TSP_PREEMPTED`, it saves the system
-    register context for the secure state by calling
-    `cm_el1_sysregs_context_save(SECURE)`.
-
-4.  It restores the system register context for the non-secure state by
-    calling `cm_el1_sysregs_context_restore(NON_SECURE)`. It sets `x0` to
-    `SMC_PREEMPTED` if the incoming function identifier is
-    `TSP_PREEMPTED`. The Normal World is expected to resume the TSP after the
-    non-secure interrupt handling by issuing an SMC with `TSP_FID_RESUME` as
-    the function identifier.
-
-5.  It ensures that the non-secure CPU context is used to program the next
-    exception return from EL3 by calling
-    `cm_set_next_eret_context(NON_SECURE)`.
-
-6.  `tspd_smc_handler()` returns a reference to the non-secure `cpu_context`
-    as the return value.
-
-As mentioned in 4. above, if a non-secure interrupt preempts the TSP execution
-then the non-secure software issues an SMC with `TSP_FID_RESUME` as the function
-identifier to resume TSP execution. The TSPD service takes the following actions
-in the `tspd_smc_handler()` function upon receiving this SMC:
-
-1.  It ensures that the call originated from the non-secure state. An
-    assertion is raised otherwise.
-
-2.  Checks whether the TSP needs a resume i.e. checks if it was preempted. It
-    then saves the system register context for the non-secure state by calling
-    `cm_el1_sysregs_context_save(NON_SECURE)`.
-
-3.  Restores the secure context by calling
-    `cm_el1_sysregs_context_restore(SECURE)`.
-
-4.  It ensures that the secure CPU context is used to program the next
-    exception return from EL3 by calling `cm_set_next_eret_context(SECURE)`.
-
-5.  `tspd_smc_handler()` returns a reference to the secure `cpu_context` as the
-    return value.
-
-The figure below describes how the TSP/TSPD handle a non-secure interrupt when
-it is generated during execution in the TSP with `PSTATE.I` = 0.
-
-![Image 2](diagrams/non-sec-int-handling.png?raw=true)
-
-
-#### 2.3.3 Secure payload
-The SP should implement one or both of the synchronous and asynchronous
-interrupt handling models depending upon the interrupt routing model it has
-chosen (as described in 2.2.3).
-
-In the synchronous model, it should begin handling a Secure-EL1 interrupt after
-receiving control from the SPD service at an entrypoint agreed upon during build
-time or during the registration phase. Before handling the interrupt, the SP
-should save any Secure-EL1 system register context which is needed for resuming
-    normal execution in the SP later e.g. `SPSR_EL1`, `ELR_EL1`. After handling the
-interrupt, the SP could return control back to the exception level and security
-state where the interrupt was originally taken from. The SP should use an SMC32
-or SMC64 to ask the SPD service to do this.
-
-In the asynchronous model, the Secure Payload is responsible for handling
-non-secure and Secure-EL1 interrupts at the IRQ and FIQ vectors in its exception
-vector table when `PSTATE.I` and `PSTATE.F` bits are 0. As described earlier,
-when a non-secure interrupt is generated, the SP should coordinate with the SPD
-service to pass control back to the non-secure state in the last known exception
-level. This will allow the non-secure interrupt to be handled in the non-secure
-state.
-
-##### 2.3.3.1 Test secure payload behavior
-The TSPD hands control of a Secure-EL1 interrupt to the TSP at the
-`tsp_fiq_entry()` entrypoint. The TSP handles the interrupt while ensuring
-that the handover agreement described in Section 2.2.2.1 is maintained. It
-updates some statistics by calling `tsp_update_sync_fiq_stats()`. It then
-calls `tsp_fiq_handler()` which:
-
-1.  Checks whether the interrupt is the secure physical timer interrupt. It
-    uses the platform API `plat_ic_get_pending_interrupt_id()` to get the
-    interrupt number.
-
-2.  Handles the interrupt by acknowledging it using the
-    `plat_ic_acknowledge_interrupt()` platform API, calling
-    `tsp_generic_timer_handler()` to reprogram the secure physical generic
-    timer and calling the `plat_ic_end_of_interrupt()` platform API to signal
-    end of interrupt processing.
-
-The TSP passes control back to the TSPD by issuing an SMC64 with
-`TSP_HANDLED_S_EL1_FIQ` as the function identifier.
-
-The TSP handles interrupts under the asynchronous model as follows.
-
-1.  Secure-EL1 interrupts are handled by calling the `tsp_fiq_handler()`
-    function. The function has been described above.
-
-2.  Non-secure interrupts are handled by issuing an SMC64 with `TSP_PREEMPTED`
-    as the function identifier. Execution resumes at the instruction that
-    follows this SMC instruction when the TSPD hands control to the TSP in
-    response to an SMC with `TSP_FID_RESUME` as the function identifier from
-    the non-secure state (see section 2.3.2.1).
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2014, ARM Limited and Contributors. All rights reserved._
-
-[Porting Guide]:             ./porting-guide.md
diff --git a/docs/interrupt-framework-design.rst b/docs/interrupt-framework-design.rst
new file mode 100644
index 0000000..940bc24
--- /dev/null
+++ b/docs/interrupt-framework-design.rst
@@ -0,0 +1,1003 @@
+ARM Trusted Firmware Interrupt Management Design guide
+======================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This framework is responsible for managing interrupts routed to EL3. It also
+allows EL3 software to configure the interrupt routing behavior. Its main
+objective is to implement the following two requirements.
+
+#. It should be possible to route interrupts meant to be handled by secure
+   software (Secure interrupts) to EL3, when execution is in non-secure state
+   (normal world). The framework should then take care of handing control of
+   the interrupt to either software in EL3 or Secure-EL1 depending upon the
+   software configuration and the GIC implementation. This requirement ensures
+   that secure interrupts are under the control of the secure software with
+   respect to their delivery and handling without the possibility of
+   intervention from non-secure software.
+
+#. It should be possible to route interrupts meant to be handled by
+   non-secure software (Non-secure interrupts) to the last executed exception
+   level in the normal world when the execution is in secure world at
+   exception levels lower than EL3. This could be done with or without the
+   knowledge of software executing in Secure-EL1/Secure-EL0. The choice of
+   approach should be governed by the secure software. This requirement
+   ensures that non-secure software is able to execute in tandem with the
+   secure software without overriding it.
+
+Concepts
+--------
+
+Interrupt types
+~~~~~~~~~~~~~~~
+
+The framework categorises an interrupt to be one of the following depending upon
+the exception level(s) it is handled in.
+
+#. Secure EL1 interrupt. This type of interrupt can be routed to EL3 or
+   Secure-EL1 depending upon the security state of the current execution
+   context. It is always handled in Secure-EL1.
+
+#. Non-secure interrupt. This type of interrupt can be routed to EL3,
+   Secure-EL1, Non-secure EL1 or EL2 depending upon the security state of the
+   current execution context. It is always handled in either Non-secure EL1
+   or EL2.
+
+#. EL3 interrupt. This type of interrupt can be routed to EL3 or Secure-EL1
+   depending upon the security state of the current execution context. It is
+   always handled in EL3.
+
+The following constants define the various interrupt types in the framework
+implementation.
+
+::
+
+    #define INTR_TYPE_S_EL1      0
+    #define INTR_TYPE_EL3        1
+    #define INTR_TYPE_NS         2
+
+Routing model
+~~~~~~~~~~~~~
+
+A type of interrupt can be either generated as an FIQ or an IRQ. The target
+exception level of an interrupt type is configured through the FIQ and IRQ bits
+in the Secure Configuration Register at EL3 (``SCR_EL3.FIQ`` and ``SCR_EL3.IRQ``
+bits). When ``SCR_EL3.FIQ``\ =1, FIQs are routed to EL3. Otherwise they are routed
+to the First Exception Level (FEL) capable of handling interrupts. When
+``SCR_EL3.IRQ``\ =1, IRQs are routed to EL3. Otherwise they are routed to the
+FEL. This register is configured independently by EL3 software for each security
+state prior to entry into a lower exception level in that security state.
+
+A routing model for a type of interrupt (generated as FIQ or IRQ) is defined as
+its target exception level for each security state. It is represented by a
+single bit for each security state. A value of ``0`` means that the interrupt
+should be routed to the FEL. A value of ``1`` means that the interrupt should be
+routed to EL3. A routing model is applicable only when execution is not in EL3.
+
+The default routing model for an interrupt type is to route it to the FEL in
+either security state.
+
+Valid routing models
+~~~~~~~~~~~~~~~~~~~~
+
+The framework considers certain routing models for each type of interrupt to be
+incorrect as they conflict with the requirements mentioned in Section 1. The
+following sub-sections describe all the possible routing models and specify
+which ones are valid or invalid. EL3 interrupts are currently supported only
+for GIC version 3.0 (ARM GICv3) and only the Secure-EL1 and Non-secure interrupt
+types are supported for GIC version 2.0 (ARM GICv2) (See 1.2). The terminology
+used in the following sub-sections is explained below.
+
+#. **CSS**. Current Security State. ``0`` when secure and ``1`` when non-secure
+
+#. **TEL3**. Target Exception Level 3. ``0`` when targeted to the FEL. ``1`` when
+   targeted to EL3.
+
+Secure-EL1 interrupts
+^^^^^^^^^^^^^^^^^^^^^
+
+#. **CSS=0, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   secure state. This is a valid routing model as secure software is in
+   control of handling secure interrupts.
+
+#. **CSS=0, TEL3=1**. Interrupt is routed to EL3 when execution is in secure
+   state. This is a valid routing model as secure software in EL3 can
+   handover the interrupt to Secure-EL1 for handling.
+
+#. **CSS=1, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   non-secure state. This is an invalid routing model as a secure interrupt
+   is not visible to the secure software which violates the motivation behind
+   the ARM Security Extensions.
+
+#. **CSS=1, TEL3=1**. Interrupt is routed to EL3 when execution is in
+   non-secure state. This is a valid routing model as secure software in EL3
+   can handover the interrupt to Secure-EL1 for handling.
+
+Non-secure interrupts
+^^^^^^^^^^^^^^^^^^^^^
+
+#. **CSS=0, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   secure state. This allows the secure software to trap non-secure
+   interrupts, perform its book-keeping and hand the interrupt to the
+   non-secure software through EL3. This is a valid routing model as secure
+   software is in control of how its execution is preempted by non-secure
+   interrupts.
+
+#. **CSS=0, TEL3=1**. Interrupt is routed to EL3 when execution is in secure
+   state. This is a valid routing model as secure software in EL3 can save
+   the state of software in Secure-EL1/Secure-EL0 before handing the
+   interrupt to non-secure software. This model requires additional
+   coordination between Secure-EL1 and EL3 software to ensure that the
+   former's state is correctly saved by the latter.
+
+#. **CSS=1, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   non-secure state. This is a valid routing model as a non-secure interrupt
+   is handled by non-secure software.
+
+#. **CSS=1, TEL3=1**. Interrupt is routed to EL3 when execution is in
+   non-secure state. This is an invalid routing model as there is no valid
+   reason to route the interrupt to EL3 software and then hand it back to
+   non-secure software for handling.
+
+EL3 interrupts
+^^^^^^^^^^^^^^
+
+#. **CSS=0, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   Secure-EL1/Secure-EL0. This is a valid routing model as secure software
+   in Secure-EL1/Secure-EL0 is in control of how its execution is preempted
+   by EL3 interrupt and can handover the interrupt to EL3 for handling.
+
+#. **CSS=0, TEL3=1**. Interrupt is routed to EL3 when execution is in
+   Secure-EL1/Secure-EL0. This is a valid routing model as secure software
+   in EL3 can handle the interrupt.
+
+#. **CSS=1, TEL3=0**. Interrupt is routed to the FEL when execution is in
+   non-secure state. This is an invalid routing model as a secure interrupt
+   is not visible to the secure software which violates the motivation behind
+   the ARM Security Extensions.
+
+#. **CSS=1, TEL3=1**. Interrupt is routed to EL3 when execution is in
+   non-secure state. This is a valid routing model as secure software in EL3
+   can handle the interrupt.
+
+Mapping of interrupt type to signal
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The framework is meant to work with any interrupt controller implemented by a
+platform. An interrupt controller could generate a type of interrupt as either an
+FIQ or IRQ signal to the CPU depending upon the current security state. The
+mapping between the type and signal is known only to the platform. The framework
+uses this information to determine whether the IRQ or the FIQ bit should be
+programmed in ``SCR_EL3`` while applying the routing model for a type of
+interrupt. The platform provides this information through the
+``plat_interrupt_type_to_line()`` API (described in the
+`Porting Guide`_). For example, on the FVP port when the platform uses an ARM GICv2
+interrupt controller, Secure-EL1 interrupts are signaled through the FIQ signal
+while Non-secure interrupts are signaled through the IRQ signal. This applies
+when execution is in either security state.
+
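+As an illustration, a GICv2-like platform might implement this API along the
+following lines. This is a sketch under assumptions: the exact signature, the
+return convention (the ``SCR_EL3`` bit position of the chosen line) and the
+constants are illustrative rather than a definitive implementation.
+
+.. code:: c
+
+    #include <stdint.h>
+
+    /* Interrupt type constants from earlier in this document and assumed
+     * SCR_EL3 bit positions for the IRQ and FIQ routing bits. */
+    #define INTR_TYPE_S_EL1   0U
+    #define INTR_TYPE_EL3     1U
+    #define INTR_TYPE_NS      2U
+    #define SCR_IRQ_SHIFT     1U
+    #define SCR_FIQ_SHIFT     2U
+
+    /* GICv2-like mapping: Non-secure interrupts use the IRQ signal and
+     * secure interrupts use the FIQ signal in either security state. */
+    uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state)
+    {
+        (void)security_state;
+
+        if (type == INTR_TYPE_NS)
+            return SCR_IRQ_SHIFT;
+
+        return SCR_FIQ_SHIFT;
+    }
+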
+Effect of mapping of several interrupt types to one signal
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It should be noted that if more than one interrupt type maps to a single
+interrupt signal, and if any one of the interrupt types sets **TEL3=1** for a
+particular security state, then the interrupt signal will be routed to EL3 when in
+that security state. This means that all the other interrupt types using the
+same interrupt signal will be forced to the same routing model. This should be
+borne in mind when choosing the routing model for an interrupt type.
+
+For example, in ARM GICv3, when the execution context is Secure-EL1/
+Secure-EL0, both the EL3 and the non-secure interrupt types map to the FIQ
+signal. So if either one of the interrupt types sets the routing model so
+that **TEL3=1** when **CSS=0**, the FIQ bit in ``SCR_EL3`` will be programmed to
+route the FIQ signal to EL3 when executing in Secure-EL1/Secure-EL0, thereby
+effectively routing the other interrupt type also to EL3.
+
+Assumptions in Interrupt Management Framework
+---------------------------------------------
+
+The framework makes the following assumptions to simplify its implementation.
+
+#. Although the framework has support for 2 types of secure interrupts (EL3
+   and Secure-EL1 interrupt), only interrupt controller architectures
+   like ARM GICv3 have architectural support for EL3 interrupts in the form of
+   Group 0 interrupts. In ARM GICv2, all secure interrupts are assumed to be
+   handled in Secure-EL1. They can be delivered to Secure-EL1 via EL3 but they
+   cannot be handled in EL3.
+
+#. Interrupt exceptions (``PSTATE.I`` and ``F`` bits) are masked during execution
+   in EL3.
+
+Interrupt management
+--------------------
+
+The following sections describe how interrupts are managed by the interrupt
+handling framework. This entails:
+
+#. Providing an interface to allow registration of a handler and specification
+   of the routing model for a type of interrupt.
+
+#. Implementing support to hand control of an interrupt type to its registered
+   handler when the interrupt is generated.
+
+Both aspects of interrupt management involve various components in the secure
+software stack spanning from EL3 to Secure-EL1. These components are described
+in the section 2.1. The framework stores information associated with each type
+of interrupt in the following data structure.
+
+.. code:: c
+
+    typedef struct intr_type_desc {
+            interrupt_type_handler_t handler;
+            uint32_t flags;
+            uint32_t scr_el3[2];
+    } intr_type_desc_t;
+
+The ``flags`` field stores the routing model for the interrupt type in
+bits[1:0]. Bit[0] stores the routing model when execution is in the secure
+state. Bit[1] stores the routing model when execution is in the non-secure
+state. As mentioned in Section 1.2.2, a value of ``0`` implies that the interrupt
+should be targeted to the FEL. A value of ``1`` implies that it should be targeted
+to EL3. The remaining bits are reserved and SBZ. The helper macro
+``set_interrupt_rm_flag()`` should be used to set the bits in the ``flags``
+parameter.
+
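+A minimal sketch of how these bits could be encoded is shown below. The
+``SECURE``/``NON_SECURE`` values and the macro body are assumptions that mirror
+the description above, not the definitive header.
+
+.. code:: c
+
+    #include <stdint.h>
+
+    /* Assumed security state identifiers selecting bits[1:0] of flags. */
+    #define SECURE      0U
+    #define NON_SECURE  1U
+
+    /* Sketch of the helper macro: set the "route to EL3" bit for the given
+     * security state. */
+    #define set_interrupt_rm_flag(flag, ss)  ((flag) |= 1U << (ss))
+
+    static uint32_t example_routing_flags(void)
+    {
+        uint32_t flags = 0U;
+
+        /* Route to EL3 when the lower EL is non-secure; keep the default
+         * FEL routing while in the secure state. */
+        set_interrupt_rm_flag(flags, NON_SECURE);
+
+        return flags;   /* bit[1] set, bit[0] clear */
+    }
+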
+The ``scr_el3[2]`` field also stores the routing model but as a mapping of the
+model in the ``flags`` field to the corresponding bit in the ``SCR_EL3`` for each
+security state.
+
+The framework also depends upon the platform port to configure the interrupt
+controller to distinguish between secure and non-secure interrupts. The platform
+is expected to be aware of the secure devices present in the system and their
+associated interrupt numbers. It should configure the interrupt controller to
+enable the secure interrupts, ensure that their priority is always higher than
+the non-secure interrupts and target them to the primary CPU. It should also
+export the interface described in the `Porting Guide`_ to enable
+handling of interrupts.
+
+In the remainder of this document, for the sake of simplicity an ARM GICv2 system
+is considered and it is assumed that the FIQ signal is used to generate Secure-EL1
+interrupts and the IRQ signal is used to generate non-secure interrupts in either
+security state. EL3 interrupts are not considered.
+
+Software components
+-------------------
+
+Roles and responsibilities for interrupt management are sub-divided between the
+following components of software running in EL3 and Secure-EL1. Each component is
+briefly described below.
+
+#. EL3 Runtime Firmware. This component is common to all ports of the ARM
+   Trusted Firmware.
+
+#. Secure Payload Dispatcher (SPD) service. This service interfaces with the
+   Secure Payload (SP) software which runs in Secure-EL1/Secure-EL0 and is
+   responsible for switching execution between secure and non-secure states.
+   A switch is triggered by a Secure Monitor Call and it uses the APIs
+   exported by the Context management library to implement this functionality.
+   Switching execution between the two security states is a requirement for
+   interrupt management as well. This results in a significant dependency on
+   the SPD service. ARM Trusted firmware implements an example Test Secure
+   Payload Dispatcher (TSPD) service.
+
+   An SPD service plugs into the EL3 runtime firmware and could be common to
+   some ports of the ARM Trusted Firmware.
+
+#. Secure Payload (SP). On a production system, the Secure Payload corresponds
+   to a Secure OS which runs in Secure-EL1/Secure-EL0. It interfaces with the
+   SPD service to manage communication with non-secure software. ARM Trusted
+   Firmware implements an example secure payload called Test Secure Payload
+   (TSP) which runs only in Secure-EL1.
+
+   A Secure payload implementation could be common to some ports of the ARM
+   Trusted Firmware just like the SPD service.
+
+Interrupt registration
+----------------------
+
+This section describes in detail the role of each software component (see 2.1)
+during the registration of a handler for an interrupt type.
+
+EL3 runtime firmware
+~~~~~~~~~~~~~~~~~~~~
+
+This component declares the following prototype for a handler of an interrupt type.
+
+.. code:: c
+
+        typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
+                                                     uint32_t flags,
+                                                     void *handle,
+                                                     void *cookie);
+
+The ``id`` parameter is reserved and could be used in the future for passing
+the interrupt id of the highest pending interrupt only if there is a foolproof
+way of determining the id. Currently it contains ``INTR_ID_UNAVAILABLE``.
+
+The ``flags`` parameter contains miscellaneous information as follows.
+
+#. Security state, bit[0]. This bit indicates the security state of the lower
+   exception level when the interrupt was generated. A value of ``1`` means
+   that it was in the non-secure state. A value of ``0`` indicates that it was
+   in the secure state. This bit can be used by the handler to ensure that
+   the interrupt was generated and routed as per the routing model specified
+   during registration.
+
+#. Reserved, bits[31:1]. The remaining bits are reserved for future use.
+
+The ``handle`` parameter points to the ``cpu_context`` structure of the current CPU
+for the security state specified in the ``flags`` parameter.
+
+Once the handler routine completes, execution will return to either the secure
+or non-secure state. The handler routine must return a pointer to
+``cpu_context`` structure of the current CPU for the target security state. On
+AArch64, this return value is currently ignored by the caller as the
+appropriate ``cpu_context`` to be used is expected to be set by the handler
+via the context management library APIs.
+A portable interrupt handler implementation must set the target context both in
+the structure pointed to by the returned pointer and via the context management
+library APIs. The handler should treat all error conditions as critical errors
+and take appropriate action within its implementation e.g. use assertion
+failures.
+
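+To make the prototype and the ``flags`` convention concrete, a minimal handler
+skeleton is sketched below. The function name and body are assumptions; a real
+SPD handler would additionally switch the EL1 context as described later in
+this document.
+
+.. code:: c
+
+    #include <assert.h>
+    #include <stddef.h>
+    #include <stdint.h>
+
+    #define NON_SECURE  1U   /* assumed value of the security state flag */
+
+    typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
+                                                 uint32_t flags,
+                                                 void *handle,
+                                                 void *cookie);
+
+    /* Hypothetical handler for Secure-EL1 interrupts routed from the
+     * non-secure state. */
+    static uint64_t example_sel1_handler(uint32_t id, uint32_t flags,
+                                         void *handle, void *cookie)
+    {
+        (void)id;
+        (void)cookie;
+
+        /* bit[0] of flags carries the interrupted security state; per the
+         * chosen routing model it must be non-secure here. */
+        assert((flags & 1U) == NON_SECURE);
+        assert(handle != NULL);
+
+        /* ... saving the non-secure context and handing the interrupt to
+         * the Secure Payload would happen here ... */
+
+        /* Return a cpu_context pointer for the target security state. The
+         * incoming handle is returned only to keep the sketch
+         * self-contained; a real handler would return the secure context
+         * after switching to it. */
+        return (uint64_t)(uintptr_t)handle;
+    }
+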
+The runtime firmware provides the following API for registering a handler for a
+particular type of interrupt. A Secure Payload Dispatcher service should use
+this API to register a handler for Secure-EL1 and optionally for non-secure
+interrupts. This API also requires the caller to specify the routing model for
+the type of interrupt.
+
+.. code:: c
+
+    int32_t register_interrupt_type_handler(uint32_t type,
+                                            interrupt_type_handler_t handler,
+                                            uint64_t flags);
+
+The ``type`` parameter can be one of the three interrupt types listed above i.e.
+``INTR_TYPE_S_EL1``, ``INTR_TYPE_NS`` & ``INTR_TYPE_EL3``. The ``flags`` parameter
+is as described in Section 2.
+
+The function will return ``0`` upon a successful registration. It will return
+``-EALREADY`` in case a handler for the interrupt type has already been
+registered. If the ``type`` is unrecognised or the ``flags`` or the ``handler`` are
+invalid it will return ``-EINVAL``.
+
+Interrupt routing is governed by the configuration of the ``SCR_EL3.FIQ/IRQ`` bits
+prior to entry into a lower exception level in either security state. The
+context management library maintains a copy of the ``SCR_EL3`` system register for
+each security state in the ``cpu_context`` structure of each CPU. It exports the
+following APIs to let EL3 Runtime Firmware program and retrieve the routing
+model for each security state for the current CPU. The value of ``SCR_EL3`` stored
+in the ``cpu_context`` is used by the ``el3_exit()`` function to program the
+``SCR_EL3`` register prior to returning from the EL3 exception level.
+
+.. code:: c
+
+        uint32_t cm_get_scr_el3(uint32_t security_state);
+        void cm_write_scr_el3_bit(uint32_t security_state,
+                                  uint32_t bit_pos,
+                                  uint32_t value);
+
+``cm_get_scr_el3()`` returns the value of the ``SCR_EL3`` register for the specified
+security state of the current CPU. ``cm_write_scr_el3_bit()`` writes a ``0`` or ``1``
+to the bit specified by ``bit_pos``. ``register_interrupt_type_handler()`` invokes
+the ``set_routing_model()`` API, which programs the ``SCR_EL3`` according to the
+routing model using the ``cm_get_scr_el3()`` and ``cm_write_scr_el3_bit()`` APIs.
+
+It is worth noting that in the current implementation of the framework, the EL3
+runtime firmware is responsible for programming the routing model. The SPD is
+responsible for ensuring that the routing model has been adhered to upon
+receiving an interrupt.
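+
+For illustration only, programming the Secure-EL1 routing model used by the TSP
+(**CSS=0, TEL3=0** and **CSS=1, TEL3=1**, i.e. FIQs routed to EL3 only from the
+non-secure state on a GICv2 system) amounts to something like the sketch below.
+The bit position macros and the function name are illustrative; in practice
+``register_interrupt_type_handler()`` performs this step internally via
+``set_routing_model()``.
+
+.. code:: c
+
+    /* SCR_EL3 bit positions from the ARMv8-A architecture. */
+    #define SCR_EL3_IRQ_BIT_POS    1    /* route IRQs to EL3 when set */
+    #define SCR_EL3_FIQ_BIT_POS    2    /* route FIQs to EL3 when set */
+
+    static void program_sel1_intr_routing_model(void)
+    {
+        /* Secure-EL1 interrupts (FIQs) are routed to EL3 when execution is in
+         * the non-secure state ... */
+        cm_write_scr_el3_bit(NON_SECURE, SCR_EL3_FIQ_BIT_POS, 1);
+
+        /* ... and to the FEL (Secure-EL1) when execution is in the secure
+         * state. */
+        cm_write_scr_el3_bit(SECURE, SCR_EL3_FIQ_BIT_POS, 0);
+    }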
+
+Secure payload dispatcher
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An SPD service is responsible for determining and maintaining the interrupt
+routing model supported by itself and the Secure Payload. It is also responsible
+for ferrying interrupts between secure and non-secure software depending upon
+the routing model. It could determine the routing model at build time or at
+runtime. It must use this information to register a handler for each interrupt
+type using the ``register_interrupt_type_handler()`` API in EL3 runtime firmware.
+
+If the routing model is not known to the SPD service at build time, then it must
+be provided by the SP as the result of its initialisation. The SPD should
+program the routing model only after SP initialisation has completed e.g. in the
+SPD initialisation function pointed to by the ``bl32_init`` variable.
+
+The SPD should determine the mechanism to pass control to the Secure Payload
+after receiving an interrupt from the EL3 runtime firmware. This information
+could either be provided to the SPD service at build time or by the SP at
+runtime.
+
+Test secure payload dispatcher behavior
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The TSPD only handles Secure-EL1 interrupts and is provided with the following
+routing model at build time.
+
+-  Secure-EL1 interrupts are routed to EL3 when execution is in the non-secure
+   state and are routed to the FEL when execution is in the secure state,
+   i.e. **CSS=0, TEL3=0** & **CSS=1, TEL3=1** for Secure-EL1 interrupts.
+
+-  When the build flag ``TSP_NS_INTR_ASYNC_PREEMPT`` is zero, the default routing
+   model is used for non-secure interrupts. They are routed to the FEL in
+   either security state, i.e. **CSS=0, TEL3=0** & **CSS=1, TEL3=0** for
+   non-secure interrupts.
+
+-  When the build flag ``TSP_NS_INTR_ASYNC_PREEMPT`` is defined to 1, then
+   non-secure interrupts are routed to EL3 when execution is in the secure
+   state, i.e. **CSS=0, TEL3=1** for non-secure interrupts. This effectively
+   preempts Secure-EL1. The default routing model is used for non-secure
+   interrupts in the non-secure state, i.e. **CSS=1, TEL3=0**.
+
+It performs the following actions in the ``tspd_init()`` function to fulfill the
+requirements mentioned earlier.
+
+#. It passes control to the Test Secure Payload to perform its
+   initialisation. The TSP provides the address of the vector table
+   ``tsp_vectors`` in the SP which also includes the handler for Secure-EL1
+   interrupts in the ``sel1_intr_entry`` field. The TSPD passes control to the TSP at
+   this address when it receives a Secure-EL1 interrupt.
+
+   The handover agreement between the TSP and the TSPD requires that the TSPD
+   masks all interrupts (``PSTATE.DAIF`` bits) when it calls
+   ``tsp_sel1_intr_entry()``. The TSP has to preserve the callee saved general
+   purpose, SP\_EL1/Secure-EL0, LR, VFP and system registers. It can use
+   ``x0-x18`` to enable its C runtime.
+
+#. The TSPD implements a handler function for Secure-EL1 interrupts. This
+   function is registered with the EL3 runtime firmware using the
+   ``register_interrupt_type_handler()`` API as follows:
+
+   .. code:: c
+
+       /* Forward declaration of the handler function */
+       uint64_t tspd_secure_el1_interrupt_handler(uint32_t id, uint32_t flags,
+                                                  void *handle, void *cookie);
+       int32_t rc;
+       uint64_t flags = 0;
+
+       set_interrupt_rm_flag(flags, NON_SECURE);
+       rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+                                            tspd_secure_el1_interrupt_handler,
+                                            flags);
+       if (rc)
+           panic();
+
+#. When the build flag ``TSP_NS_INTR_ASYNC_PREEMPT`` is defined to 1, the TSPD
+   implements a handler function for non-secure interrupts. This function is
+   registered with the EL3 runtime firmware using the
+   ``register_interrupt_type_handler()`` API as follows:
+
+   .. code:: c
+
+       /* Forward declaration of the handler function */
+       uint64_t tspd_ns_interrupt_handler(uint32_t id, uint32_t flags,
+                                          void *handle, void *cookie);
+       int32_t rc;
+       uint64_t flags = 0;
+
+       set_interrupt_rm_flag(flags, SECURE);
+       rc = register_interrupt_type_handler(INTR_TYPE_NS,
+                                            tspd_ns_interrupt_handler,
+                                            flags);
+       if (rc)
+           panic();
+
+Secure payload
+~~~~~~~~~~~~~~
+
+A Secure Payload must implement an interrupt handling framework at Secure-EL1
+(Secure-EL1 IHF) to support its chosen interrupt routing model. Secure payload
+execution will alternate between the below cases.
+
+#. In the code where IRQ, FIQ or both interrupts are enabled, if an interrupt
+   type is targeted to the FEL, then it will be routed to the Secure-EL1
+   exception vector table. This is defined as the **asynchronous mode** of
+   handling interrupts. This mode applies to both Secure-EL1 and non-secure
+   interrupts.
+
+#. In the code where both interrupts are disabled, if an interrupt type is
+   targeted to the FEL, then execution will eventually migrate to the
+   non-secure state. Any non-secure interrupts will be handled as described
+   in the routing model where **CSS=1 and TEL3=0**. Secure-EL1 interrupts
+   will be routed to EL3 (as per the routing model where **CSS=1 and
+   TEL3=1**) where the SPD service will hand them to the SP. This is defined
+   as the **synchronous mode** of handling interrupts.
+
+The interrupt handling framework implemented by the SP should support one or
+both of these interrupt handling models, depending upon the chosen routing model.
+
+The following list briefly describes how the choice of a valid routing model
+(see Section 1.2.3) affects the implementation of the Secure-EL1 IHF. If the choice of
+the interrupt routing model is not known to the SPD service at compile time,
+then the SP should pass this information to the SPD service at runtime during
+its initialisation phase.
+
+As mentioned earlier, an ARM GICv2 system is considered and it is assumed that
+the FIQ signal is used to generate Secure-EL1 interrupts and the IRQ signal
+is used to generate non-secure interrupts in either security state.
+
+Secure payload IHF design w.r.t. Secure-EL1 interrupts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. **CSS=0, TEL3=0**. If ``PSTATE.F=0``, Secure-EL1 interrupts will be
+   triggered at one of the Secure-EL1 FIQ exception vectors. The Secure-EL1
+   IHF should implement support for handling FIQ interrupts asynchronously.
+
+   If ``PSTATE.F=1`` then Secure-EL1 interrupts will be handled as per the
+   synchronous interrupt handling model. The SP could implement this scenario
+   by exporting a separate entrypoint for Secure-EL1 interrupts to the SPD
+   service during the registration phase. The SPD service would also need to
+   know the state of the system, general purpose and ``PSTATE`` registers
+   with which it should arrange to return execution to the SP. The SP should
+   provide this information in an implementation defined way during the
+   registration phase if it is not known to the SPD service at build time.
+
+#. **CSS=1, TEL3=1**. Interrupts are routed to EL3 when execution is in
+   non-secure state. They should be handled through the synchronous interrupt
+   handling model as described in 1. above.
+
+#. **CSS=0, TEL3=1**. Secure-EL1 interrupts are routed to EL3 when execution
+   is in secure state. They will not be visible to the SP. The ``PSTATE.F`` bit
+   in Secure-EL1/Secure-EL0 will not mask FIQs. The EL3 runtime firmware will
+   call the handler registered by the SPD service for Secure-EL1 interrupts.
+   The Secure-EL1 IHF should then handle all Secure-EL1 interrupts through the
+   synchronous interrupt handling model described in 1. above.
+
+Secure payload IHF design w.r.t. non-secure interrupts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. **CSS=0, TEL3=0**. If ``PSTATE.I=0``, non-secure interrupts will be
+   triggered at one of the Secure-EL1 IRQ exception vectors. The Secure-EL1
+   IHF should co-ordinate with the SPD service to transfer execution to the
+   non-secure state where the interrupt should be handled, e.g. the SP could
+   allocate a function identifier to issue an SMC64 or SMC32 to the SPD
+   service which indicates that the SP execution has been preempted by a
+   non-secure interrupt. If this function identifier is not known to the SPD
+   service at compile time then the SP could provide it during the
+   registration phase.
+
+   If ``PSTATE.I=1`` then the non-secure interrupt will pend until execution
+   resumes in the non-secure state.
+
+#. **CSS=0, TEL3=1**. Non-secure interrupts are routed to EL3. They will not
+   be visible to the SP. The ``PSTATE.I`` bit in Secure-EL1/Secure-EL0 will
+   have no effect. The SPD service should register a non-secure interrupt
+   handler which should save the SP state correctly and resume execution in
+   the non-secure state where the interrupt will be handled. The Secure-EL1
+   IHF does not need to take any action.
+
+#. **CSS=1, TEL3=0**. Non-secure interrupts are handled in the FEL in
+   non-secure state (EL1/EL2) and are not visible to the SP. This routing
+   model does not affect the SP behavior.
+
+A Secure Payload must also ensure that all Secure-EL1 interrupts are correctly
+configured at the interrupt controller by the platform port of the EL3 runtime
+firmware. It should configure any additional Secure-EL1 interrupts which the EL3
+runtime firmware is not aware of through its platform port.
+
+Test secure payload behavior
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The routing model for Secure-EL1 and non-secure interrupts chosen by the TSP is
+described in Section 2.2.2. It is known to the TSPD service at build time.
+
+The TSP implements an entrypoint (``tsp_sel1_intr_entry()``) for handling Secure-EL1
+interrupts taken in non-secure state and routed through the TSPD service
+(synchronous handling model). It passes the reference to this entrypoint via
+``tsp_vectors`` to the TSPD service.
+
+The TSP also replaces the default exception vector table referenced through the
+``early_exceptions`` variable with a vector table capable of handling FIQ and IRQ
+exceptions taken at the same (Secure-EL1) exception level. This table is
+referenced through the ``tsp_exceptions`` variable and programmed into the
+VBAR\_EL1. It caters for the asynchronous handling model.
+
+The TSP also programs the Secure Physical Timer in the ARM Generic Timer block
+to raise a periodic interrupt (every half a second) for the purpose of testing
+interrupt management across all the software components listed in Section 2.1.
+
+Interrupt handling
+------------------
+
+This section describes in detail the role of each software component (see
+Section 2.1) in handling an interrupt of a particular type.
+
+EL3 runtime firmware
+~~~~~~~~~~~~~~~~~~~~
+
+The EL3 runtime firmware populates the IRQ and FIQ exception vectors referenced
+by the ``runtime_exceptions`` variable as follows.
+
+#. IRQ and FIQ exceptions taken from the current exception level with
+   ``SP_EL0`` or ``SP_EL3`` are reported as irrecoverable error conditions. As
+   mentioned earlier, EL3 runtime firmware always executes with the
+   ``PSTATE.I`` and ``PSTATE.F`` bits set.
+
+#. The following text describes how the IRQ and FIQ exceptions taken from a
+   lower exception level using AArch64 or AArch32 are handled.
+
+When an interrupt is generated, the vector for each interrupt type is
+responsible for:
+
+#. Saving the entire general purpose register context (x0-x30) immediately
+   upon exception entry. The registers are saved in the per-cpu ``cpu_context``
+   data structure referenced by the ``SP_EL3`` register.
+
+#. Saving the ``ELR_EL3``, ``SP_EL0`` and ``SPSR_EL3`` system registers in the
+   per-cpu ``cpu_context`` data structure referenced by the ``SP_EL3`` register.
+
+#. Switching to the C runtime stack by restoring the ``CTX_RUNTIME_SP`` value
+   from the per-cpu ``cpu_context`` data structure in ``SP_EL0`` and
+   executing the ``msr spsel, #0`` instruction.
+
+#. Determining the type of interrupt. Secure-EL1 interrupts will be signaled
+   at the FIQ vector. Non-secure interrupts will be signaled at the IRQ
+   vector. The platform should implement the following API to determine the
+   type of the pending interrupt.
+
+   .. code:: c
+
+       uint32_t plat_ic_get_interrupt_type(void);
+
+   It should return either ``INTR_TYPE_S_EL1`` or ``INTR_TYPE_NS``.
+
+#. Determining the handler for the type of interrupt that has been generated.
+   The following API has been added for this purpose.
+
+   .. code:: c
+
+       interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type);
+
+   It returns the reference to the registered handler for this interrupt
+   type. The ``handler`` is retrieved from the ``intr_type_desc_t`` structure as
+   described in Section 2. ``NULL`` is returned if no handler has been
+   registered for this type of interrupt. This scenario is reported as an
+   irrecoverable error condition.
+
+#. Calling the registered handler function for the interrupt type generated.
+   The ``id`` parameter is currently set to ``INTR_ID_UNAVAILABLE``. The id, along
+   with the current security state and a reference to the ``cpu_context_t``
+   structure for the current security state, is passed to the handler function
+   as its arguments (steps 4 to 6 are illustrated by the C sketch after this list).
+
+   The handler function returns a reference to the per-cpu ``cpu_context_t``
+   structure for the target security state.
+
+#. Calling ``el3_exit()`` to return from EL3 into a lower exception level in
+   the security state determined by the handler routine. The ``el3_exit()``
+   function is responsible for restoring the register context from the
+   ``cpu_context_t`` data structure for the target security state.
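+
+An illustrative C rendition of steps 4 to 6 is given below. The real vector code
+in the EL3 runtime firmware is a mixture of assembly and C, so the function name
+and the ``flags``/``handle``/``cookie`` parameters shown here are purely for
+illustration; the called APIs and constants are the ones described above.
+
+.. code:: c
+
+    static void dispatch_lower_el_interrupt(uint32_t flags, void *handle,
+                                            void *cookie)
+    {
+        /* Step 4: classify the pending interrupt (Secure-EL1 vs non-secure). */
+        uint32_t intr_type = plat_ic_get_interrupt_type();
+
+        /* Step 5: look up the handler registered for this interrupt type. */
+        interrupt_type_handler_t handler = get_interrupt_type_handler(intr_type);
+        if (handler == NULL)
+            panic();    /* no handler registered: irrecoverable error */
+
+        /* Step 6: call the handler; the id is currently INTR_ID_UNAVAILABLE. */
+        (void)handler(INTR_ID_UNAVAILABLE, flags, handle, cookie);
+
+        /* Step 7: el3_exit() then restores the register context selected by
+         * the handler and returns to the target security state. */
+    }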
+
+Secure payload dispatcher
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Interrupt entry
+^^^^^^^^^^^^^^^
+
+The SPD service begins handling an interrupt when the EL3 runtime firmware calls
+the handler function for that type of interrupt. The SPD service is responsible
+for the following:
+
+#. Validating the interrupt. This involves ensuring that the interrupt was
+   generated according to the interrupt routing model specified by the SPD
+   service during registration. It should use the security state of the
+   exception level (passed in the ``flags`` parameter of the handler) where
+   the interrupt was taken from to determine this. If the interrupt is not
+   recognised then the handler should treat it as an irrecoverable error
+   condition.
+
+   An SPD service can register a handler for Secure-EL1 and/or non-secure
+   interrupts. A non-secure interrupt should never be routed to EL3 from the
+   non-secure state. Also, if a routing model is chosen where Secure-EL1
+   interrupts are routed to S-EL1 when execution is in the secure state, then an
+   S-EL1 interrupt should never be routed to EL3 from the secure state. The handler
+   could use the security state flag to check this.
+
+#. Determining whether a context switch is required. This depends upon the
+   routing model and interrupt type. For non-secure and S-EL1 interrupts,
+   if the security state of the execution context where the interrupt was
+   generated is not the same as the security state required for handling
+   the interrupt, a context switch is required. The following two cases
+   require a context switch from secure to non-secure or vice versa:
+
+   #. A Secure-EL1 interrupt taken from the non-secure state should be
+      routed to the Secure Payload.
+
+   #. A non-secure interrupt taken from the secure state should be routed
+      to the last known non-secure exception level.
+
+   The SPD service must save the system register context of the current
+   security state. It must then restore the system register context of the
+   target security state. It should use the ``cm_set_next_eret_context()`` API
+   to ensure that the next ``cpu_context`` to be restored is of the target
+   security state.
+
+   If the target state is secure then execution should be handed to the SP as
+   per the synchronous interrupt handling model it implements. A Secure-EL1
+   interrupt can be routed to EL3 while execution is in the SP. This implies
+   that SP execution can be preempted while handling an interrupt by
+   another higher priority Secure-EL1 interrupt or an EL3 interrupt. The SPD
+   service should be able to handle this preemption or manage secure interrupt
+   priorities before handing control to the SP.
+
+#. Setting the return value of the handler to the per-cpu ``cpu_context`` if
+   the interrupt has been successfully validated and is ready to be handled at a
+   lower exception level.
+
+The routing model allows non-secure interrupts to interrupt Secure-EL1 when in
+secure state if it has been configured to do so. The SPD service and the SP
+should implement a mechanism for routing these interrupts to the last known
+exception level in the non-secure state. The former should save the SP context,
+restore the non-secure context and arrange for entry into the non-secure state
+so that the interrupt can be handled.
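+
+As a hedged illustration of the context switch described above (not the actual
+TSPD source), an SPD handler for a Secure-EL1 interrupt taken from the
+non-secure state might look like the sketch below. The function name is
+hypothetical and ``cm_get_context()`` is assumed to return the ``cpu_context``
+of the requested security state; the remaining calls are the context management
+APIs referred to in this document.
+
+.. code:: c
+
+    static uint64_t spd_sel1_interrupt_handler(uint32_t id, uint32_t flags,
+                                               void *handle, void *cookie)
+    {
+        (void)id;
+        (void)cookie;
+
+        /* Validate the routing model: bit[0] of 'flags' must indicate that
+         * the interrupt was taken from the non-secure state. */
+        if ((flags & 1U) != 1U)
+            panic();
+
+        /* Save the non-secure system register context ... */
+        cm_el1_sysregs_context_save(NON_SECURE);
+
+        /* ... restore the secure context and select it for the next ERET. */
+        cm_el1_sysregs_context_restore(SECURE);
+        cm_set_next_eret_context(SECURE);
+
+        /* Hand the interrupt to the Secure Payload by returning its
+         * cpu_context. */
+        return (uint64_t)(uintptr_t)cm_get_context(SECURE);
+    }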
+
+Interrupt exit
+^^^^^^^^^^^^^^
+
+When the Secure Payload has finished handling a Secure-EL1 interrupt, it could
+return control back to the SPD service through an SMC32 or SMC64. The SPD service
+should handle this secure monitor call so that execution resumes in the
+exception level and the security state from where the Secure-EL1 interrupt was
+originally taken.
+
+Test secure payload dispatcher Secure-EL1 interrupt handling
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The example TSPD service registers a handler for Secure-EL1 interrupts taken
+from the non-secure state. During execution in S-EL1, the TSPD expects that the
+Secure-EL1 interrupts are handled in S-EL1 by the TSP. Its handler,
+``tspd_secure_el1_interrupt_handler()``, expects to be invoked only for Secure-EL1
+interrupts originating from the non-secure state. It takes the following actions
+upon being invoked.
+
+#. It uses the security state provided in the ``flags`` parameter to ensure
+   that the secure interrupt originated from the non-secure state. It asserts
+   if this is not the case.
+
+#. It saves the system register context for the non-secure state by calling
+   ``cm_el1_sysregs_context_save(NON_SECURE);``.
+
+#. It sets the ``ELR_EL3`` system register to ``tsp_sel1_intr_entry`` and sets the
+   ``SPSR_EL3.DAIF`` bits in the secure CPU context. It sets ``x0`` to
+   ``TSP_HANDLE_SEL1_INTR_AND_RETURN``. If the TSP was preempted earlier by a
+   non-secure interrupt during ``yielding`` SMC processing, it saves the registers
+   that will be overwritten, namely ``ELR_EL3`` and ``SPSR_EL3``, in order to be
+   able to re-enter the TSP for Secure-EL1 interrupt processing. It does not need
+   to save any other secure context since the TSP is expected to preserve it
+   (see Section 2.2.2.1).
+
+#. It restores the system register context for the secure state by calling
+   ``cm_el1_sysregs_context_restore(SECURE);``.
+
+#. It ensures that the secure CPU context is used to program the next
+   exception return from EL3 by calling ``cm_set_next_eret_context(SECURE);``.
+
+#. It returns the per-cpu ``cpu_context`` to indicate that the interrupt can
+   now be handled by the SP. ``x1`` is written with the value of the ``ELR_EL3``
+   register for the non-secure state. This information is used by the SP for
+   debugging purposes.
+
+The figure below describes how the interrupt handling is implemented by the TSPD
+when a Secure-EL1 interrupt is generated when execution is in the non-secure
+state.
+
+|Image 1|
+
+The TSP issues an SMC with ``TSP_HANDLED_S_EL1_INTR`` as the function identifier to
+signal completion of interrupt handling.
+
+The TSPD service takes the following actions in ``tspd_smc_handler()`` function
+upon receiving an SMC with ``TSP_HANDLED_S_EL1_INTR`` as the function identifier:
+
+#. It ensures that the call originated from the secure state otherwise
+   execution returns to the non-secure state with ``SMC_UNK`` in ``x0``.
+
+#. It restores the saved ``ELR_EL3`` and ``SPSR_EL3`` system registers back to
+   the secure CPU context (see step 3 above) in case the TSP had been preempted
+   by a non-secure interrupt earlier.
+
+#. It restores the system register context for the non-secure state by
+   calling ``cm_el1_sysregs_context_restore(NON_SECURE)``.
+
+#. It ensures that the non-secure CPU context is used to program the next
+   exception return from EL3 by calling ``cm_set_next_eret_context(NON_SECURE)``.
+
+#. ``tspd_smc_handler()`` returns a reference to the non-secure ``cpu_context``
+   as the return value.
+
+Test secure payload dispatcher non-secure interrupt handling
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The TSP in Secure-EL1 can be preempted by a non-secure interrupt during
+``yielding`` SMC processing or by a higher priority EL3 interrupt during
+Secure-EL1 interrupt processing. Currently only non-secure interrupts can
+cause preemption of the TSP since there are no EL3 interrupts in the
+system.
+
+It should be noted that while the TSP is preempted, the TSPD only allows entry into
+the TSP either for Secure-EL1 interrupt handling or for resuming the preempted
+``yielding`` SMC in response to the ``TSP_FID_RESUME`` SMC from the normal world.
+(See Section 3).
+
+The non-secure interrupt triggered in Secure-EL1 during ``yielding`` SMC processing
+can be routed to either EL3 or Secure-EL1 and is controlled by build option
+``TSP_NS_INTR_ASYNC_PREEMPT`` (see Section 2.2.2.1). If the build option is set,
+the TSPD will set the routing model for the non-secure interrupt to be routed to
+EL3 from secure state i.e. **TEL3=1, CSS=0** and registers
+``tspd_ns_interrupt_handler()`` as the non-secure interrupt handler. The
+``tspd_ns_interrupt_handler()`` on being invoked ensures that the interrupt
+originated from the secure state and disables routing of non-secure interrupts
+from secure state to EL3. This is to prevent further preemption (by a non-secure
+interrupt) when the TSP is re-entered for handling Secure-EL1 interrupts that
+triggered while execution was in the normal world. The
+``tspd_ns_interrupt_handler()`` then invokes ``tspd_handle_sp_preemption()`` for
+further handling.
+
+If the ``TSP_NS_INTR_ASYNC_PREEMPT`` build option is zero (default), the default
+routing model for non-secure interrupt in secure state is in effect
+i.e. **TEL3=0, CSS=0**. During ``yielding`` SMC processing, the IRQ
+exceptions are unmasked i.e. ``PSTATE.I=0``, and a non-secure interrupt will
+trigger at Secure-EL1 IRQ exception vector. The TSP saves the general purpose
+register context and issues an SMC with ``TSP_PREEMPTED`` as the function
+identifier to signal preemption of TSP. The TSPD SMC handler,
+``tspd_smc_handler()``, ensures that the SMC call originated from the
+secure state otherwise execution returns to the non-secure state with
+``SMC_UNK`` in ``x0``. It then invokes ``tspd_handle_sp_preemption()`` for
+further handling.
+
+The ``tspd_handle_sp_preemption()`` function takes the following actions upon
+being invoked (a C sketch follows this list):
+
+#. It saves the system register context for the secure state by calling
+   ``cm_el1_sysregs_context_save(SECURE)``.
+
+#. It restores the system register context for the non-secure state by
+   calling ``cm_el1_sysregs_context_restore(NON_SECURE)``.
+
+#. It ensures that the non-secure CPU context is used to program the next
+   exception return from EL3 by calling ``cm_set_next_eret_context(NON_SECURE)``.
+
+#. It sets ``SMC_PREEMPTED`` in ``x0`` and returns to the non-secure state after
+   restoring the non-secure context.
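+
+As a sketch only (not the actual TSPD source), this sequence amounts to the
+following; ``cm_get_context()`` and the ``SMC_RET1()`` return plumbing are
+assumptions, while the context management calls are those listed above.
+
+.. code:: c
+
+    static uint64_t tspd_handle_sp_preemption_sketch(void *handle)
+    {
+        (void)handle;
+
+        /* Save the preempted secure context and switch to the last known
+         * non-secure context. */
+        cm_el1_sysregs_context_save(SECURE);
+        cm_el1_sysregs_context_restore(NON_SECURE);
+        cm_set_next_eret_context(NON_SECURE);
+
+        /* Return SMC_PREEMPTED in x0 to the normal world caller. */
+        SMC_RET1(cm_get_context(NON_SECURE), SMC_PREEMPTED);
+    }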
+
+The Normal World is expected to resume the TSP after the ``yielding`` SMC preemption
+by issuing an SMC with ``TSP_FID_RESUME`` as the function identifier (see section 3).
+The TSPD service takes the following actions in ``tspd_smc_handler()`` function
+upon receiving this SMC:
+
+#. It ensures that the call originated from the non-secure state. An
+   assertion is raised otherwise.
+
+#. It checks whether the TSP needs to be resumed, i.e. whether it was preempted. It
+   then saves the system register context for the non-secure state by calling
+   ``cm_el1_sysregs_context_save(NON_SECURE)``.
+
+#. It restores the secure context by calling
+   ``cm_el1_sysregs_context_restore(SECURE)``.
+
+#. It ensures that the secure CPU context is used to program the next
+   exception return from EL3 by calling ``cm_set_next_eret_context(SECURE)``.
+
+#. ``tspd_smc_handler()`` returns a reference to the secure ``cpu_context`` as the
+   return value.
+
+The figure below describes how the TSP/TSPD handle a non-secure interrupt when
+it is generated during execution in the TSP with ``PSTATE.I`` = 0 when the
+``TSP_NS_INTR_ASYNC_PREEMPT`` build flag is 0.
+
+|Image 2|
+
+Secure payload
+~~~~~~~~~~~~~~
+
+The SP should implement one or both of the synchronous and asynchronous
+interrupt handling models depending upon the interrupt routing model it has
+chosen (as described in 2.2.3).
+
+In the synchronous model, it should begin handling a Secure-EL1 interrupt after
+receiving control from the SPD service at an entrypoint agreed upon during build
+time or during the registration phase. Before handling the interrupt, the SP
+should save any Secure-EL1 system register context which is needed for resuming
+normal execution in the SP later, e.g. ``SPSR_EL1`` and ``ELR_EL1``. After handling the
+interrupt, the SP could return control back to the exception level and security
+state where the interrupt was originally taken from. The SP should use an SMC32
+or SMC64 to ask the SPD service to do this.
+
+In the asynchronous model, the Secure Payload is responsible for handling
+non-secure and Secure-EL1 interrupts at the IRQ and FIQ vectors in its exception
+vector table when ``PSTATE.I`` and ``PSTATE.F`` bits are 0. As described earlier,
+when a non-secure interrupt is generated, the SP should coordinate with the SPD
+service to pass control back to the non-secure state in the last known exception
+level. This will allow the non-secure interrupt to be handled in the non-secure
+state.
+
+Test secure payload behavior
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The TSPD hands control of a Secure-EL1 interrupt to the TSP at the
+``tsp_sel1_intr_entry()``. The TSP handles the interrupt while ensuring that the
+handover agreement described in Section 2.2.2.1 is maintained. It updates some
+statistics by calling ``tsp_update_sync_sel1_intr_stats()``. It then calls
+``tsp_common_int_handler()``, which does the following (a C sketch follows this list):
+
+#. Checks whether the interrupt is the secure physical timer interrupt. It
+   uses the platform API ``plat_ic_get_pending_interrupt_id()`` to get the
+   interrupt number. If it is not the secure physical timer interrupt, then
+   that means that a higher priority interrupt has preempted it. It invokes
+   ``tsp_handle_preemption()`` to hand control back to EL3 by issuing
+   an SMC with ``TSP_PREEMPTED`` as the function identifier.
+
+#. It handles the secure physical timer interrupt by acknowledging it using the
+   ``plat_ic_acknowledge_interrupt()`` platform API, calling
+   ``tsp_generic_timer_handler()`` to reprogram the secure physical generic
+   timer and calling the ``plat_ic_end_of_interrupt()`` platform API to signal
+   end of interrupt processing.
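+
+A hedged C sketch of this flow follows (it is not the actual TSP source).
+``TSP_IRQ_SEC_PHY_TIMER`` is assumed to be the platform-defined interrupt ID of
+the secure physical timer, and the return type of ``tsp_handle_preemption()`` is
+an assumption; the remaining calls are those named in the list above.
+
+.. code:: c
+
+    static int32_t tsp_common_int_handler_sketch(void)
+    {
+        uint32_t id = plat_ic_get_pending_interrupt_id();
+
+        /* A different (higher priority) interrupt has preempted the TSP:
+         * hand control back to EL3 via an SMC with TSP_PREEMPTED. */
+        if (id != TSP_IRQ_SEC_PHY_TIMER)
+            return tsp_handle_preemption();
+
+        /* Acknowledge the secure physical timer interrupt, reprogram the
+         * timer and signal the end of interrupt processing. */
+        plat_ic_acknowledge_interrupt();
+        tsp_generic_timer_handler();
+        plat_ic_end_of_interrupt(id);
+
+        return 0;
+    }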
+
+The TSP passes control back to the TSPD by issuing an SMC64 with
+``TSP_HANDLED_S_EL1_INTR`` as the function identifier.
+
+The TSP handles interrupts under the asynchronous model as follows.
+
+#. Secure-EL1 interrupts are handled by calling the ``tsp_common_int_handler()``
+   function. The function has been described above.
+
+#. Non-secure interrupts are handled by calling the ``tsp_common_int_handler()``
+   function which ends up invoking ``tsp_handle_preemption()`` and issuing an
+   SMC64 with ``TSP_PREEMPTED`` as the function identifier. Execution resumes at
+   the instruction that follows this SMC instruction when the TSPD hands
+   control to the TSP in response to an SMC with ``TSP_FID_RESUME`` as the
+   function identifier from the non-secure state (see section 2.3.2.4).
+
+Other considerations
+--------------------
+
+Implication of preempted SMC on Non-Secure Software
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A ``yielding`` SMC call to the Secure Payload can be preempted by a non-secure
+interrupt and execution can return to the non-secure world to handle
+the interrupt (for details on ``yielding`` SMCs refer to the `SMC calling convention`_).
+In this case, the SMC call has not completed its execution and execution
+must return to the Secure Payload to resume the preempted SMC call.
+This is achieved by issuing an SMC call which requests that the preempted
+SMC be resumed.
+
+A ``fast`` SMC cannot be preempted and hence this case will not happen for
+a fast SMC call.
+
+In the Test Secure Payload implementation, ``TSP_FID_RESUME`` is designated
+as the resume SMC FID. It is important to note that ``TSP_FID_RESUME`` is a
+``yielding`` SMC which means it too can be preempted. The typical non-secure
+software sequence for issuing a ``yielding`` SMC would look like this,
+assuming ``PSTATE.I=0`` in the non-secure state:
+
+.. code:: c
+
+    int rc;
+    rc = smc(TSP_YIELD_SMC_FID, ...);     /* Issue a Yielding SMC call */
+    /* The pending non-secure interrupt is handled by the interrupt handler
+       and returns back here. */
+    while (rc == SMC_PREEMPTED) {       /* Check if the SMC call is preempted */
+        rc = smc(TSP_FID_RESUME);       /* Issue resume SMC call */
+    }
+
+``TSP_YIELD_SMC_FID`` is any ``yielding`` SMC function identifier and the smc()
+function invokes an SMC call with the required arguments. The pending non-secure
+interrupt causes an IRQ exception and the IRQ handler registered at the
+exception vector handles the non-secure interrupt and returns. The return value
+from the SMC call is tested for ``SMC_PREEMPTED`` to check whether it was
+preempted. If it was, then the resume SMC call ``TSP_FID_RESUME`` is issued. The
+return value of the SMC call is tested again to check if it was preempted.
+This is done in a loop until the SMC call succeeds or fails. While a ``yielding``
+SMC remains preempted, i.e. until it is resumed using the ``TSP_FID_RESUME`` SMC
+and completed, the current TSPD prevents any other SMC call from re-entering the
+TSP by returning the ``SMC_UNK`` error.
+
+--------------
+
+*Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.*
+
+.. _Porting Guide: ./porting-guide.rst
+.. _SMC calling convention: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+
+.. |Image 1| image:: diagrams/sec-int-handling.png?raw=true
+.. |Image 2| image:: diagrams/non-sec-int-handling.png?raw=true
diff --git a/docs/optee-dispatcher.md b/docs/optee-dispatcher.md
deleted file mode 100644
index c154f6b..0000000
--- a/docs/optee-dispatcher.md
+++ /dev/null
@@ -1,13 +0,0 @@
-OP-TEE Dispatcher
-=================
-
-[OP-TEE OS] is a Trusted OS running as Secure EL1.
-
-To build and execute [OP-TEE OS] follow the instructions at
-[ARM Trusted Firmware with OP-TEE] [OP-TEE OS]
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2014, ARM Limited and Contributors. All rights reserved._
-
-[OP-TEE OS]:  http://github.com/OP-TEE/optee_os/tree/master/documentation/arm_trusted_firmware.md
diff --git a/docs/plat/hikey.rst b/docs/plat/hikey.rst
new file mode 100644
index 0000000..1c48104
--- /dev/null
+++ b/docs/plat/hikey.rst
@@ -0,0 +1,169 @@
+Description
+===========
+
+HiKey is one of the 96Boards development boards. The Hisilicon Kirin6220
+processor is installed on HiKey.
+
+More information is available at `link`_.
+
+How to build
+============
+
+Code Locations
+--------------
+
+-  ARM Trusted Firmware:
+   `link <https://github.com/ARM-software/arm-trusted-firmware>`__
+
+-  OP-TEE
+   `link <https://github.com/OP-TEE/optee_os>`__
+
+-  edk2:
+   `link <https://github.com/96boards-hikey/edk2/tree/testing/hikey960_v2.5>`__
+
+-  OpenPlatformPkg:
+   `link <https://github.com/96boards-hikey/OpenPlatformPkg/tree/testing/hikey960_v1.3.4>`__
+
+-  l-loader:
+   `link <https://github.com/96boards-hikey/l-loader/tree/testing/hikey960_v1.2>`__
+
+-  uefi-tools:
+   `link <https://git.linaro.org/uefi/uefi-tools.git>`__
+
+-  atf-fastboot:
+   `link <https://github.com/96boards-hikey/atf-fastboot/tree/master>`__
+
+Build Procedure
+---------------
+
+-  Fetch all the above repositories into the local host.
+   Place all the repositories in the same ${BUILD\_PATH}.
+
+   .. code:: shell
+
+       git clone https://github.com/ARM-software/arm-trusted-firmware -b integration
+       git clone https://github.com/OP-TEE/optee_os
+       git clone https://github.com/96boards-hikey/edk2 -b testing/hikey960_v2.5
+       git clone https://github.com/96boards-hikey/OpenPlatformPkg -b testing/hikey960_v1.3.4
+       git clone https://github.com/96boards-hikey/l-loader -b testing/hikey960_v1.2
+       git clone https://git.linaro.org/uefi/uefi-tools
+       git clone https://github.com/96boards-hikey/atf-fastboot
+
+-  Create a symbolic link to OpenPlatformPkg in edk2.
+
+   .. code:: shell
+
+       $cd ${BUILD_PATH}/edk2
+       $ln -sf ../OpenPlatformPkg
+
+-  Prepare the AARCH64 and AARCH32 toolchains. Prepare python.
+
+-  If your hikey hardware is built by CircuitCo, update *uefi-tools/platform.config* first. *(optional)*
+   **Uncomment the line below. Otherwise, UEFI can't output messages on the serial
+   console on hikey.**
+
+   .. code:: shell
+
+       BUILDFLAGS=-DSERIAL_BASE=0xF8015000
+
+   If your hikey hardware is built by LeMaker, no changes are needed.
+
+-  Build in debug mode. Create your own build script file, or refer to **build\_uefi.sh** in the l-loader git repository.
+
+   .. code:: shell
+
+       BUILD_OPTION=DEBUG
+       export AARCH64_TOOLCHAIN=GCC5
+       export UEFI_TOOLS_DIR=${BUILD_PATH}/uefi-tools
+       export EDK2_DIR=${BUILD_PATH}/edk2
+       EDK2_OUTPUT_DIR=${EDK2_DIR}/Build/HiKey/${BUILD_OPTION}_${AARCH64_TOOLCHAIN}
+       # Build fastboot for ARM Trusted Firmware. It's used for recovery mode.
+       cd ${BUILD_PATH}/atf-fastboot
+       CROSS_COMPILE=aarch64-linux-gnu- make PLAT=hikey DEBUG=1
+       # Convert DEBUG/RELEASE to debug/release
+       FASTBOOT_BUILD_OPTION=$(echo ${BUILD_OPTION} | tr '[A-Z]' '[a-z]')
+       cd ${EDK2_DIR}
+       # Build UEFI & ARM Trusted Firmware
+       ${UEFI_TOOLS_DIR}/uefi-build.sh -b ${BUILD_OPTION} -a ../arm-trusted-firmware -s ../optee_os hikey
+
+-  Generate l-loader.bin and the partition table for AOSP. The eMMC capacity is either 8GB or 4GB. Just change "aosp-8g" to "linux-8g" for Debian.
+
+   .. code:: shell
+
+       cd ${BUILD_PATH}/l-loader
+       ln -sf ${EDK2_OUTPUT_DIR}/FV/bl1.bin
+       ln -sf ${BUILD_PATH}/atf-fastboot/build/hikey/${FASTBOOT_BUILD_OPTION}/bl1.bin fastboot.bin
+       make hikey PTABLE_LST=aosp-8g
+
+Setup Console
+-------------
+
+-  Install ser2net. Use telnet as the console since UEFI fails to display the Boot Manager GUI in minicom. **If you don't need the Boot Manager GUI, just ignore this section.**
+
+   .. code:: shell
+
+       $sudo apt-get install ser2net
+
+-  Configure ser2net.
+
+   .. code:: shell
+
+       $sudo vi /etc/ser2net.conf
+
+   Append the following line for serial-over-USB to */etc/ser2net.conf*.
+
+   .. code:: shell
+
+       2004:telnet:0:/dev/ttyUSB0:115200 8DATABITS NONE 1STOPBIT banner
+
+-  Start ser2net
+
+   .. code:: shell
+
+       $sudo killall ser2net
+       $sudo ser2net -u
+
+-  Open the console.
+
+   .. code:: shell
+
+       $telnet localhost 2004
+
+   And you could open the console remotely, too.
+
+Flash images in recovery mode
+-----------------------------
+
+-  Make sure Pin3-Pin4 on J15 are connected for recovery mode. Then power on HiKey.
+
+-  Remove the modemmanager package. This package may cause the hisi-idt tool to fail.
+
+   .. code:: shell
+
+       $sudo apt-get purge modemmanager
+
+-  Run the command to download l-loader.bin into HiKey.
+
+   .. code:: shell
+
+       $sudo python hisi-idt.py -d /dev/ttyUSB1 --img1 l-loader.bin
+
+-  Update images. All AOSP or Debian images can be fetched from `link <https://builds.96boards.org/>`__.
+
+   .. code:: shell
+
+       $sudo fastboot flash ptable prm_ptable.img
+       $sudo fastboot flash fastboot fip.bin
+       $sudo fastboot flash boot boot.img
+       $sudo fastboot flash cache cache.img
+       $sudo fastboot flash system system.img
+       $sudo fastboot flash userdata userdata.img
+
+Boot UEFI in normal mode
+------------------------
+
+-  Make sure Pin3-Pin4 on J15 are open for normal boot mode. Then power on HiKey.
+
+-  Refer to `link <https://github.com/96boards-hikey/tools-images-hikey960/blob/master/build-from-source/README-ATF-UEFI-build-from-source.md>`__.
+
+.. _link: https://github.com/96boards/documentation/blob/master/ConsumerEdition/HiKey/Quickstart/README.md
diff --git a/docs/plat/hikey960.rst b/docs/plat/hikey960.rst
new file mode 100644
index 0000000..8524b48
--- /dev/null
+++ b/docs/plat/hikey960.rst
@@ -0,0 +1,190 @@
+Description
+===========
+
+HiKey960 is one of the 96Boards development boards. The Hisilicon Hi3660
+processor is installed on HiKey960.
+
+More information is available at `link`_.
+
+How to build
+============
+
+Code Locations
+--------------
+
+-  ARM Trusted Firmware:
+   `link <https://github.com/ARM-software/arm-trusted-firmware>`__
+
+-  OP-TEE:
+   `link <https://github.com/OP-TEE/optee_os>`__
+
+-  edk2:
+   `link <https://github.com/96boards-hikey/edk2/tree/testing/hikey960_v2.5>`__
+
+-  OpenPlatformPkg:
+   `link <https://github.com/96boards-hikey/OpenPlatformPkg/tree/testing/hikey960_v1.3.4>`__
+
+-  l-loader:
+   `link <https://github.com/96boards-hikey/l-loader/tree/testing/hikey960_v1.2>`__
+
+-  uefi-tools:
+   `link <https://git.linaro.org/uefi/uefi-tools.git>`__
+
+Build Procedure
+---------------
+
+-  Fetch all the above repositories into the local host.
+   Place all the repositories in the same ${BUILD\_PATH}.
+
+   .. code:: shell
+
+       git clone https://github.com/ARM-software/arm-trusted-firmware -b integration
+       git clone https://github.com/OP-TEE/optee_os
+       git clone https://github.com/96boards-hikey/edk2 -b testing/hikey960_v2.5
+       git clone https://github.com/96boards-hikey/OpenPlatformPkg -b testing/hikey960_v1.3.4
+       git clone https://github.com/96boards-hikey/l-loader -b testing/hikey960_v1.2
+       git clone https://git.linaro.org/uefi/uefi-tools
+
+-  Create a symbolic link to OpenPlatformPkg in edk2.
+
+   .. code:: shell
+
+       $cd ${BUILD_PATH}/edk2
+       $ln -sf ../OpenPlatformPkg
+
+-  Prepare AARCH64 toolchain.
+
+-  If your hikey960 hardware is v1, update *uefi-tools/platform.config* first. *(optional)*
+   **Uncomment the line below. Otherwise, UEFI can't output messages on the serial
+   console on hikey960 v1.**
+
+   .. code:: shell
+
+       BUILDFLAGS=-DSERIAL_BASE=0xFDF05000
+
+   If your hikey960 hardware is v2 or newer, no changes are needed.
+
+-  Build in debug mode. Create a script file for the build.
+
+   .. code:: shell
+
+       BUILD_OPTION=DEBUG
+       export AARCH64_TOOLCHAIN=GCC5
+       export UEFI_TOOLS_DIR=${BUILD_PATH}/uefi-tools
+       export EDK2_DIR=${BUILD_PATH}/edk2
+       EDK2_OUTPUT_DIR=${EDK2_DIR}/Build/HiKey960/${BUILD_OPTION}_${AARCH64_TOOLCHAIN}
+       cd ${EDK2_DIR}
+       # Build UEFI & ARM Trusted Firmware
+       ${UEFI_TOOLS_DIR}/uefi-build.sh -b ${BUILD_OPTION} -a ../arm-trusted-firmware -s ../optee_os hikey960
+
+-  Generate l-loader.bin and partition table.
+   *Make sure that you're using the sgdisk in the l-loader directory.*
+
+   .. code:: shell
+
+       cd ${BUILD_PATH}/l-loader
+       ln -sf ${EDK2_OUTPUT_DIR}/FV/bl1.bin
+       ln -sf ${EDK2_OUTPUT_DIR}/FV/fip.bin
+       ln -sf ${EDK2_OUTPUT_DIR}/FV/BL33_AP_UEFI.fd
+       make hikey960
+
+Setup Console
+-------------
+
+-  Install ser2net. Use telnet as the console since UEFI outputs a window
+   that minicom fails to display.
+
+   .. code:: shell
+
+       $sudo apt-get install ser2net
+
+-  Configure ser2net.
+
+   .. code:: shell
+
+       $sudo vi /etc/ser2net.conf
+
+   Append the following line for serial-over-USB to */etc/ser2net.conf*.
+
+   ::
+
+       2004:telnet:0:/dev/ttyUSB0:115200 8DATABITS NONE 1STOPBIT banner
+
+-  Start ser2net
+
+   .. code:: shell
+
+       $sudo killall ser2net
+       $sudo ser2net -u
+
+-  Open the console.
+
+   .. code:: shell
+
+       $telnet localhost 2004
+
+   And you could open the console remotely, too.
+
+Boot UEFI in recovery mode
+--------------------------
+
+-  Fetch the tools and images that are used in recovery mode. The code location is below.
+   `link <https://github.com/96boards-hikey/tools-images-hikey960>`__
+
+-  Generate l-loader.bin.
+
+   .. code:: shell
+
+       $cd tools-images-hikey960
+       $ln -sf ${BUILD_PATH}/l-loader/l-loader.bin
+       $ln -sf ${BUILD_PATH}/l-loader/fip.bin
+
+-  Prepare config file.
+
+   .. code:: shell
+
+       $vi config
+       # The content of config file
+       ./sec_usb_xloader.img 0x00020000
+       ./sec_uce_boot.img 0x6A908000
+       ./l-loader.bin 0x1AC00000
+
+-  Remove the modemmanager package. This package may cause the hikey\_idt tool to fail.
+
+   .. code:: shell
+
+       $sudo apt-get purge modemmanager
+
+-  Run the command to download l-loader.bin into HiKey960.
+
+   .. code:: shell
+
+       $sudo ./hikey_idt -c config -p /dev/ttyUSB1
+
+-  UEFI runs in recovery mode.
+   When the prompt '.' is displayed on the console, press the 'f' hotkey on the keyboard. The Android fastboot app then runs.
+   The prompt '.' times out after 10 seconds.
+
+-  Update images.
+
+   .. code:: shell
+
+       $sudo fastboot flash ptable prm_ptable.img
+       $sudo fastboot flash xloader sec_xloader.img
+       $sudo fastboot flash fastboot l-loader.bin
+       $sudo fastboot flash fip fip.bin
+       $sudo fastboot flash boot boot.img
+       $sudo fastboot flash cache cache.img
+       $sudo fastboot flash system system.img
+       $sudo fastboot flash userdata userdata.img
+
+-  Note: UEFI can also boot the kernel in recovery mode, but BL31 isn't loaded in
+   recovery mode.
+
+Boot UEFI in normal mode
+------------------------
+
+-  Make sure "Boot Mode" switch is OFF for normal boot mode. Then power on HiKey960.
+
+-  Refer to `link <https://github.com/96boards-hikey/tools-images-hikey960/blob/master/build-from-source/README-ATF-UEFI-build-from-source.md>`__.
+
+.. _link: http://www.96boards.org/documentation/ConsumerEdition/HiKey960/README.md
diff --git a/docs/plat/nvidia-tegra.rst b/docs/plat/nvidia-tegra.rst
new file mode 100644
index 0000000..7aac7e5
--- /dev/null
+++ b/docs/plat/nvidia-tegra.rst
@@ -0,0 +1,98 @@
+Tegra SoCs - Overview
+=====================
+
+T210
+----
+
+T210 has Quad ARM® Cortex®-A57 cores in a switched configuration with a
+companion set of quad ARM Cortex-A53 cores. The Cortex-A57 and A53 cores
+support ARMv8, executing both 64-bit AArch64 code and 32-bit AArch32 code
+including legacy ARMv7 applications. The Cortex-A57 processors each have
+48 KB Instruction and 32 KB Data Level 1 caches; and have a 2 MB shared
+Level 2 unified cache. The Cortex-A53 processors each have 32 KB Instruction
+and 32 KB Data Level 1 caches; and have a 512 KB shared Level 2 unified cache.
+
+T132
+----
+
+Denver is NVIDIA's own custom-designed, 64-bit, dual-core CPU which is
+fully ARMv8 architecture compatible. Each of the two Denver cores
+implements a 7-way superscalar microarchitecture (up to 7 concurrent
+micro-ops can be executed per clock), and includes a 128KB 4-way L1
+instruction cache, a 64KB 4-way L1 data cache, and a 2MB 16-way L2
+cache, which services both cores.
+
+Denver implements an innovative process called Dynamic Code Optimization,
+which optimizes frequently used software routines at runtime into dense,
+highly tuned microcode-equivalent routines. These are stored in a
+dedicated, 128MB main-memory-based optimization cache. After being read
+into the instruction cache, the optimized micro-ops are executed,
+re-fetched and executed from the instruction cache as long as needed and
+capacity allows.
+
+Effectively, this reduces the need to re-optimize the software routines.
+Instead of using hardware to extract the instruction-level parallelism
+(ILP) inherent in the code, Denver extracts the ILP once via software
+techniques, and then executes those routines repeatedly, thus amortizing
+the cost of ILP extraction over the many execution instances.
+
+Denver also features new low latency power-state transitions, in addition
+to extensive power-gating and dynamic voltage and clock scaling based on
+workloads.
+
+Directory structure
+===================
+
+-  plat/nvidia/tegra/common - Common code for all Tegra SoCs
+-  plat/nvidia/tegra/soc/txxx - Chip specific code
+
+Trusted OS dispatcher
+=====================
+
+Tegra supports multiple Trusted OSes, Trusted Little Kernel (TLK) being one of
+them. In order to include the 'tlkd' dispatcher in the image, pass 'SPD=tlkd'
+on the command line while preparing a bl31 image. This allows other Trusted OS
+vendors to use the upstream code and include their dispatchers in the image
+without changing any makefiles.
+
+Preparing the BL31 image to run on Tegra SoCs
+=============================================
+
+.. code:: shell
+
+    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- make PLAT=tegra \
+    TARGET_SOC=<target-soc e.g. t210|t132> SPD=<dispatcher e.g. tlkd> bl31
+
+Platforms wanting to use a different TZDRAM\_BASE can add ``TZDRAM_BASE=<value>``
+to the build command line.
+
+The Tegra platform code expects a pointer to the following platform specific
+structure in the 'x1' register from the BL2 layer. It is used by the
+bl31\_early\_platform\_setup() handler to extract the TZDRAM carveout base and
+size for loading the Trusted OS, and the UART port ID to be used. The Tegra
+memory controller driver programs this base/size in order to restrict NS
+accesses.
+
+.. code:: c
+
+    typedef struct plat_params_from_bl2 {
+        /* TZ memory size */
+        uint64_t tzdram_size;
+        /* TZ memory base */
+        uint64_t tzdram_base;
+        /* UART port ID */
+        int uart_id;
+    } plat_params_from_bl2_t;
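+
+For illustration only, a BL2 platform port might populate this structure as
+follows before handing off to BL31; the addresses, sizes and UART ID below are
+placeholders rather than real Tegra carveout values.
+
+.. code:: c
+
+    /* Hypothetical example values; the real carveout base/size and UART ID
+     * are platform and board specific. */
+    static plat_params_from_bl2_t plat_params = {
+        .tzdram_size = 0x00400000,  /* 4 MB TZDRAM carveout (placeholder) */
+        .tzdram_base = 0xF0000000,  /* carveout base address (placeholder) */
+        .uart_id     = 0,           /* UART port ID (placeholder) */
+    };
+
+    /* A pointer to this structure is expected in register 'x1' on entry to
+     * bl31_early_platform_setup(). */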
+
+Power Management
+================
+
+The PSCI implementation expects each platform to expose the 'power state'
+parameter to be used during the 'SYSTEM SUSPEND' call. The state-id field
+is implementation defined on Tegra SoCs and is preferably defined by
+tegra\_def.h.
+
+Tegra configs
+=============
+
+-  'tegra\_enable\_l2\_ecc\_parity\_prot': This flag enables the L2 ECC and Parity
+   Protection bit for ARM Cortex-A57 CPUs during CPU boot. This flag will
+   be enabled by Tegra SoCs during 'Cluster power up' or 'System Suspend' exit.
diff --git a/docs/plat/poplar.rst b/docs/plat/poplar.rst
new file mode 100644
index 0000000..d7f1fe0
--- /dev/null
+++ b/docs/plat/poplar.rst
@@ -0,0 +1,165 @@
+Description
+===========
+
+Poplar is the first development board compliant with the 96Boards Enterprise
+Edition TV Platform specification.
+
+The board features the Hi3798C V200 with an integrated quad-core 64-bit
+ARM Cortex A53 processor and high performance Mali T720 GPU, making it capable
+of running any commercial set-top solution based on Linux or Android.
+
+It supports a premium user experience with up to H.265 HEVC decoding of 4K
+video at 60 frames per second.
+
+::
+
+    SOC                  Hisilicon Hi3798CV200
+    CPU                  Quad-core ARM Cortex-A53 64 bit
+    DRAM                 DDR3/3L/4 SDRAM interface, maximum 32-bit data width 2 GB
+    USB                  Two USB 2.0 ports, one USB 3.0 port
+    CONSOLE              USB-micro port for console support
+    ETHERNET             1 GBe Ethernet
+    PCIE                 One PCIe 2.0 interface
+    JTAG                 8-Pin JTAG
+    EXPANSION INTERFACE  Linaro 96Boards Low Speed Expansion slot
+    DIMENSION            Standard 160×120 mm 96Boards Enterprise Edition form factor
+    WIFI                 802.11AC 2*2 with Bluetooth
+    CONNECTORS           One connector for Smart Card, one connector for TSI
+
+At the start of the boot sequence, the bootROM executes the so-called l-loader
+binary, whose main role is to change the processor state to 64-bit mode. This
+must happen prior to invoking the ARM Trusted Firmware::
+
+    l-loader --> arm_trusted_firmware --> u-boot
+
+How to build
+============
+
+Code Locations
+--------------
+
+-  ARM Trusted Firmware:
+   `link <https://github.com/ARM-software/arm-trusted-firmware>`__
+
+-  l-loader:
+   `link <https://github.com/Linaro/poplar-l-loader.git>`__
+
+-  u-boot:
+   `link <http://git.denx.de/u-boot.git>`__
+
+Build Procedure
+---------------
+
+-  Fetch all the above 3 repositories into the local host.
+   Place all the repositories in the same ${BUILD\_PATH}.
+
+-  Prepare the AARCH64 toolchain.
+
+-  Build u-boot using poplar_defconfig
+
+   .. code:: shell
+
+       make CROSS_COMPILE=aarch64-linux-gnu- poplar_defconfig
+       make CROSS_COMPILE=aarch64-linux-gnu-
+
+-  Build atf providing the previously generated u-boot.bin as the BL33 image
+
+   .. code:: shell
+
+       make CROSS_COMPILE=aarch64-linux-gnu- all fip SPD=none PLAT=poplar \
+           BL33=u-boot.bin
+
+-  Build l-loader (generates the final fastboot.bin)
+
+   #. Copy the ATF generated files fip.bin and bl1.bin to l-loader/atf/
+   #. export ARM_TRUSTED_FIRMWARE=${ATF_SOURCE_PATH}
+   #. make
+
+Install Procedure
+-----------------
+
+- Copy l-loader/fastboot.bin to a FAT partition on a USB pen drive.
+
+- Plug the USB pen drive to any of the USB2 ports
+
+- Power the board while keeping S3 pressed (usb_boot)
+
+The system will boot into a u-boot shell which you can then use to write the
+working firmware to eMMC.
+
+Boot trace
+==========
+
+::
+
+    Bootrom start
+    Boot Media: eMMC
+    Decrypt auxiliary code ...OK
+
+    lsadc voltage min: 000000FE, max: 000000FF, aver: 000000FE, index: 00000000
+
+    Entry boot auxiliary code
+
+    Auxiliary code - v1.00
+    DDR code - V1.1.2 20160205
+    Build: Mar 24 2016 - 17:09:44
+    Reg Version:  v134
+    Reg Time:     2016/03/18 09:44:55
+    Reg Name:     hi3798cv2dmb_hi3798cv200_ddr3_2gbyte_8bitx4_4layers.reg
+
+    Boot auxiliary code success
+    Bootrom success
+
+    LOADER:  Switched to aarch64 mode
+    LOADER:  Entering ARM TRUSTED FIRMWARE
+    LOADER:  CPU0 executes at 0x000ce000
+
+    INFO:    BL1: 0xe1000 - 0xe7000 [size = 24576]
+    NOTICE:  Booting Trusted Firmware
+    NOTICE:  BL1: v1.3(debug):v1.3-372-g1ba9c60
+    NOTICE:  BL1: Built : 17:51:33, Apr 30 2017
+    INFO:    BL1: RAM 0xe1000 - 0xe7000
+    INFO:    BL1: Loading BL2
+    INFO:    Loading image id=1 at address 0xe9000
+    INFO:    Image id=1 loaded at address 0xe9000, size = 0x5008
+    NOTICE:  BL1: Booting BL2
+    INFO:    Entry point address = 0xe9000
+    INFO:    SPSR = 0x3c5
+    NOTICE:  BL2: v1.3(debug):v1.3-372-g1ba9c60
+    NOTICE:  BL2: Built : 17:51:33, Apr 30 2017
+    INFO:    BL2: Loading BL31
+    INFO:    Loading image id=3 at address 0x129000
+    INFO:    Image id=3 loaded at address 0x129000, size = 0x8038
+    INFO:    BL2: Loading BL33
+    INFO:    Loading image id=5 at address 0x37000000
+    INFO:    Image id=5 loaded at address 0x37000000, size = 0x58f17
+    NOTICE:  BL1: Booting BL31
+    INFO:    Entry point address = 0x129000
+    INFO:    SPSR = 0x3cd
+    INFO:    Boot bl33 from 0x37000000 for 364311 Bytes
+    NOTICE:  BL31: v1.3(debug):v1.3-372-g1ba9c60
+    NOTICE:  BL31: Built : 17:51:33, Apr 30 2017
+    INFO:    BL31: Initializing runtime services
+    INFO:    BL31: Preparing for EL3 exit to normal world
+    INFO:    Entry point address = 0x37000000
+    INFO:    SPSR = 0x3c9
+
+    U-Boot 2017.05-rc2-00130-gd2255b0 (Apr 30 2017 - 17:51:28 +0200)poplar
+
+    Model: HiSilicon Poplar Development Board
+    BOARD: Hisilicon HI3798cv200 Poplar
+    DRAM:  1 GiB
+    MMC:   Hisilicon DWMMC: 0
+    In:    serial@f8b00000
+    Out:   serial@f8b00000
+    Err:   serial@f8b00000
+    Net:   Net Initialization Skipped
+    No ethernet found.
+
+    Hit any key to stop autoboot:  0
+    starting USB...
+    USB0:   USB EHCI 1.00
+    scanning bus 0 for devices... 1 USB Device(s) found
+    USB1:   USB EHCI 1.00
+    scanning bus 1 for devices... 4 USB Device(s) found
+           scanning usb for storage devices... 1 Storage Device(s) found
+           scanning usb for ethernet devices... 1 Ethernet Device(s) found
+
+    USB device 0:
+        Device 0: Vendor: SanDisk Rev: 1.00 Prod: Cruzer Blade
+                Type: Removable Hard Disk
+                Capacity: 7632.0 MB = 7.4 GB (15630336 x 512)
+    ... is now current device
+    Scanning usb 0:1...
+    =>
+
diff --git a/docs/plat/qemu.rst b/docs/plat/qemu.rst
new file mode 100644
index 0000000..4e2cd7c
--- /dev/null
+++ b/docs/plat/qemu.rst
@@ -0,0 +1,48 @@
+ARM Trusted Firmware for QEMU virt ARMv8-A
+==========================================
+
+ARM Trusted Firmware implements the EL3 firmware layer for QEMU virt
+ARMv8-A. BL1 is used as the BootROM, supplied with the -bios argument.
+When QEMU starts all CPUs are released simultaneously, BL1 selects a
+primary CPU to handle the boot and the secondaries are placed in a polling
+loop to be released by normal world via PSCI.
+
+BL2 edits the Flattened Device Tree, FDT, generated by QEMU at run-time to
+add a node describing PSCI and also enable methods for the CPUs.
+
+An ARM64 defconfig v4.5 Linux kernel is known to boot; an FDT doesn't need to be
+provided as it's generated by QEMU.
+
+Current limitations:
+
+-  Only cold boot is supported
+-  No build instructions for QEMU\_EFI.fd and rootfs-arm64.cpio.gz
+-  No instructions for how to load a BL32 (Secure Payload)
+
+``QEMU_EFI.fd`` can be downloaded from
+http://snapshots.linaro.org/components/kernel/leg-virt-tianocore-edk2-upstream/latest/QEMU-KERNEL-AARCH64/RELEASE_GCC49/QEMU_EFI.fd
+
+Boot binaries, except BL1, are primarily loaded via semi-hosting, so all
+binaries have to reside in the same directory that QEMU is started from. This
+is conveniently achieved by creating symlinks with the following local names:
+
+-  ``bl2.bin`` -> BL2
+-  ``bl31.bin`` -> BL31
+-  ``bl33.bin`` -> BL33 (``QEMU_EFI.fd``)
+-  ``Image`` -> linux/Image
+
+To build:
+
+::
+
+    make CROSS_COMPILE=aarch64-none-elf- PLAT=qemu 
+
+To start (QEMU v2.6.0):
+
+::
+
+    qemu-system-aarch64 -nographic -machine virt,secure=on -cpu cortex-a57  \
+        -kernel Image                           \
+        -append console=ttyAMA0,38400 keep_bootcon root=/dev/vda2   \
+        -initrd rootfs-arm64.cpio.gz -smp 2 -m 1024 -bios bl1.bin   \
+        -d unimp -semihosting-config enable,target=native
diff --git a/docs/plat/socionext-uniphier.rst b/docs/plat/socionext-uniphier.rst
new file mode 100644
index 0000000..fb6ebe5
--- /dev/null
+++ b/docs/plat/socionext-uniphier.rst
@@ -0,0 +1,124 @@
+ARM Trusted Firmware for Socionext UniPhier SoCs
+================================================
+
+Socionext UniPhier ARMv8-A SoCs use ARM Trusted Firmware as the secure world
+firmware, supporting BL1, BL2, and BL31.
+
+The UniPhier SoC family implements its own internal boot ROM, so BL1 is used as
+a pseudo ROM (i.e. it runs in RAM). The internal boot ROM loads a 64KB [1]_
+image from non-volatile storage to the on-chip SRAM. Unfortunately, BL1 does
+not fit in the 64KB limit if `Trusted Board Boot`_ (TBB) is enabled. To solve
+this problem, Socionext provides a first stage loader called `UniPhier BL`_.
+This loader runs in the on-chip SRAM, initializes the DRAM, expands BL1 there,
+and hands control over to it. Therefore, all images of ARM Trusted Firmware
+run in DRAM.
+
+The UniPhier platform works with/without TBB. See below for the build process
+of each case. The image authentication for the UniPhier platform fully
+complies with the Trusted Board Boot Requirements (TBBR) specification.
+
+The UniPhier BL does not implement any authentication functionality, that is,
+it cannot verify the BL1 image by itself. Instead, the UniPhier BL assures
+the validity of BL1 in a different way; BL1 is GZIP-compressed and appended to
+the UniPhier BL. The concatenation of the UniPhier BL and the compressed BL1
+fits in the 64KB limit. The concatenated image is loaded by the boot ROM
+(and verified if the chip fuses are blown).
+
+Boot Flow
+---------
+
+#. The Boot ROM
+
+   This is hard-wired ROM, so it is never corrupted. It loads the UniPhier BL
+   (with the compressed BL1 appended) into the on-chip SRAM. If the SoC fuses
+   are blown, the image is verified by the SoC's own method.
+
+#. UniPhier BL
+
+   This runs in the on-chip SRAM. After minimal SoC initialization and DRAM
+   setup, it decompresses the appended BL1 image into the DRAM, then jumps to
+   the BL1 entry.
+
+#. BL1
+
+   This runs in the DRAM. It extracts BL2 from FIP (Firmware Image Package).
+   If TBB is enabled, BL2 is authenticated by the standard mechanism of ARM
+   Trusted Firmware.
+
+#. BL2, BL31, and more
+
+   They all run in the DRAM, and are authenticated by the standard mechanism if
+   TBB is enabled. See `Firmware Design`_ for details.
+
+Basic Build
+-----------
+
+BL1 must be compressed for the reason above. The UniPhier platform makefile
+provides a build target ``bl1_gzip`` for this.
+
+For a non-secure boot loader (aka BL33), U-Boot is well supported for UniPhier
+SoCs. The U-Boot image (``u-boot.bin``) must be built in advance. For the build
+procedure of U-Boot, refer to the document in the `U-Boot`_ project.
+
+To build minimum functionality for UniPhier (without TBB):
+
+::
+
+    make CROSS_COMPILE=<gcc-prefix> PLAT=uniphier BL33=<path-to-BL33> bl1_gzip fip
+
+Output images:
+
+-  ``bl1.bin.gzip``
+-  ``fip.bin``
+
+Optional features
+-----------------
+
+-  Trusted Board Boot
+
+`mbed TLS`_ is needed as the cryptographic and image parser modules.
+Refer to the `User Guide`_ for the appropriate version of mbed TLS.
+
+To enable TBB, add the following options to the build command:
+
+::
+
+      TRUSTED_BOARD_BOOT=1 GENERATE_COT=1 MBEDTLS_DIR=<path-to-mbedtls>
+
+-  System Control Processor (SCP)
+
+If desired, the FIP can include an SCP BL2 image. If BL2 finds an SCP BL2 image
+in the FIP, BL2 loads it into DRAM and kicks off the SCP. Most UniPhier boards
+still work without the SCP, but the SCP provides better power management support.
+
+To include SCP\_BL2, add the following option to the build command:
+
+::
+
+      SCP_BL2=<path-to-SCP>
+
+-  BL32 (Secure Payload)
+
+To enable BL32, add the following option to the build command:
+
+::
+
+      SPD=<spd> BL32=<path-to-BL32>
+
+If you use TSP for BL32, ``BL32=<path-to-BL32>`` is not required. Just add the
+following:
+
+::
+
+      SPD=tspd
+
+.. [1] Some SoCs can load 80KB, but the software implementation must be aligned
+   to the lowest common denominator.
+.. _Trusted Board Boot: ../trusted-board-boot.rst
+.. _UniPhier BL: https://github.com/uniphier/uniphier-bl
+.. _Firmware Design: ../firmware-design.rst
+.. _U-Boot: https://www.denx.de/wiki/U-Boot
+.. _mbed TLS: https://tls.mbed.org/
+.. _User Guide: ../user-guide.rst
diff --git a/docs/plat/xilinx-zynqmp.rst b/docs/plat/xilinx-zynqmp.rst
new file mode 100644
index 0000000..b9c7825
--- /dev/null
+++ b/docs/plat/xilinx-zynqmp.rst
@@ -0,0 +1,67 @@
+ARM Trusted Firmware for Xilinx Zynq UltraScale+ MPSoC
+======================================================
+
+ARM Trusted Firmware implements the EL3 firmware layer for Xilinx Zynq
+UltraScale+ MPSoC.
+The platform only uses the runtime part of ATF as ZynqMP already has a
+BootROM (BL1) and FSBL (BL2).
+
+BL31 is ATF.
+BL32 is an optional Secure Payload.
+BL33 is the non-secure world software (U-Boot, Linux etc).
+
+To build:
+
+.. code:: bash
+
+    make ERROR_DEPRECATED=1 CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp bl31
+
+To build the bl32 TSP you have to rebuild bl31 too:
+
+.. code:: bash
+
+    make ERROR_DEPRECATED=1 CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp SPD=tspd bl31 bl32
+
+ZynqMP platform specific build options
+======================================
+
+-  ``ZYNQMP_ATF_MEM_BASE``: Specifies the base address of the bl31 binary.
+-  ``ZYNQMP_ATF_MEM_SIZE``: Specifies the size of the memory region of the bl31 binary.
+-  ``ZYNQMP_BL32_MEM_BASE``: Specifies the base address of the bl32 binary.
+-  ``ZYNQMP_BL32_MEM_SIZE``: Specifies the size of the memory region of the bl32 binary.
+
+-  ``ZYNQMP_CONSOLE``: Select the console driver. Options:
+
+   -  ``cadence``, ``cadence0``: Cadence UART 0
+   -  ``cadence1`` : Cadence UART 1
+
+FSBL->ATF Parameter Passing
+===========================
+
+The FSBL populates a data structure with image information for the ATF. The ATF
+uses that data to hand off to the loaded images. The address of the handoff data
+structure is passed in the ``PMU_GLOBAL.GLOBAL_GEN_STORAGE6`` register. The
+register is free to be used by other software once the ATF has started
+bringing up further firmware images.
+
+Power Domain Tree
+=================
+
+The following power domain tree represents the power domain model used by the
+ATF for ZynqMP:
+
+::
+
+                    +-+
+                    |0|
+                    +-+
+         +-------+---+---+-------+
+         |       |       |       |
+         |       |       |       |
+         v       v       v       v
+        +-+     +-+     +-+     +-+
+        |0|     |1|     |2|     |3|
+        +-+     +-+     +-+     +-+
+
+The 4 leaf power domains represent the individual A53 cores, while resources
+common to the cluster are grouped in the power domain on the top.
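+
+The descriptor returned by ``plat_get_power_domain_tree_desc()`` for such a tree
+could be written as below. This is only an illustrative sketch of the array
+format; the variable name and contents are assumptions, not the actual ZynqMP
+platform code.
+
+.. code:: c
+
+    /* One cluster-level power domain (the top node) containing four core
+     * power domains, one per A53 core, matching the diagram above. */
+    static const unsigned char power_domain_tree_desc[] = {
+        1,  /* number of power domains at the cluster (top) level */
+        4,  /* number of core power domains within that cluster */
+    };
+
+    const unsigned char *plat_get_power_domain_tree_desc(void)
+    {
+        return power_domain_tree_desc;
+    }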
diff --git a/docs/platform-interrupt-controller-API.rst b/docs/platform-interrupt-controller-API.rst
new file mode 100644
index 0000000..795c085
--- /dev/null
+++ b/docs/platform-interrupt-controller-API.rst
@@ -0,0 +1,297 @@
+Platform Interrupt Controller API documentation
+===============================================
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This document lists the optional platform interrupt controller API that
+abstracts the runtime configuration and control of interrupt controller from the
+generic code. The mandatory APIs are described in the `porting guide`__.
+
+.. __: porting-guide.rst#interrupt-management-framework-in-bl31
+
+Function: unsigned int plat_ic_get_running_priority(void); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : unsigned int
+
+This API should return the priority of the interrupt the PE is currently
+servicing. This must be called only after an interrupt has already been
+acknowledged via ``plat_ic_acknowledge_interrupt``.
+
+In the case of ARM standard platforms using GIC, the *Running Priority Register*
+is read to determine the priority of the interrupt.
+
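+For instance, a GICv2-based implementation could simply read the memory-mapped
+Running Priority Register of the CPU interface. The base address and helper
+below are placeholders for illustration, not the actual GIC driver code:
+
+.. code:: c
+
+    #include <stdint.h>
+
+    #define GICC_BASE  0x2c002000UL  /* assumed CPU interface base address */
+    #define GICC_RPR   0x14U         /* GICv2 Running Priority Register offset */
+
+    static inline uint32_t mmio_read_32(uintptr_t addr)
+    {
+        return *(volatile uint32_t *)addr;
+    }
+
+    unsigned int plat_ic_get_running_priority(void)
+    {
+        /* Priority of the interrupt currently being serviced on this PE. */
+        return mmio_read_32(GICC_BASE + GICC_RPR);
+    }
+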
+Function: int plat_ic_is_spi(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : int
+
+The API should return whether the interrupt ID (first parameter) is categorized
+as a Shared Peripheral Interrupt. Shared Peripheral Interrupts are typically
+associated with system-wide peripherals, and these interrupts can target any PE in
+the system.
+
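+On a GIC-based platform this typically reduces to an interrupt ID range check,
+since the GIC architecture assigns IDs 0-15 to SGIs, 16-31 to PPIs and 32-1019
+to SPIs. A minimal sketch:
+
+.. code:: c
+
+    #define MIN_SPI_ID  32U
+    #define MAX_SPI_ID  1019U
+
+    int plat_ic_is_spi(unsigned int id)
+    {
+        /* SPIs occupy the interrupt ID range [32, 1019] on the GIC. */
+        return (id >= MIN_SPI_ID) && (id <= MAX_SPI_ID);
+    }
+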
+Function: int plat_ic_is_ppi(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : int
+
+The API should return whether the interrupt ID (first parameter) is categorized
+as a Private Peripheral Interrupt. Private Peripheral Interrupts are typically
+associated with peripherals that are private to each PE. Interrupts from private
+peripherals target that PE only.
+
+Function: int plat_ic_is_sgi(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : int
+
+The API should return whether the interrupt ID (first parameter) is categorized
+as a Software Generated Interrupt. Software Generated Interrupts are raised by
+explicit programming by software, and are typically used in inter-PE
+communication. Secure SGIs are reserved for use by Secure world software.
+
+Function: unsigned int plat_ic_get_interrupt_active(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : unsigned int
+
+This API should return the *active* status of the interrupt ID specified by the
+first parameter, ``id``.
+
+In case of ARM standard platforms using GIC, the implementation of the API reads
+the GIC *Set Active Register* and returns the active status of the
+interrupt.
+
+Function: void plat_ic_enable_interrupt(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : void
+
+This API should enable the interrupt ID specified by the first parameter,
+``id``. PEs in the system are expected to receive only enabled interrupts.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+inserts a barrier to make memory updates visible before enabling the interrupt,
+and then writes to the GIC *Set Enable Register* to enable the interrupt.
+
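+The sketch below illustrates the barrier-then-write sequence described above for
+a memory-mapped GIC distributor. The base address and helper are assumptions for
+illustration, not the actual GIC driver code:
+
+.. code:: c
+
+    #include <stdint.h>
+
+    #define GICD_BASE       0x2f000000UL  /* assumed distributor base address */
+    #define GICD_ISENABLER  0x100U        /* Set Enable Registers offset */
+
+    static inline void mmio_write_32(uintptr_t addr, uint32_t val)
+    {
+        *(volatile uint32_t *)addr = val;
+    }
+
+    void plat_ic_enable_interrupt(unsigned int id)
+    {
+        /* Make prior memory updates visible before the interrupt can fire. */
+        __asm__ volatile("dsb ish" ::: "memory");
+
+        /* Each GICD_ISENABLER<n> covers 32 IDs; writing 1 to the bit for
+         * 'id' enables it, writing 0 has no effect. */
+        mmio_write_32(GICD_BASE + GICD_ISENABLER + ((id / 32U) * 4U),
+                      1U << (id % 32U));
+    }
+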
+Function: void plat_ic_disable_interrupt(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : void
+
+This API should disable the interrupt ID specified by the first parameter,
+``id``. PEs in the system are not expected to receive disabled interrupts.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+writes to the GIC *Clear Enable Register* to disable the interrupt, and inserts
+a barrier to make memory updates visible afterwards.
+
+Function: void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Argument : unsigned int
+    Return   : void
+
+This API should set the priority of the interrupt specified by first parameter
+``id`` to the value set by the second parameter ``priority``.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+writes to the GIC *Priority Register* to set the interrupt priority.
+
+Function: int plat_ic_has_interrupt_type(unsigned int type); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : int
+
+This API should return whether the platform supports a given interrupt type. The
+parameter ``type`` shall be one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1``, or
+``INTR_TYPE_NS``.
+
+In case of ARM standard platforms using GICv3, the implementation of the API
+returns ``1`` for all interrupt types.
+
+In case of ARM standard platforms using GICv2, the API always returns ``1`` for
+``INTR_TYPE_NS``. The return value for other types depends on the value of the
+build option ``GICV2_G0_FOR_EL3``, as summarised in the sketch after this list:
+
+- For interrupt type ``INTR_TYPE_EL3``:
+
+  - When ``GICV2_G0_FOR_EL3`` is ``0``, it returns ``0``, indicating no support
+    for EL3 interrupts.
+
+  - When ``GICV2_G0_FOR_EL3`` is ``1``, it returns ``1``, indicating support for
+    EL3 interrupts.
+
+- For interrupt type ``INTR_TYPE_S_EL1``:
+
+  - When ``GICV2_G0_FOR_EL3`` is ``0``, it returns ``1``, indicating support for
+    Secure EL1 interrupts.
+
+  - When ``GICV2_G0_FOR_EL3`` is ``1``, it returns ``0``, indicating no support
+    for Secure EL1 interrupts.
+
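+The GICv2 behaviour above can be summarised roughly as follows. The
+``INTR_TYPE_*`` values are placeholders standing in for the generic framework's
+definitions, and ``GICV2_G0_FOR_EL3`` is the build option described above:
+
+.. code:: c
+
+    /* Placeholder values for illustration only. */
+    #define INTR_TYPE_S_EL1   0U
+    #define INTR_TYPE_EL3     1U
+    #define INTR_TYPE_NS      2U
+    #define GICV2_G0_FOR_EL3  0   /* build option: 0 or 1 */
+
+    int plat_ic_has_interrupt_type(unsigned int type)
+    {
+        switch (type) {
+        case INTR_TYPE_NS:
+            return 1;                      /* always supported */
+        case INTR_TYPE_EL3:
+            return GICV2_G0_FOR_EL3 != 0;  /* Group 0 reserved for EL3 */
+        case INTR_TYPE_S_EL1:
+            return GICV2_G0_FOR_EL3 == 0;  /* Group 0 used for Secure EL1 */
+        default:
+            return 0;
+        }
+    }
+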
+Function: void plat_ic_set_interrupt_type(unsigned int id, unsigned int type); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Argument : unsigned int
+    Return   : void
+
+This API should set the interrupt specified by first parameter ``id`` to the
+type specified by second parameter ``type``. The ``type`` parameter can be
+one of:
+
+- ``INTR_TYPE_NS``: interrupt is meant to be consumed by the Non-secure world.
+
+- ``INTR_TYPE_S_EL1``: interrupt is meant to be consumed by Secure EL1.
+
+- ``INTR_TYPE_EL3``: interrupt is meant to be consumed by EL3.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+writes to the GIC *Group Register* and *Group Modifier Register* (only GICv3) to
+assign the interrupt to the right group.
+
+For GICv3:
+
+- ``INTR_TYPE_NS`` maps to Group 1 interrupt.
+
+- ``INTR_TYPE_S_EL1`` maps to Secure Group 1 interrupt.
+
+- ``INTR_TYPE_EL3`` maps to Secure Group 0 interrupt.
+
+For GICv2:
+
+- ``INTR_TYPE_NS`` maps to Group 1 interrupt.
+
+- When the build option ``GICV2_G0_FOR_EL3`` is set to ``0`` (the default),
+  ``INTR_TYPE_S_EL1`` maps to Group 0. Otherwise, ``INTR_TYPE_EL3`` maps to
+  Group 0 interrupt.
+
+Function: void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Argument : u_register_t
+    Return   : void
+
+This API should raise an EL3 SGI. The first parameter, ``sgi_num``, specifies
+the ID of the SGI. The second parameter, ``target``, must be the MPIDR of the
+target PE.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+inserts a barrier to make memory updates visible before raising the SGI, then
+writes to the appropriate *SGI Register* in order to raise the EL3 SGI.
+
+Function: void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode, u_register_t mpidr); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Argument : unsigned int
+    Argument : u_register_t
+    Return   : void
+
+This API should set the routing mode of the Shared Peripheral Interrupt (SPI)
+specified by the first parameter ``id`` to that specified by the second parameter
+``routing_mode``.
+
+The ``routing_mode`` parameter can be one of:
+
+- ``INTR_ROUTING_MODE_ANY`` means the interrupt can be routed to any PE in the
+  system. The ``mpidr`` parameter is ignored in this case.
+
+- ``INTR_ROUTING_MODE_PE`` means the interrupt is routed to the PE whose MPIDR
+  value is specified by the parameter ``mpidr``.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+writes to the GIC *Target Register* (GICv2) or *Route Register* (GICv3) to set
+the routing.
+
+Function: void plat_ic_set_interrupt_pending(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : void
+
+This API should set the interrupt specified by first parameter ``id`` to
+*Pending*.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+inserts a barrier to make memory updates visible before setting the interrupt
+pending, and writes to the GIC *Set Pending Register* to set the interrupt pending
+status.
+
+Function: void plat_ic_clear_interrupt_pending(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : void
+
+This API should clear the *Pending* status of the interrupt specified by first
+parameter ``id``.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+writes to the GIC *Clear Pending Register* to clear the interrupt pending
+status, and inserts a barrier to make memory updates visible afterwards.
+
+Function: unsigned int plat_ic_set_priority_mask(unsigned int id); [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : unsigned int
+
+This API should set the priority mask (first parameter) in the interrupt
+controller such that only interrupts of higher priority than the supplied one
+may be signalled to the PE. The API should return the current priority value
+that it's overwriting.
+
+In case of ARM standard platforms using GIC, the implementation of the API
+inserts a barrier to order memory updates before updating the mask, then writes
+to the GIC *Priority Mask Register*, and makes sure memory updates are visible
+before a potential interrupt triggers due to the mask update.
+
+----
+
+*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
diff --git a/docs/platform-migration-guide.rst b/docs/platform-migration-guide.rst
new file mode 100644
index 0000000..ca75546
--- /dev/null
+++ b/docs/platform-migration-guide.rst
@@ -0,0 +1,608 @@
+Guide to migrate to new Platform porting interface
+==================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+--------------
+
+Introduction
+------------
+
+The PSCI implementation in Trusted Firmware has undergone a redesign because of
+three requirements that the PSCI 1.0 specification introduced:
+
+-  Removing the framework assumption about the structure of the MPIDR, and
+   its relation to the power topology enables support for deeper and more
+   complex hierarchies.
+
+-  Reworking the power state coordination implementation in the framework
+   to support the more detailed PSCI 1.0 requirements and reduce platform
+   port complexity.
+
+-  Enabling the use of the extended power\_state parameter and the larger
+   StateID field.
+
+The PSCI 1.0 implementation introduces new frameworks to fulfill the above
+requirements. These framework changes mean that the platform porting API must
+also be modified. This document is a guide to assist migration of the existing
+platform ports to the new platform API.
+
+This document describes the new platform API and compares it with the
+deprecated API. It also describes the compatibility layer that enables the
+existing platform ports to work with the PSCI 1.0 implementation. The
+deprecated platform API is documented for reference.
+
+Platform API modification due to PSCI framework changes
+-------------------------------------------------------
+
+This section describes changes to the platform APIs.
+
+Power domain topology framework platform API modifications
+----------------------------------------------------------
+
+This removes the assumption in the PSCI implementation that MPIDR
+based affinity instances map directly to power domains. A power domain, as
+described in section 4.2 of `PSCI`_, could contain a core or a logical group
+of cores (a cluster) which share some state on which power management
+operations can be performed. The existing affinity instance based APIs
+``plat_get_aff_count()`` and ``plat_get_aff_state()`` are deprecated. The new
+platform interfaces that are introduced for this framework are:
+
+-  ``plat_core_pos_by_mpidr()``
+-  ``plat_my_core_pos()``
+-  ``plat_get_power_domain_tree_desc()``
+
+``plat_my_core_pos()`` and ``plat_core_pos_by_mpidr()`` are mandatory
+and are meant to replace the existing ``platform_get_core_pos()`` API.
+The description of these APIs can be found in the `Porting Guide`_.
+These are used by the power domain topology framework such that:
+
+#. The generic PSCI code does not generate MPIDRs or use them to query the
+   platform about the number of power domains at a particular power level. The
+   ``plat_get_power_domain_tree_desc()`` provides a description of the power
+   domain tree on the SoC through a pointer to the byte array containing the
+   power domain topology tree description data structure.
+
+#. The linear indices returned by ``plat_core_pos_by_mpidr()`` and
+   ``plat_my_core_pos()`` are used to retrieve core power domain nodes from
+   the power domain tree. These core indices are unique for each core and lie
+   between ``0`` and ``PLATFORM_CORE_COUNT - 1``. The platform can choose
+   to implement a static mapping between ``MPIDR`` and core index or implement
+   a dynamic mapping, choosing to skip the unavailable/unused cores to compact
+   the core indices.
+
+In addition, the platforms must define the macros ``PLAT_NUM_PWR_DOMAINS`` and
+``PLAT_MAX_PWR_LVL`` which replace the macros ``PLAT_NUM_AFFS`` and
+``PLATFORM_MAX_AFFLVL`` respectively. On platforms where the affinity instances
+correspond to power domains, the values of new macros remain the same as the
+old ones.
+
+More details on the power domain topology description and its platform
+interface can be found in `psci pd tree`_.
+
+Composite power state framework platform API modifications
+----------------------------------------------------------
+
+The state-ID field in the power-state parameter of a CPU\_SUSPEND call can be
+used to describe the composite power states specific to a platform. The existing
+PSCI state coordination had the limitation that it operates on a run/off
+granularity of power states and it did not interpret the state-ID field. This
+was acceptable as the specification requirement in PSCI 0.2 and the framework's
+approach to coordination only required maintaining a reference
+count of the number of cores that have requested the cluster to remain powered.
+
+With the PSCI 1.0 specification, this approach is no longer optimal. If
+composite power states are used, the PSCI implementation cannot make the global
+decisions required for state coordination because it does not understand the
+platform-specific states.
+
+The PSCI 1.0 implementation now defines a generic representation of the
+power-state parameter:
+
+.. code:: c
+
+    typedef struct psci_power_state {
+        plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
+    } psci_power_state_t;
+
+``pwr_domain_state`` is an array where each index corresponds to a power level.
+Each entry in the array contains the local power state the power domain at
+that power level could enter. The meaning of the local power state value is
+platform defined, and can vary between levels in a single platform. The PSCI
+implementation constrains the values only so that it can classify the state
+as RUN, RETENTION or OFF as required by the specification:
+
+#. Zero means RUN
+
+#. All OFF state values at all levels must be higher than all
+   RETENTION state values at all levels
+
+The platform is required to define the macros ``PLAT_MAX_RET_STATE`` and
+``PLAT_MAX_OFF_STATE`` to the framework. The requirement for these macros can
+be found in the `Porting Guide <porting-guide.rst>`__.
+
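+For instance, a platform supporting one retention and one power-down state at
+every level might encode its local states as below. This is only an example
+that satisfies the two rules above, not a required encoding:
+
+.. code:: c
+
+    /* 0 is RUN by definition; the OFF value (2) is higher than the
+     * RETENTION value (1), as the rules above require. */
+    #define PLAT_LOCAL_STATE_RUN  0U
+    #define PLAT_LOCAL_STATE_RET  1U
+    #define PLAT_LOCAL_STATE_OFF  2U
+
+    #define PLAT_MAX_RET_STATE    PLAT_LOCAL_STATE_RET
+    #define PLAT_MAX_OFF_STATE    PLAT_LOCAL_STATE_OFF
+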
+The PSCI 1.0 implementation adds support to involve the platform in state
+coordination. This enables the platform to decide the final target state.
+During a request to place a power domain in a low power state, the platform
+is passed an array of requested ``plat_local_state_t`` for that power domain by
+each core within it through the ``plat_get_target_pwr_state()`` API. This API
+coordinates amongst these requested states to determine a target
+``plat_local_state_t`` for that power domain. A default weak implementation of
+this API is provided in the platform layer which returns the minimum of the
+requested local states back to the PSCI state coordination. More details
+of ``plat_get_target_pwr_state()`` API can be found in the
+`Porting Guide <porting-guide.rst#user-content-function--plat_get_target_pwr_state-optional>`__.
+
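+A sketch of that default coordination logic is shown below; the parameter layout
+and typedef are assumptions based on the description above:
+
+.. code:: c
+
+    typedef unsigned char plat_local_state_t;
+
+    /* Return the shallowest (minimum) of the local states requested by the
+     * 'ncpu' cores for the power domain at level 'lvl'. */
+    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+                                                 const plat_local_state_t *states,
+                                                 unsigned int ncpu)
+    {
+        plat_local_state_t target = states[0];
+        unsigned int i;
+
+        (void)lvl;  /* the default implementation treats all levels alike */
+
+        for (i = 1U; i < ncpu; i++) {
+            if (states[i] < target)
+                target = states[i];
+        }
+
+        return target;
+    }
+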
+The PSCI Generic implementation expects platform ports to populate the handlers
+for the ``plat_psci_ops`` structure, which is declared as:
+
+.. code:: c
+
+    typedef struct plat_psci_ops {
+        void (*cpu_standby)(plat_local_state_t cpu_state);
+        int (*pwr_domain_on)(u_register_t mpidr);
+        void (*pwr_domain_off)(const psci_power_state_t *target_state);
+        void (*pwr_domain_suspend_early)(const psci_power_state_t *target_state);
+        void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+        void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+        void (*pwr_domain_suspend_finish)(
+                        const psci_power_state_t *target_state);
+        void (*system_off)(void) __dead2;
+        void (*system_reset)(void) __dead2;
+        int (*validate_power_state)(unsigned int power_state,
+                        psci_power_state_t *req_state);
+        int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
+        void (*get_sys_suspend_power_state)(
+                        psci_power_state_t *req_state);
+        int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
+                                    int pwrlvl);
+        int (*translate_power_state_by_mpidr)(u_register_t mpidr,
+                                    unsigned int power_state,
+                                    psci_power_state_t *output_state);
+        int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
+        int (*mem_protect_chk)(uintptr_t base, u_register_t length);
+        int (*read_mem_protect)(int *val);
+        int (*write_mem_protect)(int val);
+        int (*system_reset2)(int is_vendor,
+                                int reset_type, u_register_t cookie);
+    } plat_psci_ops_t;
+
+The description of these handlers can be found in the `Porting Guide <porting-guide.rst#user-content-function--plat_setup_psci_ops-mandatory>`__.
+The previous ``plat_pm_ops`` structure is deprecated. Compared with the previous
+handlers, the major differences are:
+
+-  Difference in parameters
+
+The PSCI 1.0 implementation depends on the ``validate_power_state`` handler to
+convert the power-state parameter (possibly encoding a composite power state)
+passed in a PSCI ``CPU_SUSPEND`` to the ``psci_power_state`` format. This handler
+is now mandatory for PSCI ``CPU_SUSPEND`` support.
+
+The ``plat_psci_ops`` handlers, ``pwr_domain_off``, ``pwr_domain_suspend_early``
+and ``pwr_domain_suspend``, are passed the target local state for each affected
+power domain. The platform must execute operations specific to these target
+states. Similarly, ``pwr_domain_on_finish`` and ``pwr_domain_suspend_finish``
+are passed the local states of the affected power domains before wakeup. The
+platform must execute actions to restore these power domains from these specific
+local states.
+
+-  Difference in invocation
+
+Whereas the power management handlers in ``plat_pm_ops`` used to be invoked
+for each affinity level up to the target affinity level, the new handlers
+are only invoked once. The ``target_state`` encodes the target low power
+state or the low power state woken up from for each affected power domain.
+
+-  Difference in semantics
+
+Although the previous ``suspend`` handlers could be used for power down as well
+as retention at different affinity levels, the new handlers make this support
+explicit. The ``pwr_domain_suspend`` can be used to specify powerdown and
+retention at various power domain levels subject to the conditions mentioned
+in section 4.2.1 of `PSCI`_
+
+Unlike the previous ``standby`` handler, the ``cpu_standby()`` handler is only used
+as a fast path for placing a core power domain into a standby or retention
+state.
+
+The below diagram shows the sequence of a PSCI SUSPEND call and the interaction
+with the platform layer depicting the exchange of data between PSCI Generic
+layer and the platform layer.
+
+|Image 1|
+
+Refer `plat/arm/board/fvp/fvp\_pm.c`_ for the implementation details of
+these handlers for the FVP. The commit `38dce70f51fb83b27958ba3e2ad15f5635cb1061`_
+demonstrates the migration of ARM reference platforms to the new platform API.
+
+Miscellaneous modifications
+---------------------------
+
+In addition to the framework changes, unification of warm reset entry points on
+wakeup from low power modes has led to a change in the platform API. In the
+earlier implementation, the warm reset entry used to be programmed into the
+mailboxes by the 'ON' and 'SUSPEND' power management hooks. In the PSCI 1.0
+implementation, this information is not required, because the framework can
+figure out whether to execute the ``suspend_finisher`` or the ``on_finisher``
+by querying the affinity info state.
+
+As a result, the warm reset entry point must be programmed only once. The
+``plat_setup_psci_ops()`` API takes the secure entry point as an
+additional parameter to enable the platforms to configure their mailbox. The
+plat\_psci\_ops handlers ``pwr_domain_on`` and ``pwr_domain_suspend`` no longer take
+the warm reset entry point as a parameter.
+
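+A migrated port might therefore record the entry point and export its handlers
+roughly as follows. The handler table is left mostly empty for brevity, the
+mailbox handling is reduced to storing the entry point, and the structure is the
+``plat_psci_ops_t`` shown earlier; this is a sketch, not a real platform port:
+
+.. code:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    static uintptr_t sec_entrypoint;
+
+    static const plat_psci_ops_t plat_ops = {
+        /* Fill in the platform's handlers here, e.g.
+         * .pwr_domain_on = plat_pwr_domain_on, ... */
+        .cpu_standby = NULL,
+    };
+
+    int plat_setup_psci_ops(uintptr_t sec_entry, const plat_psci_ops_t **psci_ops)
+    {
+        /* Program the warm reset entry point (e.g. into a mailbox) once. */
+        sec_entrypoint = sec_entry;
+
+        *psci_ops = &plat_ops;
+        return 0;
+    }
+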
+Also, some platform APIs which took ``MPIDR`` as an argument were only ever
+invoked to perform actions specific to the caller core which makes the argument
+redundant. Therefore the platform APIs ``plat_get_my_entrypoint()``,
+``plat_is_my_cpu_primary()``, ``plat_set_my_stack()`` and
+``plat_get_my_stack()`` are defined which are meant to be invoked only for
+operations on the current caller core instead of ``platform_get_entrypoint()``,
+``platform_is_primary_cpu()``, ``platform_set_stack()`` and ``platform_get_stack()``.
+
+Compatibility layer
+-------------------
+
+To ease the migration of the platform ports to the new porting interface,
+a compatibility layer is introduced that essentially implements a glue layer
+between the old platform API and the new API. The build flag
+``ENABLE_PLAT_COMPAT`` (enabled by default) specifies whether to enable this
+layer or not. A platform port which has migrated to the new API can disable
+this flag within the platform specific makefile.
+
+The compatibility layer works on the assumption that the onus of
+state coordination, in case multiple low power states are supported,
+is with the platform. The generic PSCI implementation only takes into
+account whether the suspend request is power down or not. This corresponds
+with the behavior of the PSCI implementation before the introduction of
+new frameworks. Also, it assumes that the affinity levels of the platform
+correspond directly to the power domain levels.
+
+The compatibility layer dynamically constructs the new topology
+description array by querying the platform using ``plat_get_aff_count()``
+and ``plat_get_aff_state()`` APIs. The linear index returned by
+``platform_get_core_pos()`` is used as the core index for the cores. The
+higher level (non-core) power domain nodes must know the cores contained
+within their domains. Each node does so by storing the core index of the first
+core within it and the number of core indices following it. This means that core
+indices returned by ``platform_get_core_pos()`` for cores within a particular
+power domain must be consecutive. We expect that this is the case for most
+platform ports including ARM reference platforms.
+
+The old PSCI helpers like ``psci_get_suspend_powerstate()``,
+``psci_get_suspend_stateid()``, ``psci_get_suspend_stateid_by_mpidr()``,
+``psci_get_max_phys_off_afflvl()`` and ``psci_get_suspend_afflvl()`` are also
+implemented for the compatibility layer. This allows the existing
+platform ports to work with the new PSCI frameworks without significant
+rework.
+
+Deprecated Platform API
+-----------------------
+
+This section documents the deprecated platform porting API.
+
+Common mandatory modifications
+------------------------------
+
+The mandatory macros to be defined by the platform port in ``platform_def.h``
+
+-  **#define : PLATFORM\_NUM\_AFFS**
+
+   Defines the total number of nodes in the affinity hierarchy at all affinity
+   levels used by the platform.
+
+-  **#define : PLATFORM\_MAX\_AFFLVL**
+
+   Defines the maximum affinity level that the power management operations
+   should apply to. ARMv8-A has support for four affinity levels. It is likely
+   that hardware will implement fewer affinity levels. This macro allows the
+   PSCI implementation to consider only those affinity levels in the system
+   that the platform implements. For example, the Base AEM FVP implements two
+   clusters with a configurable number of cores. It reports the maximum
+   affinity level as 1, resulting in PSCI power control up to the cluster
+   level.
+
+The following functions must be implemented by the platform port to enable
+the reset vector code to perform the required tasks.
+
+Function : platform\_get\_entrypoint() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned long
+    Return   : unsigned long
+
+This function is called with the ``SCTLR.M`` and ``SCTLR.C`` bits disabled. The core
+is identified by its ``MPIDR``, which is passed as the argument. The function is
+responsible for distinguishing between a warm and cold reset using platform-
+specific means. If it is a warm reset, it returns the entrypoint into the
+BL31 image that the core must jump to. If it is a cold reset, this function
+must return zero.
+
+This function is also responsible for implementing a platform-specific mechanism
+to handle the condition where the core has been warm reset but there is no
+entrypoint to jump to.
+
+This function does not follow the Procedure Call Standard used by the
+Application Binary Interface for the ARM 64-bit architecture. The caller should
+not assume that callee saved registers are preserved across a call to this
+function.
+
+Function : platform\_is\_primary\_cpu() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned long
+    Return   : unsigned int
+
+This function identifies a core by its ``MPIDR``, which is passed as the argument,
+to determine whether this core is the primary core or a secondary core. A return
+value of zero indicates that the core is not the primary core, while a non-zero
+return value indicates that the core is the primary core.
+
+Common optional modifications
+-----------------------------
+
+Function : platform\_get\_core\_pos()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned long
+    Return   : int
+
+A platform may need to convert the ``MPIDR`` of a core to an absolute number, which
+can be used as a core-specific linear index into blocks of memory (for example
+while allocating per-core stacks). This routine contains a simple mechanism
+to perform this conversion, using the assumption that each cluster contains a
+maximum of four cores:
+
+::
+
+    linear index = cpu_id + (cluster_id * 4)
+
+    cpu_id = 8-bit value in MPIDR at affinity level 0
+    cluster_id = 8-bit value in MPIDR at affinity level 1
+
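+Written out in C, this conversion just extracts the two affinity fields from the
+``MPIDR`` (Aff0 in bits [7:0], Aff1 in bits [15:8]) and combines them:
+
+.. code:: c
+
+    int platform_get_core_pos(unsigned long mpidr)
+    {
+        unsigned long cpu_id     = mpidr & 0xffUL;         /* affinity level 0 */
+        unsigned long cluster_id = (mpidr >> 8) & 0xffUL;  /* affinity level 1 */
+
+        /* Assumes at most four cores per cluster, as stated above. */
+        return (int)(cpu_id + (cluster_id * 4UL));
+    }
+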
+Function : platform\_set\_stack()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned long
+    Return   : void
+
+This function sets the current stack pointer to the normal memory stack that
+has been allocated for the core specified by MPIDR. For BL images that only
+require a stack for the primary core the parameter is ignored. The size of
+the stack allocated to each core is specified by the platform defined constant
+``PLATFORM_STACK_SIZE``.
+
+Common implementations of this function for the UP and MP BL images are
+provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
+`plat/common/aarch64/platform\_mp\_stack.S`_
+
+Function : platform\_get\_stack()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned long
+    Return   : unsigned long
+
+This function returns the base address of the normal memory stack that
+has been allocated for the core specified by MPIDR. For BL images that only
+require a stack for the primary core the parameter is ignored. The size of
+the stack allocated to each core is specified by the platform defined constant
+``PLATFORM_STACK_SIZE``.
+
+Common implementations of this function for the UP and MP BL images are
+provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
+`plat/common/aarch64/platform\_mp\_stack.S`_
+
+Modifications for Power State Coordination Interface (in BL31)
+--------------------------------------------------------------
+
+The following functions must be implemented to initialize PSCI functionality in
+the ARM Trusted Firmware.
+
+Function : plat\_get\_aff\_count() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int, unsigned long
+    Return   : unsigned int
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
+called by the primary core.
+
+This function is called by the PSCI initialization code to detect the system
+topology. Its purpose is to return the number of affinity instances implemented
+at a given ``affinity level`` (specified by the first argument) and a given
+``MPIDR`` (specified by the second argument). For example, on a dual-cluster
+system where the first cluster implements two cores and the second cluster
+implements four cores, a call to this function with an ``MPIDR`` corresponding
+to the first cluster (``0x0``) and affinity level 0, would return 2. A call
+to this function with an ``MPIDR`` corresponding to the second cluster (``0x100``)
+and affinity level 0, would return 4.
+
+Function : plat\_get\_aff\_state() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int, unsigned long
+    Return   : unsigned int
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
+called by the primary core.
+
+This function is called by the PSCI initialization code. Its purpose is to
+return the state of an affinity instance. The affinity instance is determined by
+the affinity ID at a given ``affinity level`` (specified by the first argument)
+and an ``MPIDR`` (specified by the second argument). The state can be one of
+``PSCI_AFF_PRESENT`` or ``PSCI_AFF_ABSENT``. The latter state is used to cater for
+system topologies where certain affinity instances are unimplemented. For
+example, consider a platform that implements a single cluster with four cores and
+another core implemented directly on the interconnect with the cluster. The
+``MPIDR``\ s of the cluster would range from ``0x0-0x3``. The ``MPIDR`` of the single
+core is 0x100 to indicate that it does not belong to cluster 0. Cluster 1
+is missing but needs to be accounted for to reach this single core in the
+topology tree. Therefore it is marked as ``PSCI_AFF_ABSENT``.
+
+Function : platform\_setup\_pm() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : const plat_pm_ops **
+    Return   : int
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
+called by the primary core.
+
+This function is called by PSCI initialization code. Its purpose is to export
+handler routines for platform-specific power management actions by populating
+the passed pointer with a pointer to the private ``plat_pm_ops`` structure of
+BL31.
+
+A description of each member of this structure is given below. A platform port
+is expected to implement these handlers if the corresponding PSCI operation
+is to be supported and these handlers are expected to succeed if the return
+type is ``void``.
+
+plat\_pm\_ops.affinst\_standby()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Perform the platform-specific setup to enter the standby state indicated by the
+passed argument. The generic code expects the handler to succeed.
+
+plat\_pm\_ops.affinst\_on()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Perform the platform specific setup to power on an affinity instance, specified
+by the ``MPIDR`` (first argument) and ``affinity level`` (third argument). The
+``state`` (fourth argument) contains the current state of that affinity instance
+(ON or OFF). This is useful to determine whether any action must be taken. For
+example, while powering on a core, the cluster that contains this core might
+already be in the ON state. The platform decides what actions must be taken to
+transition from the current state to the target state (indicated by the power
+management operation). The generic code expects the platform to return
+E\_SUCCESS on success or E\_INTERN\_FAIL for any failure.
+
+plat\_pm\_ops.affinst\_off()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Perform the platform specific setup to power off an affinity instance of the
+calling core. It is called by the PSCI ``CPU_OFF`` API implementation.
+
+The ``affinity level`` (first argument) and ``state`` (second argument) have
+a similar meaning as described in the ``affinst_on()`` operation. They
+identify the affinity instance on which the call is made and its
+current state. This gives the platform port an indication of the
+state transition it must make to perform the requested action. For example, if
+the calling core is the last powered on core in the cluster, after powering down
+affinity level 0 (the core), the platform port should power down affinity
+level 1 (the cluster) as well. The generic code expects the handler to succeed.
+
+plat\_pm\_ops.affinst\_suspend()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Perform the platform specific setup to power off an affinity instance of the
+calling core. It is called by the PSCI ``CPU_SUSPEND`` API and ``SYSTEM_SUSPEND``
+API implementations.
+
+The ``affinity level`` (second argument) and ``state`` (third argument) have a
+similar meaning as described in the ``affinst_on()`` operation. They are used to
+identify the affinity instance on which the call is made and its current state.
+This gives the platform port an indication of the state transition it must
+make to perform the requested action. For example, if the calling core is the
+last powered on core in the cluster, after powering down affinity level 0
+(the core), the platform port should power down affinity level 1 (the cluster)
+as well.
+
+The difference between turning an affinity instance off and suspending it
+is that in the former case, the affinity instance is expected to re-initialize
+its state when it is next powered on (see ``affinst_on_finish()``). In the latter
+case, the affinity instance is expected to save enough state so that it can
+resume execution by restoring this state when it is powered on (see
+``affinst_suspend_finish()``). The generic code expects the handler to succeed.
+
+plat\_pm\_ops.affinst\_on\_finish()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function is called by the PSCI implementation after the calling core is
+powered on and released from reset in response to an earlier PSCI ``CPU_ON`` call.
+It performs the platform-specific setup required to initialize enough state for
+this core to enter the Normal world and also provide secure runtime firmware
+services.
+
+The ``affinity level`` (first argument) and ``state`` (second argument) have a
+similar meaning as described in the previous operations. The generic code
+expects the handler to succeed.
+
+plat\_pm\_ops.affinst\_suspend\_finish()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function is called by the PSCI implementation after the calling core is
+powered on and released from reset in response to an asynchronous wakeup
+event, for example a timer interrupt that was programmed by the core during the
+``CPU_SUSPEND`` call or ``SYSTEM_SUSPEND`` call. It performs the platform-specific
+setup required to restore the saved state for this core to resume execution
+in the Normal world and also provide secure runtime firmware services.
+
+The ``affinity level`` (first argument) and ``state`` (second argument) have a
+similar meaning as described in the previous operations. The generic code
+expects the platform to succeed.
+
+plat\_pm\_ops.validate\_power\_state()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function is called by the PSCI implementation during the ``CPU_SUSPEND``
+call to validate the ``power_state`` parameter of the PSCI API. If the
+``power_state`` is known to be invalid, the platform must return
+PSCI\_E\_INVALID\_PARAMS as an error, which is propagated back to the Normal
+world PSCI client.
+
+plat\_pm\_ops.validate\_ns\_entrypoint()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function is called by the PSCI implementation during the ``CPU_SUSPEND``,
+``SYSTEM_SUSPEND`` and ``CPU_ON`` calls to validate the Non-secure ``entry_point``
+parameter passed by the Normal world. If the ``entry_point`` is known to be
+invalid, the platform must return PSCI\_E\_INVALID\_PARAMS as an error, which is
+propagated back to the Normal world PSCI client.
+
+plat\_pm\_ops.get\_sys\_suspend\_power\_state()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This function is called by the PSCI implementation during the ``SYSTEM_SUSPEND``
+call to return the ``power_state`` parameter. This allows the platform to encode
+the appropriate State-ID field within the ``power_state`` parameter which can be
+utilized in ``affinst_suspend()`` to suspend to system affinity level. The
+``power_state`` parameter should be in the same format as specified by the
+PSCI specification for the CPU\_SUSPEND API.
+
+--------------
+
+*Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.*
+
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _Porting Guide: porting-guide.rst#user-content-function--plat_my_core_pos
+.. _psci pd tree: psci-pd-tree.rst
+.. _plat/arm/board/fvp/fvp\_pm.c: ../plat/arm/board/fvp/fvp_pm.c
+.. _38dce70f51fb83b27958ba3e2ad15f5635cb1061: https://github.com/ARM-software/arm-trusted-firmware/commit/38dce70f51fb83b27958ba3e2ad15f5635cb1061
+.. _plat/common/aarch64/platform\_up\_stack.S: ../plat/common/aarch64/platform_up_stack.S
+.. _plat/common/aarch64/platform\_mp\_stack.S: ../plat/common/aarch64/platform_mp_stack.S
+
+.. |Image 1| image:: diagrams/psci-suspend-sequence.png?raw=true
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
deleted file mode 100644
index 5e85023..0000000
--- a/docs/porting-guide.md
+++ /dev/null
@@ -1,1569 +0,0 @@
-ARM Trusted Firmware Porting Guide
-==================================
-
-Contents
---------
-
-1.  [Introduction](#1--introduction)
-2.  [Common Modifications](#2--common-modifications)
-    *   [Common mandatory modifications](#21-common-mandatory-modifications)
-    *   [Handling reset](#22-handling-reset)
-    *   [Common optional modifications](#23-common-optional-modifications)
-3.  [Boot Loader stage specific modifications](#3--modifications-specific-to-a-boot-loader-stage)
-    *   [Boot Loader stage 1 (BL1)](#31-boot-loader-stage-1-bl1)
-    *   [Boot Loader stage 2 (BL2)](#32-boot-loader-stage-2-bl2)
-    *   [Boot Loader stage 3-1 (BL3-1)](#32-boot-loader-stage-3-1-bl3-1)
-    *   [PSCI implementation (in BL3-1)](#33-power-state-coordination-interface-in-bl3-1)
-    *   [Interrupt Management framework (in BL3-1)](#34--interrupt-management-framework-in-bl3-1)
-    *   [Crash Reporting mechanism (in BL3-1)](#35--crash-reporting-mechanism-in-bl3-1)
-4.  [Build flags](#4--build-flags)
-5.  [C Library](#5--c-library)
-6.  [Storage abstraction layer](#6--storage-abstraction-layer)
-
-- - - - - - - - - - - - - - - - - -
-
-1.  Introduction
-----------------
-
-Porting the ARM Trusted Firmware to a new platform involves making some
-mandatory and optional modifications for both the cold and warm boot paths.
-Modifications consist of:
-
-*   Implementing a platform-specific function or variable,
-*   Setting up the execution context in a certain way, or
-*   Defining certain constants (for example #defines).
-
-The platform-specific functions and variables are all declared in
-[include/plat/common/platform.h]. The firmware provides a default implementation
-of variables and functions to fulfill the optional requirements. These
-implementations are all weakly defined; they are provided to ease the porting
-effort. Each platform port can override them with its own implementation if the
-default implementation is inadequate.
-
-Some modifications are common to all Boot Loader (BL) stages. Section 2
-discusses these in detail. The subsequent sections discuss the remaining
-modifications for each BL stage in detail.
-
-This document should be read in conjunction with the ARM Trusted Firmware
-[User Guide].
-
-
-2.  Common modifications
-------------------------
-
-This section covers the modifications that should be made by the platform for
-each BL stage to correctly port the firmware stack. They are categorized as
-either mandatory or optional.
-
-
-2.1 Common mandatory modifications
-----------------------------------
-A platform port must enable the Memory Management Unit (MMU) with identity
-mapped page tables, and enable both the instruction and data caches for each BL
-stage. In the ARM FVP port, each BL stage configures the MMU in its platform-
-specific architecture setup function, for example `blX_plat_arch_setup()`.
-
-If the build option `USE_COHERENT_MEM` is enabled, each platform must allocate a
-block of identity mapped secure memory with Device-nGnRE attributes aligned to
-page boundary (4K) for each BL stage. This memory is identified by the section
-name `tzfw_coherent_mem` so that its possible for the firmware to place
-variables in it using the following C code directive:
-
-    __attribute__ ((section("tzfw_coherent_mem")))
-
-Or alternatively the following assembler code directive:
-
-    .section tzfw_coherent_mem
-
-The `tzfw_coherent_mem` section is used to allocate any data structures that are
-accessed both when a CPU is executing with its MMU and caches enabled, and when
-it's running with its MMU and caches disabled. Examples are given below.
-
-The following variables, functions and constants must be defined by the platform
-for the firmware to work correctly.
-
-
-### File : platform_def.h [mandatory]
-
-Each platform must ensure that a header file of this name is in the system
-include path with the following constants defined. This may require updating the
-list of `PLAT_INCLUDES` in the `platform.mk` file. In the ARM FVP port, this
-file is found in [plat/fvp/include/platform_def.h].
-
-*   **#define : PLATFORM_LINKER_FORMAT**
-
-    Defines the linker format used by the platform, for example
-    `elf64-littleaarch64` used by the FVP.
-
-*   **#define : PLATFORM_LINKER_ARCH**
-
-    Defines the processor architecture for the linker by the platform, for
-    example `aarch64` used by the FVP.
-
-*   **#define : PLATFORM_STACK_SIZE**
-
-    Defines the normal stack memory available to each CPU. This constant is used
-    by [plat/common/aarch64/platform_mp_stack.S] and
-    [plat/common/aarch64/platform_up_stack.S].
-
-*   **#define : FIRMWARE_WELCOME_STR**
-
-    Defines the character string printed by BL1 upon entry into the `bl1_main()`
-    function.
-
-*   **#define : BL2_IMAGE_NAME**
-
-    Name of the BL2 binary image on the host file-system. This name is used by
-    BL1 to load BL2 into secure memory from non-volatile storage.
-
-*   **#define : BL31_IMAGE_NAME**
-
-    Name of the BL3-1 binary image on the host file-system. This name is used by
-    BL2 to load BL3-1 into secure memory from platform storage.
-
-*   **#define : BL33_IMAGE_NAME**
-
-    Name of the BL3-3 binary image on the host file-system. This name is used by
-    BL2 to load BL3-3 into non-secure memory from platform storage.
-
-*   **#define : BL2_CERT_NAME**
-
-    Name of the BL2 content certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : TRUSTED_KEY_CERT_NAME**
-
-    Name of the Trusted Key certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : BL31_KEY_CERT_NAME**
-
-    Name of the BL3-1 Key certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : BL31_CERT_NAME**
-
-    Name of the BL3-1 Content certificate on the host file-system (mandatory
-    when Trusted Board Boot is enabled).
-
-*   **#define : BL33_KEY_CERT_NAME**
-
-    Name of the BL3-3 Key certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : BL33_CERT_NAME**
-
-    Name of the BL3-3 Content certificate on the host file-system (mandatory
-    when Trusted Board Boot is enabled).
-
-*   **#define : PLATFORM_CACHE_LINE_SIZE**
-
-    Defines the size (in bytes) of the largest cache line across all the cache
-    levels in the platform.
-
-*   **#define : PLATFORM_CLUSTER_COUNT**
-
-    Defines the total number of clusters implemented by the platform in the
-    system.
-
-*   **#define : PLATFORM_CORE_COUNT**
-
-    Defines the total number of CPUs implemented by the platform across all
-    clusters in the system.
-
-*   **#define : PLATFORM_MAX_CPUS_PER_CLUSTER**
-
-    Defines the maximum number of CPUs that can be implemented within a cluster
-    on the platform.
-
-*   **#define : PLATFORM_NUM_AFFS**
-
-    Defines the total number of nodes in the affinity heirarchy at all affinity
-    levels used by the platform.
-
-*   **#define : BL1_RO_BASE**
-
-    Defines the base address in secure ROM where BL1 originally lives. Must be
-    aligned on a page-size boundary.
-
-*   **#define : BL1_RO_LIMIT**
-
-    Defines the maximum address in secure ROM that BL1's actual content (i.e.
-    excluding any data section allocated at runtime) can occupy.
-
-*   **#define : BL1_RW_BASE**
-
-    Defines the base address in secure RAM where BL1's read-write data will live
-    at runtime. Must be aligned on a page-size boundary.
-
-*   **#define : BL1_RW_LIMIT**
-
-    Defines the maximum address in secure RAM that BL1's read-write data can
-    occupy at runtime.
-
-*   **#define : BL2_BASE**
-
-    Defines the base address in secure RAM where BL1 loads the BL2 binary image.
-    Must be aligned on a page-size boundary.
-
-*   **#define : BL2_LIMIT**
-
-    Defines the maximum address in secure RAM that the BL2 image can occupy.
-
-*   **#define : BL31_BASE**
-
-    Defines the base address in secure RAM where BL2 loads the BL3-1 binary
-    image. Must be aligned on a page-size boundary.
-
-*   **#define : BL31_LIMIT**
-
-    Defines the maximum address in secure RAM that the BL3-1 image can occupy.
-
-*   **#define : NS_IMAGE_OFFSET**
-
-    Defines the base address in non-secure DRAM where BL2 loads the BL3-3 binary
-    image. Must be aligned on a page-size boundary.
-
-If a BL3-0 image is supported by the platform, the following constants must
-also be defined:
-
-*   **#define : BL30_IMAGE_NAME**
-
-    Name of the BL3-0 binary image on the host file-system. This name is used by
-    BL2 to load BL3-0 into secure memory from platform storage before being
-    transfered to the SCP.
-
-*   **#define : BL30_KEY_CERT_NAME**
-
-    Name of the BL3-0 Key certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : BL30_CERT_NAME**
-
-    Name of the BL3-0 Content certificate on the host file-system (mandatory
-    when Trusted Board Boot is enabled).
-
-If a BL3-2 image is supported by the platform, the following constants must
-also be defined:
-
-*   **#define : BL32_IMAGE_NAME**
-
-    Name of the BL3-2 binary image on the host file-system. This name is used by
-    BL2 to load BL3-2 into secure memory from platform storage.
-
-*   **#define : BL32_KEY_CERT_NAME**
-
-    Name of the BL3-2 Key certificate on the host file-system (mandatory when
-    Trusted Board Boot is enabled).
-
-*   **#define : BL32_CERT_NAME**
-
-    Name of the BL3-2 Content certificate on the host file-system (mandatory
-    when Trusted Board Boot is enabled).
-
-*   **#define : BL32_BASE**
-
-    Defines the base address in secure memory where BL2 loads the BL3-2 binary
-    image. Must be aligned on a page-size boundary.
-
-*   **#define : BL32_LIMIT**
-
-    Defines the maximum address that the BL3-2 image can occupy.
-
-If the Test Secure-EL1 Payload (TSP) instantiation of BL3-2 is supported by the
-platform, the following constants must also be defined:
-
-*   **#define : TSP_SEC_MEM_BASE**
-
-    Defines the base address of the secure memory used by the TSP image on the
-    platform. This must be at the same address or below `BL32_BASE`.
-
-*   **#define : TSP_SEC_MEM_SIZE**
-
-    Defines the size of the secure memory used by the BL3-2 image on the
-    platform. `TSP_SEC_MEM_BASE` and `TSP_SEC_MEM_SIZE` must fully accommodate
-    the memory required by the BL3-2 image, defined by `BL32_BASE` and
-    `BL32_LIMIT`.
-
-*   **#define : TSP_IRQ_SEC_PHY_TIMER**
-
-    Defines the ID of the secure physical generic timer interrupt used by the
-    TSP's interrupt handling code.
-
-If the platform port uses the IO storage framework, the following constants
-must also be defined:
-
-*   **#define : MAX_IO_DEVICES**
-
-    Defines the maximum number of registered IO devices. Attempting to register
-    more devices than this value using `io_register_device()` will fail with
-    IO_RESOURCES_EXHAUSTED.
-
-*   **#define : MAX_IO_HANDLES**
-
-    Defines the maximum number of open IO handles. Attempting to open more IO
-    entities than this value using `io_open()` will fail with
-    IO_RESOURCES_EXHAUSTED.
-
-If the platform needs to allocate data within the per-cpu data framework in
-BL3-1, it should define the following macro. Currently this is only required if
-the platform decides not to use the coherent memory section by undefining the
-USE_COHERENT_MEM build flag. In this case, the framework allocates the required
-memory within the per-cpu data to minimize wastage.
-
-*   **#define : PLAT_PCPU_DATA_SIZE**
-
-    Defines the memory (in bytes) to be reserved within the per-cpu data
-    structure for use by the platform layer.
-
-The following constants are optional. They should be defined when the platform
-memory layout implies some image overlaying, as on FVP.
-
-*   **#define : BL31_PROGBITS_LIMIT**
-
-    Defines the maximum address in secure RAM that the BL3-1's progbits sections
-    can occupy.
-
-*   **#define : TSP_PROGBITS_LIMIT**
-
-    Defines the maximum address that the TSP's progbits sections can occupy.
-
-### File : plat_macros.S [mandatory]
-
-Each platform must ensure a file of this name is in the system include path with
-the following macro defined. In the ARM FVP port, this file is found in
-[plat/fvp/include/plat_macros.S].
-
-*   **Macro : plat_print_gic_regs**
-
-    This macro allows the crash reporting routine to print GIC registers
-    in case of an unhandled exception in BL3-1. This aids in debugging and
-    this macro can be defined to be empty in case GIC register reporting is
-    not desired.
-
-*   **Macro : plat_print_interconnect_regs**
-
-    This macro allows the crash reporting routine to print interconnect registers
-    in case of an unhandled exception in BL3-1. This aids in debugging and
-    this macro can be defined to be empty in case interconnect register reporting
-    is not desired. In the ARM FVP port, the CCI snoop control registers are
-    reported.
-
-### Other mandatory modifications
-
-The following mandatory modifications may be implemented in any file
-the implementer chooses. In the ARM FVP port, they are implemented in
-[plat/fvp/aarch64/plat_common.c].
-
-*   **Function : uint64_t plat_get_syscnt_freq(void)**
-
-    This function is used by the architecture setup code to retrieve the
-    counter frequency for the CPU's generic timer.  This value will be
-    programmed into the `CNTFRQ_EL0` register.
-    In the ARM FVP port, it returns the base frequency of the system counter,
-    which is retrieved from the first entry in the frequency modes table.
-
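-As a minimal illustration, a platform with a fixed 24 MHz system counter (an
-assumed value, chosen only for this example) could implement it as:
-
-    #include <stdint.h>
-
-    /* Sketch only: assumes a fixed 24 MHz system counter frequency. */
-    #define PLAT_SYS_COUNTER_FREQ_HZ    24000000ull    /* hypothetical constant */
-
-    uint64_t plat_get_syscnt_freq(void)
-    {
-        return PLAT_SYS_COUNTER_FREQ_HZ;
-    }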
-
-2.2 Handling Reset
-------------------
-
-BL1 by default implements the reset vector where execution starts from a cold
-or warm boot. BL3-1 can be optionally set as a reset vector using the
-RESET_TO_BL31 make variable.
-
-For each CPU, the reset vector code is responsible for the following tasks:
-
-1.  Distinguishing between a cold boot and a warm boot.
-
-2.  In the case of a cold boot and the CPU being a secondary CPU, ensuring that
-    the CPU is placed in a platform-specific state until the primary CPU
-    performs the necessary steps to remove it from this state.
-
-3.  In the case of a warm boot, ensuring that the CPU jumps to a platform-
-    specific address in the BL3-1 image in the same processor mode as it was
-    when released from reset.
-
-The following functions need to be implemented by the platform port to enable
-reset vector code to perform the above tasks.
-
-
-### Function : platform_get_entrypoint() [mandatory]
-
-    Argument : unsigned long
-    Return   : unsigned int
-
-This function is called with the `SCTLR.M` and `SCTLR.C` bits disabled. The CPU
-is identified by its `MPIDR`, which is passed as the argument. The function is
-responsible for distinguishing between a warm and cold reset using platform-
-specific means. If it's a warm reset then it returns the entrypoint into the
-BL3-1 image that the CPU must jump to. If it's a cold reset then this function
-must return zero.
-
-This function is also responsible for implementing a platform-specific mechanism
-to handle the condition where the CPU has been warm reset but there is no
-entrypoint to jump to.
-
-This function does not follow the Procedure Call Standard used by the
-Application Binary Interface for the ARM 64-bit architecture. The caller should
-not assume that callee saved registers are preserved across a call to this
-function.
-
-This function fulfills requirements 1 and 3 listed above.
-
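-A minimal sketch of one possible approach, assuming a hypothetical mailbox in
-secure memory (`PLAT_TRUSTED_MAILBOX_BASE` is an invented constant, not part of
-the generic code) that is zero-initialized on a cold boot and holds per-CPU
-warm-boot entrypoints otherwise:
-
-    #include <stdint.h>
-
-    /* Sketch only: the mailbox location and layout are platform choices. */
-    #define PLAT_TRUSTED_MAILBOX_BASE   0x04000000   /* hypothetical address */
-
-    unsigned int platform_get_entrypoint(unsigned long mpidr)
-    {
-        uint64_t *mailbox = (uint64_t *) PLAT_TRUSTED_MAILBOX_BASE;
-
-        /* A zero entry indicates a cold reset for this CPU; otherwise it is
-           the BL3-1 entrypoint (truncated to the documented return type). */
-        return (unsigned int) mailbox[platform_get_core_pos(mpidr)];
-    }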
-
-### Function : plat_secondary_cold_boot_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function is called with the MMU and data caches disabled. It is responsible
-for placing the executing secondary CPU in a platform-specific state until the
-primary CPU performs the necessary actions to bring it out of that state and
-allow entry into the OS.
-
-In the ARM FVP port, each secondary CPU powers itself off. The primary CPU is
-responsible for powering up the secondary CPUs when normal world software
-requires them.
-
-This function fulfills requirement 2 above.
-
-
-### Function : platform_is_primary_cpu() [mandatory]
-
-    Argument : unsigned long
-    Return   : unsigned int
-
-This function identifies a CPU by its `MPIDR`, which is passed as the argument,
-to determine whether this CPU is the primary CPU or a secondary CPU. A return
-value of zero indicates that the CPU is not the primary CPU, while a non-zero
-return value indicates that the CPU is the primary CPU.
-
-
-### Function : platform_mem_init() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function is called before any access to data is made by the firmware, in
-order to carry out any essential memory initialization.
-
-The ARM FVP port uses this function to initialize the mailbox memory used for
-providing the warm-boot entry-point addresses.
-
-
-### Function : plat_match_rotpk()
-
-    Argument : const unsigned char *, unsigned int
-    Return   : int
-
-This function is mandatory when Trusted Board Boot is enabled. It receives a
-pointer to a buffer containing a signing key and its size as parameters and
-returns 0 (success) if that key matches the ROT (Root Of Trust) key stored in
-the platform. Any other return value means a mismatch.
-
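-A minimal sketch, assuming the platform embeds its ROT public key as a constant
-byte array (`plat_rotpk` and `plat_rotpk_len` are invented names used only for
-this example):
-
-    #include <string.h>
-
-    /* Hypothetical storage for the ROT public key, e.g. in trusted ROM. */
-    extern const unsigned char plat_rotpk[];
-    extern const unsigned int plat_rotpk_len;
-
-    int plat_match_rotpk(const unsigned char *key_ptr, unsigned int key_len)
-    {
-        /* Any non-zero return value is treated as a mismatch. */
-        if (key_len != plat_rotpk_len)
-            return 1;
-
-        return memcmp(key_ptr, plat_rotpk, key_len) != 0;
-    }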
-
-
-2.3 Common optional modifications
----------------------------------
-
-The following are helper functions implemented by the firmware that perform
-common platform-specific tasks. A platform may choose to override these
-definitions.
-
-
-### Function : platform_get_core_pos()
-
-    Argument : unsigned long
-    Return   : int
-
-A platform may need to convert the `MPIDR` of a CPU to an absolute number, which
-can be used as a CPU-specific linear index into blocks of memory (for example
-while allocating per-CPU stacks). This routine contains a simple mechanism
-to perform this conversion, using the assumption that each cluster contains a
-maximum of 4 CPUs:
-
-    linear index = cpu_id + (cluster_id * 4)
-
-    cpu_id = 8-bit value in MPIDR at affinity level 0
-    cluster_id = 8-bit value in MPIDR at affinity level 1
-
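-The default helper is provided by the firmware; the following C fragment merely
-illustrates the arithmetic described above and is not the actual
-implementation:
-
-    /* Affinity level 0 is in MPIDR[7:0], affinity level 1 in MPIDR[15:8]. */
-    int platform_get_core_pos(unsigned long mpidr)
-    {
-        unsigned int cpu_id     = mpidr & 0xff;
-        unsigned int cluster_id = (mpidr >> 8) & 0xff;
-
-        return cpu_id + (cluster_id * 4);
-    }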
-
-### Function : platform_set_stack()
-
-    Argument : unsigned long
-    Return   : void
-
-This function sets the current stack pointer to the normal memory stack that
-has been allocated for the CPU specified by MPIDR. For BL images that only
-require a stack for the primary CPU, the parameter is ignored. The size of
-the stack allocated to each CPU is specified by the platform defined constant
-`PLATFORM_STACK_SIZE`.
-
-Common implementations of this function for the UP and MP BL images are
-provided in [plat/common/aarch64/platform_up_stack.S] and
-[plat/common/aarch64/platform_mp_stack.S].
-
-
-### Function : platform_get_stack()
-
-    Argument : unsigned long
-    Return   : unsigned long
-
-This function returns the base address of the normal memory stack that
-has been allocated for the CPU specified by MPIDR. For BL images that only
-require a stack for the primary CPU, the parameter is ignored. The size of
-the stack allocated to each CPU is specified by the platform defined constant
-`PLATFORM_STACK_SIZE`.
-
-Common implementations of this function for the UP and MP BL images are
-provided in [plat/common/aarch64/platform_up_stack.S] and
-[plat/common/aarch64/platform_mp_stack.S].
-
-
-### Function : plat_report_exception()
-
-    Argument : unsigned int
-    Return   : void
-
-A platform may need to report various information about its status when an
-exception is taken, for example the current exception level, the CPU security
-state (secure/non-secure), the exception type, and so on. This function is
-called in the following circumstances:
-
-*   In BL1, whenever an exception is taken.
-*   In BL2, whenever an exception is taken.
-
-The default implementation doesn't do anything, to avoid making assumptions
-about the way the platform displays its status information.
-
-This function receives the exception type as its argument. Possible values for
-exceptions types are listed in the [include/runtime_svc.h] header file. Note
-that these constants are not related to any architectural exception code; they
-are just an ARM Trusted Firmware convention.
-
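-As an illustration, a platform with a memory-mapped scratch or LED register
-could record the exception type there so that it survives a subsequent hang
-(`PLAT_REPORT_REG` below is an invented address used only for this sketch):
-
-    /* Sketch only: where and how to report is entirely platform-specific. */
-    #define PLAT_REPORT_REG   0x1c010000   /* hypothetical MMIO register */
-
-    void plat_report_exception(unsigned int exception_type)
-    {
-        *(volatile unsigned int *) PLAT_REPORT_REG = exception_type;
-    }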
-
-### Function : plat_reset_handler()
-
-    Argument : void
-    Return   : void
-
-A platform may need to do additional initialization after reset. This function
-allows the platform to perform any platform-specific initializations.
-Platform-specific errata workarounds could also be implemented here. The API
-should preserve the values of the callee-saved registers x19 to x29.
-
-The default implementation doesn't do anything. If a platform needs to override
-the default implementation, refer to the [Firmware Design Guide] for general
-guidelines regarding placement of code in a reset handler.
-
-### Function : plat_disable_acp()
-
-    Argument : void
-    Return   : void
-
-This API allows a platform to disable the Accelerator Coherency Port (if
-present) during a cluster power down sequence. The default weak implementation
-doesn't do anything. Since this API is called during the power down sequence,
-it has restrictions on stack usage and it can use the registers x0 - x17 as
-scratch registers. It should preserve the value in the x18 register as it is
-used by the caller to store the return address.
-
-
-3.  Modifications specific to a Boot Loader stage
--------------------------------------------------
-
-3.1 Boot Loader Stage 1 (BL1)
------------------------------
-
-BL1 implements the reset vector where execution starts from after a cold or
-warm boot. For each CPU, BL1 is responsible for the following tasks:
-
-1.  Handling the reset as described in Section 2.2.
-
-2.  In the case of a cold boot and the CPU being the primary CPU, ensuring that
-    only this CPU executes the remaining BL1 code, including loading and passing
-    control to the BL2 stage.
-
-3.  Loading the BL2 image from non-volatile storage into secure memory at the
-    address specified by the platform defined constant `BL2_BASE`.
-
-4.  Populating a `meminfo` structure with the following information in memory,
-    accessible by BL2 immediately upon entry.
-
-        meminfo.total_base = Base address of secure RAM visible to BL2
-        meminfo.total_size = Size of secure RAM visible to BL2
-        meminfo.free_base  = Base address of secure RAM available for
-                             allocation to BL2
-        meminfo.free_size  = Size of secure RAM available for allocation to BL2
-
-    BL1 places this `meminfo` structure at the beginning of the free memory
-    available for its use. Since BL1 cannot allocate memory dynamically at the
-    moment, its free memory will be available for BL2's use as-is. However, this
-    means that BL2 must read the `meminfo` structure before it starts using its
-    free memory (this is discussed in Section 3.2).
-
-    In future releases of the ARM Trusted Firmware it will be possible for
-    the platform to decide where it wants to place the `meminfo` structure for
-    BL2.
-
-    BL1 implements the `bl1_init_bl2_mem_layout()` function to populate the
-    BL2 `meminfo` structure. The platform may override this implementation, for
-    example if the platform wants to restrict the amount of memory visible to
-    BL2. Details of how to do this are given below.
-
-The following functions need to be implemented by the platform port to enable
-BL1 to perform the above tasks.
-
-
-### Function : bl1_plat_arch_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function performs any platform-specific and architectural setup that the
-platform requires.  Platform-specific setup might include configuration of
-memory controllers, configuration of the interconnect to allow the cluster
-to service cache snoop requests from another cluster, and so on.
-
-In the ARM FVP port, this function enables CCI snoops into the cluster that the
-primary CPU is part of. It also enables the MMU.
-
-This function helps fulfill requirement 2 above.
-
-
-### Function : bl1_platform_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function executes with the MMU and data caches enabled. It is responsible
-for performing any remaining platform-specific setup that can occur after the
-MMU and data cache have been enabled.
-
-This function is also responsible for initializing the storage abstraction layer
-which is used to load further bootloader images.
-
-This function helps fulfill requirement 3 above.
-
-
-### Function : bl1_plat_sec_mem_layout() [mandatory]
-
-    Argument : void
-    Return   : meminfo *
-
-This function should only be called on the cold boot path. It executes with the
-MMU and data caches enabled. The pointer returned by this function must point to
-a `meminfo` structure containing the extents and availability of secure RAM for
-the BL1 stage.
-
-    meminfo.total_base = Base address of secure RAM visible to BL1
-    meminfo.total_size = Size of secure RAM visible to BL1
-    meminfo.free_base  = Base address of secure RAM available for allocation
-                         to BL1
-    meminfo.free_size  = Size of secure RAM available for allocation to BL1
-
-This information is used by BL1 to load the BL2 image in secure RAM. BL1 also
-populates a similar structure to tell BL2 the extents of memory available for
-its own use.
-
-This function helps fulfill requirement 3 above.
-
-
-### Function : bl1_init_bl2_mem_layout() [optional]
-
-    Argument : meminfo *, meminfo *, unsigned int, unsigned long
-    Return   : void
-
-BL1 needs to tell the next stage the amount of secure RAM available
-for it to use. This information is populated in a `meminfo`
-structure.
-
-Depending upon where BL2 has been loaded in secure RAM (determined by
-`BL2_BASE`), BL1 calculates the amount of free memory available for BL2 to use.
-BL1 also ensures that its data sections resident in secure RAM are not visible
-to BL2. An illustration of how this is done in the ARM FVP port is given in the
-[User Guide], in the Section "Memory layout on Base FVP".
-
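-A simplified sketch of the idea is shown below. The parameter names are
-invented (the porting interface only fixes their types), the `meminfo` type is
-written as in the interface summaries above, and the calculation assumes,
-purely for the example, that BL1's read-write data sits at the top of the free
-region; a real implementation must match its own memory map.
-
-    void bl1_init_bl2_mem_layout(meminfo *bl1_mem_layout,
-                                 meminfo *bl2_mem_layout,
-                                 unsigned int load_type,
-                                 unsigned long bl2_base)
-    {
-        /* These are ignored in this simplified sketch. */
-        (void) load_type;
-        (void) bl2_base;
-
-        /* Expose the same overall secure RAM region to BL2... */
-        *bl2_mem_layout = *bl1_mem_layout;
-
-        /* ...but stop BL2's free memory short of BL1's read-write data. */
-        bl2_mem_layout->free_size = BL1_RW_BASE - bl2_mem_layout->free_base;
-    }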
-
-### Function : bl1_plat_set_bl2_ep_info() [mandatory]
-
-    Argument : image_info *, entry_point_info *
-    Return   : void
-
-This function is called after loading the BL2 image. It can be used to
-overwrite the entry point set by the loader and also to set the security state
-and SPSR which represent the entry point system state for BL2.
-
-On FVP, we are setting the security state and the SPSR for the BL2 entrypoint.
-
-
-3.2 Boot Loader Stage 2 (BL2)
------------------------------
-
-The BL2 stage is executed only by the primary CPU, which is determined in BL1
-using the `platform_is_primary_cpu()` function. BL1 passes control to BL2 at
-`BL2_BASE`. BL2 executes in Secure EL1 and is responsible for:
-
-1.  (Optional) Loading the BL3-0 binary image (if present) from platform
-    provided non-volatile storage. To load the BL3-0 image, BL2 makes use of
-    the `meminfo` returned by the `bl2_plat_get_bl30_meminfo()` function.
-    The platform also defines the address in memory where BL3-0 is loaded
-    through the optional constant `BL30_BASE`. BL2 uses this information
-    to determine if there is enough memory to load the BL3-0 image.
-    Subsequent handling of the BL3-0 image is platform-specific and is
-    implemented in the `bl2_plat_handle_bl30()` function.
-    If `BL30_BASE` is not defined then this step is not performed.
-
-2.  Loading the BL3-1 binary image into secure RAM from non-volatile storage. To
-    load the BL3-1 image, BL2 makes use of the `meminfo` structure passed to it
-    by BL1. This structure allows BL2 to calculate how much secure RAM is
-    available for its use. The platform also defines the address in secure RAM
-    where BL3-1 is loaded through the constant `BL31_BASE`. BL2 uses this
-    information to determine if there is enough memory to load the BL3-1 image.
-
-3.  (Optional) Loading the BL3-2 binary image (if present) from platform
-    provided non-volatile storage. To load the BL3-2 image, BL2 makes use of
-    the `meminfo` returned by the `bl2_plat_get_bl32_meminfo()` function.
-    The platform also defines the address in memory where BL3-2 is loaded
-    through the optional constant `BL32_BASE`. BL2 uses this information
-    to determine if there is enough memory to load the BL3-2 image.
-    If `BL32_BASE` is not defined then this and the next step are not performed.
-
-4.  (Optional) Arranging to pass control to the BL3-2 image (if present) that
-    has been pre-loaded at `BL32_BASE`. BL2 populates an `entry_point_info`
-    structure in memory provided by the platform with information about how
-    BL3-1 should pass control to the BL3-2 image.
-
-5.  Loading the normal world BL3-3 binary image into non-secure DRAM from
-    platform storage and arranging for BL3-1 to pass control to this image. This
-    address is determined using the `plat_get_ns_image_entrypoint()` function
-    described below.
-
-6.  BL2 populates an `entry_point_info` structure in memory provided by the
-    platform with information about how BL3-1 should pass control to the
-    other BL images.
-
-The following functions must be implemented by the platform port to enable BL2
-to perform the above tasks.
-
-
-### Function : bl2_early_platform_setup() [mandatory]
-
-    Argument : meminfo *
-    Return   : void
-
-This function executes with the MMU and data caches disabled. It is only called
-by the primary CPU. The argument to this function is the address of the
-`meminfo` structure populated by BL1.
-
-The platform must copy the contents of the `meminfo` structure into a private
-variable as the original memory may be subsequently overwritten by BL2. The
-copied structure is made available to all BL2 code through the
-`bl2_plat_sec_mem_layout()` function.
-
-
-### Function : bl2_plat_arch_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function executes with the MMU and data caches disabled. It is only called
-by the primary CPU.
-
-The purpose of this function is to perform any architectural initialization
-that varies across platforms, for example enabling the MMU (since the memory
-map differs across platforms).
-
-
-### Function : bl2_platform_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initialization in `bl2_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-The purpose of this function is to perform any platform initialization
-specific to BL2. Platform security components are configured if required.
-For the Base FVP the TZC-400 TrustZone controller is configured to only
-grant non-secure access to DRAM. This avoids aliasing between secure and
-non-secure accesses in the TLB and cache - secure execution states can use
-the NS attributes in the MMU translation tables to access the DRAM.
-
-This function is also responsible for initializing the storage abstraction layer
-which is used to load further bootloader images.
-
-
-### Function : bl2_plat_sec_mem_layout() [mandatory]
-
-    Argument : void
-    Return   : meminfo *
-
-This function should only be called on the cold boot path. It may execute with
-the MMU and data caches enabled if the platform port does the necessary
-initialization in `bl2_plat_arch_setup()`. It is only called by the primary CPU.
-
-The purpose of this function is to return a pointer to a `meminfo` structure
-populated with the extents of secure RAM available for BL2 to use. See
-`bl2_early_platform_setup()` above.
-
-
-### Function : bl2_plat_get_bl30_meminfo() [mandatory]
-
-    Argument : meminfo *
-    Return   : void
-
-This function is used to get the memory limits where BL2 can load the
-BL3-0 image. The `meminfo` provided by this function is used by `load_image()`
-to validate whether the BL3-0 image can be loaded within the given
-memory from the given base.
-
-
-### Function : bl2_plat_handle_bl30() [mandatory]
-
-    Argument : image_info *
-    Return   : int
-
-This function is called after loading the BL3-0 image and is used to perform
-any platform-specific actions required to handle the SCP firmware. Typically it
-transfers the image into SCP memory using a platform-specific protocol and waits
-until the SCP executes it and signals to the Application Processor (AP) that
-BL2 execution can continue.
-
-This function returns 0 on success, a negative error code otherwise.
-
-
-### Function : bl2_plat_get_bl31_params() [mandatory]
-
-    Argument : void
-    Return   : bl31_params *
-
-BL2 platform code needs to return a pointer to a `bl31_params` structure it
-will use for passing information to BL3-1. The `bl31_params` structure carries
-the following information:
-
-*   A header describing the version information used to interpret the
-    `bl31_params` structure
-*   Information about executing the BL3-3 image in the `bl33_ep_info` field
-*   Information about executing the BL3-2 image in the `bl32_ep_info` field
-*   Information about the type and extents of the BL3-1 image in the
-    `bl31_image_info` field
-*   Information about the type and extents of the BL3-2 image in the
-    `bl32_image_info` field
-*   Information about the type and extents of the BL3-3 image in the
-    `bl33_image_info` field
-
-The memory pointed to by this structure and its sub-structures should be
-accessible from BL3-1 initialisation code. BL3-1 might choose to copy the
-necessary content, or maintain the structures until BL3-3 is initialised.
-
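-A heavily simplified sketch of the shape of such an implementation is given
-below. The type names are written as in the interface summaries above, the
-sketch assumes the `bl32_ep_info` and `bl33_ep_info` members are pointers to
-`entry_point_info` structures, the container name mirrors the FVP's
-`bl2_to_bl31_params_mem`, and the version header and `image_info` setup are
-omitted.
-
-    /* Hypothetical container kept in memory that BL3-1 can also access. */
-    static struct {
-        bl31_params      params;
-        entry_point_info bl32_ep_info;
-        entry_point_info bl33_ep_info;
-    } bl2_to_bl31_params_mem;
-
-    bl31_params *bl2_plat_get_bl31_params(void)
-    {
-        bl31_params *params = &bl2_to_bl31_params_mem.params;
-
-        /* Wire up the sub-structures; the version header and image_info
-           members are omitted from this sketch. */
-        params->bl32_ep_info = &bl2_to_bl31_params_mem.bl32_ep_info;
-        params->bl33_ep_info = &bl2_to_bl31_params_mem.bl33_ep_info;
-
-        return params;
-    }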
-
-### Function : bl2_plat_get_bl31_ep_info() [mandatory]
-
-    Argument : void
-    Return   : entry_point_info *
-
-BL2 platform code returns a pointer which is used to populate the entry point
-information for the BL3-1 entry point. The location pointed to by it should be
-accessible from BL1 while processing the synchronous exception used to run
-BL3-1.
-
-On FVP this is allocated inside a `bl2_to_bl31_params_mem` structure, which is
-placed at the address pointed to by `PARAMS_BASE`.
-
-
-### Function : bl2_plat_set_bl31_ep_info() [mandatory]
-
-    Argument : image_info *, entry_point_info *
-    Return   : void
-
-This function is called after loading the BL3-1 image. It can be used to
-overwrite the entry point set by the loader and also to set the security state
-and SPSR which represent the entry point system state for BL3-1.
-
-On FVP, we are setting the security state and the SPSR for the BL3-1
-entrypoint.
-
-### Function : bl2_plat_set_bl32_ep_info() [mandatory]
-
-    Argument : image_info *, entry_point_info *
-    Return   : void
-
-This function is called after loading the BL3-2 image. It can be used to
-overwrite the entry point set by the loader and also to set the security state
-and SPSR which represent the entry point system state for BL3-2.
-
-On FVP, we are setting the security state and the SPSR for the BL3-2
-entrypoint.
-
-### Function : bl2_plat_set_bl33_ep_info() [mandatory]
-
-    Argument : image_info *, entry_point_info *
-    Return   : void
-
-This function is called after loading the BL3-3 image. It can be used to
-overwrite the entry point set by the loader and also to set the security state
-and SPSR which represent the entry point system state for BL3-3.
-
-On FVP, we are setting the security state and the SPSR for the BL3-3
-entrypoint.
-
-### Function : bl2_plat_get_bl32_meminfo() [mandatory]
-
-    Argument : meminfo *
-    Return   : void
-
-This function is used to get the memory limits where BL2 can load the
-BL3-2 image. The `meminfo` provided by this function is used by `load_image()`
-to validate whether the BL3-2 image can be loaded within the given
-memory from the given base.
-
-### Function : bl2_plat_get_bl33_meminfo() [mandatory]
-
-    Argument : meminfo *
-    Return   : void
-
-This function is used to get the memory limits where BL2 can load the
-BL3-3 image. The `meminfo` provided by this function is used by `load_image()`
-to validate whether the BL3-3 image can be loaded within the given
-memory from the given base.
-
-### Function : bl2_plat_flush_bl31_params() [mandatory]
-
-    Argument : void
-    Return   : void
-
-Once BL2 has populated all the structures that need to be read by BL1 and
-BL3-1, including the `bl31_params` structure and its sub-structures, the
-`bl31_ep_info` structure and any platform-specific data, this function flushes
-all of this data to main memory so that it is available when execution moves
-to later boot loader stages with the MMU off.
-
-### Function : plat_get_ns_image_entrypoint() [mandatory]
-
-    Argument : void
-    Return   : unsigned long
-
-As previously described, BL2 is responsible for arranging for control to be
-passed to a normal world BL image through BL3-1. This function returns the
-entrypoint of that image, which BL3-1 uses to jump to it.
-
-BL2 is responsible for loading the normal world BL3-3 image (e.g. UEFI).
-
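-For a platform that simply enters BL3-3 at the fixed `NS_IMAGE_OFFSET` address
-described earlier, a minimal sketch could be:
-
-    unsigned long plat_get_ns_image_entrypoint(void)
-    {
-        /* The normal world image is entered at the address it was loaded to. */
-        return NS_IMAGE_OFFSET;
-    }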
-
-3.3 Boot Loader Stage 3-1 (BL3-1)
----------------------------------
-
-During cold boot, the BL3-1 stage is executed only by the primary CPU. This is
-determined in BL1 using the `platform_is_primary_cpu()` function. BL1 passes
-control to BL3-1 at `BL31_BASE`. During warm boot, BL3-1 is executed by all
-CPUs. BL3-1 executes at EL3 and is responsible for:
-
-1.  Re-initializing all architectural and platform state. Although BL1 performs
-    some of this initialization, BL3-1 remains resident in EL3 and must ensure
-    that EL3 architectural and platform state is completely initialized. It
-    should make no assumptions about the system state when it receives control.
-
-2.  Passing control to a normal world BL image, pre-loaded at a platform-
-    specific address by BL2. BL3-1 uses the `entry_point_info` structure that BL2
-    populated in memory to do this.
-
-3.  Providing runtime firmware services. Currently, BL3-1 only implements a
-    subset of the Power State Coordination Interface (PSCI) API as a runtime
-    service. See Section 3.4 below for details of porting the PSCI
-    implementation.
-
-4.  Optionally passing control to the BL3-2 image, pre-loaded at a platform-
-    specific address by BL2. BL3-1 exports a set of APIs that allow runtime
-    services to specify the security state in which the next image should be
-    executed and run the corresponding image. BL3-1 uses the `entry_point_info`
-    structure populated by BL2 to do this.
-
-If BL3-1 is a reset vector, it also needs to handle the reset as specified in
-Section 2.2 before performing the tasks described above.
-
-The following functions must be implemented by the platform port to enable BL3-1
-to perform the above tasks.
-
-
-### Function : bl31_early_platform_setup() [mandatory]
-
-    Argument : bl31_params *, void *
-    Return   : void
-
-This function executes with the MMU and data caches disabled. It is only called
-by the primary CPU. The arguments to this function are:
-
-*   The address of the `bl31_params` structure populated by BL2.
-*   An opaque pointer that the platform may use as needed.
-
-The platform can copy the contents of the `bl31_params` structure and its
-sub-structures into private variables if the original memory may be
-subsequently overwritten by BL3-1; similarly, the `void *` pointing to the
-platform data also needs to be saved.
-
-On the ARM FVP port, BL2 passes a pointer to a `bl31_params` structure populated
-in the secure DRAM at address `0x6000000` in the `bl31_params *` argument, and
-it does not use the opaque pointer mentioned earlier. BL3-1 does not copy this
-information to internal data structures as it guarantees that the secure
-DRAM memory will not be overwritten. It maintains an internal reference to this
-information in the `bl2_to_bl31_params` variable.
-
-### Function : bl31_plat_arch_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function executes with the MMU and data caches disabled. It is only called
-by the primary CPU.
-
-The purpose of this function is to perform any architectural initialization
-that varies across platforms, for example enabling the MMU (since the memory
-map differs across platforms).
-
-
-### Function : bl31_platform_setup() [mandatory]
-
-    Argument : void
-    Return   : void
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initialization in `bl31_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-The purpose of this function is to complete platform initialization so that both
-BL3-1 runtime services and normal world software can function correctly.
-
-The ARM FVP port does the following:
-
-*   Initializes the generic interrupt controller.
-*   Configures the CLCD controller.
-*   Enables system-level implementation of the generic timer counter.
-*   Grants access to the system counter timer module.
-*   Initializes the FVP power controller device.
-*   Detects the system topology.
-
-
-### Function : bl31_get_next_image_info() [mandatory]
-
-    Argument : unsigned int
-    Return   : entry_point_info *
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initializations in `bl31_plat_arch_setup()`.
-
-This function is called by `bl31_main()` to retrieve information provided by
-BL2 for the next image in the security state specified by the argument. BL3-1
-uses this information to pass control to that image in the specified security
-state. This function must return a pointer to the `entry_point_info` structure
-(that was copied during `bl31_early_platform_setup()`) if the image exists. It
-should return NULL otherwise.
-
-
-3.4 Power State Coordination Interface (in BL3-1)
-------------------------------------------------
-
-The ARM Trusted Firmware's implementation of the PSCI API is based around the
-concept of an _affinity instance_. Each _affinity instance_ can be uniquely
-identified in a system by a CPU ID (the processor `MPIDR` is used in the PSCI
-interface) and an _affinity level_. A processing element (for example, a
-CPU) is at level 0. If the CPUs in the system are described in a tree where the
-node above a CPU is a logical grouping of CPUs that share some state, then
-affinity level 1 is that group of CPUs (for example, a cluster), and affinity
-level 2 is a group of clusters (for example, the system). The implementation
-assumes that the affinity level 1 ID can be computed from the affinity level 0
-ID (for example, a unique cluster ID can be computed from the CPU ID). The
-current implementation computes this on the basis of the recommended use of
-`MPIDR` affinity fields in the ARM Architecture Reference Manual.
-
-BL3-1's platform initialization code exports a pointer to the platform-specific
-power management operations required for the PSCI implementation to function
-correctly. This information is populated in the `plat_pm_ops` structure. The
-PSCI implementation calls members of the `plat_pm_ops` structure for performing
-power management operations for each affinity instance. For example, the target
-CPU is specified by its `MPIDR` in a PSCI `CPU_ON` call. The `affinst_on()`
-handler (if present) is called for each affinity instance as the PSCI
-implementation powers up each affinity level implemented in the `MPIDR` (for
-example, CPU, cluster and system).
-
-The following functions must be implemented to initialize PSCI functionality in
-the ARM Trusted Firmware.
-
-
-### Function : plat_get_aff_count() [mandatory]
-
-    Argument : unsigned int, unsigned long
-    Return   : unsigned int
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-This function is called by the PSCI initialization code to detect the system
-topology. Its purpose is to return the number of affinity instances implemented
-at a given `affinity level` (specified by the first argument) and a given
-`MPIDR` (specified by the second argument). For example, on a dual-cluster
-system where the first cluster implements 2 CPUs and the second cluster implements 4
-CPUs, a call to this function with an `MPIDR` corresponding to the first cluster
-(`0x0`) and affinity level 0, would return 2. A call to this function with an
-`MPIDR` corresponding to the second cluster (`0x100`) and affinity level 0,
-would return 4.
-
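-For a fixed, fully populated topology, a sketch of this function could be as
-simple as the following (real platforms, including the FVP, typically consult
-a platform-specific topology description instead):
-
-    unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
-    {
-        /* Affinity level 1: the number of clusters in the system. */
-        if (aff_lvl == 1)
-            return PLATFORM_CLUSTER_COUNT;
-
-        /* Affinity level 0: the number of CPUs in the cluster given by mpidr. */
-        if (aff_lvl == 0)
-            return PLATFORM_MAX_CPUS_PER_CLUSTER;
-
-        /* A single instance at any higher affinity level. */
-        return 1;
-    }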
-
-### Function : plat_get_aff_state() [mandatory]
-
-    Argument : unsigned int, unsigned long
-    Return   : unsigned int
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-This function is called by the PSCI initialization code. Its purpose is to
-return the state of an affinity instance. The affinity instance is determined by
-the affinity ID at a given `affinity level` (specified by the first argument)
-and an `MPIDR` (specified by the second argument). The state can be one of
-`PSCI_AFF_PRESENT` or `PSCI_AFF_ABSENT`. The latter state is used to cater for
-system topologies where certain affinity instances are unimplemented. For
-example, consider a platform that implements a single cluster with 4 CPUs and
-another CPU implemented directly on the interconnect with the cluster. The
-`MPIDR`s of the cluster would range from `0x0-0x3`. The `MPIDR` of the single
-CPU would be 0x100 to indicate that it does not belong to cluster 0. Cluster 1
-is missing but needs to be accounted for to reach this single CPU in the
-topology tree. Hence it is marked as `PSCI_AFF_ABSENT`.
-
-
-### Function : plat_get_max_afflvl() [mandatory]
-
-    Argument : void
-    Return   : int
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-This function is called by the PSCI implementation both during cold and warm
-boot, to determine the maximum affinity level that the power management
-operations should apply to. ARMv8-A has support for 4 affinity levels. It is
-likely that hardware will implement fewer affinity levels. This function allows
-the PSCI implementation to consider only those affinity levels in the system
-that the platform implements. For example, the Base AEM FVP implements two
-clusters with a configurable number of CPUs. It reports the maximum affinity
-level as 1, resulting in PSCI power control up to the cluster level.
-
-
-### Function : platform_setup_pm() [mandatory]
-
-    Argument : const plat_pm_ops **
-    Return   : int
-
-This function may execute with the MMU and data caches enabled if the platform
-port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
-called by the primary CPU.
-
-This function is called by PSCI initialization code. Its purpose is to export
-handler routines for platform-specific power management actions by populating
-the passed pointer with a pointer to BL3-1's private `plat_pm_ops` structure.
-
-A description of each member of this structure is given below. Please refer to
-the ARM FVP specific implementation of these handlers in [plat/fvp/fvp_pm.c]
-as an example. A platform port is expected to implement these handlers if the
-corresponding PSCI operation is to be supported and these handlers are expected
-to succeed if the return type is `void`.
-
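-A minimal sketch of the export itself is shown below; it assumes the platform
-keeps its handlers in a private `plat_ops` structure (an invented name)
-populated with the members described below.
-
-    /* The platform's private operations table, populated elsewhere with the
-       handlers described below (hypothetical variable name). */
-    extern const plat_pm_ops plat_ops;
-
-    int platform_setup_pm(const plat_pm_ops **plat_ops_ptr)
-    {
-        *plat_ops_ptr = &plat_ops;
-
-        /* A zero return is assumed to indicate success in this sketch. */
-        return 0;
-    }
-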
-#### plat_pm_ops.affinst_standby()
-
-Perform the platform-specific setup to enter the standby state indicated by the
-passed argument. The generic code expects the handler to succeed.
-
-#### plat_pm_ops.affinst_on()
-
-Perform the platform-specific setup to power on an affinity instance, specified
-by the `MPIDR` (first argument) and `affinity level` (third argument). The
-`state` (fourth argument) contains the current state of that affinity instance
-(ON or OFF). This is useful to determine whether any action must be taken. For
-example, while powering on a CPU, the cluster that contains this CPU might
-already be in the ON state. The platform decides what actions must be taken to
-transition from the current state to the target state (indicated by the power
-management operation). The generic code expects the platform to return
-E_SUCCESS on success or E_INTERN_FAIL for any failure.
-
-#### plat_pm_ops.affinst_off()
-
-Perform the platform-specific setup to power off an affinity instance of the
-calling CPU. It is called by the PSCI `CPU_OFF` API implementation.
-
-The `affinity level` (first argument) and `state` (second argument) have
-a similar meaning as described in the `affinst_on()` operation. They are
-used to identify the affinity instance on which the call is made and its
-current state. This gives the platform port an indication of the
-state transition it must make to perform the requested action. For example, if
-the calling CPU is the last powered on CPU in the cluster, after powering down
-affinity level 0 (CPU), the platform port should power down affinity level 1
-(the cluster) as well. The generic code expects the handler to succeed.
-
-#### plat_pm_ops.affinst_suspend()
-
-Perform the platform-specific setup to power off an affinity instance of the
-calling CPU. It is called by the PSCI `CPU_SUSPEND` and `SYSTEM_SUSPEND` API
-implementations.
-
-The `affinity level` (second argument) and `state` (third argument) have a
-similar meaning as described in the `affinst_on()` operation. They are used to
-identify the affinity instance on which the call is made and its current state.
-This gives the platform port an indication of the state transition it must
-make to perform the requested action. For example, if the calling CPU is the
-last powered on CPU in the cluster, after powering down affinity level 0 (CPU),
-the platform port should power down affinity level 1 (the cluster) as well.
-
-The difference between turning an affinity instance off and suspending it is
-that in the former case, the affinity instance is expected to re-initialize
-its state when it is next powered on (see `affinst_on_finish()`). In the latter
-case, the affinity instance is expected to save enough state so that it can
-resume execution by restoring this state when it is powered on (see
-`affinst_suspend_finish()`). The generic code expects the handler to succeed.
-
-#### plat_pm_ops.affinst_on_finish()
-
-This function is called by the PSCI implementation after the calling CPU is
-powered on and released from reset in response to an earlier PSCI `CPU_ON` call.
-It performs the platform-specific setup required to initialize enough state for
-this CPU to enter the normal world and also provide secure runtime firmware
-services.
-
-The `affinity level` (first argument) and `state` (second argument) have a
-similar meaning as described in the previous operations. The generic code
-expects the handler to succeed.
-
-#### plat_pm_ops.affinst_suspend_finish()
-
-This function is called by the PSCI implementation after the calling CPU is
-powered on and released from reset in response to an asynchronous wakeup
-event, for example a timer interrupt that was programmed by the CPU during the
-`CPU_SUSPEND` call or `SYSTEM_SUSPEND` call. It performs the platform-specific
-setup required to restore the saved state for this CPU to resume execution
-in the normal world and also provide secure runtime firmware services.
-
-The `affinity level` (first argument) and `state` (second argument) have a
-similar meaning as described in the previous operations. The generic code
-expects the platform to succeed.
-
-#### plat_pm_ops.validate_power_state()
-
-This function is called by the PSCI implementation during the `CPU_SUSPEND`
-call to validate the `power_state` parameter of the PSCI API. If the
-`power_state` is known to be invalid, the platform must return
-PSCI_E_INVALID_PARAMS as an error, which is propagated back to the normal
-world PSCI client.
-
-#### plat_pm_ops.validate_ns_entrypoint()
-
-This function is called by the PSCI implementation during the `CPU_SUSPEND`,
-`SYSTEM_SUSPEND` and `CPU_ON` calls to validate the non-secure `entry_point`
-parameter passed by the normal world. If the `entry_point` is known to be
-invalid, the platform must return PSCI_E_INVALID_PARAMS as an error, which is
-propagated back to the normal world PSCI client.
-
-#### plat_pm_ops.get_sys_suspend_power_state()
-
-This function is called by the PSCI implementation during the `SYSTEM_SUSPEND`
-call to return the `power_state` parameter. This allows the platform to encode
-the appropriate State-ID field within the `power_state` parameter which can be
-utilized in `affinst_suspend()` to suspend to the system affinity level. The
-`power_state` parameter should be in the same format as specified by the
-PSCI specification for the CPU_SUSPEND API.
-
-BL3-1 platform initialization code must also detect the system topology and
-the state of each affinity instance in the topology. This information is
-critical for the PSCI runtime service to function correctly. More details are
-provided in the description of the `plat_get_aff_count()` and
-`plat_get_aff_state()` functions above.
-
-3.5  Interrupt Management framework (in BL3-1)
-----------------------------------------------
-BL3-1 implements an Interrupt Management Framework (IMF) to manage interrupts
-generated in either security state and targeted to EL1 or EL2 in the non-secure
-state or EL3/S-EL1 in the secure state.  The design of this framework is
-described in the [IMF Design Guide].
-
-A platform should export the following APIs to support the IMF. The following
-text briefly describes each API and its implementation on the FVP port. The API
-implementation depends upon the type of interrupt controller present in the
-platform. The FVP implements an ARM Generic Interrupt Controller (ARM GIC) as
-per version 2.0 of the [ARM GIC Architecture Specification].
-
-### Function : plat_interrupt_type_to_line() [mandatory]
-
-    Argument : uint32_t, uint32_t
-    Return   : uint32_t
-
-The ARM processor signals an interrupt exception either through the IRQ or FIQ
-interrupt line. The specific line that is signaled depends on how the interrupt
-controller (IC) reports different interrupt types from an execution context in
-either security state. The IMF uses this API to determine which interrupt line
-the platform IC uses to signal each type of interrupt supported by the framework
-from a given security state.
-
-The first parameter will be one of the `INTR_TYPE_*` values (see [IMF Design
-Guide]) indicating the target type of the interrupt; the second parameter is the
-security state of the originating execution context. The return result is the
-bit position in the `SCR_EL3` register of the respective interrupt trap: IRQ=1,
-FIQ=2.
-
-The FVP port configures the ARM GIC to signal S-EL1 interrupts as FIQs and
-Non-secure interrupts as IRQs from either security state.
-
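-As an illustration of the FVP behaviour just described (S-EL1 interrupts on the
-FIQ line, Non-secure interrupts on the IRQ line, regardless of the security
-state), a sketch might look like the following. The bit position constants are
-defined locally for the example and the unused parameter is ignored.
-
-    /* SCR_EL3 trap bit positions: IRQ = bit 1, FIQ = bit 2. */
-    #define SCR_IRQ_BIT_POS   1
-    #define SCR_FIQ_BIT_POS   2
-
-    uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state)
-    {
-        (void) security_state;
-
-        /* Secure (S-EL1) interrupts are signalled on the FIQ line... */
-        if (type == INTR_TYPE_S_EL1)
-            return SCR_FIQ_BIT_POS;
-
-        /* ...and Non-secure interrupts on the IRQ line, in either state. */
-        return SCR_IRQ_BIT_POS;
-    }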
-
-### Function : plat_ic_get_pending_interrupt_type() [mandatory]
-
-    Argument : void
-    Return   : uint32_t
-
-This API returns the type of the highest priority pending interrupt at the
-platform IC. The IMF uses the interrupt type to retrieve the corresponding
-handler function. `INTR_TYPE_INVAL` is returned when there is no interrupt
-pending. The valid interrupt types that can be returned are `INTR_TYPE_EL3`,
-`INTR_TYPE_S_EL1` and `INTR_TYPE_NS`.
-
-The FVP port reads the _Highest Priority Pending Interrupt Register_
-(`GICC_HPPIR`) to determine the id of the pending interrupt. The type of interrupt
-depends upon the id value as follows.
-
-1. id < 1022 is reported as an S-EL1 interrupt.
-2. id = 1022 is reported as a Non-secure interrupt.
-3. id = 1023 is reported as an invalid interrupt type.
-
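-A sketch of this mapping, assuming a memory-mapped GICv2 CPU interface
-(`GICC_BASE` is an invented value and `mmio_read_32()` stands in for whatever
-MMIO accessor the platform uses):
-
-    #define GICC_BASE    0x2c002000   /* hypothetical GIC CPU interface base */
-    #define GICC_HPPIR   0x18         /* Highest Priority Pending Interrupt  */
-
-    uint32_t plat_ic_get_pending_interrupt_type(void)
-    {
-        uint32_t id = mmio_read_32(GICC_BASE + GICC_HPPIR) & 0x3ff;
-
-        if (id < 1022)
-            return INTR_TYPE_S_EL1;
-        if (id == 1022)
-            return INTR_TYPE_NS;
-
-        return INTR_TYPE_INVAL;    /* id 1023: no pending interrupt */
-    }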
-
-### Function : plat_ic_get_pending_interrupt_id() [mandatory]
-
-    Argument : void
-    Return   : uint32_t
-
-This API returns the id of the highest priority pending interrupt at the
-platform IC. The IMF passes the id returned by this API to the registered
-handler for the pending interrupt if the `IMF_READ_INTERRUPT_ID` build time flag
-is set. `INTR_ID_UNAVAILABLE` is returned when there is no interrupt pending.
-
-The FVP port reads the _Highest Priority Pending Interrupt Register_
-(`GICC_HPPIR`) to determine the id of the pending interrupt. The id that is
-returned by the API depends upon the value of the id read from the interrupt
-controller as follows.
-
-1. id < 1022. id is returned as is.
-2. id = 1022. The _Aliased Highest Priority Pending Interrupt Register_
-   (`GICC_AHPPIR`) is read to determine the id of the non-secure interrupt. This
-   id is returned by the API.
-3. id = 1023. `INTR_ID_UNAVAILABLE` is returned.
-
-
-### Function : plat_ic_acknowledge_interrupt() [mandatory]
-
-    Argument : void
-    Return   : uint32_t
-
-This API is used by the CPU to indicate to the platform IC that processing of
-the highest pending interrupt has begun. It should return the id of the
-interrupt which is being processed.
-
-The FVP port reads the _Interrupt Acknowledge Register_ (`GICC_IAR`). This
-changes the state of the highest priority pending interrupt from pending to
-active in the interrupt controller. It returns the value read from the
-`GICC_IAR`. This value is the id of the interrupt whose state has been changed.
-
-The TSP uses this API to start processing of the secure physical timer
-interrupt.
-
-
-### Function : plat_ic_end_of_interrupt() [mandatory]
-
-    Argument : uint32_t
-    Return   : void
-
-This API is used by the CPU to indicate to the platform IC that processing of
-the interrupt corresponding to the id (passed as the parameter) has
-finished. The id should be the same as the id returned by the
-`plat_ic_acknowledge_interrupt()` API.
-
-The FVP port writes the id to the _End of Interrupt Register_
-(`GICC_EOIR`). This deactivates the corresponding interrupt in the interrupt
-controller.
-
-The TSP uses this API to finish processing of the secure physical timer
-interrupt.
-
-
-### Function : plat_ic_get_interrupt_type() [mandatory]
-
-    Argument : uint32_t
-    Return   : uint32_t
-
-This API returns the type of the interrupt id passed as the parameter.
-`INTR_TYPE_INVAL` is returned if the id is invalid. If the id is valid, a valid
-interrupt type (one of `INTR_TYPE_EL3`, `INTR_TYPE_S_EL1` and `INTR_TYPE_NS`) is
-returned depending upon how the interrupt has been configured by the platform
-IC.
-
-The FVP port configures S-EL1 interrupts as Group0 interrupts and Non-secure
-interrupts as Group1 interrupts. It reads the group value corresponding to the
-interrupt id from the relevant _Interrupt Group Register_ (`GICD_IGROUPRn`). It
-uses the group value to determine the type of interrupt.
-
-3.6  Crash Reporting mechanism (in BL3-1)
-----------------------------------------------
-BL3-1 implements a crash reporting mechanism which prints the various registers
-of the CPU to enable quick crash analysis and debugging. It requires the
-platform to designate a console as the crash console, which is then used to
-print the register dump.
-
-The following functions must be implemented by the platform if it wants the
-crash reporting mechanism in BL3-1. The functions are implemented in assembly
-so that they can be invoked without a C runtime stack.
-
-### Function : plat_crash_console_init
-
-    Argument : void
-    Return   : int
-
-This API is used by the crash reporting mechanism to initialize the crash
-console. It should only use the general purpose registers x0 to x2 to do the
-initialization and return 1 on success.
-
-The FVP port designates the `PL011_UART0` as the crash console and calls
-`console_core_init()` to initialize the console.
-
-### Function : plat_crash_console_putc
-
-    Argument : int
-    Return   : int
-
-This API is used by the crash reporting mechanism to print a character on the
-designated crash console. It should only use general purpose registers x1 and
-x2 to do its work. The parameter and the return value are in general purpose
-register x0.
-
-The FVP port designates the `PL011_UART0` as the crash console and calls
-`console_core_putc()` to print the character on the console.
-
-4.  Build flags
----------------
-
-There are some build flags which can be defined by the platform to control
-inclusion or exclusion of certain BL stages from the FIP image. These flags
-need to be defined in the platform makefile which will get included by the
-build system.
-
-*   **NEED_BL30**
-    If defined by the platform, this flag mandates that a BL3-0 binary be
-    included in the FIP image. The path to the BL3-0 binary can be specified
-    by the `BL30` build option (see build options in the [User Guide]).
-
-*   **NEED_BL33**
-    By default, this flag is defined as `yes` by the build system and the
-    `BL33` build option must be supplied. The platform can exclude the BL3-3
-    image from the `fip` image by defining this flag as `no`.
-
-5.  C Library
--------------
-
-To avoid subtle toolchain behavioral dependencies, the header files provided
-by the compiler are not used. The software is built with the `-nostdinc` flag
-to ensure no headers are included from the toolchain inadvertently. Instead the
-required headers are included in the ARM Trusted Firmware source tree. The
-library only contains those C library definitions required by the local
-implementation. If more functionality is required, the needed library functions
-will need to be added to the local implementation.
-
-Versions of [FreeBSD] headers can be found in `include/stdlib`. Some of these
-headers have been cut down in order to simplify the implementation. In order to
-minimize changes to the header files, the [FreeBSD] layout has been maintained.
-The generic C library definitions can be found in `include/stdlib` with more
-system and machine specific declarations in `include/stdlib/sys` and
-`include/stdlib/machine`.
-
-The local C library implementations can be found in `lib/stdlib`. In order to
-extend the C library these files may need to be modified. It is recommended to
-use a release version of [FreeBSD] as a starting point.
-
-The C library header files in the [FreeBSD] source tree are located in the
-`include` and `sys/sys` directories. [FreeBSD] machine specific definitions
-can be found in the `sys/<machine-type>` directories. These files define things
-like 'the size of a pointer' and 'the range of an integer'. Since an AArch64
-port for [FreeBSD] does not yet exist, the machine specific definitions are
-based on existing machine types with similar properties (for example SPARC64).
-
-Where possible, C library function implementations were taken from [FreeBSD]
-as found in the `lib/libc` directory.
-
-A copy of the [FreeBSD] sources can be downloaded with `git`.
-
-    git clone git://github.com/freebsd/freebsd.git -b origin/release/9.2.0
-
-
-6.  Storage abstraction layer
------------------------------
-
-In order to improve platform independence and portability, a storage abstraction
-layer is used to load data from non-volatile platform storage.
-
-Each platform should register devices and their drivers via the Storage layer.
-These drivers then need to be initialized by bootloader phases as
-required in their respective `blx_platform_setup()` functions.  Currently
-storage access is only required by BL1 and BL2 phases. The `load_image()`
-function uses the storage layer to access non-volatile platform storage.
-
-It is mandatory to implement at least one storage driver. For the FVP the
-Firmware Image Package (FIP) driver is provided as the default means to load
-data from storage (see the "Firmware Image Package" section in the [User Guide]).
-The storage layer is described in the header file
-`include/drivers/io/io_storage.h`.  The implementation of the common library
-is in `drivers/io/io_storage.c` and the driver files are located in
-`drivers/io/`.
-
-Each IO driver must provide `io_dev_*` structures, as described in
-`drivers/io/io_driver.h`.  These are returned via a mandatory registration
-function that is called on platform initialization.  The semi-hosting driver
-implementation in `io_semihosting.c` can be used as an example.
-
-The Storage layer provides mechanisms to initialize storage devices before
-IO operations are called.  The basic operations supported by the layer
-include `open()`, `close()`, `read()`, `write()`, `size()` and `seek()`.
-Drivers do not have to implement all operations, but each platform must
-provide at least one driver for a device capable of supporting generic
-operations such as loading a bootloader image.
-
-The current implementation only allows for known images to be loaded by the
-firmware.  These images are specified by using their names, as defined in
-[include/plat/common/platform.h]. The platform layer (`plat_get_image_source()`)
-then returns a reference to a device and a driver-specific `spec` which will be
-understood by the driver to allow access to the image data.
-
-The layer is designed in such a way that it is possible to chain drivers with
-other drivers.  For example, file-system drivers may be implemented on top of
-physical block devices, both represented by IO devices with corresponding
-drivers.  In such a case, the file-system "binding" with the block device may
-be deferred until the file-system device is initialised.
-
-The abstraction currently depends on structures being statically allocated
-by the drivers and callers, as the system does not yet provide a means of
-dynamically allocating memory.  This may also have the effect of limiting the
-number of open resources per driver.
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved._
-
-
-[ARM GIC Architecture Specification]: http://arminfo.emea.arm.com/help/topic/com.arm.doc.ihi0048b/IHI0048B_gic_architecture_specification.pdf
-[IMF Design Guide]:                   interrupt-framework-design.md
-[User Guide]:                         user-guide.md
-[FreeBSD]:                            http://www.freebsd.org
-[Firmware Design Guide]:              firmware-design.md
-
-[plat/common/aarch64/platform_mp_stack.S]: ../plat/common/aarch64/platform_mp_stack.S
-[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
-[plat/fvp/include/platform_def.h]:         ../plat/fvp/include/platform_def.h
-[plat/fvp/include/plat_macros.S]:          ../plat/fvp/include/plat_macros.S
-[plat/fvp/aarch64/plat_common.c]:          ../plat/fvp/aarch64/plat_common.c
-[plat/fvp/plat_pm.c]:                      ../plat/fvp/plat_pm.c
-[include/runtime_svc.h]:                   ../include/runtime_svc.h
-[include/plat/common/platform.h]:          ../include/plat/common/platform.h
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
new file mode 100644
index 0000000..f0a8aaf
--- /dev/null
+++ b/docs/porting-guide.rst
@@ -0,0 +1,2729 @@
+ARM Trusted Firmware Porting Guide
+==================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+--------------
+
+Introduction
+------------
+
+Please note that this document has been updated for the new platform API
+as required by the PSCI v1.0 implementation. Please refer to the
+`Migration Guide`_ for the previous platform API.
+
+Porting the ARM Trusted Firmware to a new platform involves making some
+mandatory and optional modifications for both the cold and warm boot paths.
+Modifications consist of:
+
+-  Implementing a platform-specific function or variable,
+-  Setting up the execution context in a certain way, or
+-  Defining certain constants (for example #defines).
+
+The platform-specific functions and variables are declared in
+`include/plat/common/platform.h`_. The firmware provides a default implementation
+of variables and functions to fulfill the optional requirements. These
+implementations are all weakly defined; they are provided to ease the porting
+effort. Each platform port can override them with its own implementation if the
+default implementation is inadequate.
+
+Platform ports that want to be aligned with standard ARM platforms (for example
+FVP and Juno) may also use `include/plat/arm/common/plat\_arm.h`_ and the
+corresponding source files in ``plat/arm/common/``. These provide standard
+implementations for some of the required platform porting functions. However,
+using these functions requires the platform port to implement additional
+ARM standard platform porting functions. These additional functions are not
+documented here.
+
+Some modifications are common to all Boot Loader (BL) stages. Section 2
+discusses these in detail. The subsequent sections discuss the remaining
+modifications for each BL stage in detail.
+
+This document should be read in conjunction with the ARM Trusted Firmware
+`User Guide`_.
+
+Common modifications
+--------------------
+
+This section covers the modifications that should be made by the platform for
+each BL stage to correctly port the firmware stack. They are categorized as
+either mandatory or optional.
+
+Common mandatory modifications
+------------------------------
+
+A platform port must enable the Memory Management Unit (MMU) as well as the
+instruction and data caches for each BL stage. Setting up the translation
+tables is the responsibility of the platform port because memory maps differ
+across platforms. A memory translation library (see ``lib/xlat_tables/``) is
+provided to help in this setup.
+
+Note that although this library supports non-identity mappings, this is intended
+only for re-mapping peripheral physical addresses and allows platforms with high
+I/O addresses to reduce their virtual address space. All other addresses
+corresponding to code and data must currently use an identity mapping.
+
+Also, the only translation granule size supported in Trusted Firmware is 4KB, as
+various parts of the code assume that is the case. It is not possible to switch
+to 16 KB or 64 KB granule sizes at the moment.
+
+In ARM standard platforms, each BL stage configures the MMU in the
+platform-specific architecture setup function, ``blX_plat_arch_setup()``, and uses
+an identity mapping for all addresses.
+
+If the build option ``USE_COHERENT_MEM`` is enabled, each platform can allocate a
+block of identity mapped secure memory with Device-nGnRE attributes aligned to a
+page boundary (4K) for each BL stage. All sections which allocate coherent
+memory are grouped under ``coherent_ram``. For example, bakery locks are placed
+in a section identified by the name ``bakery_lock`` inside ``coherent_ram`` so
+that it is possible for the firmware to place variables in it using the
+following C code directive:
+
+::
+
+    __section("bakery_lock")
+
+Or alternatively the following assembler code directive:
+
+::
+
+    .section bakery_lock
+
+The ``coherent_ram`` section is a sum of all sections like ``bakery_lock`` which are
+used to allocate any data structures that are accessed both when a CPU is
+executing with its MMU and caches enabled, and when it's running with its MMU
+and caches disabled. Examples are given below.
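+
+As a minimal sketch (assuming the Trusted Firmware ``bakery_lock_t`` type and
+the ``__section`` helper macro shown above; the variable name is hypothetical),
+a lock could be placed in this section as follows:
+
+::
+
+    #include <bakery_lock.h>
+
+    /* Sketch: place a platform bakery lock in the "bakery_lock" linker
+     * section so it ends up in the coherent memory region and is usable
+     * with the MMU and caches either on or off. */
+    bakery_lock_t plat_console_lock __section("bakery_lock");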
+
+The following variables, functions and constants must be defined by the platform
+for the firmware to work correctly.
+
+File : platform\_def.h [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each platform must ensure that a header file of this name is in the system
+include path with the following constants defined. This may require updating the
+list of ``PLAT_INCLUDES`` in the ``platform.mk`` file. In the ARM development
+platforms, this file is found in ``plat/arm/board/<plat_name>/include/``.
+
+Platform ports may optionally use the file `include/plat/common/common\_def.h`_,
+which provides typical values for some of the constants below. These values are
+likely to be suitable for all platform ports.
+
+Platform ports that want to be aligned with standard ARM platforms (for example
+FVP and Juno) may also use `include/plat/arm/common/arm\_def.h`_, which provides
+standard values for some of the constants below. However, this requires the
+platform port to define additional platform porting constants in
+``platform_def.h``. These additional constants are not documented here.
+
+-  **#define : PLATFORM\_LINKER\_FORMAT**
+
+   Defines the linker format used by the platform, for example
+   ``elf64-littleaarch64``.
+
+-  **#define : PLATFORM\_LINKER\_ARCH**
+
+   Defines the processor architecture for the linker by the platform, for
+   example ``aarch64``.
+
+-  **#define : PLATFORM\_STACK\_SIZE**
+
+   Defines the normal stack memory available to each CPU. This constant is used
+   by `plat/common/aarch64/platform\_mp\_stack.S`_ and
+   `plat/common/aarch64/platform\_up\_stack.S`_.
+
+-  **#define : CACHE\_WRITEBACK\_GRANULE**
+
+   Defines the size in bytes of the largest cache line across all the cache
+   levels in the platform.
+
+-  **#define : FIRMWARE\_WELCOME\_STR**
+
+   Defines the character string printed by BL1 upon entry into the ``bl1_main()``
+   function.
+
+-  **#define : PLATFORM\_CORE\_COUNT**
+
+   Defines the total number of CPUs implemented by the platform across all
+   clusters in the system.
+
+-  **#define : PLAT\_NUM\_PWR\_DOMAINS**
+
+   Defines the total number of nodes in the power domain topology
+   tree at all the power domain levels used by the platform.
+   This macro is used by the PSCI implementation to allocate
+   data structures to represent power domain topology.
+
+-  **#define : PLAT\_MAX\_PWR\_LVL**
+
+   Defines the maximum power domain level that the power management operations
+   should apply to. Often, but not always, the power domain level
+   corresponds to the affinity level. This macro allows the PSCI implementation
+   to know the highest power domain level that it should consider for power
+   management operations in the system that the platform implements. For
+   example, the Base AEM FVP implements two clusters with a configurable
+   number of CPUs and it reports the maximum power domain level as 1.
+
+-  **#define : PLAT\_MAX\_OFF\_STATE**
+
+   Defines the local power state corresponding to the deepest power down
+   possible at every power domain level in the platform. The local power
+   states for each level may be sparsely allocated between 0 and this value
+   with 0 being reserved for the RUN state. The PSCI implementation uses this
+   value to initialize the local power states of the power domain nodes and
+   to specify the requested power state for a PSCI\_CPU\_OFF call.
+
+-  **#define : PLAT\_MAX\_RET\_STATE**
+
+   Defines the local power state corresponding to the deepest retention state
+   possible at every power domain level in the platform. This macro should be
+   a value less than PLAT\_MAX\_OFF\_STATE and greater than 0. It is used by the
+   PSCI implementation to distinguish between retention and power down local
+   power states within PSCI\_CPU\_SUSPEND call.
+
+-  **#define : PLAT\_MAX\_PWR\_LVL\_STATES**
+
+   Defines the maximum number of local power states per power domain level
+   that the platform supports. The default value of this macro is 2 since
+   most platforms just support a maximum of two local power states at each
+   power domain level (power-down and retention). If the platform needs to
+   account for more local power states, then it must redefine this macro.
+
+   Currently, this macro is used by the Generic PSCI implementation to size
+   the array used for PSCI\_STAT\_COUNT/RESIDENCY accounting.
+
+-  **#define : BL1\_RO\_BASE**
+
+   Defines the base address in secure ROM where BL1 originally lives. Must be
+   aligned on a page-size boundary.
+
+-  **#define : BL1\_RO\_LIMIT**
+
+   Defines the maximum address in secure ROM that BL1's actual content (i.e.
+   excluding any data section allocated at runtime) can occupy.
+
+-  **#define : BL1\_RW\_BASE**
+
+   Defines the base address in secure RAM where BL1's read-write data will live
+   at runtime. Must be aligned on a page-size boundary.
+
+-  **#define : BL1\_RW\_LIMIT**
+
+   Defines the maximum address in secure RAM that BL1's read-write data can
+   occupy at runtime.
+
+-  **#define : BL2\_BASE**
+
+   Defines the base address in secure RAM where BL1 loads the BL2 binary image.
+   Must be aligned on a page-size boundary.
+
+-  **#define : BL2\_LIMIT**
+
+   Defines the maximum address in secure RAM that the BL2 image can occupy.
+
+-  **#define : BL31\_BASE**
+
+   Defines the base address in secure RAM where BL2 loads the BL31 binary
+   image. Must be aligned on a page-size boundary.
+
+-  **#define : BL31\_LIMIT**
+
+   Defines the maximum address in secure RAM that the BL31 image can occupy.
+
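+As an illustration only (all values below are hypothetical and must be derived
+from the platform's actual memory map and toolchain), a ``platform_def.h``
+might contain entries such as:
+
+::
+
+    /* Illustrative values only - not taken from any real platform. */
+    #define PLATFORM_LINKER_FORMAT      "elf64-littleaarch64"
+    #define PLATFORM_LINKER_ARCH        aarch64
+    #define PLATFORM_STACK_SIZE         0x800
+    #define CACHE_WRITEBACK_GRANULE     64
+    #define PLATFORM_CORE_COUNT         8
+
+    #define BL31_BASE                   0x04023000
+    #define BL31_LIMIT                  0x04040000
+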
+For every image, the platform must define individual identifiers that will be
+used by BL1 or BL2 to load the corresponding image into memory from non-volatile
+storage. For the sake of performance, integer numbers will be used as
+identifiers. The platform will use those identifiers to return the relevant
+information about the image to be loaded (file handler, load address,
+authentication information, etc.). The following image identifiers are
+mandatory:
+
+-  **#define : BL2\_IMAGE\_ID**
+
+   BL2 image identifier, used by BL1 to load BL2.
+
+-  **#define : BL31\_IMAGE\_ID**
+
+   BL31 image identifier, used by BL2 to load BL31.
+
+-  **#define : BL33\_IMAGE\_ID**
+
+   BL33 image identifier, used by BL2 to load BL33.
+
+If Trusted Board Boot is enabled, the following certificate identifiers must
+also be defined:
+
+-  **#define : TRUSTED\_BOOT\_FW\_CERT\_ID**
+
+   BL2 content certificate identifier, used by BL1 to load the BL2 content
+   certificate.
+
+-  **#define : TRUSTED\_KEY\_CERT\_ID**
+
+   Trusted key certificate identifier, used by BL2 to load the trusted key
+   certificate.
+
+-  **#define : SOC\_FW\_KEY\_CERT\_ID**
+
+   BL31 key certificate identifier, used by BL2 to load the BL31 key
+   certificate.
+
+-  **#define : SOC\_FW\_CONTENT\_CERT\_ID**
+
+   BL31 content certificate identifier, used by BL2 to load the BL31 content
+   certificate.
+
+-  **#define : NON\_TRUSTED\_FW\_KEY\_CERT\_ID**
+
+   BL33 key certificate identifier, used by BL2 to load the BL33 key
+   certificate.
+
+-  **#define : NON\_TRUSTED\_FW\_CONTENT\_CERT\_ID**
+
+   BL33 content certificate identifier, used by BL2 to load the BL33 content
+   certificate.
+
+-  **#define : FWU\_CERT\_ID**
+
+   Firmware Update (FWU) certificate identifier, used by NS\_BL1U to load the
+   FWU content certificate.
+
+-  **#define : PLAT\_CRYPTOCELL\_BASE**
+
+   This defines the base address of ARM® TrustZone® CryptoCell and must be
+   defined if the CryptoCell crypto driver is used for Trusted Board Boot. For
+   capable ARM platforms, this driver is used if ``ARM_CRYPTOCELL_INTEG`` is
+   set.
+
+If the AP Firmware Updater Configuration image, BL2U, is used, the following
+must also be defined:
+
+-  **#define : BL2U\_BASE**
+
+   Defines the base address in secure memory where BL1 copies the BL2U binary
+   image. Must be aligned on a page-size boundary.
+
+-  **#define : BL2U\_LIMIT**
+
+   Defines the maximum address in secure memory that the BL2U image can occupy.
+
+-  **#define : BL2U\_IMAGE\_ID**
+
+   BL2U image identifier, used by BL1 to fetch an image descriptor
+   corresponding to BL2U.
+
+If the SCP Firmware Update Configuration Image, SCP\_BL2U is used, the following
+must also be defined:
+
+-  **#define : SCP\_BL2U\_IMAGE\_ID**
+
+   SCP\_BL2U image identifier, used by BL1 to fetch an image descriptor
+   corresponding to SCP\_BL2U.
+   NOTE: TF does not provide source code for this image.
+
+If the Non-Secure Firmware Updater ROM, NS\_BL1U is used, the following must
+also be defined:
+
+-  **#define : NS\_BL1U\_BASE**
+
+   Defines the base address in non-secure ROM where NS\_BL1U executes.
+   Must be aligned on a page-size boundary.
+   NOTE: TF does not provide source code for this image.
+
+-  **#define : NS\_BL1U\_IMAGE\_ID**
+
+   NS\_BL1U image identifier, used by BL1 to fetch an image descriptor
+   corresponding to NS\_BL1U.
+
+If the Non-Secure Firmware Updater, NS\_BL2U is used, the following must also
+be defined:
+
+-  **#define : NS\_BL2U\_BASE**
+
+   Defines the base address in non-secure memory where NS\_BL2U executes.
+   Must be aligned on a page-size boundary.
+   NOTE: TF does not provide source code for this image.
+
+-  **#define : NS\_BL2U\_IMAGE\_ID**
+
+   NS\_BL2U image identifier, used by BL1 to fetch an image descriptor
+   corresponding to NS\_BL2U.
+
+For the Firmware Update capability of Trusted Board Boot, the following
+macros may also be defined:
+
+-  **#define : PLAT\_FWU\_MAX\_SIMULTANEOUS\_IMAGES**
+
+   Total number of images that can be loaded simultaneously. If the platform
+   doesn't specify any value, it defaults to 10.
+
+If a SCP\_BL2 image is supported by the platform, the following constants must
+also be defined:
+
+-  **#define : SCP\_BL2\_IMAGE\_ID**
+
+   SCP\_BL2 image identifier, used by BL2 to load SCP\_BL2 into secure memory
+   from platform storage before being transferred to the SCP.
+
+-  **#define : SCP\_FW\_KEY\_CERT\_ID**
+
+   SCP\_BL2 key certificate identifier, used by BL2 to load the SCP\_BL2 key
+   certificate (mandatory when Trusted Board Boot is enabled).
+
+-  **#define : SCP\_FW\_CONTENT\_CERT\_ID**
+
+   SCP\_BL2 content certificate identifier, used by BL2 to load the SCP\_BL2
+   content certificate (mandatory when Trusted Board Boot is enabled).
+
+If a BL32 image is supported by the platform, the following constants must
+also be defined:
+
+-  **#define : BL32\_IMAGE\_ID**
+
+   BL32 image identifier, used by BL2 to load BL32.
+
+-  **#define : TRUSTED\_OS\_FW\_KEY\_CERT\_ID**
+
+   BL32 key certificate identifier, used by BL2 to load the BL32 key
+   certificate (mandatory when Trusted Board Boot is enabled).
+
+-  **#define : TRUSTED\_OS\_FW\_CONTENT\_CERT\_ID**
+
+   BL32 content certificate identifier, used by BL2 to load the BL32 content
+   certificate (mandatory when Trusted Board Boot is enabled).
+
+-  **#define : BL32\_BASE**
+
+   Defines the base address in secure memory where BL2 loads the BL32 binary
+   image. Must be aligned on a page-size boundary.
+
+-  **#define : BL32\_LIMIT**
+
+   Defines the maximum address that the BL32 image can occupy.
+
+If the Test Secure-EL1 Payload (TSP) instantiation of BL32 is supported by the
+platform, the following constants must also be defined:
+
+-  **#define : TSP\_SEC\_MEM\_BASE**
+
+   Defines the base address of the secure memory used by the TSP image on the
+   platform. This must be at the same address or below ``BL32_BASE``.
+
+-  **#define : TSP\_SEC\_MEM\_SIZE**
+
+   Defines the size of the secure memory used by the BL32 image on the
+   platform. ``TSP_SEC_MEM_BASE`` and ``TSP_SEC_MEM_SIZE`` must fully accommodate
+   the memory required by the BL32 image, defined by ``BL32_BASE`` and
+   ``BL32_LIMIT``.
+
+-  **#define : TSP\_IRQ\_SEC\_PHY\_TIMER**
+
+   Defines the ID of the secure physical generic timer interrupt used by the
+   TSP's interrupt handling code.
+
+If the platform port uses the translation table library code, the following
+constants must also be defined:
+
+-  **#define : PLAT\_XLAT\_TABLES\_DYNAMIC**
+
+   Optional flag that can be set per-image to enable the dynamic allocation of
+   regions even when the MMU is enabled. If not defined, only the static
+   functionality will be available. If defined and set to 1, the dynamic
+   functionality will also be included.
+
+-  **#define : MAX\_XLAT\_TABLES**
+
+   Defines the maximum number of translation tables that are allocated by the
+   translation table library code. To minimize the amount of runtime memory
+   used, choose the smallest value needed to map the required virtual addresses
+   for each BL stage. If ``PLAT_XLAT_TABLES_DYNAMIC`` flag is enabled for a BL
+   image, ``MAX_XLAT_TABLES`` must be defined to accommodate the dynamic regions
+   as well.
+
+-  **#define : MAX\_MMAP\_REGIONS**
+
+   Defines the maximum number of regions that are allocated by the translation
+   table library code. A region consists of physical base address, virtual base
+   address, size and attributes (Device/Memory, RO/RW, Secure/Non-Secure), as
+   defined in the ``mmap_region_t`` structure. The platform defines the regions
+   that should be mapped. Then, the translation table library will create the
+   corresponding tables and descriptors at runtime. To minimize the amount of
+   runtime memory used, choose the smallest value needed to register the
+   required regions for each BL stage. If ``PLAT_XLAT_TABLES_DYNAMIC`` flag is
+   enabled for a BL image, ``MAX_MMAP_REGIONS`` must be defined to accommodate
+   the dynamic regions as well.
+
+-  **#define : ADDR\_SPACE\_SIZE**
+
+   Defines the total size of the address space in bytes. For example, for a 32
+   bit address space, this value should be ``(1ull << 32)``. This definition is
+   now deprecated, platforms should use ``PLAT_PHY_ADDR_SPACE_SIZE`` and
+   ``PLAT_VIRT_ADDR_SPACE_SIZE`` instead.
+
+-  **#define : PLAT\_VIRT\_ADDR\_SPACE\_SIZE**
+
+   Defines the total size of the virtual address space in bytes. For example,
+   for a 32 bit virtual address space, this value should be ``(1ull << 32)``.
+
+-  **#define : PLAT\_PHY\_ADDR\_SPACE\_SIZE**
+
+   Defines the total size of the physical address space in bytes. For example,
+   for a 32 bit physical address space, this value should be ``(1ull << 32)``.
+
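+As a brief, hedged sketch of how these constants are typically consumed (the
+region addresses, sizes and attributes below are hypothetical), a platform
+might describe its memory map to the translation table library like this:
+
+::
+
+    /* Sketch only: identity-mapped regions registered with the xlat tables
+     * library; the header name and addresses are illustrative. */
+    #include <xlat_tables.h>
+
+    static const mmap_region_t plat_mmap[] = {
+        MAP_REGION_FLAT(0x00000000, 0x04000000,
+                        MT_MEMORY | MT_RW | MT_SECURE),  /* Trusted RAM */
+        MAP_REGION_FLAT(0x1c000000, 0x00100000,
+                        MT_DEVICE | MT_RW | MT_SECURE),  /* Peripherals */
+        {0}  /* List terminator */
+    };
+
+``MAX_MMAP_REGIONS`` must be large enough to hold every entry in such a table,
+and ``MAX_XLAT_TABLES`` large enough for the tables the library creates from it.
+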
+If the platform port uses the IO storage framework, the following constants
+must also be defined:
+
+-  **#define : MAX\_IO\_DEVICES**
+
+   Defines the maximum number of registered IO devices. Attempting to register
+   more devices than this value using ``io_register_device()`` will fail with
+   -ENOMEM.
+
+-  **#define : MAX\_IO\_HANDLES**
+
+   Defines the maximum number of open IO handles. Attempting to open more IO
+   entities than this value using ``io_open()`` will fail with -ENOMEM.
+
+-  **#define : MAX\_IO\_BLOCK\_DEVICES**
+
+   Defines the maximum number of registered IO block devices. Attempting to
+   register more devices than this value using ``io_dev_open()`` will fail
+   with -ENOMEM. MAX\_IO\_BLOCK\_DEVICES should be less than MAX\_IO\_DEVICES.
+   With this macro, multiple block devices could be supported at the same
+   time.
+
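+For example (the values below are only a hedged illustration and depend on how
+many IO drivers and images the port actually uses):
+
+::
+
+    /* Illustrative sizing for a port that loads images from a FIP placed
+     * on top of a single backing device. */
+    #define MAX_IO_DEVICES      3
+    #define MAX_IO_HANDLES      4
+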
+If the platform needs to allocate data within the per-cpu data framework in
+BL31, it should define the following macro. Currently this is only required if
+the platform decides not to use the coherent memory section by undefining the
+``USE_COHERENT_MEM`` build flag. In this case, the framework allocates the
+required memory within the per-cpu data to minimize wastage.
+
+-  **#define : PLAT\_PCPU\_DATA\_SIZE**
+
+   Defines the memory (in bytes) to be reserved within the per-cpu data
+   structure for use by the platform layer.
+
+The following constants are optional. They should be defined when the platform
+memory layout implies some image overlaying like in ARM standard platforms.
+
+-  **#define : BL31\_PROGBITS\_LIMIT**
+
+   Defines the maximum address in secure RAM that the BL31's progbits sections
+   can occupy.
+
+-  **#define : TSP\_PROGBITS\_LIMIT**
+
+   Defines the maximum address that the TSP's progbits sections can occupy.
+
+If the platform port uses the PL061 GPIO driver, the following constant may
+optionally be defined:
+
+-  **PLAT\_PL061\_MAX\_GPIOS**
+   Maximum number of GPIOs required by the platform. This allows control over
+   how much memory is allocated for PL061 GPIO controllers. The default value
+   is 32.
+   For example, define the build flag in ``platform.mk``:
+   $(eval $(call add\_define,PLAT\_PL061\_MAX\_GPIOS))
+
+If the platform port uses the partition driver, the following constant may
+optionally be defined:
+
+-  **PLAT\_PARTITION\_MAX\_ENTRIES**
+   Maximum number of partition entries required by the platform. This allows
+   control over how much memory is allocated for partition entries. The default
+   value is 128.
+   For example, define the build flag in ``platform.mk``:
+   PLAT\_PARTITION\_MAX\_ENTRIES := 12
+   $(eval $(call add\_define,PLAT\_PARTITION\_MAX\_ENTRIES))
+
+The following constant is optional. It should be defined to override the default
+behaviour of the ``assert()`` function (for example, to save memory).
+
+-  **PLAT\_LOG\_LEVEL\_ASSERT**
+   If ``PLAT_LOG_LEVEL_ASSERT`` is higher than or equal to ``LOG_LEVEL_VERBOSE``,
+   ``assert()`` prints the name of the file, the line number and the asserted
+   expression. Else if it is higher than ``LOG_LEVEL_INFO``, it prints the file
+   name and the line number. Else if it is lower than ``LOG_LEVEL_INFO``, it
+   doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
+   defined, it defaults to ``LOG_LEVEL``.
+
+File : plat\_macros.S [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each platform must ensure a file of this name is in the system include path with
+the following macro defined. In the ARM development platforms, this file is
+found in ``plat/arm/board/<plat_name>/include/plat_macros.S``.
+
+-  **Macro : plat\_crash\_print\_regs**
+
+   This macro allows the crash reporting routine to print relevant platform
+   registers in case of an unhandled exception in BL31. This aids in debugging
+   and this macro can be defined to be empty in case register reporting is not
+   desired.
+
+   For instance, GIC or interconnect registers may be helpful for
+   troubleshooting.
+
+Handling Reset
+--------------
+
+BL1 by default implements the reset vector where execution starts from a cold
+or warm boot. BL31 can be optionally set as a reset vector using the
+``RESET_TO_BL31`` make variable.
+
+For each CPU, the reset vector code is responsible for the following tasks:
+
+#. Distinguishing between a cold boot and a warm boot.
+
+#. In the case of a cold boot and the CPU being a secondary CPU, ensuring that
+   the CPU is placed in a platform-specific state until the primary CPU
+   performs the necessary steps to remove it from this state.
+
+#. In the case of a warm boot, ensuring that the CPU jumps to a platform-
+   specific address in the BL31 image in the same processor mode as it was
+   when released from reset.
+
+The following functions need to be implemented by the platform port to enable
+reset vector code to perform the above tasks.
+
+Function : plat\_get\_my\_entrypoint() [mandatory when PROGRAMMABLE\_RESET\_ADDRESS == 0]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uintptr_t
+
+This function is called with the MMU and caches disabled
+(``SCTLR_EL3.M`` = 0 and ``SCTLR_EL3.C`` = 0). The function is responsible for
+distinguishing between a warm and cold reset for the current CPU using
+platform-specific means. If it's a warm reset, then it returns the warm
+reset entry point provided to ``plat_setup_psci_ops()`` during
+BL31 initialization. If it's a cold reset then this function must return zero.
+
+This function does not follow the Procedure Call Standard used by the
+Application Binary Interface for the ARM 64-bit architecture. The caller should
+not assume that callee saved registers are preserved across a call to this
+function.
+
+This function fulfills requirements 1 and 3 listed above.
+
+Note that for platforms that support programming the reset address, it is
+expected that a CPU will start executing code directly at the right address,
+both on a cold and warm reset. In this case, there is no need to identify the
+type of reset nor to query the warm reset entrypoint. Therefore, implementing
+this function is not required on such platforms.
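+
+A purely conceptual sketch is shown below. Real ports implement this function
+in assembly, since it runs before the C runtime is set up, and the mailbox
+location ``PLAT_TRUSTED_MAILBOX_BASE`` is a hypothetical platform constant:
+
+::
+
+    #include <mmio.h>
+
+    /* Conceptual sketch only: a platform mailbox holds the warm boot
+     * entrypoint and reads as zero on a cold reset. */
+    uintptr_t plat_get_my_entrypoint(void)
+    {
+        return (uintptr_t)mmio_read_64(PLAT_TRUSTED_MAILBOX_BASE);
+    }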
+
+Function : plat\_secondary\_cold\_boot\_setup() [mandatory when COLD\_BOOT\_SINGLE\_CPU == 0]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+
+This function is called with the MMU and data caches disabled. It is responsible
+for placing the executing secondary CPU in a platform-specific state until the
+primary CPU performs the necessary actions to bring it out of that state and
+allow entry into the OS. This function must not return.
+
+In the ARM FVP port, when using the normal boot flow, each secondary CPU powers
+itself off. The primary CPU is responsible for powering up the secondary CPUs
+when normal world software requires them. When booting an EL3 payload instead,
+they stay powered on and are put in a holding pen until their mailbox gets
+populated.
+
+This function fulfills requirement 2 above.
+
+Note that for platforms that can't release secondary CPUs out of reset, only the
+primary CPU will execute the cold boot code. Therefore, implementing this
+function is not required on such platforms.
+
+Function : plat\_is\_my\_cpu\_primary() [mandatory when COLD\_BOOT\_SINGLE\_CPU == 0]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : unsigned int
+
+This function identifies whether the current CPU is the primary CPU or a
+secondary CPU. A return value of zero indicates that the CPU is not the
+primary CPU, while a non-zero return value indicates that the CPU is the
+primary CPU.
+
+Note that for platforms that can't release secondary CPUs out of reset, only the
+primary CPU will execute the cold boot code. Therefore, there is no need to
+distinguish between primary and secondary CPUs and implementing this function is
+not required.
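+
+A conceptual sketch follows (real ports implement this in assembly; the
+``PLAT_PRIMARY_CPU_MPIDR`` constant is a hypothetical platform choice):
+
+::
+
+    #include <arch.h>
+    #include <arch_helpers.h>
+
+    /* Conceptual sketch only: a CPU is primary if its affinity fields match
+     * the platform's chosen primary MPIDR. */
+    unsigned int plat_is_my_cpu_primary(void)
+    {
+        u_register_t mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+        return (mpidr == PLAT_PRIMARY_CPU_MPIDR) ? 1U : 0U;
+    }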
+
+Function : platform\_mem\_init() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function is called before any access to data is made by the firmware, in
+order to carry out any essential memory initialization.
+
+Function: plat\_get\_rotpk\_info()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void *, void **, unsigned int *, unsigned int *
+    Return   : int
+
+This function is mandatory when Trusted Board Boot is enabled. It returns a
+pointer to the ROTPK stored in the platform (or a hash of it) and its length.
+The ROTPK must be encoded in DER format according to the following ASN.1
+structure:
+
+::
+
+    AlgorithmIdentifier  ::=  SEQUENCE  {
+        algorithm         OBJECT IDENTIFIER,
+        parameters        ANY DEFINED BY algorithm OPTIONAL
+    }
+
+    SubjectPublicKeyInfo  ::=  SEQUENCE  {
+        algorithm         AlgorithmIdentifier,
+        subjectPublicKey  BIT STRING
+    }
+
+In case the function returns a hash of the key:
+
+::
+
+    DigestInfo ::= SEQUENCE {
+        digestAlgorithm   AlgorithmIdentifier,
+        digest            OCTET STRING
+    }
+
+The function returns 0 on success. Any other value is treated as an error by the
+Trusted Board Boot. The function also reports extra information related
+to the ROTPK in the flags parameter:
+
+::
+
+    ROTPK_IS_HASH      : Indicates that the ROTPK returned by the platform is a
+                         hash.
+    ROTPK_NOT_DEPLOYED : This allows the platform to skip certificate ROTPK
+                         verification while the platform ROTPK is not deployed.
+                         When this flag is set, the function does not need to
+                         return a platform ROTPK, and the authentication
+                         framework uses the ROTPK in the certificate without
+                         verifying it against the platform value. This flag
+                         must not be used in a deployed production environment.
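+
+A minimal hedged sketch of one possible implementation is given below. The
+``rotpk_hash_der`` array is a hypothetical DER-encoded hash embedded in the
+firmware; a real port may instead read the key or its hash from fuses or OTP:
+
+::
+
+    #include <platform.h>
+
+    /* Sketch: expose a baked-in hash of the ROTPK to the authentication
+     * framework. */
+    int plat_get_rotpk_info(void *cookie, void **key_ptr,
+                            unsigned int *key_len, unsigned int *flags)
+    {
+        (void)cookie;
+
+        *key_ptr = (void *)rotpk_hash_der;            /* hypothetical blob */
+        *key_len = (unsigned int)sizeof(rotpk_hash_der);
+        *flags = ROTPK_IS_HASH;
+
+        return 0;
+    }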
+
+Function: plat\_get\_nv\_ctr()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void *, unsigned int *
+    Return   : int
+
+This function is mandatory when Trusted Board Boot is enabled. It returns the
+non-volatile counter value stored in the platform in the second argument. The
+cookie in the first argument may be used to select the counter in case the
+platform provides more than one (for example, on platforms that use the default
+TBBR CoT, the cookie will correspond to the OID values defined in
+TRUSTED\_FW\_NVCOUNTER\_OID or NON\_TRUSTED\_FW\_NVCOUNTER\_OID).
+
+The function returns 0 on success. Any other value means the counter value could
+not be retrieved from the platform.
+
+Function: plat\_set\_nv\_ctr()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void *, unsigned int
+    Return   : int
+
+This function is mandatory when Trusted Board Boot is enabled. It sets a new
+counter value in the platform. The cookie in the first argument may be used to
+select the counter (as explained in plat\_get\_nv\_ctr()). The second argument is
+the updated counter value to be written to the NV counter.
+
+The function returns 0 on success. Any other value means the counter value could
+not be updated.
+
+Function: plat\_set\_nv\_ctr2()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void *, const auth_img_desc_t *, unsigned int
+    Return   : int
+
+This function is optional when Trusted Board Boot is enabled. If this
+interface is defined, then ``plat_set_nv_ctr()`` need not be defined. The
+first argument passed is a cookie and is typically used to
+differentiate between a Non Trusted NV Counter and a Trusted NV
+Counter. The second argument is a pointer to an authentication image
+descriptor and may be used to decide if the counter is allowed to be
+updated or not. The third argument is the updated counter value to
+be written to the NV counter.
+
+The function returns 0 on success. Any other value means the counter value
+either could not be updated or the authentication image descriptor indicates
+that it is not allowed to be updated.
+
+Common mandatory function modifications
+---------------------------------------
+
+The following functions are mandatory and must be implemented by the platform
+port.
+
+Function : plat\_my\_core\_pos()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : unsigned int
+
+This function returns the index of the calling CPU which is used as a
+CPU-specific linear index into blocks of memory (for example while allocating
+per-CPU stacks). This function will be invoked very early in the
+initialization sequence, which mandates that this function should be
+implemented in assembly and should not rely on the availability of a C
+runtime environment. This function can clobber x0 - x8 and must preserve
+x9 - x29.
+
+This function plays a crucial role in the power domain topology framework in
+PSCI and details of this can be found in `Power Domain Topology Design`_.
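+
+The computation it performs is conceptually similar to the C shown below,
+although a real port writes the equivalent in assembly. The constant
+``PLAT_MAX_CPUS_PER_CLUSTER`` is hypothetical:
+
+::
+
+    #include <arch.h>
+    #include <arch_helpers.h>
+
+    /* Conceptual sketch only: derive a linear index from the MPIDR. */
+    unsigned int plat_my_core_pos(void)
+    {
+        u_register_t mpidr = read_mpidr_el1();
+        unsigned int cluster = (unsigned int)((mpidr >> MPIDR_AFF1_SHIFT) &
+                                              MPIDR_AFFLVL_MASK);
+        unsigned int cpu = (unsigned int)(mpidr & MPIDR_AFFLVL_MASK);
+
+        return (cluster * PLAT_MAX_CPUS_PER_CLUSTER) + cpu;
+    }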
+
+Function : plat\_core\_pos\_by\_mpidr()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : u_register_t
+    Return   : int
+
+This function validates the ``MPIDR`` of a CPU and converts it to an index,
+which can be used as a CPU-specific linear index into blocks of memory. In
+case the ``MPIDR`` is invalid, this function returns -1. This function will only
+be invoked by BL31 after the power domain topology is initialized and can
+utilize the C runtime environment. For further details about how ARM Trusted
+Firmware represents the power domain topology and how this relates to the
+linear CPU index, please refer to `Power Domain Topology Design`_.
+
+Common optional modifications
+-----------------------------
+
+The following are helper functions implemented by the firmware that perform
+common platform-specific tasks. A platform may choose to override these
+definitions.
+
+Function : plat\_set\_my\_stack()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function sets the current stack pointer to the normal memory stack that
+has been allocated for the current CPU. For BL images that only require a
+stack for the primary CPU, the UP version of the function is used. The size
+of the stack allocated to each CPU is specified by the platform defined
+constant ``PLATFORM_STACK_SIZE``.
+
+Common implementations of this function for the UP and MP BL images are
+provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
+`plat/common/aarch64/platform\_mp\_stack.S`_.
+
+Function : plat\_get\_my\_stack()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uintptr_t
+
+This function returns the base address of the normal memory stack that
+has been allocated for the current CPU. For BL images that only require a
+stack for the primary CPU, the UP version of the function is used. The size
+of the stack allocated to each CPU is specified by the platform defined
+constant ``PLATFORM_STACK_SIZE``.
+
+Common implementations of this function for the UP and MP BL images are
+provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
+`plat/common/aarch64/platform\_mp\_stack.S`_.
+
+Function : plat\_report\_exception()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : void
+
+A platform may need to report various information about its status when an
+exception is taken, for example the current exception level, the CPU security
+state (secure/non-secure), the exception type, and so on. This function is
+called in the following circumstances:
+
+-  In BL1, whenever an exception is taken.
+-  In BL2, whenever an exception is taken.
+
+The default implementation doesn't do anything, to avoid making assumptions
+about the way the platform displays its status information.
+
+For AArch64, this function receives the exception type as its argument.
+Possible values for exceptions types are listed in the
+`include/common/bl\_common.h`_ header file. Note that these constants are not
+related to any architectural exception code; they are just an ARM Trusted
+Firmware convention.
+
+For AArch32, this function receives the exception mode as its argument.
+Possible values for exception modes are listed in the
+`include/lib/aarch32/arch.h`_ header file.
+
+Function : plat\_reset\_handler()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+A platform may need to do additional initialization after reset. This function
+allows the platform to perform platform-specific initializations. Platform
+specific errata workarounds could also be implemented here. The API should
+preserve the values of callee-saved registers x19 to x29.
+
+The default implementation doesn't do anything. If a platform needs to override
+the default implementation, refer to the `Firmware Design`_ for general
+guidelines.
+
+Function : plat\_disable\_acp()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This API allows a platform to disable the Accelerator Coherency Port (if
+present) during a cluster power down sequence. The default weak implementation
+doesn't do anything. Since this API is called during the power down sequence,
+it has restrictions on stack usage and it can use the registers x0 - x17 as
+scratch registers. It should preserve the value in the x18 register as it is
+used by the caller to store the return address.
+
+Function : plat\_error\_handler()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Return   : void
+
+This API is called when the generic code encounters an error situation from
+which it cannot continue. It allows the platform to perform error reporting or
+recovery actions (for example, reset the system). This function must not return.
+
+The parameter indicates the type of error using standard codes from ``errno.h``.
+Possible errors reported by the generic code are:
+
+-  ``-EAUTH``: a certificate or image could not be authenticated (when Trusted
+   Board Boot is enabled)
+-  ``-ENOENT``: the requested image or certificate could not be found or an IO
+   error was detected
+-  ``-ENOMEM``: resources exhausted. Trusted Firmware does not use dynamic
+   memory, so this error is usually an indication of an incorrect array size
+
+The default implementation simply spins.
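+
+A hedged sketch of a platform override that reports the error and then parks
+the CPU might look like this:
+
+::
+
+    #include <arch_helpers.h>
+    #include <debug.h>
+
+    /* Sketch: log the error code and spin; this function must not return. */
+    void plat_error_handler(int err)
+    {
+        ERROR("Boot failed with error %d\n", err);
+
+        while (1)
+            wfi();
+    }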
+
+Function : plat\_panic\_handler()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This API is called when the generic code encounters an unexpected error
+situation from which it cannot recover. This function must not return,
+and must be implemented in assembly because it may be called before the C
+environment is initialized.
+
+Note: The address from where it was called is stored in x30 (Link Register).
+The default implementation simply spins.
+
+Function : plat\_get\_bl\_image\_load\_info()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : bl_load_info_t *
+
+This function returns a pointer to the list of images that the platform has
+populated to load. This function is currently invoked in BL2 to load the
+BL3xx images, when LOAD\_IMAGE\_V2 is enabled.
+
+Function : plat\_get\_next\_bl\_params()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : bl_params_t *
+
+This function returns a pointer to the shared memory that the platform has
+kept aside to pass trusted firmware related information that the next BL image
+needs. This function is currently invoked in BL2 to pass this information to
+the next BL image, when LOAD\_IMAGE\_V2 is enabled.
+
+Function : plat\_get\_stack\_protector\_canary()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : u_register_t
+
+This function returns a random value that is used to initialize the canary used
+when the stack protector is enabled with ENABLE\_STACK\_PROTECTOR. A predictable
+value will weaken the protection as the attacker could easily write the right
+value as part of the attack most of the time. Therefore, it should return a
+true random number.
+
+Note: For the protection to be effective, the global data need to be placed at
+a lower address than the stack bases. Failure to do so would allow an attacker
+to overwrite the canary as part of the stack buffer overflow attack.
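+
+A hedged sketch is shown below; ``plat_read_trng()`` is a hypothetical helper
+that reads a hardware true random number generator:
+
+::
+
+    /* Hypothetical platform TRNG helper. */
+    u_register_t plat_read_trng(void);
+
+    /* Sketch: seed the stack protector canary from a hardware TRNG. */
+    u_register_t plat_get_stack_protector_canary(void)
+    {
+        return plat_read_trng();
+    }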
+
+Function : plat\_flush\_next\_bl\_params()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function flushes to main memory all the image params that are passed to
+the next image. This function is currently invoked in BL2 to flush this
+information to the next BL image, when LOAD\_IMAGE\_V2 is enabled.
+
+Function : plat\_log\_get\_prefix()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : const char *
+
+This function defines the prefix string corresponding to the ``log_level`` to be
+prepended to all the log output from ARM Trusted Firmware. The ``log_level``
+argument will correspond to one of the standard log levels defined in
+``debug.h``. The platform can override the common implementation to define a
+different prefix string for the log output. The implementation should be
+robust to future changes that increase the number of log levels.
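+
+A hedged sketch of such an override could map levels to custom prefixes,
+falling back sensibly for any level it does not recognise:
+
+::
+
+    #include <debug.h>
+
+    /* Sketch: custom log prefixes; unknown or higher levels reuse the most
+     * verbose prefix. */
+    const char *plat_log_get_prefix(unsigned int log_level)
+    {
+        if (log_level >= LOG_LEVEL_VERBOSE)
+            return "[verbose] ";
+        else if (log_level >= LOG_LEVEL_INFO)
+            return "[info] ";
+        else
+            return "[error] ";
+    }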
+
+Modifications specific to a Boot Loader stage
+---------------------------------------------
+
+Boot Loader Stage 1 (BL1)
+-------------------------
+
+BL1 implements the reset vector where execution starts after a cold or
+warm boot. For each CPU, BL1 is responsible for the following tasks:
+
+#. Handling the reset as described in section 2.2
+
+#. In the case of a cold boot and the CPU being the primary CPU, ensuring that
+   only this CPU executes the remaining BL1 code, including loading and passing
+   control to the BL2 stage.
+
+#. Identifying and starting the Firmware Update process (if required).
+
+#. Loading the BL2 image from non-volatile storage into secure memory at the
+   address specified by the platform defined constant ``BL2_BASE``.
+
+#. Populating a ``meminfo`` structure with the following information in memory,
+   accessible by BL2 immediately upon entry.
+
+   ::
+
+       meminfo.total_base = Base address of secure RAM visible to BL2
+       meminfo.total_size = Size of secure RAM visible to BL2
+       meminfo.free_base  = Base address of secure RAM available for
+                            allocation to BL2
+       meminfo.free_size  = Size of secure RAM available for allocation to BL2
+
+   BL1 places this ``meminfo`` structure at the beginning of the free memory
+   available for its use. Since BL1 cannot allocate memory dynamically at the
+   moment, its free memory will be available for BL2's use as-is. However, this
+   means that BL2 must read the ``meminfo`` structure before it starts using its
+   free memory (this is discussed in Section 3.2).
+
+   In future releases of the ARM Trusted Firmware it will be possible for
+   the platform to decide where it wants to place the ``meminfo`` structure for
+   BL2.
+
+   BL1 implements the ``bl1_init_bl2_mem_layout()`` function to populate the
+   BL2 ``meminfo`` structure. The platform may override this implementation, for
+   example if the platform wants to restrict the amount of memory visible to
+   BL2. Details of how to do this are given below.
+
+The following functions need to be implemented by the platform port to enable
+BL1 to perform the above tasks.
+
+Function : bl1\_early\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU.
+
+On ARM standard platforms, this function:
+
+-  Enables a secure instance of SP805 to act as the Trusted Watchdog.
+
+-  Initializes a UART (PL011 console), which enables access to the ``printf``
+   family of functions in BL1.
+
+-  Enables issuing of snoop and DVM (Distributed Virtual Memory) requests to
+   the CCI slave interface corresponding to the cluster that includes the
+   primary CPU.
+
+Function : bl1\_plat\_arch\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function performs any platform-specific and architectural setup that the
+platform requires. Platform-specific setup might include configuration of
+memory controllers and the interconnect.
+
+In ARM standard platforms, this function enables the MMU.
+
+This function helps fulfill requirement 2 above.
+
+Function : bl1\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function executes with the MMU and data caches enabled. It is responsible
+for performing any remaining platform-specific setup that can occur after the
+MMU and data cache have been enabled.
+
+In ARM standard platforms, this function initializes the storage abstraction
+layer used to load the next bootloader image.
+
+This function helps fulfill requirement 4 above.
+
+Function : bl1\_plat\_sec\_mem\_layout() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : meminfo *
+
+This function should only be called on the cold boot path. It executes with the
+MMU and data caches enabled. The pointer returned by this function must point to
+a ``meminfo`` structure containing the extents and availability of secure RAM for
+the BL1 stage.
+
+::
+
+    meminfo.total_base = Base address of secure RAM visible to BL1
+    meminfo.total_size = Size of secure RAM visible to BL1
+    meminfo.free_base  = Base address of secure RAM available for allocation
+                         to BL1
+    meminfo.free_size  = Size of secure RAM available for allocation to BL1
+
+This information is used by BL1 to load the BL2 image in secure RAM. BL1 also
+populates a similar structure to tell BL2 the extents of memory available for
+its own use.
+
+This function helps fulfill requirements 4 and 5 above.
+
+Function : bl1\_init\_bl2\_mem\_layout() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *, meminfo *
+    Return   : void
+
+BL1 needs to tell the next stage the amount of secure RAM available
+for it to use. This information is populated in a ``meminfo``
+structure.
+
+Depending upon where BL2 has been loaded in secure RAM (determined by
+``BL2_BASE``), BL1 calculates the amount of free memory available for BL2 to use.
+BL1 also ensures that its data sections resident in secure RAM are not visible
+to BL2. An illustration of how this is done in ARM standard platforms is given
+in the **Memory layout on ARM development platforms** section in the
+`Firmware Design`_.
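+
+A hedged sketch of an override that hides BL1's runtime read-write data from
+BL2 is shown below; it assumes BL1's read-write region sits at the top of the
+secure RAM view and uses the ``platform_def.h`` constants described earlier:
+
+::
+
+    #include <arch_helpers.h>
+    #include <bl_common.h>
+    #include <platform_def.h>
+
+    /* Sketch: give BL2 the whole of BL1's secure RAM view, minus BL1's
+     * read-write region. */
+    void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
+                                 meminfo_t *bl2_mem_layout)
+    {
+        *bl2_mem_layout = *bl1_mem_layout;
+
+        /* Trim the region so BL1's RW data is not visible to BL2. */
+        bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
+        bl2_mem_layout->free_base = bl2_mem_layout->total_base;
+        bl2_mem_layout->free_size = bl2_mem_layout->total_size;
+
+        flush_dcache_range((uintptr_t)bl2_mem_layout, sizeof(meminfo_t));
+    }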
+
+Function : bl1\_plat\_prepare\_exit() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : entry_point_info_t *
+    Return   : void
+
+This function is called prior to exiting BL1 in response to the
+``BL1_SMC_RUN_IMAGE`` SMC request raised by BL2. It should be used to perform
+platform specific clean up or bookkeeping operations before transferring
+control to the next image. It receives the address of the ``entry_point_info_t``
+structure passed from BL2. This function runs with the MMU disabled.
+
+Function : bl1\_plat\_set\_ep\_info() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int image_id, entry_point_info_t *ep_info
+    Return   : void
+
+This function allows platforms to override ``ep_info`` for the given ``image_id``.
+
+The default implementation just returns.
+
+Function : bl1\_plat\_get\_next\_image\_id() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : unsigned int
+
+This and the following function must be overridden to enable the FWU feature.
+
+BL1 calls this function after platform setup to identify the next image to be
+loaded and executed. If the platform returns ``BL2_IMAGE_ID`` then BL1 proceeds
+with the normal boot sequence, which loads and executes BL2. If the platform
+returns a different image id, BL1 assumes that Firmware Update is required.
+
+The default implementation always returns ``BL2_IMAGE_ID``. The ARM development
+platforms override this function to detect if firmware update is required, and
+if so, return the first image in the firmware update process.
+
+Function : bl1\_plat\_get\_image\_desc() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int image_id
+    Return   : image_desc_t *
+
+BL1 calls this function to get the image descriptor information ``image_desc_t``
+for the provided ``image_id`` from the platform.
+
+The default implementation always returns a common BL2 image descriptor. ARM
+standard platforms return an image descriptor corresponding to BL2 or one of
+the firmware update images defined in the Trusted Board Boot Requirements
+specification.
+
+Function : bl1\_plat\_fwu\_done() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int image_id, uintptr_t image_src,
+               unsigned int image_size
+    Return   : void
+
+BL1 calls this function when the FWU process is complete. It must not return.
+The platform may override this function to take platform specific action, for
+example to initiate the normal boot flow.
+
+The default implementation spins forever.
+
+Function : bl1\_plat\_mem\_check() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uintptr_t mem_base, unsigned int mem_size,
+               unsigned int flags
+    Return   : int
+
+BL1 calls this function while handling FWU related SMCs, more specifically when
+copying or authenticating an image. Its responsibility is to ensure that the
+region of memory identified by ``mem_base`` and ``mem_size`` is mapped in BL1, and
+that this memory corresponds to either a secure or non-secure memory region as
+indicated by the security state of the ``flags`` argument.
+
+This function can safely assume that the value resulting from the addition of
+``mem_base`` and ``mem_size`` fits into a ``uintptr_t`` type variable and does not
+overflow.
+
+This function must return 0 on success, or a non-zero error code otherwise.
+
+The default implementation of this function asserts; therefore, platforms must
+override it when using the FWU feature.
+
+Boot Loader Stage 2 (BL2)
+-------------------------
+
+The BL2 stage is executed only by the primary CPU, which is determined in BL1
+using the ``platform_is_primary_cpu()`` function. BL1 passes control to BL2 at
+``BL2_BASE``. BL2 executes in Secure EL1 and is responsible for:
+
+#. (Optional) Loading the SCP\_BL2 binary image (if present) from platform
+   provided non-volatile storage. To load the SCP\_BL2 image, BL2 makes use of
+   the ``meminfo`` returned by the ``bl2_plat_get_scp_bl2_meminfo()`` function.
+   The platform also defines the address in memory where SCP\_BL2 is loaded
+   through the optional constant ``SCP_BL2_BASE``. BL2 uses this information
+   to determine if there is enough memory to load the SCP\_BL2 image.
+   Subsequent handling of the SCP\_BL2 image is platform-specific and is
+   implemented in the ``bl2_plat_handle_scp_bl2()`` function.
+   If ``SCP_BL2_BASE`` is not defined then this step is not performed.
+
+#. Loading the BL31 binary image into secure RAM from non-volatile storage. To
+   load the BL31 image, BL2 makes use of the ``meminfo`` structure passed to it
+   by BL1. This structure allows BL2 to calculate how much secure RAM is
+   available for its use. The platform also defines the address in secure RAM
+   where BL31 is loaded through the constant ``BL31_BASE``. BL2 uses this
+   information to determine if there is enough memory to load the BL31 image.
+
+#. (Optional) Loading the BL32 binary image (if present) from platform
+   provided non-volatile storage. To load the BL32 image, BL2 makes use of
+   the ``meminfo`` returned by the ``bl2_plat_get_bl32_meminfo()`` function.
+   The platform also defines the address in memory where BL32 is loaded
+   through the optional constant ``BL32_BASE``. BL2 uses this information
+   to determine if there is enough memory to load the BL32 image.
+   If ``BL32_BASE`` is not defined then this and the next step are not performed.
+
+#. (Optional) Arranging to pass control to the BL32 image (if present) that
+   has been pre-loaded at ``BL32_BASE``. BL2 populates an ``entry_point_info``
+   structure in memory provided by the platform with information about how
+   BL31 should pass control to the BL32 image.
+
+#. (Optional) Loading the normal world BL33 binary image (if not loaded by
+   other means) into non-secure DRAM from platform storage and arranging for
+   BL31 to pass control to this image. This address is determined using the
+   ``plat_get_ns_image_entrypoint()`` function described below.
+
+#. BL2 populates an ``entry_point_info`` structure in memory provided by the
+   platform with information about how BL31 should pass control to the
+   other BL images.
+
+The following functions must be implemented by the platform port to enable BL2
+to perform the above tasks.
+
+Function : bl2\_early\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU. The argument to this function is the address of the
+``meminfo`` structure populated by BL1.
+
+The platform may copy the contents of the ``meminfo`` structure into a private
+variable as the original memory may be subsequently overwritten by BL2. The
+copied structure is made available to all BL2 code through the
+``bl2_plat_sec_mem_layout()`` function.
+
+On ARM standard platforms, this function also:
+
+-  Initializes a UART (PL011 console), which enables access to the ``printf``
+   family of functions in BL2.
+
+-  Initializes the storage abstraction layer used to load further bootloader
+   images. It is necessary to do this early on platforms with a SCP\_BL2 image,
+   since the later ``bl2_platform_setup`` must be done after SCP\_BL2 is loaded.
+
+Function : bl2\_plat\_arch\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU.
+
+The purpose of this function is to perform any architectural initialization
+that varies across platforms.
+
+On ARM standard platforms, this function enables the MMU.
+
+Function : bl2\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initialization in ``bl2_plat_arch_setup()``. It is only
+called by the primary CPU.
+
+The purpose of this function is to perform any platform initialization
+specific to BL2.
+
+In ARM standard platforms, this function performs security setup, including
+configuration of the TrustZone controller to allow non-secure masters access
+to most of DRAM. Part of DRAM is reserved for secure world use.
+
+Function : bl2\_plat\_sec\_mem\_layout() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : meminfo *
+
+This function should only be called on the cold boot path. It may execute with
+the MMU and data caches enabled if the platform port does the necessary
+initialization in ``bl2_plat_arch_setup()``. It is only called by the primary CPU.
+
+The purpose of this function is to return a pointer to a ``meminfo`` structure
+populated with the extents of secure RAM available for BL2 to use. See
+``bl2_early_platform_setup()`` above.
+
+The following function is required only when LOAD\_IMAGE\_V2 is enabled.
+
+Function : bl2\_plat\_handle\_post\_image\_load() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : int
+
+This function can be used by the platforms to update/use image information
+for the given ``image_id``. This function is currently invoked in BL2 to handle
+BL image specific information based on the ``image_id`` passed, when
+LOAD\_IMAGE\_V2 is enabled.
+
+The following functions are required only when LOAD\_IMAGE\_V2 is disabled.
+
+Function : bl2\_plat\_get\_scp\_bl2\_meminfo() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *
+    Return   : void
+
+This function is used to get the memory limits where BL2 can load the
+SCP\_BL2 image. The meminfo provided by this is used by load\_image() to
+validate whether the SCP\_BL2 image can be loaded within the given
+memory from the given base.
+
+Function : bl2\_plat\_handle\_scp\_bl2() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : image_info *
+    Return   : int
+
+This function is called after loading the SCP\_BL2 image. It is used to perform
+any platform-specific actions required to handle the SCP firmware. Typically it
+transfers the image into SCP memory using a platform-specific protocol and waits
+until SCP executes it and signals to the Application Processor (AP) for BL2
+execution to continue.
+
+This function returns 0 on success, a negative error code otherwise.
+
+Function : bl2\_plat\_get\_bl31\_params() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : bl31_params *
+
+BL2 platform code needs to return a pointer to a ``bl31_params`` structure it
+will use for passing information to BL31. The ``bl31_params`` structure carries
+the following information:
+
+-  Header describing the version information for interpreting the bl31\_params
+   structure
+-  Information about executing the BL33 image in the ``bl33_ep_info`` field
+-  Information about executing the BL32 image in the ``bl32_ep_info`` field
+-  Information about the type and extents of the BL31 image in the
+   ``bl31_image_info`` field
+-  Information about the type and extents of the BL32 image in the
+   ``bl32_image_info`` field
+-  Information about the type and extents of the BL33 image in the
+   ``bl33_image_info`` field
+
+The memory pointed by this structure and its sub-structures should be
+accessible from BL31 initialisation code. BL31 might choose to copy the
+necessary content, or maintain the structures until BL33 is initialised.
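+
+A minimal sketch along the lines of the ARM standard platform approach,
+assuming a statically allocated ``bl2_to_bl31_params_mem_t`` aggregate and the
+generic ``SET_PARAM_HEAD`` helper::
+
+    static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+    bl31_params_t *bl2_plat_get_bl31_params(void)
+    {
+        bl31_params_t *params = &bl31_params_mem.bl31_params;
+
+        memset(&bl31_params_mem, 0, sizeof(bl31_params_mem));
+
+        /* Version header, then link the entry point and image info fields */
+        SET_PARAM_HEAD(params, PARAM_BL31, VERSION_1, 0);
+        params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+        params->bl32_ep_info    = &bl31_params_mem.bl32_ep_info;
+        params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+        params->bl33_ep_info    = &bl31_params_mem.bl33_ep_info;
+        params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+
+        return params;
+    }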
+
+Function : bl2\_plat\_get\_bl31\_ep\_info() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : entry_point_info *
+
+BL2 platform code returns a pointer which is used to populate the entry point
+information for BL31. The location it points to must be accessible from BL1
+while processing the synchronous exception that transfers control to BL31.
+
+In ARM standard platforms this is allocated inside a bl2\_to\_bl31\_params\_mem
+structure in BL2 memory.
+
+Function : bl2\_plat\_set\_bl31\_ep\_info() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : image_info *, entry_point_info *
+    Return   : void
+
+In the normal boot flow, this function is called after loading BL31 image and
+it can be used to overwrite the entry point set by loader and also set the
+security state and SPSR which represents the entry point system state for BL31.
+
+When booting an EL3 payload instead, this function is called after populating
+its entry point address and can be used for the same purpose for the payload
+image. It receives a null pointer as its first argument in this case.
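+
+A minimal sketch, assuming the generic ``SET_SECURITY_STATE`` and ``SPSR_64``
+helpers; the entry point address itself is assumed to have been set already by
+the loader (or by the EL3 payload logic)::
+
+    void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
+                                   entry_point_info_t *bl31_ep_info)
+    {
+        /* BL31 runs in the secure world at EL3 with all exceptions masked */
+        SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+        bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+                                     DISABLE_ALL_EXCEPTIONS);
+    }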
+
+Function : bl2\_plat\_set\_bl32\_ep\_info() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : image_info *, entry_point_info *
+    Return   : void
+
+This function is called after loading BL32 image and it can be used to
+overwrite the entry point set by loader and also set the security state
+and SPSR which represents the entry point system state for BL32.
+
+Function : bl2\_plat\_set\_bl33\_ep\_info() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : image_info *, entry_point_info *
+    Return   : void
+
+This function is called after loading BL33 image and it can be used to
+overwrite the entry point set by loader and also set the security state
+and SPSR which represents the entry point system state for BL33.
+
+In the preloaded BL33 alternative boot flow, this function is called after
+populating its entry point address. It is passed a null pointer as its first
+argument in this case.
+
+Function : bl2\_plat\_get\_bl32\_meminfo() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *
+    Return   : void
+
+This function is used to get the memory limits where BL2 can load the
+BL32 image. The meminfo provided by this is used by load\_image() to
+validate whether the BL32 image can be loaded within the given
+memory from the given base.
+
+Function : bl2\_plat\_get\_bl33\_meminfo() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *
+    Return   : void
+
+This function is used to get the memory limits where BL2 can load the
+BL33 image. The meminfo provided by this is used by load\_image() to
+validate whether the BL33 image can be loaded within the given
+memory from the given base.
+
+This function isn't needed if either ``PRELOADED_BL33_BASE`` or ``EL3_PAYLOAD_BASE``
+build options are used.
+
+Function : bl2\_plat\_flush\_bl31\_params() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+Once BL2 has populated all the structures that need to be read by BL1
+and BL31, including the bl31\_params structure and its sub-structures,
+the bl31\_ep\_info structure and any platform specific data, this function
+flushes all of this data to main memory so that it is available when the
+later bootloader stages are entered with the MMU off.
+
+Function : plat\_get\_ns\_image\_entrypoint() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uintptr_t
+
+As previously described, BL2 is responsible for arranging for control to be
+passed to a normal world BL image through BL31. This function returns the
+entrypoint of that image, which BL31 uses to jump to it.
+
+BL2 is responsible for loading the normal world BL33 image (e.g. UEFI).
+
+This function isn't needed if either ``PRELOADED_BL33_BASE`` or ``EL3_PAYLOAD_BASE``
+build options are used.
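+
+A trivial sketch, assuming a hypothetical platform-defined constant for the
+BL33 load address::
+
+    uintptr_t plat_get_ns_image_entrypoint(void)
+    {
+        /* PLAT_NS_IMAGE_BASE is a hypothetical platform constant */
+        return PLAT_NS_IMAGE_BASE;
+    }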
+
+Function : bl2\_plat\_preload\_setup [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This optional function performs any BL2 platform initialization
+required before image loading, that is not done later in
+bl2\_platform\_setup(). Specifically, if support for multiple
+boot sources is required, it initializes the boot sequence used by
+plat\_try\_next\_boot\_source().
+
+Function : plat\_try\_next\_boot\_source() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : int
+
+This optional function advances to the next boot source in the redundancy
+sequence.
+
+This function moves the current boot redundancy source to the next
+element in the boot sequence. If there are no more boot sources then it
+must return 0, otherwise it must return 1. The default implementation
+of this always returns 0.
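+
+As an illustration, a platform supporting several boot devices might keep a
+small table and an index, reset in ``bl2_plat_preload_setup()`` and advanced
+here; the ``BOOT_*`` identifiers and the ``plat_select_boot_device()`` helper
+below are hypothetical::
+
+    /* BOOT_* identifiers and plat_select_boot_device() are hypothetical */
+    static const unsigned int boot_sequence[] = { BOOT_EMMC, BOOT_SD, BOOT_NOR };
+    static unsigned int boot_idx;
+
+    void bl2_plat_preload_setup(void)
+    {
+        boot_idx = 0;
+        plat_select_boot_device(boot_sequence[boot_idx]);
+    }
+
+    int plat_try_next_boot_source(void)
+    {
+        if (boot_idx + 1U >= ARRAY_SIZE(boot_sequence))
+            return 0;   /* no more boot sources left */
+
+        boot_idx++;
+        plat_select_boot_device(boot_sequence[boot_idx]);
+        return 1;
+    }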
+
+FWU Boot Loader Stage 2 (BL2U)
+------------------------------
+
+The AP Firmware Updater Configuration, BL2U, is an optional part of the FWU
+process and is executed only by the primary CPU. BL1 passes control to BL2U at
+``BL2U_BASE``. BL2U executes in Secure-EL1 and is responsible for:
+
+#. (Optional) Transferring the optional SCP\_BL2U binary image from AP secure
+   memory to SCP RAM. BL2U uses the SCP\_BL2U ``image_info`` passed by BL1.
+   ``SCP_BL2U_BASE`` defines the address in AP secure memory where SCP\_BL2U
+   should be copied from. Subsequent handling of the SCP\_BL2U image is
+   implemented by the platform specific ``bl2u_plat_handle_scp_bl2u()`` function.
+   If ``SCP_BL2U_BASE`` is not defined then this step is not performed.
+
+#. Any platform specific setup required to perform the FWU process. For
+   example, ARM standard platforms initialize the TZC controller so that the
+   normal world can access DDR memory.
+
+The following functions must be implemented by the platform port to enable
+BL2U to perform the tasks mentioned above.
+
+Function : bl2u\_early\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : meminfo *mem_info, void *plat_info
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only
+called by the primary CPU. The arguments to this function are the address
+of the ``meminfo`` structure and the platform specific info provided by BL1.
+
+The platform may copy the contents of the ``mem_info`` and ``plat_info`` into
+private storage as the original memory may be subsequently overwritten by BL2U.
+
+On ARM CSS platforms ``plat_info`` is interpreted as an ``image_info_t`` structure,
+to extract SCP\_BL2U image information, which is then copied into a private
+variable.
+
+Function : bl2u\_plat\_arch\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only
+called by the primary CPU.
+
+The purpose of this function is to perform any architectural initialization
+that varies across platforms, for example enabling the MMU (since the memory
+map differs across platforms).
+
+Function : bl2u\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initialization in ``bl2u_plat_arch_setup()``. It is only
+called by the primary CPU.
+
+The purpose of this function is to perform any platform initialization
+specific to BL2U.
+
+In ARM standard platforms, this function performs security setup, including
+configuration of the TrustZone controller to allow non-secure masters access
+to most of DRAM. Part of DRAM is reserved for secure world use.
+
+Function : bl2u\_plat\_handle\_scp\_bl2u() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : int
+
+This function is used to perform any platform-specific actions required to
+handle the SCP firmware. Typically it transfers the image into SCP memory using
+a platform-specific protocol and waits until SCP executes it and signals to the
+Application Processor (AP) for BL2U execution to continue.
+
+This function returns 0 on success, a negative error code otherwise.
+This function is included if SCP\_BL2U\_BASE is defined.
+
+Boot Loader Stage 3-1 (BL31)
+----------------------------
+
+During cold boot, the BL31 stage is executed only by the primary CPU. This is
+determined in BL1 using the ``platform_is_primary_cpu()`` function. BL1 passes
+control to BL31 at ``BL31_BASE``. During warm boot, BL31 is executed by all
+CPUs. BL31 executes at EL3 and is responsible for:
+
+#. Re-initializing all architectural and platform state. Although BL1 performs
+   some of this initialization, BL31 remains resident in EL3 and must ensure
+   that EL3 architectural and platform state is completely initialized. It
+   should make no assumptions about the system state when it receives control.
+
+#. Passing control to a normal world BL image, pre-loaded at a platform-
+   specific address by BL2. BL31 uses the ``entry_point_info`` structure that BL2
+   populated in memory to do this.
+
+#. Providing runtime firmware services. Currently, BL31 only implements a
+   subset of the Power State Coordination Interface (PSCI) API as a runtime
+   service. See Section 3.3 below for details of porting the PSCI
+   implementation.
+
+#. Optionally passing control to the BL32 image, pre-loaded at a platform-
+   specific address by BL2. BL31 exports a set of APIs that allow runtime
+   services to specify the security state in which the next image should be
+   executed and run the corresponding image. BL31 uses the ``entry_point_info``
+   structure populated by BL2 to do this.
+
+If BL31 is a reset vector, it also needs to handle the reset as specified in
+section 2.2 before performing the tasks described above.
+
+The following functions must be implemented by the platform port to enable BL31
+to perform the above tasks.
+
+Function : bl31\_early\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : bl31_params *, void *
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU. The arguments to this function are:
+
+-  The address of the ``bl31_params`` structure populated by BL2.
+-  An opaque pointer that the platform may use as needed.
+
+The platform can copy the contents of the ``bl31_params`` structure and its
+sub-structures into private variables if the original memory may be
+subsequently overwritten by BL31. Similarly, the ``void *`` pointing to the
+platform data also needs to be saved.
+
+In ARM standard platforms, BL2 passes a pointer to a ``bl31_params`` structure
+in BL2 memory. BL31 copies the information in this pointer to internal data
+structures. It also performs the following:
+
+-  Initialize a UART (PL011 console), which enables access to the ``printf``
+   family of functions in BL31.
+
+-  Enable issuing of snoop and DVM (Distributed Virtual Memory) requests to the
+   CCI slave interface corresponding to the cluster that includes the primary
+   CPU.
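+
+A minimal sketch of such a sequence, assuming hypothetical UART constants and
+keeping private copies of the BL32/BL33 entry point information::
+
+    static entry_point_info_t bl32_image_ep_info;
+    static entry_point_info_t bl33_image_ep_info;
+
+    void bl31_early_platform_setup(bl31_params_t *from_bl2,
+                                   void *plat_params_from_bl2)
+    {
+        /* Bring up a console early (PLAT_UART_* are hypothetical constants) */
+        console_init(PLAT_UART_BASE, PLAT_UART_CLK_IN_HZ, PLAT_CONSOLE_BAUDRATE);
+
+        assert(from_bl2 != NULL);
+
+        /* Copy BL2-owned data before that memory is reclaimed */
+        bl32_image_ep_info = *from_bl2->bl32_ep_info;
+        bl33_image_ep_info = *from_bl2->bl33_ep_info;
+    }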
+
+Function : bl31\_plat\_arch\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function executes with the MMU and data caches disabled. It is only called
+by the primary CPU.
+
+The purpose of this function is to perform any architectural initialization
+that varies across platforms.
+
+On ARM standard platforms, this function enables the MMU.
+
+Function : bl31\_platform\_setup() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initialization in ``bl31_plat_arch_setup()``. It is only
+called by the primary CPU.
+
+The purpose of this function is to complete platform initialization so that both
+BL31 runtime services and normal world software can function correctly.
+
+On ARM standard platforms, this function does the following:
+
+-  Initialize the generic interrupt controller.
+
+   Depending on the GIC driver selected by the platform, the appropriate GICv2
+   or GICv3 initialization will be done, which mainly consists of:
+
+   -  Enable secure interrupts in the GIC CPU interface.
+   -  Disable the legacy interrupt bypass mechanism.
+   -  Configure the priority mask register to allow interrupts of all priorities
+      to be signaled to the CPU interface.
+   -  Mark SGIs 8-15 and the other secure interrupts on the platform as secure.
+   -  Target all secure SPIs to CPU0.
+   -  Enable these secure interrupts in the GIC distributor.
+   -  Configure all other interrupts as non-secure.
+   -  Enable signaling of secure interrupts in the GIC distributor.
+
+-  Enable system-level implementation of the generic timer counter through the
+   memory mapped interface.
+
+-  Grant access to the system counter timer module
+
+-  Initialize the power controller device.
+
+   In particular, initialise the locks that prevent concurrent accesses to the
+   power controller device.
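+
+A sketch of the interrupt controller part of this sequence, assuming the
+common ARM GICv2 driver and a hypothetical ``plat_gicv2_driver_data``
+configuration::
+
+    void bl31_platform_setup(void)
+    {
+        /* Initialize the GICv2 driver with the platform's configuration */
+        gicv2_driver_init(&plat_gicv2_driver_data);
+
+        /* Distributor, per-CPU distributor interface and CPU interface */
+        gicv2_distif_init();
+        gicv2_pcpu_distif_init();
+        gicv2_cpuif_enable();
+
+        /* ... timer, power controller and other platform setup ... */
+    }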
+
+Function : bl31\_plat\_runtime\_setup() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+The purpose of this function is to allow the platform to perform any BL31 runtime
+setup just prior to BL31 exit during cold boot. The default weak
+implementation of this function will invoke ``console_uninit()`` which will
+suppress any BL31 runtime logs.
+
+In ARM Standard platforms, this function will initialize the BL31 runtime
+console which will cause all further BL31 logs to be output to the
+runtime console.
+
+Function : bl31\_get\_next\_image\_info() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int
+    Return   : entry_point_info *
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initializations in ``bl31_plat_arch_setup()``.
+
+This function is called by ``bl31_main()`` to retrieve information provided by
+BL2 for the next image in the security state specified by the argument. BL31
+uses this information to pass control to that image in the specified security
+state. This function must return a pointer to the ``entry_point_info`` structure
+(that was copied during ``bl31_early_platform_setup()``) if the image exists. It
+should return NULL otherwise.
+
+Function : plat\_get\_syscnt\_freq2() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : unsigned int
+
+This function is used by the architecture setup code to retrieve the counter
+frequency for the CPU's generic timer. This value will be programmed into the
+``CNTFRQ_EL0`` register. In ARM standard platforms, it returns the base frequency
+of the system counter, which is retrieved from the first entry in the frequency
+modes table.
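+
+A trivial sketch, assuming a hypothetical platform constant for the system
+counter frequency::
+
+    unsigned int plat_get_syscnt_freq2(void)
+    {
+        /* PLAT_SYS_COUNTER_FREQ_IN_TICKS is a hypothetical constant */
+        return PLAT_SYS_COUNTER_FREQ_IN_TICKS;
+    }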
+
+#define : PLAT\_PERCPU\_BAKERY\_LOCK\_SIZE [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When ``USE_COHERENT_MEM = 0``, this constant defines the total memory (in
+bytes) aligned to the cache line boundary that should be allocated per-cpu to
+accommodate all the bakery locks.
+
+If this constant is not defined when ``USE_COHERENT_MEM = 0``, the linker
+calculates the size of the ``bakery_lock`` input section, aligns it to the
+nearest ``CACHE_WRITEBACK_GRANULE``, multiplies it with ``PLATFORM_CORE_COUNT``
+and stores the result in a linker symbol. Defining this constant allows a
+platform to avoid relying on the linker and to provide a more efficient
+mechanism for accessing per-cpu bakery lock information.
+
+If this constant is defined and its value is not equal to the value
+calculated by the linker then a link time assertion is raised. A compile time
+assertion is raised if the value of the constant is not aligned to the cache
+line boundary.
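+
+For illustration, a platform header might size this as a small multiple of the
+cache line size; the two-line budget below is a hypothetical example, not a
+recommendation::
+
+    /* Reserve two cache lines per CPU for all bakery locks */
+    #define PLAT_PERCPU_BAKERY_LOCK_SIZE    (2 * CACHE_WRITEBACK_GRANULE)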
+
+Power State Coordination Interface (in BL31)
+--------------------------------------------
+
+The ARM Trusted Firmware's implementation of the PSCI API is based around the
+concept of a *power domain*. A *power domain* is a CPU or a logical group of
+CPUs which share some state on which power management operations can be
+performed as specified by `PSCI`_. Each CPU in the system is assigned a cpu
+index which is a unique number between ``0`` and ``PLATFORM_CORE_COUNT - 1``.
+The *power domains* are arranged in a hierarchical tree structure and
+each *power domain* can be identified in a system by the cpu index of any CPU
+that is part of that domain and a *power domain level*. A processing element
+(for example, a CPU) is at level 0. If the *power domain* node above a CPU is
+a logical grouping of CPUs that share some state, then level 1 is that group
+of CPUs (for example, a cluster), and level 2 is a group of clusters
+(for example, the system). More details on the power domain topology and its
+organization can be found in `Power Domain Topology Design`_.
+
+BL31's platform initialization code exports a pointer to the platform-specific
+power management operations required for the PSCI implementation to function
+correctly. This information is populated in the ``plat_psci_ops`` structure. The
+PSCI implementation calls members of the ``plat_psci_ops`` structure for performing
+power management operations on the power domains. For example, the target
+CPU is specified by its ``MPIDR`` in a PSCI ``CPU_ON`` call. The ``pwr_domain_on()``
+handler (if present) is called for the CPU power domain.
+
+The ``power-state`` parameter of a PSCI ``CPU_SUSPEND`` call can be used to
+describe composite power states specific to a platform. The PSCI implementation
+defines a generic representation of the power-state parameter, namely an
+array of local power states in which each index corresponds to a power domain
+level. Each entry contains the local power state the power domain at that power
+level could enter. It is the responsibility of the ``validate_power_state()``
+handler to convert the power-state parameter (possibly encoding a composite
+power state) passed in a PSCI ``CPU_SUSPEND`` call to this representation.
+
+The following functions form part of the platform port of the PSCI functionality.
+
+Function : plat\_psci\_stat\_accounting\_start() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : const psci_power_state_t *
+    Return   : void
+
+This is an optional hook that platforms can implement for residency statistics
+accounting before entering a low power state. The ``pwr_domain_state`` field of
+``state_info`` (first argument) can be inspected if stat accounting is done
+differently at CPU level versus higher levels. As an example, if the element at
+index 0 (CPU power level) in the ``pwr_domain_state`` array indicates a power down
+state, special hardware logic may be programmed in order to keep track of the
+residency statistics. For higher levels (array indices > 0), the residency
+statistics could be tracked in software using PMF. If ``ENABLE_PMF`` is set, the
+default implementation will use PMF to capture timestamps.
+
+Function : plat\_psci\_stat\_accounting\_stop() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : const psci_power_state_t *
+    Return   : void
+
+This is an optional hook that platforms can implement for residency statistics
+accounting after exiting from a low power state. The ``pwr_domain_state`` field
+of ``state_info`` (first argument) can be inspected if stat accounting is done
+differently at CPU level versus higher levels. As an example, if the element at
+index 0 (CPU power level) in the ``pwr_domain_state`` array indicates a power down
+state, special hardware logic may be programmed in order to keep track of the
+residency statistics. For higher levels (array indices > 0), the residency
+statistics could be tracked in software using PMF. If ``ENABLE_PMF`` is set, the
+default implementation will use PMF to capture timestamps.
+
+Function : plat\_psci\_stat\_get\_residency() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int, const psci_power_state_t *, int
+    Return   : u_register_t
+
+This is an optional interface that is invoked after resuming from a low power
+state and provides the time spent resident in that low power state by the power
+domain at a particular power domain level. When a CPU wakes up from suspend,
+all its parent power domain levels are also woken up. The generic PSCI code
+invokes this function for each parent power domain that is resumed; the domain
+is identified by the ``lvl`` (first argument) parameter. The ``state_info`` (second
+argument) describes the low power state that the power domain has resumed from.
+The current CPU is the first CPU in the power domain to resume from the low
+power state and the ``last_cpu_idx`` (third parameter) is the index of the last
+CPU in the power domain to suspend and may be needed to calculate the residency
+for that power domain.
+
+Function : plat\_get\_target\_pwr\_state() [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : unsigned int, const plat_local_state_t *, unsigned int
+    Return   : plat_local_state_t
+
+The PSCI generic code uses this function to let the platform participate in
+state coordination during a power management operation. The function is passed
+a pointer to an array of platform specific local power state ``states`` (second
+argument) which contains the requested power state for each CPU at a particular
+power domain level ``lvl`` (first argument) within the power domain. The function
+is expected to traverse this array of up to ``ncpus`` (third argument) entries
+and return a coordinated target power state by comparing all the requested power
+states. The target power state should not be deeper than any of the requested
+power states.
+
+A weak definition of this API is provided by default wherein it assumes
+that the platform assigns a local state value in order of increasing depth
+of the power state i.e. for two power states X & Y, if X < Y
+then X represents a shallower power state than Y. As a result, the
+coordinated target local power state for a power domain will be the minimum
+of the requested local power state values.
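+
+A sketch of that default coordination policy, under the same assumption that
+numerically larger local state values are deeper::
+
+    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+                                                 const plat_local_state_t *states,
+                                                 unsigned int ncpu)
+    {
+        plat_local_state_t target = PLAT_MAX_OFF_STATE;
+
+        assert(ncpu > 0U);
+
+        /* The coordinated state is the shallowest (minimum) requested state */
+        for (unsigned int i = 0U; i < ncpu; i++) {
+            if (states[i] < target)
+                target = states[i];
+        }
+
+        return target;
+    }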
+
+Function : plat\_get\_power\_domain\_tree\_desc() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : const unsigned char *
+
+This function returns a pointer to the byte array containing the power domain
+topology tree description. The format and method to construct this array are
+described in `Power Domain Topology Design`_. The BL31 PSCI initialization code
+requires this array to be described by the platform, either statically or
+dynamically, to initialize the power domain topology tree. In case the array
+is populated dynamically, then plat\_core\_pos\_by\_mpidr() and
+plat\_my\_core\_pos() should also be implemented suitably so that the topology
+tree description matches the CPU indices returned by these APIs. These APIs
+together form the platform interface for the PSCI topology framework.
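+
+A static description for a hypothetical system with one root node and two
+clusters of four CPUs each could look as follows::
+
+    static const unsigned char plat_power_domain_tree_desc[] = {
+        1,  /* Number of root (system-level) power domains */
+        2,  /* Number of clusters under the root */
+        4,  /* Number of CPUs in cluster 0 */
+        4   /* Number of CPUs in cluster 1 */
+    };
+
+    const unsigned char *plat_get_power_domain_tree_desc(void)
+    {
+        return plat_power_domain_tree_desc;
+    }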
+
+Function : plat\_setup\_psci\_ops() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uintptr_t, const plat_psci_ops **
+    Return   : int
+
+This function may execute with the MMU and data caches enabled if the platform
+port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
+called by the primary CPU.
+
+This function is called by PSCI initialization code. Its purpose is to let
+the platform layer know about the warm boot entrypoint through the
+``sec_entrypoint`` (first argument) and to export handler routines for
+platform-specific PSCI power management actions by populating the passed
+pointer with a pointer to BL31's private ``plat_psci_ops`` structure.
+
+A description of each member of this structure is given below. Please refer to
+the ARM FVP specific implementation of these handlers in
+`plat/arm/board/fvp/fvp\_pm.c`_ as an example. For each PSCI function that the
+platform wants to support, the associated operation or operations in this
+structure must be provided and implemented (Refer section 4 of
+`Firmware Design`_ for the PSCI API supported in Trusted Firmware). To disable
+a PSCI function in a platform port, the operation should be removed from this
+structure instead of providing an empty implementation.
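+
+A minimal sketch of the registration step; the individual handlers are assumed
+to be implemented elsewhere in the platform port::
+
+    static uintptr_t plat_sec_entrypoint;
+
+    static const plat_psci_ops_t plat_psci_pm_ops = {
+        .cpu_standby               = plat_cpu_standby,
+        .pwr_domain_on             = plat_pwr_domain_on,
+        .pwr_domain_off            = plat_pwr_domain_off,
+        .pwr_domain_suspend        = plat_pwr_domain_suspend,
+        .pwr_domain_on_finish      = plat_pwr_domain_on_finish,
+        .pwr_domain_suspend_finish = plat_pwr_domain_suspend_finish,
+    };
+
+    int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+                            const plat_psci_ops_t **psci_ops)
+    {
+        /* Remember the warm boot entrypoint, e.g. to program a mailbox */
+        plat_sec_entrypoint = sec_entrypoint;
+
+        *psci_ops = &plat_psci_pm_ops;
+        return 0;
+    }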
+
+plat\_psci\_ops.cpu\_standby()
+..............................
+
+Perform the platform-specific actions to enter the standby state for the CPU
+indicated by the passed argument. This provides a fast path for CPU standby
+wherein the overheads of PSCI state management and lock acquisition are avoided.
+For this handler to be invoked by the PSCI ``CPU_SUSPEND`` API implementation,
+the suspend state type specified in the ``power-state`` parameter should be
+STANDBY and the target power domain level specified should be the CPU. The
+handler should put the CPU into a low power retention state (usually by
+issuing a wfi instruction) and ensure that it can be woken up from that
+state by a normal interrupt. The generic code expects the handler to succeed.
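+
+A minimal sketch of such a handler, entering a simple ``wfi``-based retention
+state::
+
+    static void plat_cpu_standby(plat_local_state_t cpu_state)
+    {
+        assert(cpu_state == PLAT_MAX_RET_STATE);
+
+        /* Complete outstanding memory accesses, then wait for an interrupt */
+        dsb();
+        wfi();
+    }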
+
+plat\_psci\_ops.pwr\_domain\_on()
+.................................
+
+Perform the platform specific actions to power on a CPU, specified
+by the ``MPIDR`` (first argument). The generic code expects the platform to
+return PSCI\_E\_SUCCESS on success or PSCI\_E\_INTERN\_FAIL for any failure.
+
+plat\_psci\_ops.pwr\_domain\_off()
+..................................
+
+Perform the platform specific actions to prepare to power off the calling CPU
+and its higher parent power domain levels as indicated by the ``target_state``
+(first argument). It is called by the PSCI ``CPU_OFF`` API implementation.
+
+The ``target_state`` encodes the platform coordinated target local power states
+for the CPU power domain and its parent power domain levels. The handler
+needs to perform power management operation corresponding to the local state
+at each power level.
+
+For this handler, the local power state for the CPU power domain will be a
+power down state, whereas it could be either a power down, retention or run state
+for the higher power domain levels depending on the result of state
+coordination. The generic code expects the handler to succeed.
+
+plat\_psci\_ops.pwr\_domain\_suspend\_pwrdown\_early() [optional]
+.................................................................
+
+This optional function may be used as a performance optimization to replace
+or complement pwr_domain_suspend() on some platforms. Its calling semantics
+are identical to pwr_domain_suspend(), except the PSCI implementation only
+calls this function when suspending to a power down state, and it guarantees
+that data caches are enabled.
+
+When HW_ASSISTED_COHERENCY = 0, the PSCI implementation disables data caches
+before calling pwr_domain_suspend(). If the target_state corresponds to a
+power down state and it is safe to perform some or all of the platform
+specific actions in that function with data caches enabled, it may be more
+efficient to move those actions to this function. When HW_ASSISTED_COHERENCY
+= 1, data caches remain enabled throughout, and so there is no advantage to
+moving platform specific actions to this function.
+
+plat\_psci\_ops.pwr\_domain\_suspend()
+......................................
+
+Perform the platform specific actions to prepare to suspend the calling
+CPU and its higher parent power domain levels as indicated by the
+``target_state`` (first argument). It is called by the PSCI ``CPU_SUSPEND``
+API implementation.
+
+The ``target_state`` has a similar meaning as described in
+the ``pwr_domain_off()`` operation. It encodes the platform coordinated
+target local power states for the CPU power domain and its parent
+power domain levels. The handler needs to perform power management operation
+corresponding to the local state at each power level. The generic code
+expects the handler to succeed.
+
+The difference between turning a power domain off versus suspending it is that
+in the former case, the power domain is expected to re-initialize its state
+when it is next powered on (see ``pwr_domain_on_finish()``). In the latter
+case, the power domain is expected to save enough state so that it can resume
+execution by restoring this state when it is powered on (see
+``pwr_domain_suspend_finish()``).
+
+When suspending a core, the platform can also choose to power off the GICv3
+Redistributor and ITS through an implementation-defined sequence. To achieve
+this safely, the ITS context must be saved first. The architectural part is
+implemented by the ``gicv3_its_save_disable()`` helper, but most of the needed
+sequence is implementation defined and it is therefore the responsibility of
+the platform code to implement the necessary sequence. Then the GIC
+Redistributor context can be saved using the ``gicv3_rdistif_save()`` helper.
+Powering off the Redistributor requires the implementation to support it and it
+is the responsibility of the platform code to execute the right implementation
+defined sequence.
+
+When a system suspend is requested, the platform can also make use of the
+``gicv3_distif_save()`` helper to save the context of the GIC Distributor after
+it has saved the context of the Redistributors and ITS of all the cores in the
+system. The context of the Distributor can be large and may require it to be
+allocated in a special area if it cannot fit in the platform's global static
+data, for example in DRAM. The Distributor can then be powered down using an
+implementation-defined sequence.
+
+plat\_psci\_ops.pwr\_domain\_pwr\_down\_wfi()
+.............................................
+
+This is an optional function and, if implemented, is expected to perform
+platform specific actions including the ``wfi`` invocation which allows the
+CPU to powerdown. Since this function is invoked outside the PSCI locks,
+the actions performed in this hook must be local to the CPU or the platform
+must ensure that races between multiple CPUs cannot occur.
+
+The ``target_state`` has a similar meaning as described in the ``pwr_domain_off()``
+operation and it encodes the platform coordinated target local power states for
+the CPU power domain and its parent power domain levels. This function must
+not return back to the caller.
+
+If this function is not implemented by the platform, PSCI generic
+implementation invokes ``psci_power_down_wfi()`` for power down.
+
+plat\_psci\_ops.pwr\_domain\_on\_finish()
+.........................................
+
+This function is called by the PSCI implementation after the calling CPU is
+powered on and released from reset in response to an earlier PSCI ``CPU_ON`` call.
+It performs the platform-specific setup required to initialize enough state for
+this CPU to enter the normal world and also provide secure runtime firmware
+services.
+
+The ``target_state`` (first argument) is the prior state of the power domains
+immediately before the CPU was turned on. It indicates which power domains
+above the CPU might require initialization due to having previously been in
+low power states. The generic code expects the handler to succeed.
+
+plat\_psci\_ops.pwr\_domain\_suspend\_finish()
+..............................................
+
+This function is called by the PSCI implementation after the calling CPU is
+powered on and released from reset in response to an asynchronous wakeup
+event, for example a timer interrupt that was programmed by the CPU during the
+``CPU_SUSPEND`` call or ``SYSTEM_SUSPEND`` call. It performs the platform-specific
+setup required to restore the saved state for this CPU to resume execution
+in the normal world and also provide secure runtime firmware services.
+
+The ``target_state`` (first argument) has a similar meaning as described in
+the ``pwr_domain_on_finish()`` operation. The generic code expects the platform
+to succeed.
+
+If the Distributor, Redistributors or ITS have been powered off as part of a
+suspend, their context must be restored in this function in the reverse order
+to how they were saved during the suspend sequence.
+
+plat\_psci\_ops.system\_off()
+.............................
+
+This function is called by PSCI implementation in response to a ``SYSTEM_OFF``
+call. It performs the platform-specific system poweroff sequence after
+notifying the Secure Payload Dispatcher.
+
+plat\_psci\_ops.system\_reset()
+...............................
+
+This function is called by PSCI implementation in response to a ``SYSTEM_RESET``
+call. It performs the platform-specific system reset sequence after
+notifying the Secure Payload Dispatcher.
+
+plat\_psci\_ops.validate\_power\_state()
+........................................
+
+This function is called by the PSCI implementation during the ``CPU_SUSPEND``
+call to validate the ``power_state`` parameter of the PSCI API and if valid,
+populate it in ``req_state`` (second argument) array as power domain level
+specific local states. If the ``power_state`` is invalid, the platform must
+return PSCI\_E\_INVALID\_PARAMS as error, which is propagated back to the
+normal world PSCI client.
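+
+A sketch of a simple handler that accepts CPU-level standby and power down
+requests up to ``PLAT_MAX_PWR_LVL``, assuming the generic ``psci_get_pstate_*``
+accessors::
+
+    static int plat_validate_power_state(unsigned int power_state,
+                                         psci_power_state_t *req_state)
+    {
+        unsigned int type = psci_get_pstate_type(power_state);
+        unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+
+        if (pwr_lvl > PLAT_MAX_PWR_LVL)
+            return PSCI_E_INVALID_PARAMS;
+
+        if (type == PSTATE_TYPE_STANDBY) {
+            /* Only CPU-level retention is supported in this sketch */
+            if (pwr_lvl != MPIDR_AFFLVL0)
+                return PSCI_E_INVALID_PARAMS;
+            req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
+        } else {
+            for (unsigned int i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+                req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+        }
+
+        return PSCI_E_SUCCESS;
+    }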
+
+plat\_psci\_ops.validate\_ns\_entrypoint()
+..........................................
+
+This function is called by the PSCI implementation during the ``CPU_SUSPEND``,
+``SYSTEM_SUSPEND`` and ``CPU_ON`` calls to validate the non-secure ``entry_point``
+parameter passed by the normal world. If the ``entry_point`` is invalid,
+the platform must return PSCI\_E\_INVALID\_ADDRESS as error, which is
+propagated back to the normal world PSCI client.
+
+plat\_psci\_ops.get\_sys\_suspend\_power\_state()
+.................................................
+
+This function is called by the PSCI implementation during the ``SYSTEM_SUSPEND``
+call to get the ``req_state`` parameter from the platform, which encodes the
+power domain level specific local states required to suspend to the system
+affinity level. The
+``req_state`` will be utilized to do the PSCI state coordination and
+``pwr_domain_suspend()`` will be invoked with the coordinated target state to
+enter system suspend.
+
+plat\_psci\_ops.get\_pwr\_lvl\_state\_idx()
+...........................................
+
+This is an optional function and, if implemented, is invoked by the PSCI
+implementation to convert the ``local_state`` (first argument) at a specified
+``pwr_lvl`` (second argument) to an index between 0 and
+``PLAT_MAX_PWR_LVL_STATES`` - 1. This function is only needed if the platform
+supports more than two local power states at each power domain level, that is
+``PLAT_MAX_PWR_LVL_STATES`` is greater than 2, and needs to account for these
+local power states.
+
+plat\_psci\_ops.translate\_power\_state\_by\_mpidr()
+....................................................
+
+This is an optional function and, if implemented, verifies the ``power_state``
+(second argument) parameter of the PSCI API corresponding to a target power
+domain. The target power domain is identified by using both ``MPIDR`` (first
+argument) and the power domain level encoded in ``power_state``. The power domain
+level specific local states are to be extracted from ``power_state`` and be
+populated in the ``output_state`` (third argument) array. The functionality
+is similar to the ``validate_power_state`` function described above and is
+envisaged to be used in case the validity of ``power_state`` depends on the
+targeted power domain. If the ``power_state`` is invalid for the targeted power
+domain, the platform must return PSCI\_E\_INVALID\_PARAMS as error. If this
+function is not implemented, then the generic implementation relies on
+``validate_power_state`` function to translate the ``power_state``.
+
+This function can also be used in case the platform wants to support local
+power state encoding for ``power_state`` parameter of PSCI\_STAT\_COUNT/RESIDENCY
+APIs as described in Section 5.18 of `PSCI`_.
+
+plat\_psci\_ops.get\_node\_hw\_state()
+......................................
+
+This is an optional function. If implemented this function is intended to return
+the power state of a node (identified by the first parameter, the ``MPIDR``) in
+the power domain topology (identified by the second parameter, ``power_level``),
+as retrieved from a power controller or equivalent component on the platform.
+Upon successful completion, the implementation must map and return the final
+status among ``HW_ON``, ``HW_OFF`` or ``HW_STANDBY``. Upon encountering failures, it
+must return either ``PSCI_E_INVALID_PARAMS`` or ``PSCI_E_NOT_SUPPORTED`` as
+appropriate.
+
+Implementations are not expected to handle ``power_levels`` greater than
+``PLAT_MAX_PWR_LVL``.
+
+plat\_psci\_ops.system\_reset2()
+................................
+
+This is an optional function. If implemented this function is
+called during the ``SYSTEM_RESET2`` call to perform a reset
+based on the first parameter ``reset_type`` as specified in
+`PSCI`_. The parameter ``cookie`` can be used to pass additional
+reset information. If the ``reset_type`` is not supported, the
+function must return ``PSCI_E_NOT_SUPPORTED``. For architectural
+resets, all failures must return ``PSCI_E_INVALID_PARAMETERS``
+and vendor reset can return other PSCI error codes as defined
+in `PSCI`_. On success this function will not return.
+
+plat\_psci\_ops.write\_mem\_protect()
+....................................
+
+This is an optional function. If implemented it enables or disables the
+``MEM_PROTECT`` functionality based on the value of ``val``.
+A non-zero value enables ``MEM_PROTECT`` and a value of zero
+disables it. Upon encountering failures it must return a negative value
+and on success it must return 0.
+
+plat\_psci\_ops.read\_mem\_protect()
+.....................................
+
+This is an optional function. If implemented it returns the current
+state of ``MEM_PROTECT`` via the ``val`` parameter.  Upon encountering
+failures it must return a negative value and on success it must
+return 0.
+
+plat\_psci\_ops.mem\_protect\_chk()
+...................................
+
+This is an optional function. If implemented it checks if a memory
+region defined by a base address ``base`` and with a size of ``length``
+bytes is protected by ``MEM_PROTECT``.  If the region is protected
+then it must return 0, otherwise it must return a negative number.
+
+Interrupt Management framework (in BL31)
+----------------------------------------
+
+BL31 implements an Interrupt Management Framework (IMF) to manage interrupts
+generated in either security state and targeted to EL1 or EL2 in the non-secure
+state or EL3/S-EL1 in the secure state. The design of this framework is
+described in the `IMF Design Guide`_.
+
+A platform should export the following APIs to support the IMF. The following
+text briefly describes each API and its implementation in ARM standard
+platforms. The API implementation depends upon the type of interrupt controller
+present in the platform. ARM standard platform layer supports both
+`ARM Generic Interrupt Controller version 2.0 (GICv2)`_
+and `3.0 (GICv3)`_. Juno builds the ARM
+Standard layer to use GICv2 and the FVP can be configured to use either GICv2 or
+GICv3 depending on the build flag ``FVP_USE_GIC_DRIVER`` (See FVP platform
+specific build options in `User Guide`_ for more details).
+
+See also: `Interrupt Controller Abstraction APIs`__.
+
+.. __: platform-interrupt-controller-API.rst
+
+Function : plat\_interrupt\_type\_to\_line() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t, uint32_t
+    Return   : uint32_t
+
+The ARM processor signals an interrupt exception either through the IRQ or FIQ
+interrupt line. The specific line that is signaled depends on how the interrupt
+controller (IC) reports different interrupt types from an execution context in
+either security state. The IMF uses this API to determine which interrupt line
+the platform IC uses to signal each type of interrupt supported by the framework
+from a given security state. This API must be invoked at EL3.
+
+The first parameter will be one of the ``INTR_TYPE_*`` values (see
+`IMF Design Guide`_) indicating the target type of the interrupt; the second parameter is the
+security state of the originating execution context. The return result is the
+bit position in the ``SCR_EL3`` register of the respective interrupt trap: IRQ=1,
+FIQ=2.
+
+In the case of ARM standard platforms using GICv2, S-EL1 interrupts are
+configured as FIQs and Non-secure interrupts as IRQs from either security
+state.
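+
+A sketch of such a GICv2-style mapping, returning the SCR\_EL3 bit positions
+quoted above (IRQ = 1, FIQ = 2)::
+
+    uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state)
+    {
+        assert(type == INTR_TYPE_S_EL1 || type == INTR_TYPE_EL3 ||
+               type == INTR_TYPE_NS);
+
+        /* Non-secure interrupts use IRQ; secure and EL3 interrupts use FIQ */
+        if (type == INTR_TYPE_NS)
+            return 1;   /* SCR_EL3.IRQ bit position */
+
+        return 2;       /* SCR_EL3.FIQ bit position */
+    }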
+
+In the case of ARM standard platforms using GICv3, the interrupt line to be
+configured depends on the security state of the execution context when the
+interrupt is signalled, as follows:
+
+-  The S-EL1 interrupts are signaled as IRQ in S-EL0/1 context and as FIQ in
+   NS-EL0/1/2 context.
+-  The Non secure interrupts are signaled as FIQ in S-EL0/1 context and as IRQ
+   in the NS-EL0/1/2 context.
+-  The EL3 interrupts are signaled as FIQ in both S-EL0/1 and NS-EL0/1/2
+   context.
+
+Function : plat\_ic\_get\_pending\_interrupt\_type() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uint32_t
+
+This API returns the type of the highest priority pending interrupt at the
+platform IC. The IMF uses the interrupt type to retrieve the corresponding
+handler function. ``INTR_TYPE_INVAL`` is returned when there is no interrupt
+pending. The valid interrupt types that can be returned are ``INTR_TYPE_EL3``,
+``INTR_TYPE_S_EL1`` and ``INTR_TYPE_NS``. This API must be invoked at EL3.
+
+In the case of ARM standard platforms using GICv2, the *Highest Priority
+Pending Interrupt Register* (``GICC_HPPIR``) is read to determine the id of
+the pending interrupt. The type of interrupt depends upon the id value as
+follows.
+
+#. id < 1022 is reported as a S-EL1 interrupt.
+#. id = 1022 is reported as a Non-secure interrupt.
+#. id = 1023 is reported as an invalid interrupt type.
+
+In the case of ARM standard platforms using GICv3, the system register
+``ICC_HPPIR0_EL1``, *Highest Priority Pending group 0 Interrupt Register*,
+is read to determine the id of the pending interrupt. The type of interrupt
+depends upon the id value as follows.
+
+#. id = ``PENDING_G1S_INTID`` (1020) is reported as a S-EL1 interrupt
+#. id = ``PENDING_G1NS_INTID`` (1021) is reported as a Non-secure interrupt.
+#. id = ``GIC_SPURIOUS_INTERRUPT`` (1023) is reported as an invalid interrupt type.
+#. All other interrupt ids are reported as EL3 interrupts.
+
+Function : plat\_ic\_get\_pending\_interrupt\_id() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uint32_t
+
+This API returns the id of the highest priority pending interrupt at the
+platform IC. ``INTR_ID_UNAVAILABLE`` is returned when there is no interrupt
+pending.
+
+In the case of ARM standard platforms using GICv2, the *Highest Priority
+Pending Interrupt Register* (``GICC_HPPIR``) is read to determine the id of the
+pending interrupt. The id that is returned by the API depends upon the value of
+the id read from the interrupt controller as follows.
+
+#. id < 1022. id is returned as is.
+#. id = 1022. The *Aliased Highest Priority Pending Interrupt Register*
+   (``GICC_AHPPIR``) is read to determine the id of the non-secure interrupt.
+   This id is returned by the API.
+#. id = 1023. ``INTR_ID_UNAVAILABLE`` is returned.
+
+In the case of ARM standard platforms using GICv3, if the API is invoked from
+EL3, the system register ``ICC_HPPIR0_EL1``, *Highest Priority Pending Interrupt
+group 0 Register*, is read to determine the id of the pending interrupt. The id
+that is returned by the API depends upon the value of the id read from the
+interrupt controller as follows.
+
+#. id < ``PENDING_G1S_INTID`` (1020). id is returned as is.
+#. id = ``PENDING_G1S_INTID`` (1020) or ``PENDING_G1NS_INTID`` (1021). The system
+   register ``ICC_HPPIR1_EL1``, *Highest Priority Pending Interrupt group 1
+   Register* is read to determine the id of the group 1 interrupt. This id
+   is returned by the API as long as it is a valid interrupt id.
+#. If the id is any of the special interrupt identifiers,
+   ``INTR_ID_UNAVAILABLE`` is returned.
+
+When the API is invoked from S-EL1 on GICv3 systems, the id read from the system
+register ``ICC_HPPIR1_EL1``, *Highest Priority Pending group 1 Interrupt
+Register*, is returned if it is not equal to GIC\_SPURIOUS\_INTERRUPT (1023);
+otherwise ``INTR_ID_UNAVAILABLE`` is returned.
+
+Function : plat\_ic\_acknowledge\_interrupt() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : uint32_t
+
+This API is used by the CPU to indicate to the platform IC that processing of
+the highest pending interrupt has begun. It should return the id of the
+interrupt which is being processed.
+
+In ARM standard platforms using GICv2, this function reads the *Interrupt
+Acknowledge Register* (``GICC_IAR``). This changes the state of the highest
+priority pending interrupt from pending to active in the interrupt controller.
+It returns the value read from the ``GICC_IAR``. This value is the id of the
+interrupt whose state has been changed.
+
+In the case of ARM standard platforms using GICv3, if the API is invoked
+from EL3, the function reads the system register ``ICC_IAR0_EL1``, *Interrupt
+Acknowledge Register group 0*. If the API is invoked from S-EL1, the function
+reads the system register ``ICC_IAR1_EL1``, *Interrupt Acknowledge Register
+group 1*. The read changes the state of the highest pending interrupt from
+pending to active in the interrupt controller. The value read is returned
+and is the id of the interrupt whose state has been changed.
+
+The TSP uses this API to start processing of the secure physical timer
+interrupt.
+
+Function : plat\_ic\_end\_of\_interrupt() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t
+    Return   : void
+
+This API is used by the CPU to indicate to the platform IC that processing of
+the interrupt corresponding to the id (passed as the parameter) has
+finished. The id should be the same as the id returned by the
+``plat_ic_acknowledge_interrupt()`` API.
+
+ARM standard platforms write the id to the *End of Interrupt Register*
+(``GICC_EOIR``) in case of GICv2, and to ``ICC_EOIR0_EL1`` or ``ICC_EOIR1_EL1``
+system register in case of GICv3 depending on where the API is invoked from,
+EL3 or S-EL1. This deactivates the corresponding interrupt in the interrupt
+controller.
+
+The TSP uses this API to finish processing of the secure physical timer
+interrupt.
+
+Function : plat\_ic\_get\_interrupt\_type() [mandatory]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t
+    Return   : uint32_t
+
+This API returns the type of the interrupt id passed as the parameter.
+``INTR_TYPE_INVAL`` is returned if the id is invalid. If the id is valid, a valid
+interrupt type (one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1`` and ``INTR_TYPE_NS``) is
+returned depending upon how the interrupt has been configured by the platform
+IC. This API must be invoked at EL3.
+
+ARM standard platforms using GICv2 configure S-EL1 interrupts as Group0 interrupts
+and Non-secure interrupts as Group1 interrupts. The function reads the group value
+corresponding to the interrupt id from the relevant *Interrupt Group Register*
+(``GICD_IGROUPRn``). It uses the group value to determine the type of interrupt.
+
+In the case of ARM standard platforms using GICv3, both the *Interrupt Group
+Register* (``GICD_IGROUPRn``) and *Interrupt Group Modifier Register*
+(``GICD_IGRPMODRn``) are read to figure out whether the interrupt is configured
+as Group 0 secure interrupt, Group 1 secure interrupt or Group 1 NS interrupt.
+
+Crash Reporting mechanism (in BL31)
+-----------------------------------
+
+BL31 implements a crash reporting mechanism which prints the various registers
+of the CPU to enable quick crash analysis and debugging. It requires that a
+console is designated as the crash console by the platform which will be used to
+print the register dump.
+
+The following functions must be implemented by the platform if it wants crash
+reporting mechanism in BL31. The functions are implemented in assembly so that
+they can be invoked without a C Runtime stack.
+
+Function : plat\_crash\_console\_init
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : int
+
+This API is used by the crash reporting mechanism to initialize the crash
+console. It must only use the general purpose registers x0 to x4 to do the
+initialization and must return 1 on success.
+
+Function : plat\_crash\_console\_putc
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : int
+    Return   : int
+
+This API is used by the crash reporting mechanism to print a character on the
+designated crash console. It must only use general purpose registers x1 and
+x2 to do its work. The parameter and the return value are in general purpose
+register x0.
+
+Function : plat\_crash\_console\_flush
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : int
+
+This API is used by the crash reporting mechanism to force write of all buffered
+data on the designated crash console. It should only use general purpose
+registers x0 and x1 to do its work. The return value is 0 on successful
+completion; otherwise the return value is -1.
+
+Build flags
+-----------
+
+-  **ENABLE\_PLAT\_COMPAT**
+   All the platform ports conforming to this API specification should define
+   the build flag ``ENABLE_PLAT_COMPAT`` to 0 as the compatibility layer should
+   be disabled. For more details on the compatibility layer, refer to the
+   `Migration Guide`_.
+
+There are some build flags which can be defined by the platform to control
+inclusion or exclusion of certain BL stages from the FIP image. These flags
+need to be defined in the platform makefile which will get included by the
+build system.
+
+-  **NEED\_BL33**
+   By default, this flag is defined as ``yes`` by the build system and the
+   ``BL33`` build option should be supplied. The platform has the
+   option of excluding the BL33 image in the ``fip`` image by defining this flag
+   to ``no``. If any of the options ``EL3_PAYLOAD_BASE`` or ``PRELOADED_BL33_BASE``
+   are used, this flag will be set to ``no`` automatically.
+
+C Library
+---------
+
+To avoid subtle toolchain behavioral dependencies, the header files provided
+by the compiler are not used. The software is built with the ``-nostdinc`` flag
+to ensure no headers are included from the toolchain inadvertently. Instead the
+required headers are included in the ARM Trusted Firmware source tree. The
+library only contains those C library definitions required by the local
+implementation. If more functionality is required, the needed library functions
+will need to be added to the local implementation.
+
+Versions of `FreeBSD`_ headers can be found in ``include/lib/stdlib``. Some of
+these headers have been cut down in order to simplify the implementation. In
+order to minimize changes to the header files, the `FreeBSD`_ layout has been
+maintained. The generic C library definitions can be found in
+``include/lib/stdlib`` with more system and machine specific declarations in
+``include/lib/stdlib/sys`` and ``include/lib/stdlib/machine``.
+
+The local C library implementations can be found in ``lib/stdlib``. In order to
+extend the C library these files may need to be modified. It is recommended to
+use a release version of `FreeBSD`_ as a starting point.
+
+The C library header files in the `FreeBSD`_ source tree are located in the
+``include`` and ``sys/sys`` directories. `FreeBSD`_ machine specific definitions
+can be found in the ``sys/<machine-type>`` directories. These files define things
+like 'the size of a pointer' and 'the range of an integer'. Since an AArch64
+port for `FreeBSD`_ does not yet exist, the machine specific definitions are
+based on existing machine types with similar properties (for example SPARC64).
+
+Where possible, C library function implementations were taken from `FreeBSD`_
+as found in the ``lib/libc`` directory.
+
+A copy of the `FreeBSD`_ sources can be downloaded with ``git``.
+
+::
+
+    git clone git://github.com/freebsd/freebsd.git -b origin/release/9.2.0
+
+Storage abstraction layer
+-------------------------
+
+In order to improve platform independence and portability, a storage abstraction
+layer is used to load data from non-volatile platform storage.
+
+Each platform should register devices and their drivers via the Storage layer.
+These drivers then need to be initialized by bootloader phases as
+required in their respective ``blx_platform_setup()`` functions. Currently
+storage access is only required by BL1 and BL2 phases. The ``load_image()``
+function uses the storage layer to access non-volatile platform storage.
+
+It is mandatory to implement at least one storage driver. For the ARM
+development platforms the Firmware Image Package (FIP) driver is provided as
+the default means to load data from storage (see the "Firmware Image Package"
+section in the `User Guide`_). The storage layer is described in the header file
+``include/drivers/io/io_storage.h``. The implementation of the common library
+is in ``drivers/io/io_storage.c`` and the driver files are located in
+``drivers/io/``.
+
+Each IO driver must provide ``io_dev_*`` structures, as described in
+``drivers/io/io_driver.h``. These are returned via a mandatory registration
+function that is called on platform initialization. The semi-hosting driver
+implementation in ``io_semihosting.c`` can be used as an example.
+
+The Storage layer provides mechanisms to initialize storage devices before
+IO operations are called. The basic operations supported by the layer
+include ``open()``, ``close()``, ``read()``, ``write()``, ``size()`` and ``seek()``.
+Drivers do not have to implement all operations, but each platform must
+provide at least one driver for a device capable of supporting generic
+operations such as loading a bootloader image.
+
+The current implementation only allows for known images to be loaded by the
+firmware. These images are specified by using their identifiers, as defined in
+``include/plat/common/platform_def.h`` (or a separate header file included from
+there). The platform layer (``plat_get_image_source()``) then returns a reference
+to a device and a driver-specific ``spec`` which will be understood by the driver
+to allow access to the image data.
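+
+As an illustration, ``plat_get_image_source()`` typically switches on the image
+identifier and hands back a device handle and a driver-specific spec; the
+``fip_dev_handle`` and ``bl31_uuid_spec`` objects below are hypothetical globals
+set up when the IO devices are registered::
+
+    int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+                              uintptr_t *image_spec)
+    {
+        switch (image_id) {
+        case BL31_IMAGE_ID:
+            *dev_handle = fip_dev_handle;
+            *image_spec = (uintptr_t)&bl31_uuid_spec;
+            return 0;
+        default:
+            /* Unknown image: let the caller report the error */
+            return -1;
+        }
+    }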
+
+The layer is designed in such a way that it is possible to chain drivers with
+other drivers. For example, file-system drivers may be implemented on top of
+physical block devices, both represented by IO devices with corresponding
+drivers. In such a case, the file-system "binding" with the block device may
+be deferred until the file-system device is initialised.
+
+The abstraction currently depends on structures being statically allocated
+by the drivers and callers, as the system does not yet provide a means of
+dynamically allocating memory. This may also have the effect of limiting the
+amount of open resources per driver.
+
+--------------
+
+*Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _Migration Guide: platform-migration-guide.rst
+.. _include/plat/common/platform.h: ../include/plat/common/platform.h
+.. _include/plat/arm/common/plat\_arm.h: ../include/plat/arm/common/plat_arm.h
+.. _User Guide: user-guide.rst
+.. _include/plat/common/common\_def.h: ../include/plat/common/common_def.h
+.. _include/plat/arm/common/arm\_def.h: ../include/plat/arm/common/arm_def.h
+.. _plat/common/aarch64/platform\_mp\_stack.S: ../plat/common/aarch64/platform_mp_stack.S
+.. _plat/common/aarch64/platform\_up\_stack.S: ../plat/common/aarch64/platform_up_stack.S
+.. _For example, define the build flag in platform.mk: PLAT_PL061_MAX_GPIOS%20:=%20160
+.. _Power Domain Topology Design: psci-pd-tree.rst
+.. _include/common/bl\_common.h: ../include/common/bl_common.h
+.. _include/lib/aarch32/arch.h: ../include/lib/aarch32/arch.h
+.. _Firmware Design: firmware-design.rst
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _plat/arm/board/fvp/fvp\_pm.c: ../plat/arm/board/fvp/fvp_pm.c
+.. _IMF Design Guide: interrupt-framework-design.rst
+.. _ARM Generic Interrupt Controller version 2.0 (GICv2): http://infocenter.arm.com/help/topic/com.arm.doc.ihi0048b/index.html
+.. _3.0 (GICv3): http://infocenter.arm.com/help/topic/com.arm.doc.ihi0069b/index.html
+.. _FreeBSD: http://www.freebsd.org
diff --git a/docs/psci-lib-integration-guide.rst b/docs/psci-lib-integration-guide.rst
new file mode 100644
index 0000000..5e788d1
--- /dev/null
+++ b/docs/psci-lib-integration-guide.rst
@@ -0,0 +1,560 @@
+PSCI Library Integration guide for ARMv8-A AArch32 systems
+==========================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This document describes the PSCI library interface with a focus on how to
+integrate with a suitable Trusted OS for an ARMv8-A AArch32 system. The PSCI
+Library implements the PSCI Standard as described in `PSCI spec`_ and is meant
+to be integrated with EL3 Runtime Software which invokes the PSCI Library
+interface appropriately. **EL3 Runtime Software** refers to software executing
+at the highest secure privileged mode, which is EL3 in AArch64 or Secure SVC/
+Monitor mode in AArch32, and provides runtime services to the non-secure world.
+The runtime service request is made via SMC (Secure Monitor Call) and the call
+must adhere to `SMCCC`_. In AArch32, EL3 Runtime Software may additionally
+include Trusted OS functionality. A minimal AArch32 Secure Payload, SP-MIN, is
+provided in ARM Trusted Firmware to illustrate the usage and integration of the
+PSCI library. The description of PSCI library interface and its integration
+with EL3 Runtime Software in this document is targeted towards AArch32 systems.
+
+Generic call sequence for PSCI Library interface (AArch32)
+----------------------------------------------------------
+
+The generic call sequence of PSCI Library interfaces (see
+`PSCI Library Interface`_) during cold boot in AArch32
+system is described below:
+
+#. After cold reset, the EL3 Runtime Software performs its cold boot
+   initialization including the PSCI library pre-requisites mentioned in
+   `PSCI Library Interface`_, and also the necessary platform
+   setup.
+
+#. Call ``psci_setup()`` in Monitor mode.
+
+#. Optionally call ``psci_register_spd_pm_hook()`` to register callbacks to
+   do bookkeeping for the EL3 Runtime Software during power management.
+
+#. Call ``psci_prepare_next_non_secure_ctx()`` to initialize the non-secure CPU
+   context.
+
+#. Get the non-secure ``cpu_context_t`` for the current CPU by calling
+   ``cm_get_context()``, then program the registers in the non-secure
+   context and exit to the non-secure world. If the EL3 Runtime Software needs
+   additional configuration to be set for non-secure context, like routing
+   FIQs to the secure world, the values of the registers can be modified prior
+   to programming. See `PSCI CPU context management`_ for more
+   details on CPU context management.
+
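+Put together, the cold boot steps above might look roughly as follows in the
+EL3 Runtime Software (a simplified sketch; ``psci_args``, ``my_spd_pm_ops`` and
+``program_ns_registers_and_exit()`` are placeholders for runtime-software
+specific definitions, and error handling is omitted):
+
+.. code:: c
+
+        void runtime_sw_cold_boot(entry_point_info_t *ns_image_info)
+        {
+                /* Steps 1-2: platform/architecture setup done earlier, then
+                 * PSCI setup in Monitor mode. psci_args is a psci_lib_args_t
+                 * prepared by the EL3 Runtime Software. */
+                psci_setup(&psci_args);
+
+                /* Step 3 (optional): register power management callbacks */
+                psci_register_spd_pm_hook(&my_spd_pm_ops);
+
+                /* Step 4: initialize the non-secure CPU context */
+                psci_prepare_next_non_secure_ctx(ns_image_info);
+
+                /* Step 5: retrieve the context, program the registers and
+                 * exit to the non-secure world (runtime-software specific). */
+                program_ns_registers_and_exit(cm_get_context(NON_SECURE));
+        }
+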
+The generic call sequence of PSCI library interfaces during warm boot in
+AArch32 systems is described below:
+
+#. After warm reset, the EL3 Runtime Software performs the necessary warm
+   boot initialization including the PSCI library pre-requisites mentioned in
+   `PSCI Library Interface`_ (Note that the Data cache
+   **must not** be enabled).
+
+#. Call ``psci_warmboot_entrypoint()`` in Monitor mode. This interface
+   initializes/restores the non-secure CPU context as well.
+
+#. Do step 5 of the cold boot call sequence described above.
+
+The generic call sequence of PSCI library interfaces on receipt of a PSCI SMC
+on an AArch32 system is described below:
+
+#. On receipt of an SMC, save the register context as per `SMCCC`_.
+
+#. If the SMC function identifier corresponds to an SMC32 PSCI API, construct
+   the appropriate arguments and call the ``psci_smc_handler()`` interface.
+   The invocation may or may not return back to the caller depending on
+   whether the PSCI API resulted in power down of the CPU.
+
+#. If ``psci_smc_handler()`` returns, populate the return value in R0 (AArch32)/
+   X0 (AArch64) and restore other registers as per `SMCCC`_.
+
+PSCI CPU context management
+---------------------------
+
+The PSCI library is in charge of initializing/restoring the non-secure CPU system
+registers according to `PSCI specification`_ during cold/warm boot.
+This is referred to as ``PSCI CPU Context Management``. Registers that need to
+be preserved across CPU power down/power up cycles are maintained in
+``cpu_context_t`` data structure. The initialization of other non-secure CPU
+system registers which do not require coordination with the EL3 Runtime
+Software is done directly by the PSCI library (see ``cm_prepare_el3_exit()``).
+
+The EL3 Runtime Software is responsible for managing register context
+during switch between Normal and Secure worlds. The register context to be
+saved and restored depends on the mechanism used to trigger the world switch.
+For example, if the world switch was triggered by an SMC call, then the
+registers need to be saved and restored according to `SMCCC`_. In AArch64,
+due to the tight integration with BL31, both BL31 and PSCI library
+use the same ``cpu_context_t`` data structure for PSCI CPU context management
+and register context management during world switch. This cannot be assumed
+for AArch32 EL3 Runtime Software since most AArch32 Trusted OSes already implement
+a mechanism for register context management during world switch. Hence, when
+the PSCI library is integrated with an AArch32 EL3 Runtime Software, the
+``cpu_context_t`` is stripped down to just PSCI CPU context management.
+
+During cold/warm boot, after invoking appropriate PSCI library interfaces, it
+is expected that the EL3 Runtime Software will query the ``cpu_context_t`` and
+write appropriate values to the corresponding system registers. This mechanism
+resolves 2 additional problems for AArch32 EL3 Runtime Software:
+
+#. Values for certain system registers like SCR and SCTLR cannot be
+   unilaterally determined by PSCI library and need inputs from the EL3
+   Runtime Software. Using ``cpu_context_t`` as an intermediary data store
+   allows EL3 Runtime Software to modify the register values appropriately
+   before programming them.
+
+#. The PSCI library provides appropriate LR and SPSR values (entrypoint
+   information) for exit into non-secure world. Using ``cpu_context_t`` as an
+   intermediary data store allows the EL3 Runtime Software to store these
+   values safely until it is ready for exit to non-secure world.
+
+Currently the ``cpu_context_t`` data structure for AArch32 stores the following
+registers: R0 - R3, LR (R14), SCR, SPSR, SCTLR.
+
+The EL3 Runtime Software must implement accessors to get/set pointers
+to CPU context ``cpu_context_t`` data and these are described in
+`CPU Context management API`_.
+
+PSCI Library Interface
+----------------------
+
+The PSCI library implements the `PSCI Specification`_. The interfaces
+to this library are declared in ``psci.h`` and are as listed below:
+
+.. code:: c
+
+        u_register_t psci_smc_handler(uint32_t smc_fid, u_register_t x1,
+                                      u_register_t x2, u_register_t x3,
+                                      u_register_t x4, void *cookie,
+                                      void *handle, u_register_t flags);
+        int psci_setup(const psci_lib_args_t *lib_args);
+        void psci_warmboot_entrypoint(void);
+        void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+        void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info);
+
+The CPU context data ``cpu_context_t`` is programmed to the registers differently
+when PSCI is integrated with an AArch32 EL3 Runtime Software compared to
+when it is integrated with an AArch64 EL3 Runtime Software (BL31). For
+example, in the case of AArch64, there is no need to retrieve ``cpu_context_t``
+data and program the registers as this is done implicitly as part of
+``el3_exit``. The description below of the PSCI interfaces is targeted at
+integration with an AArch32 EL3 Runtime Software.
+
+The PSCI library is responsible for initializing/restoring the non-secure world
+to an appropriate state after boot and may choose to directly program the
+non-secure system registers. The PSCI generic code takes care not to directly
+modify any of the system registers affecting the secure world and instead
+returns the values to be programmed to these registers via ``cpu_context_t``.
+The EL3 Runtime Software is responsible for programming those registers and
+can use the proposed values provided in the ``cpu_context_t``, modifying the
+values if required.
+
+The PSCI library needs the flexibility to access both secure and non-secure
+copies of banked registers. Hence it needs to be invoked in Monitor mode
+for AArch32 and in EL3 for AArch64. The NS bit in SCR (in AArch32) or SCR\_EL3
+(in AArch64) must be set to 0. Additional requirements for the PSCI library
+interfaces are:
+
+-  Instruction cache must be enabled
+-  Both IRQ and FIQ must be masked for the current CPU
+-  The page tables must be setup and the MMU enabled
+-  The C runtime environment must be setup and stack initialized
+-  The Data cache must be enabled prior to invoking any of the PSCI library
+   interfaces except for ``psci_warmboot_entrypoint()``, which is normally
+   invoked with data caches disabled. However, if the build option
+   ``HW_ASSISTED_COHERENCY`` is enabled, data caches are expected to be
+   enabled even for ``psci_warmboot_entrypoint()``.
+
+Further requirements for each interface can be found in the interface
+description.
+
+Interface : psci\_setup()
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : const psci_lib_args_t *lib_args
+    Return   : void
+
+This function is to be called by the primary CPU during cold boot before
+any other interface to the PSCI library. It takes ``lib_args``, a const pointer
+to ``psci_lib_args_t``, as the argument. The ``psci_lib_args_t`` is a versioned
+structure and is declared in ``psci.h`` header as follows:
+
+.. code:: c
+
+        typedef struct psci_lib_args {
+            /* The version information of PSCI Library Interface */
+            param_header_t        h;
+            /* The warm boot entrypoint function */
+            mailbox_entrypoint_t  mailbox_ep;
+        } psci_lib_args_t;
+
+The first field ``h``, of ``param_header_t`` type, provides the version
+information. The second field ``mailbox_ep`` is the warm boot entrypoint address
+and is used to configure the platform mailbox. Helper macros are provided in
+``psci.h`` to construct the ``lib_args`` argument statically or during runtime. Prior
+to calling the ``psci_setup()`` interface, the platform setup for cold boot
+must have completed. Major actions performed by this interface are:
+
+-  Initializes architecture.
+-  Initializes PSCI power domain and state coordination data structures.
+-  Calls ``plat_setup_psci_ops()`` with warm boot entrypoint ``mailbox_ep`` as
+   argument.
+-  Calls ``cm_set_context_by_index()`` (see
+   `CPU Context management API`_) for all the CPUs in the
+   platform
+
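+As an illustration, ``lib_args`` could be constructed statically and passed to
+``psci_setup()`` as follows (a sketch; it uses the
+``DEFINE_STATIC_PSCI_LIB_ARGS_V1`` helper macro as found in ``psci.h`` of the
+ARM Trusted Firmware tree, and ``sp_min_warm_entrypoint`` stands in for the
+runtime software's warm boot entrypoint):
+
+.. code:: c
+
+        /* Statically construct the versioned lib_args structure */
+        DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, sp_min_warm_entrypoint);
+
+        void runtime_sw_psci_init(void)
+        {
+                /* Cold boot platform setup must already be complete */
+                psci_setup(&psci_args);
+        }
+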
+Interface : psci\_prepare\_next\_non\_secure\_ctx()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : entry_point_info_t *next_image_info
+    Return   : void
+
+After ``psci_setup()`` and prior to exit to the non-secure world, this function
+must be called by the EL3 Runtime Software to initialize the non-secure world
+context. The non-secure world entrypoint information ``next_image_info`` (first
+argument) will be used to determine the non-secure context. After this function
+returns, the EL3 Runtime Software must retrieve the ``cpu_context_t`` (using
+``cm_get_context()``) for the current CPU and program the registers prior to exit
+to the non-secure world.
+
+Interface : psci\_register\_spd\_pm\_hook()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : const spd_pm_ops_t *
+    Return   : void
+
+As explained in `Secure payload power management callback`_,
+the EL3 Runtime Software may want to perform some bookkeeping during power
+management operations. This function is used to register the ``spd_pm_ops_t``
+(first argument) callbacks with the PSCI library which will be called
+appropriately during power management. Calling this function is optional; if
+used, it must be called by the primary CPU during the cold boot sequence after
+``psci_setup()`` has completed.
+
+Interface : psci\_smc\_handler()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t smc_fid, u_register_t x1,
+               u_register_t x2, u_register_t x3,
+               u_register_t x4, void *cookie,
+               void *handle, u_register_t flags
+    Return   : u_register_t
+
+This function is the top level handler for SMCs which fall within the
+PSCI service range specified in `SMCCC`_. The function ID ``smc_fid`` (first
+argument) determines the PSCI API to be called. The ``x1`` to ``x4`` (2nd to 5th
+arguments), are the values of the registers r1 - r4 (in AArch32) or x1 - x4
+(in AArch64) when the SMC is received. These are the arguments to PSCI API as
+described in `PSCI spec`_. The ``flags`` (8th argument) is a bit field parameter
+and is detailed in the ``smcc.h`` header. It indicates whether the call is from the
+secure or non-secure world. The ``cookie`` (6th argument) and the ``handle``
+(7th argument) are not used and are reserved for future use.
+
+The return value from this interface is the return value from the underlying
+PSCI API corresponding to ``smc_fid``. This function may not return to the
+caller if the PSCI API causes a power down of the CPU. In this case, when the CPU
+wakes up, it will start execution from the warm reset address.
+
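+For example, an AArch32 Monitor-mode SMC dispatcher might forward a PSCI call
+as follows (a sketch; ``saved_gp_regs`` is a hypothetical structure holding the
+register values saved on SMC entry as per `SMCCC`_, and the surrounding
+save/restore code is not shown):
+
+.. code:: c
+
+        u_register_t dispatch_psci_smc(struct saved_gp_regs *ctx,
+                                       u_register_t flags)
+        {
+                u_register_t ret;
+
+                ret = psci_smc_handler(ctx->r0,            /* smc_fid */
+                                       ctx->r1, ctx->r2,   /* x1, x2 */
+                                       ctx->r3, ctx->r4,   /* x3, x4 */
+                                       NULL, NULL,         /* cookie, handle */
+                                       flags);
+
+                /* May not return if the PSCI API powered down the CPU.
+                 * Otherwise populate R0 with the return value before
+                 * restoring the remaining registers. */
+                ctx->r0 = ret;
+                return ret;
+        }
+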
+Interface : psci\_warmboot\_entrypoint()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : void
+    Return   : void
+
+This function performs the warm boot initialization/restoration as mandated by
+`PSCI spec`_. For AArch32, on wakeup from power down the CPU resets to secure SVC
+mode and the EL3 Runtime Software must perform the prerequisite initializations
+mentioned at top of this section. This function must be called with Data cache
+disabled (unless build option ``HW_ASSISTED_COHERENCY`` is enabled) but with MMU
+initialized and enabled. The major actions performed by this function are:
+
+-  Invalidates the stack and enables the data cache.
+-  Initializes architecture and PSCI state coordination.
+-  Restores/Initializes the peripheral drivers to the required state via
+   appropriate ``plat_psci_ops_t`` hooks
+-  Restores the EL3 Runtime Software context via appropriate ``spd_pm_ops_t``
+   callbacks.
+-  Restores/Initializes the non-secure context and populates the
+   ``cpu_context_t`` for the current CPU.
+
+Upon the return of this function, the EL3 Runtime Software must retrieve the
+non-secure ``cpu_context_t`` using ``cm_get_context()`` and program the registers
+prior to exit to the non-secure world.
+
+EL3 Runtime Software dependencies
+---------------------------------
+
+The PSCI Library includes supporting frameworks like context management,
+cpu operations (cpu\_ops) and per-cpu data framework. Other helper library
+functions like bakery locks and spin locks are also included in the library.
+The dependencies which must be fulfilled by the EL3 Runtime Software
+for integration with PSCI library are described below.
+
+General dependencies
+~~~~~~~~~~~~~~~~~~~~
+
+Since the PSCI library is a Multiprocessor (MP) implementation, the EL3 Runtime
+Software must provide an MP-capable SMC handling framework that adheres to the
+`SMCCC`_ specification.
+
+The EL3 Runtime Software must also export cache maintenance primitives
+and some helper utilities for assert, print and memory operations as listed
+below. The ARM Trusted Firmware source tree provides implementations for all
+these functions but the EL3 Runtime Software may use its own implementation.
+
+**Functions : assert(), memcpy(), memset()**
+
+These must be implemented as described in the ISO C Standard.
+
+**Function : flush\_dcache\_range()**
+
+::
+
+    Argument : uintptr_t addr, size_t size
+    Return   : void
+
+This function cleans and invalidates (flushes) the data cache for the memory
+region starting at address ``addr`` (first argument) and of size ``size`` (second argument).
+
+**Function : inv\_dcache\_range()**
+
+::
+
+    Argument : uintptr_t addr, size_t size
+    Return   : void
+
+This function invalidates (flushes) the data cache for the memory region
+starting at address ``addr`` (first argument) and of size ``size`` (second argument).
+
+**Function : do\_panic()**
+
+::
+
+    Argument : void
+    Return   : void
+
+This function will be called by the PSCI library on encountering a critical
+failure that cannot be recovered from. This function **must not** return.
+
+**Function : tf\_printf()**
+
+This is a printf-compatible function, but unlike printf, it does not return any
+value. The ARM Trusted Firmware source tree provides an implementation which
+is optimized for stack usage and supports only a subset of format specifiers.
+The details of the format specifiers supported can be found in the
+``tf_printf.c`` file in ARM Trusted Firmware source tree.
+
+CPU Context management API
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CPU context management data memory is statically allocated by the PSCI
+library in the BSS section. The PSCI library requires the EL3 Runtime Software to implement
+APIs to store and retrieve pointers to this CPU context data. SP-MIN
+demonstrates how these APIs can be implemented but the EL3 Runtime Software can
+choose a more optimal implementation (like dedicating the secure TPIDRPRW
+system register (in AArch32) for storing these pointers).
+
+**Function : cm\_set\_context\_by\_index()**
+
+::
+
+    Argument : unsigned int cpu_idx, void *context, unsigned int security_state
+    Return   : void
+
+This function is called during cold boot when the ``psci_setup()`` PSCI library
+interface is called.
+
+This function must store the pointer to the CPU context data, ``context`` (2nd
+argument), for the specified ``security_state`` (3rd argument) and CPU identified
+by ``cpu_idx`` (first argument). The ``security_state`` will always be non-secure
+when called by PSCI library and this argument is retained for compatibility
+with BL31. The ``cpu_idx`` will correspond to the index returned by the
+``plat_core_pos_by_mpidr()`` for ``mpidr`` of the CPU.
+
+The actual method of storing the ``context`` pointers is implementation specific.
+For example, SP-MIN stores the pointers in the array ``sp_min_cpu_ctx_ptr``
+declared in ``sp_min_main.c``.
+
+**Function : cm\_get\_context()**
+
+::
+
+    Argument : uint32_t security_state
+    Return   : void *
+
+This function must return the pointer to the ``cpu_context_t`` structure for
+the specified ``security_state`` (first argument) for the current CPU. The caller
+must ensure that ``cm_set_context_by_index`` is called first and the appropriate
+context pointers are stored prior to invoking this API. The ``security_state``
+will always be non-secure when called by PSCI library and this argument
+is retained for compatibility with BL31.
+
+**Function : cm\_get\_context\_by\_index()**
+
+::
+
+    Argument : unsigned int cpu_idx, unsigned int security_state
+    Return   : void *
+
+This function must return the pointer to the ``cpu_context_t`` structure for
+the specified ``security_state`` (second argument) for the CPU identified by
+``cpu_idx`` (first argument). The caller must ensure that
+``cm_set_context_by_index`` is called first and the appropriate context
+pointers are stored prior to invoking this API. The ``security_state`` will
+always be non-secure when called by PSCI library and this argument is
+retained for compatibility with BL31. The ``cpu_idx`` will correspond to the
+index returned by the ``plat_core_pos_by_mpidr()`` for ``mpidr`` of the CPU.
+
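+A minimal sketch of these accessors, modelled on the SP-MIN approach of keeping
+one pointer per CPU in a static array (production code would additionally
+assert on the arguments), is shown below:
+
+.. code:: c
+
+        /* One non-secure context pointer per CPU, indexed by core index.
+         * security_state is ignored here: the PSCI library only ever
+         * passes NON_SECURE. */
+        static void *cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+        void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+                                     unsigned int security_state)
+        {
+                cpu_ctx_ptr[cpu_idx] = context;
+        }
+
+        void *cm_get_context_by_index(unsigned int cpu_idx,
+                                      unsigned int security_state)
+        {
+                return cpu_ctx_ptr[cpu_idx];
+        }
+
+        void *cm_get_context(uint32_t security_state)
+        {
+                return cpu_ctx_ptr[plat_my_core_pos()];
+        }
+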
+Platform API
+~~~~~~~~~~~~
+
+The platform layer abstracts the platform-specific details from the generic
+PSCI library. The following platform APIs/macros must be defined by the EL3
+Runtime Software for integration with the PSCI library.
+
+The mandatory platform APIs are:
+
+-  plat\_my\_core\_pos
+-  plat\_core\_pos\_by\_mpidr
+-  plat\_get\_syscnt\_freq2
+-  plat\_get\_power\_domain\_tree\_desc
+-  plat\_setup\_psci\_ops
+-  plat\_reset\_handler
+-  plat\_panic\_handler
+-  plat\_get\_my\_stack
+
+The mandatory platform macros are:
+
+-  PLATFORM\_CORE\_COUNT
+-  PLAT\_MAX\_PWR\_LVL
+-  PLAT\_NUM\_PWR\_DOMAINS
+-  CACHE\_WRITEBACK\_GRANULE
+-  PLAT\_MAX\_OFF\_STATE
+-  PLAT\_MAX\_RET\_STATE
+-  PLAT\_MAX\_PWR\_LVL\_STATES (optional)
+-  PLAT\_PCPU\_DATA\_SIZE (optional)
+
+The details of these APIs/macros can be found in `Porting Guide`_.
+
+All platform specific operations for power management are done via
+``plat_psci_ops_t`` callbacks registered by the platform when
+``plat_setup_psci_ops()`` API is called. The description of each of
+the callbacks in ``plat_psci_ops_t`` can be found in PSCI section of the
+`Porting Guide`_. If any of these callbacks are not registered, then the
+PSCI API associated with that callback will not be supported by PSCI
+library.
+
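+As an illustration, a platform port could register its callbacks roughly as
+follows (a sketch; the hook implementations and the ``program_mailbox()``
+helper are placeholders, and only a subset of the available
+``plat_psci_ops_t`` fields is shown):
+
+.. code:: c
+
+        static const plat_psci_ops_t my_plat_psci_ops = {
+                .pwr_domain_on        = my_pwr_domain_on,
+                .pwr_domain_off       = my_pwr_domain_off,
+                .pwr_domain_on_finish = my_pwr_domain_on_finish,
+                /* Callbacks left unregistered make the corresponding PSCI
+                 * APIs unsupported. */
+        };
+
+        int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+                                const plat_psci_ops_t **psci_ops)
+        {
+                /* Program the platform mailbox with the warm boot entrypoint,
+                 * then hand the callback table back to the PSCI library. */
+                program_mailbox(sec_entrypoint);
+                *psci_ops = &my_plat_psci_ops;
+                return 0;
+        }
+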
+Secure payload power management callback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+During PSCI power management operations, the EL3 Runtime Software may
+need to perform some bookkeeping, and PSCI library provides
+``spd_pm_ops_t`` callbacks for this purpose. These hooks must be
+populated and registered by using ``psci_register_spd_pm_hook()`` PSCI
+library interface.
+
+Typical bookkeeping during PSCI power management calls includes saving and
+restoring the EL3 Runtime Software context. Also, if the EL3 Runtime Software
+makes use of secure interrupts, then these interrupts must be managed
+appropriately during CPU power down/power up. Any secure interrupt targeted
+at the current CPU must be disabled or re-targeted to another running CPU
+prior to powering down the current CPU. During power up, these interrupts can
+be enabled/re-targeted back to the current CPU.
+
+.. code:: c
+
+        typedef struct spd_pm_ops {
+                void (*svc_on)(u_register_t target_cpu);
+                int32_t (*svc_off)(u_register_t __unused);
+                void (*svc_suspend)(u_register_t max_off_pwrlvl);
+                void (*svc_on_finish)(u_register_t __unused);
+                void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+                int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
+                int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
+                void (*svc_system_off)(void);
+                void (*svc_system_reset)(void);
+        } spd_pm_ops_t;
+
+A brief description of each callback is given below:
+
+-  svc\_on, svc\_off, svc\_on\_finish
+
+   The ``svc_on``, ``svc_off`` callbacks are called during PSCI\_CPU\_ON,
+   PSCI\_CPU\_OFF APIs respectively. The ``svc_on_finish`` is called when the
+   target CPU of PSCI\_CPU\_ON API powers up and executes the
+   ``psci_warmboot_entrypoint()`` PSCI library interface.
+
+-  svc\_suspend, svc\_suspend\_finish
+
+   The ``svc_suspend`` callback is called during power down by either
+   PSCI\_SUSPEND or PSCI\_SYSTEM\_SUSPEND APIs. The ``svc_suspend_finish`` is
+   called when the CPU wakes up from suspend and executes the
+   ``psci_warmboot_entrypoint()`` PSCI library interface. The ``max_off_pwrlvl``
+   (first parameter) denotes the highest power domain level being powered down
+   to or woken up from suspend.
+
+-  svc\_system\_off, svc\_system\_reset
+
+   These callbacks are called during PSCI\_SYSTEM\_OFF and PSCI\_SYSTEM\_RESET
+   PSCI APIs respectively.
+
+-  svc\_migrate\_info
+
+   This callback is called in response to PSCI\_MIGRATE\_INFO\_TYPE or
+   PSCI\_MIGRATE\_INFO\_UP\_CPU APIs. The return value of this callback must
+   correspond to the return value of PSCI\_MIGRATE\_INFO\_TYPE API as described
+   in `PSCI spec`_. If the secure payload is a Uniprocessor (UP)
+   implementation, then it must update the MPIDR of the CPU it is resident on
+   via ``resident_cpu`` (first argument). The update to ``resident_cpu`` is
+   ignored if the secure payload is a Multiprocessor (MP) implementation.
+
+-  svc\_migrate
+
+   This callback is only relevant if the secure payload in EL3 Runtime
+   Software is a Uniprocessor (UP) implementation and supports migration from
+   the current CPU ``from_cpu`` (first argument) to another CPU ``to_cpu``
+   (second argument). This callback is called in response to PSCI\_MIGRATE
+   API. This callback is never called if the secure payload is a
+   Multiprocessor (MP) implementation.
+
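+For example, the EL3 Runtime Software could populate and register a subset of
+these hooks as follows (a sketch; the handler bodies are placeholders and hooks
+that are not needed are simply left unset):
+
+.. code:: c
+
+        static void my_svc_suspend(u_register_t max_off_pwrlvl)
+        {
+                /* Save EL3 Runtime Software state and disable or re-target
+                 * secure interrupts aimed at this CPU. */
+        }
+
+        static void my_svc_suspend_finish(u_register_t max_off_pwrlvl)
+        {
+                /* Restore the state saved in my_svc_suspend(). */
+        }
+
+        static const spd_pm_ops_t my_spd_pm_ops = {
+                .svc_suspend        = my_svc_suspend,
+                .svc_suspend_finish = my_svc_suspend_finish,
+        };
+
+        /* Called by the primary CPU during cold boot, after psci_setup() */
+        void register_pm_hooks(void)
+        {
+                psci_register_spd_pm_hook(&my_spd_pm_ops);
+        }
+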
+CPU operations
+~~~~~~~~~~~~~~
+
+The CPU operations (cpu\_ops) framework implements the power down sequences
+specific to each CPU; the details can be found in the ``CPU specific operations
+framework`` section of the `Firmware Design`_. The ARM Trusted Firmware
+tree implements the ``cpu_ops`` for various supported CPUs and the EL3 Runtime
+Software needs to include the required ``cpu_ops`` in its build. The start and
+end of the ``cpu_ops`` descriptors must be exported by the EL3 Runtime Software
+via the ``__CPU_OPS_START__`` and ``__CPU_OPS_END__`` linker symbols.
+
+The ``cpu_ops`` descriptors also include reset sequences and may include errata
+workarounds for the CPU. The EL3 Runtime Software can choose to call this
+during cold/warm reset if it does not implement its own reset sequence/errata
+workarounds.
+
+--------------
+
+*Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.*
+
+.. _PSCI spec: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _SMCCC: https://silver.arm.com/download/ARM_and_AMBA_Architecture/AR570-DA-80002-r0p0-00rel0/ARM_DEN0028A_SMC_Calling_Convention.pdf
+.. _PSCI specification: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _PSCI Specification: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _Porting Guide: porting-guide.rst
+.. _Firmware Design: ./firmware-design.rst
diff --git a/docs/psci-pd-tree.rst b/docs/psci-pd-tree.rst
new file mode 100644
index 0000000..329106c
--- /dev/null
+++ b/docs/psci-pd-tree.rst
@@ -0,0 +1,312 @@
+PSCI Library Integration guide for ARMv8-A AArch32 systems
+==========================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+--------------
+
+Requirements
+------------
+
+#. A platform must export the ``plat_get_aff_count()`` and
+   ``plat_get_aff_state()`` APIs to enable the generic PSCI code to
+   populate a tree that describes the hierarchy of power domains in the
+   system. This approach is inflexible because a change to the topology
+   requires a change in the code.
+
+   It would be much simpler for the platform to describe its power domain tree
+   in a data structure.
+
+#. The generic PSCI code generates MPIDRs in order to populate the power domain
+   tree. It also uses an MPIDR to find a node in the tree. The assumption that
+   a platform will use exactly the same MPIDRs as generated by the generic PSCI
+   code is not scalable. The use of an MPIDR also restricts the number of
+   levels in the power domain tree to four.
+
+   Therefore, there is a need to decouple allocation of MPIDRs from the
+   mechanism used to populate the power domain topology tree.
+
+#. The current arrangement of the power domain tree requires a binary search
+   over the sibling nodes at a particular level to find a specified power
+   domain node. During a power management operation, the tree is traversed from
+   a 'start' to an 'end' power level. The binary search is required to find the
+   node at each level. The natural way to perform this traversal is to
+   start from a leaf node and follow the parent node pointer to reach the end
+   level.
+
+   Therefore, there is a need to define data structures that implement the tree in
+   a way which facilitates such a traversal.
+
+#. The attributes of a core power domain differ from the attributes of power
+   domains at higher levels. For example, only a core power domain can be identified
+   using an MPIDR. There is no requirement to perform state coordination while
+   performing a power management operation on the core power domain.
+
+   Therefore, there is a need to implement the tree in a way which facilitates this
+   distinction between a leaf and non-leaf node and any associated
+   optimizations.
+
+--------------
+
+Design
+------
+
+Describing a power domain tree
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To fulfill requirement 1., the existing platform APIs
+``plat_get_aff_count()`` and ``plat_get_aff_state()`` have been
+removed. A platform must define an array of unsigned chars such that:
+
+#. The first entry in the array specifies the number of power domains at the
+   highest power level implemented in the platform. This caters for platforms
+   where the power domain tree does not have a single root node, for example,
+   the FVP has two cluster power domains at the highest level (1).
+
+#. Each subsequent entry corresponds to a power domain and contains the number
+   of power domains that are its direct children.
+
+#. The size of the array minus the first entry will be equal to the number of
+   non-leaf power domains.
+
+#. The value in each entry in the array is used to find the number of entries
+   to consider at the next level. The sum of the values (number of children) of
+   all the entries at a level specifies the number of entries in the array for
+   the next level.
+
+The following example power domain topology tree will be used to describe the
+above text further. The leaf and non-leaf nodes in this tree have been numbered
+separately.
+
+::
+
+                                         +-+
+                                         |0|
+                                         +-+
+                                        /   \
+                                       /     \
+                                      /       \
+                                     /         \
+                                    /           \
+                                   /             \
+                                  /               \
+                                 /                 \
+                                /                   \
+                               /                     \
+                            +-+                       +-+
+                            |1|                       |2|
+                            +-+                       +-+
+                           /   \                     /   \
+                          /     \                   /     \
+                         /       \                 /       \
+                        /         \               /         \
+                     +-+           +-+         +-+           +-+
+                     |3|           |4|         |5|           |6|
+                     +-+           +-+         +-+           +-+
+            +---+-----+    +----+----|     +----+----+     +----+-----+-----+
+            |   |     |    |    |    |     |    |    |     |    |     |     |
+            |   |     |    |    |    |     |    |    |     |    |     |     |
+            v   v     v    v    v    v     v    v    v     v    v     v     v
+          +-+  +-+   +-+  +-+  +-+  +-+   +-+  +-+  +-+   +-+  +--+  +--+  +--+
+          |0|  |1|   |2|  |3|  |4|  |5|   |6|  |7|  |8|   |9|  |10|  |11|  |12|
+          +-+  +-+   +-+  +-+  +-+  +-+   +-+  +-+  +-+   +-+  +--+  +--+  +--+
+
+This tree is defined by the platform as the array described above as follows:
+
+::
+
+        #define PLAT_NUM_POWER_DOMAINS       20
+        #define PLATFORM_CORE_COUNT          13
+        #define PSCI_NUM_NON_CPU_PWR_DOMAINS \
+                           (PLAT_NUM_POWER_DOMAINS - PLATFORM_CORE_COUNT)
+
+        unsigned char plat_power_domain_tree_desc[] = { 1, 2, 2, 2, 3, 3, 3, 4};
+
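+The generic PSCI code obtains this array through the mandatory
+``plat_get_power_domain_tree_desc()`` platform API, which for the example above
+could simply be (a sketch):
+
+.. code:: c
+
+    const unsigned char *plat_get_power_domain_tree_desc(void)
+    {
+        /* Return the topology description defined above */
+        return plat_power_domain_tree_desc;
+    }
+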
+Removing assumptions about MPIDRs used in a platform
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To fulfill requirement 2., it is assumed that the platform assigns a
+unique number (core index) between ``0`` and ``PLATFORM_CORE_COUNT - 1`` to each core
+power domain. MPIDRs could be allocated in any manner and will not be used to
+populate the tree.
+
+``plat_core_pos_by_mpidr(mpidr)`` will return the core index for the core
+corresponding to the MPIDR. It will return an error (-1) if an MPIDR is passed
+which is not allocated or corresponds to an absent core. The semantics of this
+platform API have changed since it is required to validate the passed MPIDR. It
+has been made a mandatory API as a result.
+
+Another mandatory API, ``plat_my_core_pos()`` has been added to return the core
+index for the calling core. This API provides a more lightweight mechanism to get
+the index since there is no need to validate the MPIDR of the calling core.
+
+The platform should assign the core indices (as illustrated in the diagram above)
+such that, if the core nodes are numbered from left to right, then the index
+for a core domain will be the same as the index returned by
+``plat_core_pos_by_mpidr()`` or ``plat_my_core_pos()`` for that core. This
+relationship allows the core nodes to be allocated in a separate array
+(requirement 4.) during ``psci_setup()`` in such an order that the index of the
+core in the array is the same as the return value from these APIs.
+
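+For a platform with a fully populated, contiguous MPIDR allocation, the
+conversion could look roughly as follows (a sketch; ``PLAT_MAX_CLUSTERS`` and
+``PLAT_CPUS_PER_CLUSTER`` are hypothetical platform macros, and the MPIDR
+layout is assumed to use affinity levels 1 and 0 for cluster and core):
+
+.. code:: c
+
+    int plat_core_pos_by_mpidr(u_register_t mpidr)
+    {
+        unsigned int cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+        unsigned int cpu = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+        /* Reject MPIDRs that do not correspond to a present core */
+        if (cluster >= PLAT_MAX_CLUSTERS || cpu >= PLAT_CPUS_PER_CLUSTER)
+            return -1;
+
+        return (int)((cluster * PLAT_CPUS_PER_CLUSTER) + cpu);
+    }
+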
+Dealing with holes in MPIDR allocation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For platforms where the number of allocated MPIDRs is equal to the number of
+core power domains, for example, Juno and FVPs, the logic to convert an MPIDR to
+a core index should remain unchanged. Both Juno and FVP use a simple collision
+proof hash function to do this.
+
+It is possible that on some platforms, the allocation of MPIDRs is not
+contiguous or certain cores have been disabled. This essentially means that the
+MPIDRs have been sparsely allocated, that is, the size of the range of MPIDRs
+used by the platform is not equal to the number of core power domains.
+
+The platform could adopt one of the following approaches to deal with this
+scenario:
+
+#. Implement more complex logic to convert a valid MPIDR to a core index while
+   maintaining the relationship described earlier. This means that the power
+   domain tree descriptor will not describe any core power domains which are
+   disabled or absent. Entries will not be allocated in the tree for these
+   domains.
+
+#. Treat unallocated MPIDRs and disabled cores as absent but still describe them
+   in the power domain descriptor, that is, the number of core nodes described
+   is equal to the size of the range of MPIDRs allocated. This approach will
+   lead to memory wastage since entries will be allocated in the tree but will
+   allow use of a simpler logic to convert an MPIDR to a core index.
+
+Traversing through and distinguishing between core and non-core power domains
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To fulfill requirements 3 and 4, separate data structures have been defined
+to represent leaf and non-leaf power domain nodes in the tree.
+
+.. code:: c
+
+    /*******************************************************************************
+     * The following two data structures implement the power domain tree. The tree
+     * is used to track the state of all the nodes i.e. power domain instances
+     * described by the platform. The tree consists of nodes that describe CPU power
+     * domains i.e. leaf nodes and all other power domains which are parents of a
+     * CPU power domain i.e. non-leaf nodes.
+     ******************************************************************************/
+    typedef struct non_cpu_pwr_domain_node {
+        /*
+         * Index of the first CPU power domain node level 0 which has this node
+         * as its parent.
+         */
+        unsigned int cpu_start_idx;
+
+        /*
+         * Number of CPU power domains which are siblings of the domain indexed
+         * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+         * -> cpu_start_idx + ncpus' have this node as their parent.
+         */
+        unsigned int ncpus;
+
+        /* Index of the parent power domain node */
+        unsigned int parent_node;
+
+        -----
+    } non_cpu_pd_node_t;
+
+    typedef struct cpu_pwr_domain_node {
+        u_register_t mpidr;
+
+        /* Index of the parent power domain node */
+        unsigned int parent_node;
+
+        -----
+    } cpu_pd_node_t;
+
+The power domain tree is implemented as a combination of the following data
+structures.
+
+::
+
+    non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
+    cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+
+Populating the power domain tree
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``populate_power_domain_tree()`` function in ``psci_setup.c`` implements the
+algorithm to parse the power domain descriptor exported by the platform to
+populate the two arrays. It is essentially a breadth-first-search. The nodes for
+each level starting from the root are laid out one after another in the
+``psci_non_cpu_pd_nodes`` and ``psci_cpu_pd_nodes`` arrays as follows:
+
+::
+
+    psci_non_cpu_pd_nodes -> [[Level 3 nodes][Level 2 nodes][Level 1 nodes]]
+    psci_cpu_pd_nodes -> [Level 0 nodes]
+
+For the example power domain tree illustrated above, the ``psci_cpu_pd_nodes``
+will be populated as follows. The value in each entry is the index of the parent
+node. Other fields have been ignored for simplicity.
+
+::
+
+                          +-------------+     ^
+                    CPU0  |      3      |     |
+                          +-------------+     |
+                    CPU1  |      3      |     |
+                          +-------------+     |
+                    CPU2  |      3      |     |
+                          +-------------+     |
+                    CPU3  |      4      |     |
+                          +-------------+     |
+                    CPU4  |      4      |     |
+                          +-------------+     |
+                    CPU5  |      4      |     | PLATFORM_CORE_COUNT
+                          +-------------+     |
+                    CPU6  |      5      |     |
+                          +-------------+     |
+                    CPU7  |      5      |     |
+                          +-------------+     |
+                    CPU8  |      5      |     |
+                          +-------------+     |
+                    CPU9  |      6      |     |
+                          +-------------+     |
+                    CPU10 |      6      |     |
+                          +-------------+     |
+                    CPU11 |      6      |     |
+                          +-------------+     |
+                    CPU12 |      6      |     v
+                          +-------------+
+
+The ``psci_non_cpu_pd_nodes`` array will be populated as follows. The value in
+each entry is the index of the parent node.
+
+::
+
+                          +-------------+     ^
+                    PD0   |      -1     |     |
+                          +-------------+     |
+                    PD1   |      0      |     |
+                          +-------------+     |
+                    PD2   |      0      |     |
+                          +-------------+     |
+                    PD3   |      1      |     | PLAT_NUM_POWER_DOMAINS -
+                          +-------------+     | PLATFORM_CORE_COUNT
+                    PD4   |      1      |     |
+                          +-------------+     |
+                    PD5   |      2      |     |
+                          +-------------+     |
+                    PD6   |      2      |     |
+                          +-------------+     v
+
+Each core can find its node in the ``psci_cpu_pd_nodes`` array using the
+``plat_my_core_pos()`` function. When a core is turned on, the normal world
+provides an MPIDR. The ``plat_core_pos_by_mpidr()`` function is used to validate
+the MPIDR before using it to find the corresponding core node. The non-core power
+domain nodes do not need to be identified.
+
+--------------
+
+*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
diff --git a/docs/reset-design.rst b/docs/reset-design.rst
new file mode 100644
index 0000000..0b14dec
--- /dev/null
+++ b/docs/reset-design.rst
@@ -0,0 +1,166 @@
+ARM Trusted Firmware Reset Design
+=================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This document describes the high-level design of the framework to handle CPU
+resets in ARM Trusted Firmware. It also describes how the platform integrator
+can tailor this code to the system configuration to some extent, resulting in a
+simplified and more optimised boot flow.
+
+This document should be used in conjunction with the `Firmware Design`_, which
+provides greater implementation details around the reset code, specifically
+for the cold boot path.
+
+General reset code flow
+-----------------------
+
+The ARM Trusted Firmware (TF) reset code is implemented in BL1 by default. The
+following high-level diagram illustrates this:
+
+|Default reset code flow|
+
+This diagram shows the default, unoptimised reset flow. Depending on the system
+configuration, some of these steps might be unnecessary. The following sections
+guide the platform integrator by indicating which build options exclude which
+steps, depending on the capability of the platform.
+
+Note: If BL31 is used as the Trusted Firmware entry point instead of BL1, the
+diagram above is still relevant, as all these operations will occur in BL31 in
+this case. Please refer to section 6 "Using BL31 entrypoint as the reset
+address" for more information.
+
+Programmable CPU reset address
+------------------------------
+
+By default, the TF assumes that the CPU reset address is not programmable.
+Therefore, all CPUs start at the same address (typically address 0) whenever
+they reset. Further logic is then required to identify whether it is a cold or
+warm boot to direct CPUs to the right execution path.
+
+If the reset vector address (reflected in the reset vector base address register
+``RVBAR_EL3``) is programmable then it is possible to make each CPU start directly
+at the right address, both on a cold and warm reset. Therefore, the boot type
+detection can be skipped, resulting in the following boot flow:
+
+|Reset code flow with programmable reset address|
+
+To enable this boot flow, compile the TF with ``PROGRAMMABLE_RESET_ADDRESS=1``.
+This option only affects the TF reset image, which is BL1 by default or BL31 if
+``RESET_TO_BL31=1``.
+
+On both the FVP and Juno platforms, the reset vector address is not programmable
+so both ports use ``PROGRAMMABLE_RESET_ADDRESS=0``.
+
+Cold boot on a single CPU
+-------------------------
+
+By default, the TF assumes that several CPUs may be released out of reset.
+Therefore, the cold boot code has to arbitrate access to hardware resources
+shared amongst CPUs. This is done by nominating one of the CPUs as the primary,
+which is responsible for initialising shared hardware and coordinating the boot
+flow with the other CPUs.
+
+If the platform guarantees that only a single CPU will ever be brought up then
+no arbitration is required. The notion of primary/secondary CPU itself no longer
+applies. This results in the following boot flow:
+
+|Reset code flow with single CPU released out of reset|
+
+To enable this boot flow, compile the TF with ``COLD_BOOT_SINGLE_CPU=1``. This
+option only affects the TF reset image, which is BL1 by default or BL31 if
+``RESET_TO_BL31=1``.
+
+On both the FVP and Juno platforms, although only one core is powered up by
+default, there are platform-specific ways to release any number of cores out of
+reset. Therefore, both platform ports use ``COLD_BOOT_SINGLE_CPU=0``.
+
+Programmable CPU reset address, Cold boot on a single CPU
+---------------------------------------------------------
+
+It is obviously possible to combine both optimisations on platforms that have
+a programmable CPU reset address and which release a single CPU out of reset.
+This results in the following boot flow:
+
+
+|Reset code flow with programmable reset address and single CPU released out of reset|
+
+To enable this boot flow, compile the TF with both ``COLD_BOOT_SINGLE_CPU=1``
+and ``PROGRAMMABLE_RESET_ADDRESS=1``. These options only affect the TF reset
+image, which is BL1 by default or BL31 if ``RESET_TO_BL31=1``.
+
+Using BL31 entrypoint as the reset address
+------------------------------------------
+
+On some platforms the runtime firmware (BL3x images) for the application
+processors is loaded by firmware running on a secure system processor
+on the SoC, rather than by BL1 and BL2 running on the primary application
+processor. For this type of SoC it is desirable for the application processor
+to always reset to BL31 which eliminates the need for BL1 and BL2.
+
+TF provides a build-time option ``RESET_TO_BL31`` that includes some additional
+logic in the BL31 entry point to support this use case.
+
+In this configuration, the platform's Trusted Boot Firmware must ensure that
+BL31 is loaded to its runtime address, which must match the CPU's ``RVBAR_EL3``
+reset vector base address, before the application processor is powered on.
+Additionally, platform software is responsible for loading the other BL3x images
+required and providing entry point information for them to BL31. Loading these
+images might be done by the Trusted Boot Firmware or by platform code in BL31.
+
+Although the ARM FVP platform does not support programming the reset base
+address dynamically at run-time, it is possible to set the initial value of the
+``RVBAR_EL3`` register at start-up. This feature is provided on the Base FVP only.
+It allows the ARM FVP port to support the ``RESET_TO_BL31`` configuration, in
+which case the ``bl31.bin`` image must be loaded to its run address in Trusted
+SRAM and all CPU reset vectors be changed from the default ``0x0`` to this run
+address. See the `User Guide`_ for details of running the FVP models in this way.
+
+Although technically it would be possible to program the reset base address with
+the right support in the SCP firmware, this is currently not implemented so the
+Juno port doesn't support the ``RESET_TO_BL31`` configuration.
+
+The ``RESET_TO_BL31`` configuration requires some additions and changes in the
+BL31 functionality:
+
+Determination of boot path
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this configuration, BL31 uses the same reset framework and code as the one
+described for BL1 above. Therefore, it is affected by the
+``PROGRAMMABLE_RESET_ADDRESS`` and ``COLD_BOOT_SINGLE_CPU`` build options in the
+same way.
+
+In the default, unoptimised BL31 reset flow, on a warm boot a CPU is directed
+to the PSCI implementation via a platform defined mechanism. On a cold boot,
+the platform must place any secondary CPUs into a safe state while the primary
+CPU executes a modified BL31 initialization, as described below.
+
+Platform initialization
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In this configuration, when the CPU resets to BL31 there are no parameters that
+can be passed in registers by previous boot stages. Instead, the platform code
+in BL31 needs to know, or be able to determine, the location of the BL32 (if
+required) and BL33 images and provide this information in response to the
+``bl31_plat_get_next_image_ep_info()`` function.
+
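+A platform port might satisfy this by keeping statically initialised entry
+point information for BL32 and BL33, for example (a sketch; how the
+``bl32_ep_info`` and ``bl33_ep_info`` structures are filled in is platform
+specific and not shown):
+
+.. code:: c
+
+        static entry_point_info_t bl32_ep_info, bl33_ep_info;
+
+        entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+        {
+                entry_point_info_t *ep;
+
+                /* Return the secure (BL32) or non-secure (BL33) entry point
+                 * info; a zero pc indicates the image is not present. */
+                ep = (type == SECURE) ? &bl32_ep_info : &bl33_ep_info;
+                return (ep->pc != 0U) ? ep : NULL;
+        }
+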
+Additionally, platform software is responsible for carrying out any security
+initialisation, for example programming a TrustZone address space controller.
+This might be done by the Trusted Boot Firmware or by platform code in BL31.
+
+--------------
+
+*Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.*
+
+.. _Firmware Design: firmware-design.rst
+.. _User Guide: user-guide.rst
+
+.. |Default reset code flow| image:: diagrams/default_reset_code.png?raw=true
+.. |Reset code flow with programmable reset address| image:: diagrams/reset_code_no_boot_type_check.png?raw=true
+.. |Reset code flow with single CPU released out of reset| image:: diagrams/reset_code_no_cpu_check.png?raw=true
+.. |Reset code flow with programmable reset address and single CPU released out of reset| image:: diagrams/reset_code_no_checks.png?raw=true
diff --git a/docs/rt-svc-writers-guide.md b/docs/rt-svc-writers-guide.md
deleted file mode 100644
index 13f5310..0000000
--- a/docs/rt-svc-writers-guide.md
+++ /dev/null
@@ -1,309 +0,0 @@
-EL3 Runtime Service Writers Guide for ARM Trusted Firmware
-==========================================================
-
-Contents
---------
-
-1.  [Introduction](#1--introduction)
-2.  [Owning Entities, Call Types and Function IDs](#2--owning-entities-call-types-and-function-ids)
-3.  [Getting started](#3--getting-started)
-4.  [Registering a runtime service](#4--registering-a-runtime-service)
-5.  [Initializing a runtime service](#5-initializing-a-runtime-service)
-6.  [Handling runtime service requests](#6--handling-runtime-service-requests)
-7.  [Services that contain multiple sub-services](#7--services-that-contain-multiple-sub-services)
-8.  [Secure-EL1 Payload Dispatcher service (SPD)](#8--secure-el1-payload-dispatcher-service-spd)
-
-- - - - - - - - - - - - - - - - - -
-
-1.  Introduction
-----------------
-
-This document describes how to add a runtime service to the EL3 Runtime
-Firmware component of ARM Trusted Firmware (BL3-1).
-
-Software executing in the normal world and in the trusted world at exception
-levels lower than EL3 will request runtime services using the Secure Monitor
-Call (SMC) instruction. These requests will follow the convention described in
-the SMC Calling Convention PDD ([SMCCC]). The [SMCCC] assigns function
-identifiers to each SMC request and describes how arguments are passed and
-results are returned.
-
-SMC Functions are grouped together based on the implementor of the service, for
-example a subset of the Function IDs are designated as "OEM Calls" (see [SMCCC]
-for full details). The EL3 runtime services framework in BL3-1 enables the
-independent implementation of services for each group, which are then compiled
-into the BL3-1 image. This simplifies the integration of common software from
-ARM to support [PSCI], Secure Monitor for a Trusted OS and SoC specific
-software. The common runtime services framework ensures that SMC Functions are
-dispatched to their respective service implementation - the [Firmware Design]
-provides details of how this is achieved.
-
-The interface and operation of the runtime services depends heavily on the
-concepts and definitions described in the [SMCCC], in particular SMC Function
-IDs, Owning Entity Numbers (OEN), Fast and Standard calls, and the SMC32 and
-SMC64 calling conventions. Please refer to that document for a full explanation
-of these terms.
-
-
-2.  Owning Entities, Call Types and Function IDs
-------------------------------------------------
-
-The SMC Function Identifier includes a OEN field. These values and their
-meaning are described in [SMCCC] and summarized in table 1 below. Some entities
-are allocated a range of of OENs. The OEN must be interpreted in conjunction
-with the SMC call type, which is either _Fast_ or _Standard_. Fast calls are
-uninterruptible whereas Standard calls can be pre-empted. The majority of
-Owning Entities only have allocated ranges for Fast calls: Standard calls are
-reserved exclusively for Trusted OS providers or for interoperability with
-legacy 32-bit software that predates the [SMCCC].
-
-    Type    OEN     Service
-    Fast     0      ARM Architecture calls
-    Fast     1      CPU Service calls
-    Fast     2      SiP Service calls
-    Fast     3      OEM Service calls
-    Fast     4      Standard Service calls
-    Fast    5-47    Reserved for future use
-    Fast   48-49    Trusted Application calls
-    Fast   50-63    Trusted OS calls
-
-    Std     0- 1    Reserved for existing ARMv7 calls
-    Std     2-63    Trusted OS Standard Calls
-
-_Table 1: Service types and their corresponding Owning Entity Numbers_
-
-Each individual entity can allocate the valid identifiers within the entity
-range as they need - it is not necessary to coordinate with other entities of
-the same type. For example, two SoC providers can use the same Function ID
-within the SiP Service calls OEN range to mean different things - as these
-calls should be specific to the SoC. The Standard Runtime Calls OEN is used for
-services defined by ARM standards, such as [PSCI].
-
-The SMC Function ID also indicates whether the call has followed the SMC32
-calling convention, where all parameters are 32-bit, or the SMC64 calling
-convention, where the parameters are 64-bit. The framework identifies and
-rejects invalid calls that use the SMC64 calling convention but that originate
-from an AArch32 caller.
-
-The EL3 runtime services framework uses the call type and OEN to identify a
-specific handler for each SMC call, but it is expected that an individual
-handler will be responsible for all SMC Functions within a given service type.
-
-
-3.  Getting started
--------------------
-
-ARM Trusted Firmware has a [`services`] directory in the source tree under which
-each owning entity can place the implementation of its runtime service.  The
-[PSCI] implementation is located here in the [`services/std_svc/psci`]
-directory.
-
-Runtime service sources will need to include the [`runtime_svc.h`] header file.
-
-
-4.  Registering a runtime service
----------------------------------
-
-A runtime service is registered using the `DECLARE_RT_SVC()` macro, specifying
-the name of the service, the range of OENs covered, the type of service and
-initialization and call handler functions.
-
-    #define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch)
-
-*   `_name` is used to identify the data structure declared by this macro, and
-    is also used for diagnostic purposes
-
-*   `_start` and `_end` values must be based on the `OEN_*` values defined in
-    [`runtime_svc.h`]
-
-*   `_type` must be one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
-
-*   `_setup` is the initialization function with the `rt_svc_init` signature:
-
-        typedef int32_t (*rt_svc_init)(void);
-
-*   `_smch` is the SMC handler function with the `rt_svc_handle` signature:
-
-        typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid,
-                                          uint64_t x1, uint64_t x2,
-                                          uint64_t x3, uint64_t x4,
-                                          void *reserved,
-                                          void *handle,
-                                          uint64_t flags);
-
-Details of the requirements and behavior of the two callbacks is provided in
-the following sections.
-
-During initialization the services framework validates each declared service
-to ensure that the following conditions are met:
-
-1.  The `_start` OEN is not greater than the `_end` OEN
-2.  The `_end` OEN does not exceed the maximum OEN value (63)
-3.  The `_type` is one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
-4.  `_setup` and `_smch` routines have been specified
-
-[`std_svc_setup.c`] provides an example of registering a runtime service:
-
-    /* Register Standard Service Calls as runtime service */
-    DECLARE_RT_SVC(
-            std_svc,
-            OEN_STD_START,
-            OEN_STD_END,
-            SMC_TYPE_FAST,
-            std_svc_setup,
-            std_svc_smc_handler
-    );
-
-
-5. Initializing a runtime service
----------------------------------
-
-Runtime services are initialized once, during cold boot, by the primary CPU
-after platform and architectural initialization is complete. The framework
-performs basic validation of the declared service before calling
-the service initialization function (`_setup` in the declaration). This
-function must carry out any essential EL3 initialization prior to receiving a
-SMC Function call via the handler function.
-
-On success, the initialization function must return `0`. Any other return value
-will cause the framework to issue a diagnostic:
-
-    Error initializing runtime service <name of the service>
-
-and then ignore the service - the system will continue to boot but SMC calls
-will not be passed to the service handler and instead return the _Unknown SMC
-Function ID_ result `0xFFFFFFFF`.
-
-If the system must not be allowed to proceed without the service, the
-initialization function must itself cause the firmware boot to be halted.
-
-If the service uses per-CPU data this must either be initialized for all CPUs
-during this call, or be done lazily when a CPU first issues an SMC call to that
-service.
-
-
-6.  Handling runtime service requests
--------------------------------------
-
-SMC calls for a service are forwarded by the framework to the service's SMC
-handler function (`_smch` in the service declaration). This function must have
-the following signature:
-
-    typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid,
-                                      uint64_t x1, uint64_t x2,
-                                      uint64_t x3, uint64_t x4,
-                                      void *reserved,
-                                      void *handle,
-                                      uint64_t flags);
-
-The handler is responsible for:
-
-1.  Determining that `smc_fid` is a valid and supported SMC Function ID,
-    otherwise completing the request with the _Unknown SMC Function ID_:
-
-        SMC_RET1(handle, SMC_UNK);
-
-2.  Determining if the requested function is valid for the calling security
-    state. SMC Calls can be made from both the normal and trusted worlds and
-    the framework will forward all calls to the service handler.
-
-    The `flags` parameter to this function indicates the caller security state
-    in bit[0], where a value of `1` indicates  a non-secure caller. The
-    `is_caller_secure(flags)` and `is_caller_non_secure(flags)` can be used to
-    test this condition.
-
-    If invalid, the request should be completed with:
-
-        SMC_RET1(handle, SMC_UNK);
-
-3.  Truncating parameters for calls made using the SMC32 calling convention.
-    Such calls can be determined by checking the CC field in bit[30] of the
-    `smc_fid` parameter, for example by using:
-
-        if (GET_SMC_CC(smc_fid) == SMC_32) ...
-
-    For such calls, the upper bits of the parameters x1-x4 and the saved
-    parameters X5-X7 are UNDEFINED and must be explicitly ignored by the
-    handler. This can be done by truncating the values to a suitable 32-bit
-    integer type before use, for example by ensuring that functions defined
-    to handle individual SMC Functions use appropriate 32-bit parameters.
-
-4.  Providing the service requested by the SMC Function, utilizing the
-    immediate parameters x1-x4 and/or the additional saved parameters X5-X7.
-    The latter can be retrieved using the `SMC_GET_GP(handle, ref)` function,
-    supplying the appropriate `CTX_GPREG_Xn` reference, e.g.
-
-        uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
-
-5.  Implementing the standard SMC32 Functions that provide information about
-    the implementation of the service. These are the Call Count, Implementor
-    UID and Revision Details for each service documented in section 6 of the
-    [SMCCC].
-
-    The ARM Trusted Firmware expects owning entities to follow this
-    recommendation.
-
-5.  Returning the result to the caller. The [SMCCC] allows for up to 256 bits
-    of return value in SMC64 using X0-X3 and 128 bits in SMC32 using W0-W3. The
-    framework provides a family of macros to set the multi-register return
-    value and complete the handler:
-
-        SMC_RET1(handle, x0);
-        SMC_RET2(handle, x0, x1);
-        SMC_RET3(handle, x0, x1, x2);
-        SMC_RET4(handle, x0, x1, x2, x3);
-
-The `reserved` parameter to the handler is reserved for future use and can be
-ignored. The value returned by a SMC handler is also reserved for future use -
-completion of the handler function must always be via one of the `SMC_RETn()`
-macros.
-
-NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow
-all of the above requirements yet.
-
-
-7.  Services that contain multiple sub-services
------------------------------------------------
-
-It is possible that a single owning entity implements multiple sub-services. For
-example, the Standard calls service handles `0x84000000`-`0x8400FFFF` and
-`0xC4000000`-`0xC400FFFF` functions. Within that range, the [PSCI] service
-handles the `0x84000000`-`0x8400001F` and `0xC4000000`-`0xC400001F` functions.
-In that respect, [PSCI] is a 'sub-service' of the Standard calls service. In
-future, there could be additional such sub-services in the Standard calls
-service which perform independent functions.
-
-In this situation it may be valuable to introduce a second level framework to
-enable independent implementation of sub-services. Such a framework might look
-very similar to the current runtime services framework, but using a different
-part of the SMC Function ID to identify the sub-service. Trusted Firmware does
-not provide such a framework at present.
-
-
-8.  Secure-EL1 Payload Dispatcher service (SPD)
------------------------------------------------
-
-Services that handle SMC Functions targeting a Trusted OS, Trusted Application,
-or other Secure-EL1 Payload are special. These services need to manage the
-Secure-EL1 context, provide the _Secure Monitor_ functionality of switching
-between the normal and secure worlds, deliver SMC Calls through to Secure-EL1
-and generally manage the Secure-EL1 Payload through CPU power-state transitions.
-
-TODO: Provide details of the additional work required to implement a SPD and
-the BL3-1 support for these services. Or a reference to the document that will
-provide this information....
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2014, ARM Limited and Contributors. All rights reserved._
-
-
-[Firmware Design]:  ./firmware-design.md
-
-[`services`]:               ../services
-[`services/std_svc/psci`]:  ../services/std_svc/psci
-[`std_svc_setup.c`]:        ../services/std_svc/std_svc_setup.c
-[`runtime_svc.h`]:          ../include/runtime_svc.h
-[PSCI]:                     http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
-[SMCCC]:                    http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
diff --git a/docs/rt-svc-writers-guide.rst b/docs/rt-svc-writers-guide.rst
new file mode 100644
index 0000000..6a64ade
--- /dev/null
+++ b/docs/rt-svc-writers-guide.rst
@@ -0,0 +1,316 @@
+EL3 Runtime Service Writers Guide for ARM Trusted Firmware
+==========================================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+--------------
+
+Introduction
+------------
+
+This document describes how to add a runtime service to the EL3 Runtime
+Firmware component of ARM Trusted Firmware (BL31).
+
+Software executing in the normal world and in the trusted world at exception
+levels lower than EL3 will request runtime services using the Secure Monitor
+Call (SMC) instruction. These requests will follow the convention described in
+the SMC Calling Convention PDD (`SMCCC`_). The `SMCCC`_ assigns function
+identifiers to each SMC request and describes how arguments are passed and
+results are returned.
+
+SMC Functions are grouped together based on the implementor of the service, for
+example a subset of the Function IDs are designated as "OEM Calls" (see `SMCCC`_
+for full details). The EL3 runtime services framework in BL31 enables the
+independent implementation of services for each group, which are then compiled
+into the BL31 image. This simplifies the integration of common software from
+ARM to support `PSCI`_, Secure Monitor for a Trusted OS and SoC specific
+software. The common runtime services framework ensures that SMC Functions are
+dispatched to their respective service implementation - the `Firmware Design`_
+provides details of how this is achieved.
+
+The interface and operation of the runtime services depend heavily on the
+concepts and definitions described in the `SMCCC`_, in particular SMC Function
+IDs, Owning Entity Numbers (OEN), Fast and Standard calls, and the SMC32 and
+SMC64 calling conventions. Please refer to that document for a full explanation
+of these terms.
+
+Owning Entities, Call Types and Function IDs
+--------------------------------------------
+
+The SMC Function Identifier includes an OEN field. These values and their
+meaning are described in `SMCCC`_ and summarized in table 1 below. Some entities
+are allocated a range of OENs. The OEN must be interpreted in conjunction
+with the SMC call type, which is either *Fast* or *Yielding*. Fast calls are
+uninterruptible whereas Yielding calls can be pre-empted. The majority of
+Owning Entities only have allocated ranges for Fast calls: Yielding calls are
+reserved exclusively for Trusted OS providers or for interoperability with
+legacy 32-bit software that predates the `SMCCC`_.
+
+::
+
+    Type       OEN     Service
+    Fast        0      ARM Architecture calls
+    Fast        1      CPU Service calls
+    Fast        2      SiP Service calls
+    Fast        3      OEM Service calls
+    Fast        4      Standard Service calls
+    Fast       5-47    Reserved for future use
+    Fast      48-49    Trusted Application calls
+    Fast      50-63    Trusted OS calls
+
+    Yielding   0- 1    Reserved for existing ARMv7 calls
+    Yielding   2-63    Trusted OS Standard Calls
+
+*Table 1: Service types and their corresponding Owning Entity Numbers*
+
+Each individual entity can allocate the valid identifiers within the entity
+range as they need - it is not necessary to coordinate with other entities of
+the same type. For example, two SoC providers can use the same Function ID
+within the SiP Service calls OEN range to mean different things - as these
+calls should be specific to the SoC. The Standard Runtime Calls OEN is used for
+services defined by ARM standards, such as `PSCI`_.
+
+The SMC Function ID also indicates whether the call has followed the SMC32
+calling convention, where all parameters are 32-bit, or the SMC64 calling
+convention, where the parameters are 64-bit. The framework identifies and
+rejects invalid calls that use the SMC64 calling convention but that originate
+from an AArch32 caller.
+
+The EL3 runtime services framework uses the call type and OEN to identify a
+specific handler for each SMC call, but it is expected that an individual
+handler will be responsible for all SMC Functions within a given service type.
+
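+As an illustration of this encoding, the sketch below decodes the fields of an
+SMC Function ID directly from the bit layout defined in the `SMCCC`_ (bit[31]
+for the call type, bit[30] for the calling convention, bits[29:24] for the
+OEN). The helper names here are hypothetical; the framework's `smcc.h`_ header
+provides macros for the same purpose (for example ``GET_SMC_CC()``, used later
+in this document).
+
+.. code:: c
+
+    #include <stdint.h>
+
+    /* Hypothetical decoding of an SMC Function ID, following the SMCCC. */
+    static inline unsigned int fid_is_fast(uint32_t fid)
+    {
+        return (fid >> 31) & 0x1u;      /* 1 = Fast call, 0 = Yielding call */
+    }
+
+    static inline unsigned int fid_is_smc64(uint32_t fid)
+    {
+        return (fid >> 30) & 0x1u;      /* 1 = SMC64, 0 = SMC32 */
+    }
+
+    static inline unsigned int fid_oen(uint32_t fid)
+    {
+        return (fid >> 24) & 0x3fu;     /* Owning Entity Number, 0-63 */
+    }
+
+    /* Example: 0x84000000 decodes as Fast, SMC32, OEN 4 (Standard Service). */
+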
+Getting started
+---------------
+
+ARM Trusted Firmware has a `services`_ directory in the source tree under which
+each owning entity can place the implementation of its runtime service. The
+`PSCI`_ implementation is located in the `lib/psci`_ directory.
+
+Runtime service sources will need to include the `runtime\_svc.h`_ header file.
+
+Registering a runtime service
+-----------------------------
+
+A runtime service is registered using the ``DECLARE_RT_SVC()`` macro, specifying
+the name of the service, the range of OENs covered, the type of service and
+initialization and call handler functions.
+
+::
+
+    #define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch)
+
+-  ``_name`` is used to identify the data structure declared by this macro, and
+   is also used for diagnostic purposes
+
+-  ``_start`` and ``_end`` values must be based on the ``OEN_*`` values defined in
+   `smcc.h`_
+
+-  ``_type`` must be one of ``SMC_TYPE_FAST`` or ``SMC_TYPE_YIELD``
+
+-  ``_setup`` is the initialization function with the ``rt_svc_init`` signature:
+
+   .. code:: c
+
+       typedef int32_t (*rt_svc_init)(void);
+
+-  ``_smch`` is the SMC handler function with the ``rt_svc_handle_t`` signature:
+
+   .. code:: c
+
+       typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
+                                         u_register_t x1, u_register_t x2,
+                                         u_register_t x3, u_register_t x4,
+                                         void *cookie,
+                                         void *handle,
+                                         u_register_t flags);
+
+Details of the requirements and behavior of the two callbacks are provided in
+the following sections.
+
+During initialization the services framework validates each declared service
+to ensure that the following conditions are met:
+
+#. The ``_start`` OEN is not greater than the ``_end`` OEN
+#. The ``_end`` OEN does not exceed the maximum OEN value (63)
+#. The ``_type`` is one of ``SMC_TYPE_FAST`` or ``SMC_TYPE_YIELD``
+#. ``_setup`` and ``_smch`` routines have been specified
+
+`std\_svc\_setup.c`_ provides an example of registering a runtime service:
+
+.. code:: c
+
+    /* Register Standard Service Calls as runtime service */
+    DECLARE_RT_SVC(
+            std_svc,
+            OEN_STD_START,
+            OEN_STD_END,
+            SMC_TYPE_FAST,
+            std_svc_setup,
+            std_svc_smc_handler
+    );
+
+Initializing a runtime service
+------------------------------
+
+Runtime services are initialized once, during cold boot, by the primary CPU
+after platform and architectural initialization is complete. The framework
+performs basic validation of the declared service before calling
+the service initialization function (``_setup`` in the declaration). This
+function must carry out any essential EL3 initialization prior to receiving an
+SMC Function call via the handler function.
+
+On success, the initialization function must return ``0``. Any other return value
+will cause the framework to issue a diagnostic:
+
+::
+
+    Error initializing runtime service <name of the service>
+
+and then ignore the service - the system will continue to boot but SMC calls
+will not be passed to the service handler and instead return the *Unknown SMC
+Function ID* result ``0xFFFFFFFF``.
+
+If the system must not be allowed to proceed without the service, the
+initialization function must itself cause the firmware boot to be halted.
+
+If the service uses per-CPU data this must either be initialized for all CPUs
+during this call, or be done lazily when a CPU first issues an SMC call to that
+service.
+
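+As a minimal, hypothetical sketch of these rules (the service name is an
+assumption, not an existing service), an initialization function could be as
+simple as:
+
+.. code:: c
+
+    /*
+     * Hypothetical example: returning 0 tells the framework to enable the
+     * service; any other return value causes the framework to report the
+     * diagnostic above and ignore the service.
+     */
+    static int32_t my_svc_setup(void)
+    {
+        /*
+         * Perform any essential EL3 set-up here, for example initializing
+         * per-CPU data for all CPUs (or deferring that to the first SMC).
+         */
+        return 0;
+    }
+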
+Handling runtime service requests
+---------------------------------
+
+SMC calls for a service are forwarded by the framework to the service's SMC
+handler function (``_smch`` in the service declaration). This function must have
+the following signature:
+
+.. code:: c
+
+    typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
+                                       u_register_t x1, u_register_t x2,
+                                       u_register_t x3, u_register_t x4,
+                                       void *cookie,
+                                       void *handle,
+                                       u_register_t flags);
+
+The handler is responsible for:
+
+#. Determining that ``smc_fid`` is a valid and supported SMC Function ID,
+   otherwise completing the request with the *Unknown SMC Function ID*:
+
+   .. code:: c
+
+       SMC_RET1(handle, SMC_UNK);
+
+#. Determining if the requested function is valid for the calling security
+   state. SMC Calls can be made from both the normal and trusted worlds and
+   the framework will forward all calls to the service handler.
+
+   The ``flags`` parameter to this function indicates the caller security state
+   in bit[0], where a value of ``1`` indicates a non-secure caller. The
+   ``is_caller_secure(flags)`` and ``is_caller_non_secure(flags)`` helpers can
+   be used to test this condition.
+
+   If invalid, the request should be completed with:
+
+   .. code:: c
+
+       SMC_RET1(handle, SMC_UNK);
+
+#. Truncating parameters for calls made using the SMC32 calling convention.
+   Such calls can be determined by checking the CC field in bit[30] of the
+   ``smc_fid`` parameter, for example by using:
+
+   ::
+
+       if (GET_SMC_CC(smc_fid) == SMC_32) ...
+
+   For such calls, the upper bits of the parameters x1-x4 and the saved
+   parameters X5-X7 are UNDEFINED and must be explicitly ignored by the
+   handler. This can be done by truncating the values to a suitable 32-bit
+   integer type before use, for example by ensuring that functions defined
+   to handle individual SMC Functions use appropriate 32-bit parameters.
+
+#. Providing the service requested by the SMC Function, utilizing the
+   immediate parameters x1-x4 and/or the additional saved parameters X5-X7.
+   The latter can be retrieved using the ``SMC_GET_GP(handle, ref)`` function,
+   supplying the appropriate ``CTX_GPREG_Xn`` reference, e.g.
+
+   .. code:: c
+
+       uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+
+#. Implementing the standard SMC32 Functions that provide information about
+   the implementation of the service. These are the Call Count, Implementor
+   UID and Revision Details for each service documented in section 6 of the
+   `SMCCC`_.
+
+   The ARM Trusted Firmware expects owning entities to follow this
+   recommendation.
+
+#. Returning the result to the caller. The `SMCCC`_ allows for up to 256 bits
+   of return value in SMC64 using X0-X3 and 128 bits in SMC32 using W0-W3. The
+   framework provides a family of macros to set the multi-register return
+   value and complete the handler:
+
+   .. code:: c
+
+       SMC_RET1(handle, x0);
+       SMC_RET2(handle, x0, x1);
+       SMC_RET3(handle, x0, x1, x2);
+       SMC_RET4(handle, x0, x1, x2, x3);
+
+The ``cookie`` parameter to the handler is reserved for future use and can be
+ignored. The ``handle`` is the context pointer that the handler must return;
+completion of the handler function must always be via one of the
+``SMC_RETn()`` macros, which take ``handle`` as their first argument.
+
+NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow
+all of the above requirements yet.
+
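+The following sketch pulls the above steps together into a single handler. It
+is illustrative only: the ``MY_SVC_FID_EXAMPLE`` Function ID and the
+``my_svc_*`` functions are hypothetical, the standard information functions
+are omitted for brevity, and the macros and the ``rt_svc_handle_t`` signature
+come from the framework headers as described earlier.
+
+.. code:: c
+
+    static uintptr_t my_svc_smc_handler(uint32_t smc_fid,
+                                        u_register_t x1, u_register_t x2,
+                                        u_register_t x3, u_register_t x4,
+                                        void *cookie, void *handle,
+                                        u_register_t flags)
+    {
+        /* Example policy: this service only accepts non-secure callers. */
+        if (!is_caller_non_secure(flags))
+            SMC_RET1(handle, SMC_UNK);
+
+        /* For SMC32 calls, only the lower 32 bits of each parameter are valid. */
+        if (GET_SMC_CC(smc_fid) == SMC_32) {
+            x1 = (uint32_t)x1;
+            x2 = (uint32_t)x2;
+        }
+
+        switch (smc_fid) {
+        case MY_SVC_FID_EXAMPLE:        /* hypothetical Function ID */
+            SMC_RET1(handle, my_svc_do_example(x1, x2));
+        default:
+            /* Unknown SMC Function ID */
+            SMC_RET1(handle, SMC_UNK);
+        }
+    }
+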
+Services that contain multiple sub-services
+-------------------------------------------
+
+It is possible that a single owning entity implements multiple sub-services. For
+example, the Standard calls service handles ``0x84000000``-``0x8400FFFF`` and
+``0xC4000000``-``0xC400FFFF`` functions. Within that range, the `PSCI`_ service
+handles the ``0x84000000``-``0x8400001F`` and ``0xC4000000``-``0xC400001F`` functions.
+In that respect, `PSCI`_ is a 'sub-service' of the Standard calls service. In
+future, there could be additional such sub-services in the Standard calls
+service which perform independent functions.
+
+In this situation it may be valuable to introduce a second level framework to
+enable independent implementation of sub-services. Such a framework might look
+very similar to the current runtime services framework, but using a different
+part of the SMC Function ID to identify the sub-service. Trusted Firmware does
+not provide such a framework at present.
+
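+In the absence of such a framework, a top-level service handler can simply
+forward a sub-range of Function IDs to a sub-service handler, much as the
+Standard Service handler dispatches `PSCI`_ calls within its range. A
+hypothetical sketch (the ``is_my_subsvc_fid()`` check and the sub-handler are
+assumptions):
+
+.. code:: c
+
+    static uintptr_t top_svc_smc_handler(uint32_t smc_fid,
+                                         u_register_t x1, u_register_t x2,
+                                         u_register_t x3, u_register_t x4,
+                                         void *cookie, void *handle,
+                                         u_register_t flags)
+    {
+        /* Delegate the sub-service's Function ID range to its own handler. */
+        if (is_my_subsvc_fid(smc_fid))
+            return my_subsvc_smc_handler(smc_fid, x1, x2, x3, x4,
+                                         cookie, handle, flags);
+
+        SMC_RET1(handle, SMC_UNK);
+    }
+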
+Secure-EL1 Payload Dispatcher service (SPD)
+-------------------------------------------
+
+Services that handle SMC Functions targeting a Trusted OS, Trusted Application,
+or other Secure-EL1 Payload are special. These services need to manage the
+Secure-EL1 context, provide the *Secure Monitor* functionality of switching
+between the normal and secure worlds, deliver SMC Calls through to Secure-EL1
+and generally manage the Secure-EL1 Payload through CPU power-state transitions.
+
+TODO: Provide details of the additional work required to implement a SPD and
+the BL31 support for these services. Or a reference to the document that will
+provide this information....
+
+--------------
+
+*Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.*
+
+.. _SMCCC: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
+.. _Firmware Design: ./firmware-design.rst
+.. _services: ../services
+.. _lib/psci: ../lib/psci
+.. _runtime\_svc.h: ../include/common/runtime_svc.h
+.. _smcc.h: ../include/lib/smcc.h
+.. _std\_svc\_setup.c: ../services/std_svc/std_svc_setup.c
diff --git a/docs/spd/optee-dispatcher.rst b/docs/spd/optee-dispatcher.rst
new file mode 100644
index 0000000..e55926b
--- /dev/null
+++ b/docs/spd/optee-dispatcher.rst
@@ -0,0 +1,14 @@
+OP-TEE Dispatcher
+=================
+
+`OP-TEE OS`_ is a Trusted OS running as Secure EL1.
+
+To build and execute OP-TEE, follow the instructions at
+`OP-TEE build.git`_.
+
+--------------
+
+*Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _OP-TEE OS: https://github.com/OP-TEE/optee_os
+.. _OP-TEE build.git: https://github.com/OP-TEE/build
diff --git a/docs/spd/tlk-dispatcher.rst b/docs/spd/tlk-dispatcher.rst
new file mode 100644
index 0000000..cd37652
--- /dev/null
+++ b/docs/spd/tlk-dispatcher.rst
@@ -0,0 +1,76 @@
+Trusted Little Kernel (TLK) Dispatcher
+======================================
+
+TLK dispatcher adds support for NVIDIA's Trusted Little Kernel (TLK) to work
+with the Trusted Firmware. TLK-D can be compiled by including it in the
+platform's makefile. TLK is primarily meant to work with Tegra SoCs, so until
+Trusted Firmware starts supporting Tegra, the dispatcher code can only be
+compiled for other platforms.
+
+In order to compile TLK-D, we need a BL32 image to be present. Since TLK-D
+only needs to compile, any BL32 image will do. To use TLK as the BL32, please
+refer to the "Build TLK" section.
+
+Once a BL32 is ready, TLK-D can be included in the image by adding "SPD=tlkd"
+to the build command.
+
+Trusted Little Kernel (TLK)
+===========================
+
+TLK is a Trusted OS running as Secure EL1. It is a Free Open Source Software
+(FOSS) release of the NVIDIA® Trusted Little Kernel (TLK) technology, which
+extends technology made available with the development of the Little Kernel (LK).
+You can download the LK modular embedded preemptive kernel for use on ARM,
+x86, and AVR32 systems from https://github.com/travisg/lk
+
+NVIDIA implemented its Trusted Little Kernel (TLK) technology, designed as a
+free and open-source trusted execution environment (OTE).
+
+TLK features include:
+
+• Small, pre-emptive kernel
+• Supports multi-threading, IPCs, and thread scheduling
+• Added TrustZone features
+• Added Secure Storage
+• Under MIT/FreeBSD license
+
+NVIDIA extensions to Little Kernel (LK) include:
+
+• User mode
+• Address-space separation for TAs
+• TLK Client Application (CA) library
+• TLK TA library
+• Crypto library (encrypt/decrypt, key handling) via OpenSSL
+• Linux kernel driver
+• Cortex A9/A15 support
+• Power Management
+• TrustZone memory carve-out (reconfigurable)
+• Page table management
+• Debugging support over UART (USB planned)
+
+TLK is hosted by NVIDIA on http://nv-tegra.nvidia.com under the
+3rdparty/ote\_partner/tlk.git repository. Detailed information about
+TLK and OTE can be found in the Tegra\_BSP\_for\_Android\_TLK\_FOSS\_Reference.pdf
+manual located under the "documentation" directory.
+
+Build TLK
+=========
+
+To build and execute TLK, follow the instructions from "Building a TLK Device"
+section from Tegra\_BSP\_for\_Android\_TLK\_FOSS\_Reference.pdf manual.
+
+Input parameters to TLK
+=======================
+
+TLK expects the TZDRAM size and a structure containing the boot arguments. BL2
+passes this information to the EL3 software as members of the bl32\_ep\_info
+struct, where bl32\_ep\_info is part of bl31\_params\_t (passed by BL2 in X0).
+
+Example:
+--------
+
+::
+
+    bl32_ep_info->args.arg0 = TZDRAM size available for BL32
+    bl32_ep_info->args.arg1 = unused (used only on ARMv7)
+    bl32_ep_info->args.arg2 = pointer to boot args
diff --git a/docs/spd/trusty-dispatcher.rst b/docs/spd/trusty-dispatcher.rst
new file mode 100644
index 0000000..f1982ea
--- /dev/null
+++ b/docs/spd/trusty-dispatcher.rst
@@ -0,0 +1,15 @@
+Trusty Dispatcher
+=================
+
+Trusty is a set of software components, supporting a Trusted Execution
+Environment (TEE) on mobile devices, published and maintained by Google.
+
+Detailed information and build instructions can be found on the Android
+Open Source Project (AOSP) webpage for Trusty hosted at
+https://source.android.com/security/trusty
+
+Supported platforms
+===================
+
+Out of all the platforms supported by the ARM Trusted Firmware, Trusty is
+verified and supported on NVIDIA's Tegra SoCs.
diff --git a/docs/trusted-board-boot.md b/docs/trusted-board-boot.md
deleted file mode 100644
index abba030..0000000
--- a/docs/trusted-board-boot.md
+++ /dev/null
@@ -1,261 +0,0 @@
-Trusted Board Boot Design Guide
-===============================
-
-Contents :
-
-1.  [Introduction](#1--introduction)
-2.  [Chain of Trust](#2--chain-of-trust)
-3.  [Trusted Board Boot Sequence](#3--trusted-board-boot-sequence)
-4.  [Authentication Module](#4--authentication-module)
-5.  [Certificate Generation Tool](#5--certificate-generation-tool)
-
-
-1.  Introduction
-----------------
-
-The Trusted Board Boot (TBB) feature prevents malicious firmware from running on
-the platform by authenticating all firmware images up to and including the
-normal world bootloader. It does this by establishing a Chain of Trust using
-Public-Key-Cryptography Standards (PKCS).
-
-This document describes the design of the ARM Trusted Firmware TBB
-implementation. The current implementation is a proof of concept; future
-versions will provide stronger architectural interfaces and implement the
-missing functionality required in a production TBB-enabled system.
-
-
-2.  Chain of Trust
-------------------
-
-A Chain of Trust (CoT) starts with a set of implicitly trusted components. On
-the ARM development platforms, these components are:
-
-*   A SHA-256 hash of the Root of Trust Public Key (ROTPK). It is stored in the
-    trusted root-key storage registers.
-
-*   The BL1 image, on the assumption that it resides in ROM so cannot be
-    tampered with.
-
-The remaining components in the CoT are either certificates or boot loader
-images. The certificates follow the [X.509 v3] standard. This standard
-enables adding custom extensions to the certificates, which are used to store
-essential information to establish the CoT.
-
-In the TBB CoT all certificates are self-signed. There is no need for a
-Certificate Authority (CA) because the CoT is not established by verifying the
-validity of a certificate's issuer but by the content of the certificate
-extensions. To sign the certificates, the PKCS#1 SHA-1 with RSA Encryption
-signature scheme is used with a RSA key length of 2048 bits. Future version of
-Trusted Firmware will replace SHA-1 usage with SHA-256 and support additional
-cryptographic algorithms.
-
-The certificates are categorised as "Key" and "Content" certificates. Key
-certificates are used to verify public keys which have been used to sign content
-certificates. Content certificates are used to store the hash of a boot loader
-image. An image can be authenticated by calculating its hash and matching it
-with the hash extracted from the content certificate. The SHA-256 function is
-used to calculate all hashes. The public keys and hashes are included as
-non-standard extension fields in the [X.509 v3] certificates.
-
-The keys used to establish the CoT are:
-
-*   **Root of trust key**
-
-    The private part of this key is used to sign the BL2 content certificate and
-    the trusted key certificate. The public part is the ROTPK.
-
-*   **Trusted world key**
-
-    The private part is used to sign the key certificates corresponding to the
-    secure world images (BL3-0, BL3-1 and BL3-2). The public part is stored in
-    one of the extension fields in the trusted world certificate.
-
-*   **Non-trusted world key**
-
-    The private part is used to sign the key certificate corresponding to the
-    non secure world image (BL3-3). The public part is stored in one of the
-    extension fields in the trusted world certificate.
-
-*   **BL3-X keys**
-
-    For each of BL3-0, BL3-1, BL3-2 and BL3-3, the private part is used to sign
-    the content certificate for the BL3-X image. The public part is stored in
-    one of the extension fields in the corresponding key certificate.
-
-The following images are included in the CoT:
-
-*   BL1
-*   BL2
-*   BL3-0 (optional)
-*   BL3-1
-*   BL3-3
-*   BL3-2 (optional)
-
-The following certificates are used to authenticate the images.
-
-*   **BL2 content certificate**
-
-    It is self-signed with the private part of the ROT key. It contains a hash
-    of the BL2 image.
-
-*   **Trusted key certificate**
-
-    It is self-signed with the private part of the ROT key. It contains the
-    public part of the trusted world key and the public part of the non-trusted
-    world key.
-
-*   **BL3-0 key certificate**
-
-    It is self-signed with the trusted world key. It contains the public part of
-    the BL3-0 key.
-
-*   **BL3-0 content certificate**
-
-    It is self-signed with the BL3-0 key. It contains a hash of the BL3-0 image.
-
-*   **BL3-1 key certificate**
-
-    It is self-signed with the trusted world key. It contains the public part of
-    the BL3-1 key.
-
-*   **BL3-1 content certificate**
-
-    It is self-signed with the BL3-1 key. It contains a hash of the BL3-1 image.
-
-*   **BL3-2 key certificate**
-
-    It is self-signed with the trusted world key. It contains the public part of
-    the BL3-2 key.
-
-*   **BL3-2 content certificate**
-
-    It is self-signed with the BL3-2 key. It contains a hash of the BL3-2 image.
-
-*   **BL3-3 key certificate**
-
-    It is self-signed with the non-trusted world key. It contains the public
-    part of the BL3-3 key.
-
-*   **BL3-3 content certificate**
-
-    It is self-signed with the BL3-3 key. It contains a hash of the BL3-3 image.
-
-The BL3-0 and BL3-2 certificates are optional, but they must be present if the
-corresponding BL3-0 or BL3-2 images are present.
-
-
-3.  Trusted Board Boot Sequence
--------------------------------
-
-The CoT is verified through the following sequence of steps. The system panics
-if any of the steps fail.
-
-*   BL1 loads and verifies the BL2 content certificate. The issuer public key is
-    read from the verified certificate. A hash of that key is calculated and
-    compared with the hash of the ROTPK read from the trusted root-key storage
-    registers. If they match, the BL2 hash is read from the certificate.
-
-    Note: the matching operation is platform specific and is currently
-    unimplemented on the ARM development platforms.
-
-*   BL1 loads the BL2 image. Its hash is calculated and compared with the hash
-    read from the certificate. Control is transferred to the BL2 image if all
-    the comparisons succeed.
-
-*   BL2 loads and verifies the trusted key certificate. The issuer public key is
-    read from the verified certificate. A hash of that key is calculated and
-    compared with the hash of the ROTPK read from the trusted root-key storage
-    registers. If the comparison succeeds, BL2 reads and saves the trusted and
-    non-trusted world public keys from the verified certificate.
-
-The next two steps are executed for each of the BL3-0, BL3-1 & BL3-2 images. The
-steps for the optional BL3-0 and BL3-2 images are skipped if these images are
-not present.
-
-*   BL2 loads and verifies the BL3-x key certificate. The certificate signature
-    is verified using the trusted world public key. If the signature
-    verification succeeds, BL2 reads and saves the BL3-x public key from the
-    certificate.
-
-*   BL2 loads and verifies the BL3-x content certificate. The signature is
-    verified using the BL3-x public key. If the signature verification succeeds,
-    BL2 reads and saves the BL3-x image hash from the certificate.
-
-The next two steps are executed only for the BL3-3 image.
-
-*   BL2 loads and verifies the BL3-3 key certificate. If the signature
-    verification succeeds, BL2 reads and saves the BL3-3 public key from the
-    certificate.
-
-*   BL2 loads and verifies the BL3-3 content certificate. If the signature
-    verification succeeds, BL2 reads and saves the BL3-3 image hash from the
-    certificate.
-
-The next step is executed for all the boot loader images.
-
-*   BL2 calculates the hash of each image. It compares it with the hash obtained
-    from the corresponding content certificate. The image authentication succeeds
-    if the hashes match.
-
-The Trusted Board Boot implementation spans both generic and platform-specific
-BL1 and BL2 code, and in tool code on the host build machine. The feature is
-enabled through use of specific build flags as described in the [User Guide].
-
-On the host machine, a tool generates the certificates, which are included in
-the FIP along with the boot loader images. These certificates are loaded in
-Trusted SRAM using the IO storage framework. They are then verified by an
-Authentication module included in the Trusted Firmware.
-
-The mechanism used for generating the FIP and the Authentication module are
-described in the following sections.
-
-
-4.  Authentication Module
--------------------------
-
-The authentication module implements the required support to authenticate the
-corresponding certificates or images at each step in the Trusted Board Boot
-sequence. The module relies on the PolarSSL library (v1.3.9) to perform the
-following operations:
-
-*   Parsing X.509 certificates and verifying them using SHA-1 with RSA
-    Encryption.
-*   Extracting public keys and hashes from the certificates.
-*   Generating hashes (SHA-256) of boot loader images
-
-At each step, the module is responsible for allocating memory to store the
-public keys or hashes that will be used in later steps. The step identifier is
-used to determine what information must be saved, according to the CoT model
-detailed in the previous sections.
-
-The authentication module resides in the `common/auth/polarssl` directory.
-Instructions for including the necessary modules of the PolarSSL SSL library and
-building the authentication module can be found in the [User Guide].
-
-
-5.  Certificate Generation Tool
--------------------------------
-
-The `cert_create` tool is built and runs on the host machine as part of the
-Trusted Firmware build process when `GENERATE_COT=1`. It takes the boot loader
-images and keys as inputs (keys must be in PEM format) and generates the
-certificates (in DER format) required to establish the CoT. New keys can be
-generated by the tool in case they are not provided. The certificates are then
-passed as inputs to the `fip_create` tool for creating the FIP.
-
-The certificates are also stored individually in the in the output build
-directory.
-
-The tool resides in the `tools/cert_create` directory. It uses OpenSSL SSL
-library version 1.0.1 or later to generate the X.509 certificates. Instructions
-for building and using the tool can be found in the [User Guide].
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2015, ARM Limited and Contributors. All rights reserved._
-
-
-[X.509 v3]:          http://www.ietf.org/rfc/rfc5280.txt
-[X.690]:             http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
-[User Guide]:        user-guide.md
diff --git a/docs/trusted-board-boot.rst b/docs/trusted-board-boot.rst
new file mode 100644
index 0000000..6a28da0
--- /dev/null
+++ b/docs/trusted-board-boot.rst
@@ -0,0 +1,238 @@
+Trusted Board Boot Design Guide
+===============================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+The Trusted Board Boot (TBB) feature prevents malicious firmware from running on
+the platform by authenticating all firmware images up to and including the
+normal world bootloader. It does this by establishing a Chain of Trust using
+Public-Key-Cryptography Standards (PKCS).
+
+This document describes the design of ARM Trusted Firmware TBB, which is an
+implementation of the Trusted Board Boot Requirements (TBBR) specification,
+ARM DEN0006C-1. It should be used in conjunction with the `Firmware Update`_
+design document, which implements a specific aspect of the TBBR.
+
+Chain of Trust
+--------------
+
+A Chain of Trust (CoT) starts with a set of implicitly trusted components. On
+the ARM development platforms, these components are:
+
+-  A SHA-256 hash of the Root of Trust Public Key (ROTPK). It is stored in the
+   trusted root-key storage registers.
+
+-  The BL1 image, on the assumption that it resides in ROM so cannot be
+   tampered with.
+
+The remaining components in the CoT are either certificates or boot loader
+images. The certificates follow the `X.509 v3`_ standard. This standard
+enables adding custom extensions to the certificates, which are used to store
+essential information to establish the CoT.
+
+In the TBB CoT all certificates are self-signed. There is no need for a
+Certificate Authority (CA) because the CoT is not established by verifying the
+validity of a certificate's issuer but by the content of the certificate
+extensions. To sign the certificates, the PKCS#1 SHA-256 with RSA Encryption
+signature scheme is used with an RSA key length of 2048 bits. Future versions
+of Trusted Firmware will support additional cryptographic algorithms.
+
+The certificates are categorised as "Key" and "Content" certificates. Key
+certificates are used to verify public keys which have been used to sign content
+certificates. Content certificates are used to store the hash of a boot loader
+image. An image can be authenticated by calculating its hash and matching it
+with the hash extracted from the content certificate. The SHA-256 function is
+used to calculate all hashes. The public keys and hashes are included as
+non-standard extension fields in the `X.509 v3`_ certificates.
+
+The keys used to establish the CoT are:
+
+-  **Root of trust key**
+
+   The private part of this key is used to sign the BL2 content certificate and
+   the trusted key certificate. The public part is the ROTPK.
+
+-  **Trusted world key**
+
+   The private part is used to sign the key certificates corresponding to the
+   secure world images (SCP\_BL2, BL31 and BL32). The public part is stored in
+   one of the extension fields in the trusted world certificate.
+
+-  **Non-trusted world key**
+
+   The private part is used to sign the key certificate corresponding to the
+   non secure world image (BL33). The public part is stored in one of the
+   extension fields in the trusted world certificate.
+
+-  **BL3-X keys**
+
+   For each of SCP\_BL2, BL31, BL32 and BL33, the private part is used to
+   sign the content certificate for the BL3-X image. The public part is stored
+   in one of the extension fields in the corresponding key certificate.
+
+The following images are included in the CoT:
+
+-  BL1
+-  BL2
+-  SCP\_BL2 (optional)
+-  BL31
+-  BL33
+-  BL32 (optional)
+
+The following certificates are used to authenticate the images.
+
+-  **BL2 content certificate**
+
+   It is self-signed with the private part of the ROT key. It contains a hash
+   of the BL2 image.
+
+-  **Trusted key certificate**
+
+   It is self-signed with the private part of the ROT key. It contains the
+   public part of the trusted world key and the public part of the non-trusted
+   world key.
+
+-  **SCP\_BL2 key certificate**
+
+   It is self-signed with the trusted world key. It contains the public part of
+   the SCP\_BL2 key.
+
+-  **SCP\_BL2 content certificate**
+
+   It is self-signed with the SCP\_BL2 key. It contains a hash of the SCP\_BL2
+   image.
+
+-  **BL31 key certificate**
+
+   It is self-signed with the trusted world key. It contains the public part of
+   the BL31 key.
+
+-  **BL31 content certificate**
+
+   It is self-signed with the BL31 key. It contains a hash of the BL31 image.
+
+-  **BL32 key certificate**
+
+   It is self-signed with the trusted world key. It contains the public part of
+   the BL32 key.
+
+-  **BL32 content certificate**
+
+   It is self-signed with the BL32 key. It contains a hash of the BL32 image.
+
+-  **BL33 key certificate**
+
+   It is self-signed with the non-trusted world key. It contains the public
+   part of the BL33 key.
+
+-  **BL33 content certificate**
+
+   It is self-signed with the BL33 key. It contains a hash of the BL33 image.
+
+The SCP\_BL2 and BL32 certificates are optional, but they must be present if the
+corresponding SCP\_BL2 or BL32 images are present.
+
+Trusted Board Boot Sequence
+---------------------------
+
+The CoT is verified through the following sequence of steps. The system panics
+if any of the steps fail.
+
+-  BL1 loads and verifies the BL2 content certificate. The issuer public key is
+   read from the verified certificate. A hash of that key is calculated and
+   compared with the hash of the ROTPK read from the trusted root-key storage
+   registers. If they match, the BL2 hash is read from the certificate.
+
+   Note: the matching operation is platform specific and is currently
+   unimplemented on the ARM development platforms.
+
+-  BL1 loads the BL2 image. Its hash is calculated and compared with the hash
+   read from the certificate. Control is transferred to the BL2 image if all
+   the comparisons succeed.
+
+-  BL2 loads and verifies the trusted key certificate. The issuer public key is
+   read from the verified certificate. A hash of that key is calculated and
+   compared with the hash of the ROTPK read from the trusted root-key storage
+   registers. If the comparison succeeds, BL2 reads and saves the trusted and
+   non-trusted world public keys from the verified certificate.
+
+The next two steps are executed for each of the SCP\_BL2, BL31 & BL32 images.
+The steps for the optional SCP\_BL2 and BL32 images are skipped if these images
+are not present.
+
+-  BL2 loads and verifies the BL3x key certificate. The certificate signature
+   is verified using the trusted world public key. If the signature
+   verification succeeds, BL2 reads and saves the BL3x public key from the
+   certificate.
+
+-  BL2 loads and verifies the BL3x content certificate. The signature is
+   verified using the BL3x public key. If the signature verification succeeds,
+   BL2 reads and saves the BL3x image hash from the certificate.
+
+The next two steps are executed only for the BL33 image.
+
+-  BL2 loads and verifies the BL33 key certificate. If the signature
+   verification succeeds, BL2 reads and saves the BL33 public key from the
+   certificate.
+
+-  BL2 loads and verifies the BL33 content certificate. If the signature
+   verification succeeds, BL2 reads and saves the BL33 image hash from the
+   certificate.
+
+The next step is executed for all the boot loader images.
+
+-  BL2 calculates the hash of each image. It compares it with the hash obtained
+   from the corresponding content certificate. The image authentication succeeds
+   if the hashes match.
+
+The Trusted Board Boot implementation spans both generic and platform-specific
+BL1 and BL2 code, as well as tool code on the host build machine. The feature is
+enabled through use of specific build flags as described in the `User Guide`_.
+
+On the host machine, a tool generates the certificates, which are included in
+the FIP along with the boot loader images. These certificates are loaded in
+Trusted SRAM using the IO storage framework. They are then verified by an
+Authentication module included in the Trusted Firmware.
+
+The mechanism used for generating the FIP and the Authentication module are
+described in the following sections.
+
+Authentication Framework
+------------------------
+
+The authentication framework included in the Trusted Firmware provides support
+to implement the desired trusted boot sequence. ARM platforms use this framework
+to implement the boot requirements specified in the TBBR-client document.
+
+More information about the authentication framework can be found in the
+`Auth Framework`_ document.
+
+Certificate Generation Tool
+---------------------------
+
+The ``cert_create`` tool is built and runs on the host machine as part of the
+Trusted Firmware build process when ``GENERATE_COT=1``. It takes the boot loader
+images and keys as inputs (keys must be in PEM format) and generates the
+certificates (in DER format) required to establish the CoT. New keys can be
+generated by the tool in case they are not provided. The certificates are then
+passed as inputs to the ``fiptool`` utility for creating the FIP.
+
+The certificates are also stored individually in the output build
+directory.
+
+The tool resides in the ``tools/cert_create`` directory. It uses the OpenSSL
+library (version 1.0.1 or later) to generate the X.509 certificates.
+Instructions for building and using the tool can be found in the `User Guide`_.
+
+--------------
+
+*Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.*
+
+.. _Firmware Update: firmware-update.rst
+.. _X.509 v3: http://www.ietf.org/rfc/rfc5280.txt
+.. _User Guide: user-guide.rst
+.. _Auth Framework: auth-framework.rst
diff --git a/docs/user-guide.md b/docs/user-guide.md
deleted file mode 100644
index 1badc0c..0000000
--- a/docs/user-guide.md
+++ /dev/null
@@ -1,1102 +0,0 @@
-ARM Trusted Firmware User Guide
-===============================
-
-Contents :
-
-1.  [Introduction](#1--introduction)
-2.  [Host machine requirements](#2--host-machine-requirements)
-3.  [Tools](#3--tools)
-4.  [Building the Trusted Firmware](#4--building-the-trusted-firmware)
-5.  [Obtaining the normal world software](#5--obtaining-the-normal-world-software)
-6.  [Preparing the images to run on FVP](#6--preparing-the-images-to-run-on-fvp)
-7.  [Running the software on FVP](#7--running-the-software-on-fvp)
-8.  [Running the software on Juno](#8--running-the-software-on-juno)
-
-
-1.  Introduction
-----------------
-This document describes how to build ARM Trusted Firmware and run it with a
-tested set of other software components using defined configurations on the Juno
-ARM development platform and ARM Fixed Virtual Platform (FVP) models. It is
-possible to use other software components, configurations and platforms but that
-is outside the scope of this document.
-
-This document should be used in conjunction with the [Firmware Design].
-
-
-2.  Host machine requirements
------------------------------
-
-The minimum recommended machine specification for building the software and
-running the FVP models is a dual-core processor running at 2GHz with 12GB of
-RAM.  For best performance, use a machine with a quad-core processor running at
-2.6GHz with 16GB of RAM.
-
-The software has been tested on Ubuntu 12.04.04 (64-bit).  Packages used
-for building the software were installed from that distribution unless
-otherwise specified.
-
-
-3.  Tools
----------
-
-The following tools are required to use the ARM Trusted Firmware:
-
-*   `git` package to obtain source code.
-
-*   `build-essential`, `uuid-dev` and `iasl` packages for building UEFI and the
-    Firmware Image Package (FIP) tool.
-
-*   `bc` and `ncurses-dev` packages for building Linux.
-
-*   `device-tree-compiler` package for building the Flattened Device Tree (FDT)
-    source files (`.dts` files) provided with this software.
-
-*   Baremetal GNU GCC tools. Verified packages can be downloaded from [Linaro]
-    [Linaro Toolchain]. The rest of this document assumes that the
-    `gcc-linaro-aarch64-none-elf-4.9-2014.07_linux.tar.xz` tools are used.
-
-        wget http://releases.linaro.org/14.07/components/toolchain/binaries/gcc-linaro-aarch64-none-elf-4.9-2014.07_linux.tar.xz
-        tar -xf gcc-linaro-aarch64-none-elf-4.9-2014.07_linux.tar.xz
-
-*   (Optional) For debugging, ARM [Development Studio 5 (DS-5)][DS-5] v5.20.
-
-
-4.  Building the Trusted Firmware
----------------------------------
-
-To build the Trusted Firmware images, follow these steps:
-
-1.  Clone the ARM Trusted Firmware repository from GitHub:
-
-        git clone https://github.com/ARM-software/arm-trusted-firmware.git
-
-2.  Change to the trusted firmware directory:
-
-        cd arm-trusted-firmware
-
-3.  Set the compiler path, specify a Non-trusted Firmware image (BL3-3) and
-    a valid platform, and then build:
-
-        CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-        BL33=<path-to>/<bl33_image>                               \
-        make PLAT=<platform> all fip
-
-    If `PLAT` is not specified, `fvp` is assumed by default. See the "Summary of
-    build options" for more information on available build options.
-
-    The BL3-3 image corresponds to the software that is executed after switching
-    to the non-secure world. UEFI can be used as the BL3-3 image. Refer to the
-    "Obtaining the normal world software" section below.
-
-    The TSP (Test Secure Payload), corresponding to the BL3-2 image, is not
-    compiled in by default. Refer to the "Building the Test Secure Payload"
-    section below.
-
-    By default this produces a release version of the build. To produce a debug
-    version instead, refer to the "Debugging options" section below.
-
-    The build process creates products in a `build` directory tree, building
-    the objects and binaries for each boot loader stage in separate
-    sub-directories.  The following boot loader binary files are created from
-    the corresponding ELF files:
-
-    *   `build/<platform>/<build-type>/bl1.bin`
-    *   `build/<platform>/<build-type>/bl2.bin`
-    *   `build/<platform>/<build-type>/bl31.bin`
-
-    where `<platform>` is the name of the chosen platform and `<build-type>` is
-    either `debug` or `release`. A Firmare Image Package (FIP) will be created
-    as part of the build. It contains all boot loader images except for
-    `bl1.bin`.
-
-    *   `build/<platform>/<build-type>/fip.bin`
-
-    For more information on FIPs, see the "Firmware Image Package" section in
-    the [Firmware Design].
-
-4.  (Optional) Some platforms may require a BL3-0 image to boot. This image can
-    be included in the FIP when building the Trusted Firmware by specifying the
-    `BL30` build option:
-
-        BL30=<path-to>/<bl30_image>
-
-5.  Output binary files `bl1.bin` and `fip.bin` are both required to boot the
-    system. How these files are used is platform specific. Refer to the
-    platform documentation on how to use the firmware images.
-
-6.  (Optional) Build products for a specific build variant can be removed using:
-
-        make DEBUG=<D> PLAT=<platform> clean
-
-    ... where `<D>` is `0` or `1`, as specified when building.
-
-    The build tree can be removed completely using:
-
-        make realclean
-
-7.  (Optional) Path to binary for certain BL stages (BL2, BL3-1 and BL3-2) can be
-    provided by specifying the BLx=<path-to>/<blx_image> where BLx is the BL stage.
-    This will bypass the build of the BL component from source, but will include
-    the specified binary in the final FIP image. Please note that BL3-2 will be
-    included in the build, only if the `SPD` build option is specified.
-
-    For example, specifying BL2=<path-to>/<bl2_image> in the build option, will
-    skip compilation of BL2 source in trusted firmware, but include the BL2
-    binary specified in the final FIP image.
-
-### Summary of build options
-
-ARM Trusted Firmware build system supports the following build options. Unless
-mentioned otherwise, these options are expected to be specified at the build
-command line and are not to be modified in any component makefiles. Note that
-the build system doesn't track dependency for build options. Therefore, if any
-of the build options are changed from a previous build, a clean build must be
-performed.
-
-#### Common build options
-
-*   `BL30`: Path to BL3-0 image in the host file system. This image is optional.
-    If a BL3-0 image is present then this option must be passed for the `fip`
-    target.
-
-*   `BL33`: Path to BL3-3 image in the host file system. This is mandatory for
-    `fip` target in case the BL2 from ARM Trusted Firmware is used.
-
-*   `BL2`: This is an optional build option which specifies the path to BL2
-    image for the `fip` target. In this case, the BL2 in the ARM Trusted
-    Firmware will not be built.
-
-*   `BL31`:  This is an optional build option which specifies the path to
-    BL3-1 image for the `fip` target. In this case, the BL3-1 in the ARM
-    Trusted Firmware will not be built.
-
-*   `BL32`:  This is an optional build option which specifies the path to
-    BL3-2 image for the `fip` target. In this case, the BL3-2 in the ARM
-    Trusted Firmware will not be built.
-
-*   `FIP_NAME`: This is an optional build option which specifies the FIP
-    filename for the `fip` target. Default is `fip.bin`.
-
-*   `CROSS_COMPILE`: Prefix to toolchain binaries. Please refer to examples in
-    this document for usage.
-
-*   `DEBUG`: Chooses between a debug and release build. It can take either 0
-    (release) or 1 (debug) as values. 0 is the default.
-
-*   `LOG_LEVEL`: Chooses the log level, which controls the amount of console log
-    output compiled into the build. This should be one of the following:
-
-        0  (LOG_LEVEL_NONE)
-        10 (LOG_LEVEL_NOTICE)
-        20 (LOG_LEVEL_ERROR)
-        30 (LOG_LEVEL_WARNING)
-        40 (LOG_LEVEL_INFO)
-        50 (LOG_LEVEL_VERBOSE)
-
-    All log output up to and including the log level is compiled into the build.
-    The default value is 40 in debug builds and 20 in release builds.
-
-*   `NS_TIMER_SWITCH`: Enable save and restore for non-secure timer register
-    contents upon world switch. It can take either 0 (don't save and restore) or
-    1 (do save and restore). 0 is the default. An SPD may set this to 1 if it
-    wants the timer registers to be saved and restored.
-
-*   `PLAT`: Choose a platform to build ARM Trusted Firmware for. The chosen
-    platform name must be the name of one of the directories under the `plat/`
-    directory other than `common`.
-
-*   `SPD`: Choose a Secure Payload Dispatcher component to be built into the
-    Trusted Firmware. The value should be the path to the directory containing
-    the SPD source, relative to `services/spd/`; the directory is expected to
-    contain a makefile called `<spd-value>.mk`.
-
-*   `V`: Verbose build. If assigned anything other than 0, the build commands
-    are printed. Default is 0.
-
-*   `ARM_GIC_ARCH`: Choice of ARM GIC architecture version used by the ARM GIC
-    driver for implementing the platform GIC API. This API is used
-    by the interrupt management framework. Default is 2 (that is, version 2.0).
-
-*   `IMF_READ_INTERRUPT_ID`: Boolean flag used by the interrupt management
-    framework to enable passing of the interrupt id to its handler. The id is
-    read using a platform GIC API. `INTR_ID_UNAVAILABLE` is passed instead if
-    this option set to 0. Default is 0.
-
-*   `RESET_TO_BL31`: Enable BL3-1 entrypoint as the CPU reset vector instead
-    of the BL1 entrypoint. It can take the value 0 (CPU reset to BL1
-    entrypoint) or 1 (CPU reset to BL3-1 entrypoint).
-    The default value is 0.
-
-*   `CRASH_REPORTING`: A non-zero value enables a console dump of processor
-    register state when an unexpected exception occurs during execution of
-    BL3-1. This option defaults to the value of `DEBUG` - i.e. by default
-    this is only enabled for a debug build of the firmware.
-
-*   `ASM_ASSERTION`: This flag determines whether the assertion checks within
-    assembly source files are enabled or not. This option defaults to the
-    value of `DEBUG` - that is, by default this is only enabled for a debug
-    build of the firmware.
-
-*   `TSP_INIT_ASYNC`: Choose BL3-2 initialization method as asynchronous or
-    synchronous, (see "Initializing a BL3-2 Image" section in [Firmware
-    Design]). It can take the value 0 (BL3-2 is initialized using
-    synchronous method) or 1 (BL3-2 is initialized using asynchronous method).
-    Default is 0.
-
-*   `USE_COHERENT_MEM`: This flag determines whether to include the coherent
-    memory region in the BL memory map or not (see "Use of Coherent memory in
-    Trusted Firmware" section in [Firmware Design]). It can take the value 1
-    (Coherent memory region is included) or 0 (Coherent memory region is
-    excluded). Default is 1.
-
-*   `TSPD_ROUTE_IRQ_TO_EL3`: A non-zero value enables the routing model
-    for non-secure interrupts in which they are routed to EL3 (TSPD). The
-    default model (when the value is 0) is to route non-secure interrupts
-    to S-EL1 (TSP).
-
-*   `TRUSTED_BOARD_BOOT`: Boolean flag to include support for the Trusted Board
-    Boot feature. When set to '1', BL1 and BL2 images include support to load
-    and verify the certificates and images in a FIP. The default value is '0'.
-    A successful build, when `TRUSTED_BOARD_BOOT=1`, depends upon the correct
-    initialization of the `AUTH_MOD` option. Generation and inclusion of
-    certificates in the FIP depends upon the value of the `GENERATE_COT` option.
-
-*   `AUTH_MOD`: This option is used when `TRUSTED_BOARD_BOOT=1`. It specifies
-    the name of the authentication module that will be used in the Trusted Board
-    Boot sequence. The module must be located in `common/auth/<module name>`
-    directory. The directory must contain a makefile `<module name>.mk` which
-    will be used to build the module. More information can be found in
-    [Trusted Board Boot]. The default module name is 'none'.
-
-*   `GENERATE_COT`: Boolean flag used to build and execute the `cert_create`
-    tool to create certificates as per the Chain of Trust described in
-    [Trusted Board Boot].  The build system then calls the `fip_create` tool to
-    include the certificates in the FIP. Default value is '0'.
-
-    Specify `TRUSTED_BOARD_BOOT=1` and `GENERATE_COT=1` to include support for
-    the Trusted Board Boot Sequence in the BL1 and BL2 images and the FIP.
-
-    Note that if `TRUSTED_BOARD_BOOT=0` and `GENERATE_COT=1`, the BL1 and BL2
-    images will not include support for Trusted Board Boot. The FIP will still
-    include the key and content certificates. This FIP can be used to verify the
-    Chain of Trust on the host machine through other mechanisms.
-
-    Note that if `TRUSTED_BOARD_BOOT=1` and `GENERATE_COT=0`, the BL1 and BL2
-    images will include support for Trusted Board Boot, but the FIP will not
-    include the key and content certificates, causing a boot failure.
-
-*   `CREATE_KEYS`: This option is used when `GENERATE_COT=1`. It tells the
-    certificate generation tool to create new keys in case no valid keys are
-    present or specified. Allowed options are '0' or '1'. Default is '1'.
-
-*   `ROT_KEY`: This option is used when `GENERATE_COT=1`. It specifies the
-    file that contains the ROT private key in PEM format.
-
-*   `TRUSTED_WORLD_KEY`: This option is used when `GENERATE_COT=1`. It
-    specifies the file that contains the Trusted World private key in PEM
-    format.
-
-*   `NON_TRUSTED_WORLD_KEY`: This option is used when `GENERATE_COT=1`. It
-    specifies the file that contains the Non-Trusted World private key in PEM
-    format.
-
-*   `BL30_KEY`: This option is used when `GENERATE_COT=1`. It specifies the
-    file that contains the BL3-0 private key in PEM format.
-
-*   `BL31_KEY`: This option is used when `GENERATE_COT=1`. It specifies the
-    file that contains the BL3-1 private key in PEM format.
-
-*   `BL32_KEY`: This option is used when `GENERATE_COT=1`. It specifies the
-    file that contains the BL3-2 private key in PEM format.
-
-*   `BL33_KEY`: This option is used when `GENERATE_COT=1`. It specifies the
-    file that contains the BL3-3 private key in PEM format.
-
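-As an illustration, several of the options above can be combined on a single
-`make` command line. For example, a verbose debug build with the most detailed
-log output might be produced as follows (the paths and platform name are
-placeholders):
-
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-    BL33=<path-to>/<bl33_image>                               \
-    make PLAT=<platform> DEBUG=1 LOG_LEVEL=50 V=1 all fip
-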
-#### FVP specific build options
-
-*   `FVP_TSP_RAM_LOCATION`: location of the TSP binary. Options:
-    -   `tsram` : Trusted SRAM (default option)
-    -   `tdram` : Trusted DRAM
-    -   `dram`  : Secure region in DRAM (configured by the TrustZone controller)
-
-For a better understanding of FVP options, the FVP memory map is explained in
-the [Firmware Design].
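-
-For example, an illustrative build command that places the TSP in Trusted DRAM
-(assuming the TSP is built as described in the "Building the Test Secure
-Payload" section below) might look like this:
-
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-    BL33=<path-to>/<bl33_image>                               \
-    make PLAT=fvp SPD=tspd FVP_TSP_RAM_LOCATION=tdram all fip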
-
-#### Juno specific build options
-
-*   `PLAT_TSP_LOCATION`: location of the TSP binary. Options:
-    -   `tsram` : Trusted SRAM (default option)
-    -   `dram`  : Secure region in DRAM (set by the TrustZone controller)
-
-### Creating a Firmware Image Package
-
-FIPs are automatically created as part of the build instructions described in
-the previous section. It is also possible to independently build the FIP
-creation tool and FIPs if required. To do this, follow these steps:
-
-Build the tool:
-
-    make -C tools/fip_create
-
-It is recommended to remove the build artifacts before rebuilding:
-
-    make -C tools/fip_create clean
-
-Create a Firmware package that contains existing BL2 and BL3-1 images:
-
-    # fip_create --help to print usage information
-    # fip_create <fip_name> <images to add> [--dump to show result]
-    ./tools/fip_create/fip_create fip.bin --dump \
-       --bl2 build/<platform>/debug/bl2.bin --bl31 build/<platform>/debug/bl31.bin
-
-     Firmware Image Package ToC:
-    ---------------------------
-    - Trusted Boot Firmware BL2: offset=0x88, size=0x81E8
-      file: 'build/<platform>/debug/bl2.bin'
-    - EL3 Runtime Firmware BL3-1: offset=0x8270, size=0xC218
-      file: 'build/<platform>/debug/bl31.bin'
-    ---------------------------
-    Creating "fip.bin"
-
-View the contents of an existing Firmware package:
-
-    ./tools/fip_create/fip_create fip.bin --dump
-
-     Firmware Image Package ToC:
-    ---------------------------
-    - Trusted Boot Firmware BL2: offset=0x88, size=0x81E8
-    - EL3 Runtime Firmware BL3-1: offset=0x8270, size=0xC218
-    ---------------------------
-
-Existing package entries can be individually updated:
-
-    # Change the BL2 from Debug to Release version
-    ./tools/fip_create/fip_create fip.bin --dump \
-      --bl2 build/<platform>/release/bl2.bin
-
-    Firmware Image Package ToC:
-    ---------------------------
-    - Trusted Boot Firmware BL2: offset=0x88, size=0x7240
-      file: 'build/<platform>/release/bl2.bin'
-    - EL3 Runtime Firmware BL3-1: offset=0x72C8, size=0xC218
-    ---------------------------
-    Updating "fip.bin"
-
-
-### Debugging options
-
-To compile a debug version and make the build more verbose use
-
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-    BL33=<path-to>/<bl33_image>                               \
-    make PLAT=<platform> DEBUG=1 V=1 all fip
-
-AArch64 GCC uses DWARF version 4 debugging symbols by default. Some tools (for
-example DS-5) might not support this and may need an older version of DWARF
-symbols to be emitted by GCC. This can be achieved by using the
-`-gdwarf-<version>` flag, with the version being set to 2 or 3. Setting the
-version to 2 is recommended for DS-5 versions older than 5.16.
-
-When debugging logic problems it might also be useful to disable all compiler
-optimizations by using `-O0`.
-
-NOTE: Using `-O0` could cause output images to be larger and base addresses
-might need to be recalculated (see the "Memory layout of BL images" section in
-the [Firmware Design]).
-
-Extra debug options can be passed to the build system by setting `CFLAGS`:
-
-    CFLAGS='-O0 -gdwarf-2'                                    \
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-    BL33=<path-to>/<bl33_image>                               \
-    make PLAT=<platform> DEBUG=1 V=1 all fip
-
-
-### Building the Test Secure Payload
-
-The TSP is coupled with a companion runtime service in the BL3-1 firmware,
-called the TSPD. Therefore, if you intend to use the TSP, the BL3-1 image
-must be recompiled as well. For more information on SPs and SPDs, see the
-"Secure-EL1 Payloads and Dispatchers" section in the [Firmware Design].
-
-First clean the Trusted Firmware build directory to get rid of any previous
-BL3-1 binary. Then to build the TSP image and include it into the FIP use:
-
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-    BL33=<path-to>/<bl33_image>                               \
-    make PLAT=<platform> SPD=tspd all fip
-
-An additional boot loader binary file is created in the `build` directory:
-
-*   `build/<platform>/<build-type>/bl32.bin`
-
-The FIP will now contain the additional BL3-2 image. Here is an example
-output from an FVP build in release mode including BL3-2 and using
-FVP_AARCH64_EFI.fd as BL3-3 image:
-
-    Firmware Image Package ToC:
-    ---------------------------
-    - Trusted Boot Firmware BL2: offset=0xD8, size=0x6000
-      file: './build/fvp/release/bl2.bin'
-    - EL3 Runtime Firmware BL3-1: offset=0x60D8, size=0x9000
-      file: './build/fvp/release/bl31.bin'
-    - Secure Payload BL3-2 (Trusted OS): offset=0xF0D8, size=0x3000
-      file: './build/fvp/release/bl32.bin'
-    - Non-Trusted Firmware BL3-3: offset=0x120D8, size=0x280000
-      file: '../FVP_AARCH64_EFI.fd'
-    ---------------------------
-    Creating "build/fvp/release/fip.bin"
-
-
-### Building the Certificate Generation Tool
-
-The `cert_create` tool can be built separately through the following commands:
-
-    $ cd tools/cert_create
-    $ make [DEBUG=1] [V=1]
-
-`DEBUG=1` builds the tool in debug mode. `V=1` makes the build process more
-verbose. The following command should be used to obtain help about the tool:
-
-    $ ./cert_create -h
-
-The `cert_create` tool is automatically built with the `fip` target when
-`GENERATE_COT=1`.
-
-
-### Building a FIP image with support for Trusted Board Boot
-
-The Trusted Board Boot feature is described in [Trusted Board Boot]. The
-following steps should be followed to build a FIP image with support for this
-feature.
-
-1.  Fulfill the dependencies of the `polarssl` authentication module by checking
-    out the tag `polarssl-1.3.9` from the [PolarSSL Repository].
-
-    The `common/auth/polarssl/polarssl.mk` contains the list of PolarSSL source
-    files the module depends upon. `common/auth/polarssl/polarssl_config.h`
-    contains the configuration options required to build the PolarSSL sources.
-
-    Note that the PolarSSL SSL library is licensed under the GNU GPL version 2
-    or later license. Using PolarSSL source code will affect the licensing of
-    Trusted Firmware binaries that are built using this library.
-
-2.  Ensure that the following command line variables are set while invoking
-    `make` to build Trusted Firmware:
-
-    *   `POLARSSL_DIR=<path of the directory containing PolarSSL sources>`
-    *   `AUTH_MOD=polarssl`
-    *   `TRUSTED_BOARD_BOOT=1`
-    *   `GENERATE_COT=1`
-
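-For example, a complete build invocation combining the variables listed above
-might look like the following (the paths are placeholders):
-
-    CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf-    \
-    BL33=<path-to>/<bl33_image>                                  \
-    make PLAT=<platform> POLARSSL_DIR=<path-to-polarssl-sources> \
-        AUTH_MOD=polarssl TRUSTED_BOARD_BOOT=1 GENERATE_COT=1 all fip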
-
-### Checking source code style
-
-When making changes to the source for submission to the project, the source
-must comply with the Linux style guide. To assist with this, the project
-Makefile contains two targets, both of which utilise the `checkpatch.pl`
-script that ships with the Linux source tree.
-
-To check the entire source tree, you must first download a copy of
-`checkpatch.pl` (or the full Linux source), set the `CHECKPATCH` environment
-variable to point to the script, and build the `checkcodebase` target:
-
-    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkcodebase
-
-To just check the style on the files that differ between your local branch and
-the remote master, use:
-
-    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkpatch
-
-If you wish to check your patch against something other than the remote master,
-set the `BASE_COMMIT` variable to your desired branch. By default, `BASE_COMMIT`
-is set to `origin/master`.
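-
-For example, an illustrative invocation that compares against a different
-branch might look like this:
-
-    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl \
-        BASE_COMMIT=<remote>/<branch> checkpatch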
-
-
-5.  Obtaining the normal world software
----------------------------------------
-
-### Obtaining EDK2
-
-Potentially any kind of non-trusted firmware may be used with the ARM Trusted
-Firmware but the software has only been tested with the EFI Development Kit 2
-(EDK2) open source implementation of the UEFI specification.
-
-To build the software to be compatible with the Foundation and Base FVPs, or the
-Juno platform, follow these steps:
-
-1.  Clone the [EDK2 source code][EDK2] from GitHub:
-
-        git clone -n https://github.com/tianocore/edk2.git
-
-    Not all required features are available in the EDK2 mainline yet. These can
-    be obtained from the ARM-software EDK2 repository instead:
-
-        cd edk2
-        git remote add -f --tags arm-software https://github.com/ARM-software/edk2.git
-        git checkout --detach v2.1-rc0
-
-2.  Copy build config templates to local workspace
-
-        # in edk2/
-        . edksetup.sh
-
-3.  Build the EDK2 host tools
-
-        make -C BaseTools clean
-        make -C BaseTools
-
-4.  Build the EDK2 software
-
-    1.  Build for FVP
-
-            GCC49_AARCH64_PREFIX=<absolute-path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-            make -f ArmPlatformPkg/Scripts/Makefile EDK2_ARCH=AARCH64 \
-            EDK2_DSC=ArmPlatformPkg/ArmVExpressPkg/ArmVExpress-FVP-AArch64.dsc \
-            EDK2_TOOLCHAIN=GCC49 EDK2_BUILD=RELEASE \
-            EDK2_MACROS="-n 6 -D ARM_FOUNDATION_FVP=1"
-
-        The EDK2 binary for use with the ARM Trusted Firmware can then be found
-        here:
-
-             Build/ArmVExpress-FVP-AArch64/RELEASE_GCC49/FV/FVP_AARCH64_EFI.fd
-
-    2.  Build for Juno
-
-            GCC49_AARCH64_PREFIX=<absolute-path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-            make -f ArmPlatformPkg/ArmJunoPkg/Makefile EDK2_ARCH=AARCH64 \
-            EDK2_TOOLCHAIN=GCC49 EDK2_BUILD=RELEASE
-
-        The EDK2 binary for use with the ARM Trusted Firmware can then be found
-        here:
-
-            Build/ArmJuno/RELEASE_GCC49/FV/BL33_AP_UEFI.fd
-
-    The EDK2 binary should be specified as `BL33` in the `make` command line
-    when building the Trusted Firmware. See the "Building the Trusted Firmware"
-    section above.
-
-5.  (Optional) To build EDK2 in debug mode, remove `EDK2_BUILD=RELEASE` from the
-    command line.
-
-6.  (Optional) To boot Linux using a VirtioBlock file-system, the command line
-    passed from EDK2 to the Linux kernel must be modified as described in the
-    "Obtaining a root file-system" section below.
-
-7.  (Optional) If legacy GICv2 locations are used, the EDK2 platform description
-    must be updated. This is required as EDK2 does not support probing for the
-    GIC location. To do this, first clean the EDK2 build directory.
-
-        make -f ArmPlatformPkg/Scripts/Makefile EDK2_ARCH=AARCH64          \
-        EDK2_DSC=ArmPlatformPkg/ArmVExpressPkg/ArmVExpress-FVP-AArch64.dsc \
-        EDK2_TOOLCHAIN=ARMGCC clean
-
-    Then rebuild EDK2 as described in step 4, using the following flag:
-
-        -D ARM_FVP_LEGACY_GICV2_LOCATION=1
-
-    Finally rebuild the Trusted Firmware to generate a new FIP using the
-    instructions in the "Building the Trusted Firmware" section.
-
-
-### Obtaining a Linux kernel
-
-Preparing a Linux kernel for use on the FVPs can be done as follows
-(GICv2 support only):
-
-1.  Clone Linux:
-
-        git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-    Not all required features are available in the kernel mainline yet. These
-    can be obtained from the ARM-software Linux repository instead:
-
-        cd linux
-        git remote add -f --tags arm-software https://github.com/ARM-software/linux.git
-        git checkout --detach 1.3-Juno
-
-2.  Build with the Linaro GCC tools.
-
-        # in linux/
-        make mrproper
-        make ARCH=arm64 defconfig
-
-        CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- \
-        make -j6 ARCH=arm64
-
-The compiled Linux image will now be found at `arch/arm64/boot/Image`.
-
-
-6.  Preparing the images to run on FVP
---------------------------------------
-
-### Obtaining the Flattened Device Trees
-
-Depending on the FVP configuration and Linux configuration used, different
-FDT files are required. FDTs for the Foundation and Base FVPs can be found in
-the Trusted Firmware source directory under `fdts/`. The Foundation FVP has a
-subset of the Base FVP components. For example, the Foundation FVP lacks CLCD
-and MMC support, and has only one CPU cluster.
-
-*   `fvp-base-gicv2-psci.dtb`
-
-    (Default) For use with both AEMv8 and Cortex-A57-A53 Base FVPs with
-    Base memory map configuration.
-
-*   `fvp-base-gicv2legacy-psci.dtb`
-
-    For use with AEMv8 Base FVP with legacy VE GIC memory map configuration.
-
-*   `fvp-base-gicv3-psci.dtb`
-
-    For use with both AEMv8 and Cortex-A57-A53 Base FVPs with Base memory map
-    configuration and Linux GICv3 support.
-
-*   `fvp-foundation-gicv2-psci.dtb`
-
-    (Default) For use with Foundation FVP with Base memory map configuration.
-
-*   `fvp-foundation-gicv2legacy-psci.dtb`
-
-    For use with Foundation FVP with legacy VE GIC memory map configuration.
-
-*   `fvp-foundation-gicv3-psci.dtb`
-
-    For use with Foundation FVP with Base memory map configuration and Linux
-    GICv3 support.
-
-
-Copy the chosen FDT blob as `fdt.dtb` to the directory from which the FVP
-is launched. Alternatively a symbolic link may be used.
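-
-For example, a symbolic link to the default Base FVP FDT could be created as
-follows (illustrative paths):
-
-    ln -s <path-to-trusted-firmware>/fdts/fvp-base-gicv2-psci.dtb fdt.dtb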
-
-### Preparing the kernel image
-
-Copy the kernel image file `arch/arm64/boot/Image` to the directory from which
-the FVP is launched. Alternatively a symbolic link may be used.
-
-### Obtaining a root file-system
-
-To prepare a Linaro LAMP based Open Embedded file-system, the following
-instructions can be used as a guide. The file-system can be provided to Linux
-via VirtioBlock or as a RAM-disk. Both methods are described below.
-
-#### Prepare VirtioBlock
-
-To prepare a VirtioBlock file-system, do the following:
-
-1.  Download and unpack the disk image.
-
-    NOTE: The unpacked disk image grows to 3 GiB in size.
-
-        wget http://releases.linaro.org/14.12/openembedded/aarch64/vexpress64-openembedded_lamp-armv8-gcc-4.9_20141211-701.img.gz
-        gunzip vexpress64-openembedded_lamp-armv8-gcc-4.9_20141211-701.img.gz
-
-2.  Make sure the Linux kernel has Virtio support enabled using
-    `make ARCH=arm64 menuconfig`.
-
-        Device Drivers  ---> Virtio drivers  ---> <*> Platform bus driver for memory mapped virtio devices
-        Device Drivers  ---> [*] Block devices  --->  <*> Virtio block driver
-        File systems    ---> <*> The Extended 4 (ext4) filesystem
-
-    If some of these configurations are missing, enable them, save the kernel
-    configuration, then rebuild the kernel image using the instructions
-    provided in the section "Obtaining a Linux kernel".
-
-3.  Change the Kernel command line to include `root=/dev/vda2`. This can either
-    be done in the EDK2 boot menu or in the platform file. Editing the platform
-    file and rebuilding EDK2 will make the change persist. To do this:
-
-    1.  In EDK2, edit the following file:
-
-            ArmPlatformPkg/ArmVExpressPkg/ArmVExpress-FVP-AArch64.dsc
-
-    2.  Add `root=/dev/vda2` to:
-
-            gArmPlatformTokenSpaceGuid.PcdDefaultBootArgument|"<Other default options>"
-
-    3.  Remove the entry:
-
-            gArmPlatformTokenSpaceGuid.PcdDefaultBootInitrdPath|""
-
-    4.  Rebuild EDK2 (see the "Obtaining EDK2" section above).
-
-4.  The file-system image file should be provided to the model environment by
-    passing it the correct command line option. In the FVPs the following
-    option should be provided in addition to the ones described in the
-    "Running the software on FVP" section below.
-
-    NOTE: A symbolic link to this file cannot be used with the FVP; the path
-    to the real file must be provided.
-
-    On the Base FVPs:
-
-        -C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"
-
-    On the Foundation FVP:
-
-        --block-device="<path-to>/<file-system-image>"
-
-5.  Ensure that the FVP doesn't output any error messages. If the following
-    error message is displayed:
-
-        ERROR: BlockDevice: Failed to open "<path-to>/<file-system-image>"!
-
-    then make sure the path to the file-system image in the model parameter is
-    correct and that read permission is correctly set on the file-system image
-    file.
-
-#### Prepare RAM-disk
-
-To prepare a RAM-disk root file-system, do the following:
-
-1.  Download the file-system image:
-
-        wget http://releases.linaro.org/14.12/openembedded/aarch64/linaro-image-lamp-genericarmv8-20141212-729.rootfs.tar.gz
-
-2.  Modify the Linaro image:
-
-        # Prepare for use as RAM-disk. Normally use MMC, NFS or VirtioBlock.
-        # Be careful, otherwise you could damage your host file-system.
-        mkdir tmp; cd tmp
-        sudo sh -c "zcat ../linaro-image-lamp-genericarmv8-20141212-729.rootfs.tar.gz | cpio -id"
-        sudo ln -s sbin/init .
-        sudo sh -c "echo 'devtmpfs /dev devtmpfs mode=0755,nosuid 0 0' >> etc/fstab"
-        sudo sh -c "find . | cpio --quiet -H newc -o | gzip -3 -n > ../filesystem.cpio.gz"
-        cd ..
-
-3.  Copy the resultant `filesystem.cpio.gz` to the directory where the FVP is
-    launched from. Alternatively a symbolic link may be used.
-
-
-7.  Running the software on FVP
--------------------------------
-
-This version of the ARM Trusted Firmware has been tested on the following ARM
-FVPs (64-bit versions only).
-
-*   `Foundation_Platform` (Version 9.1, Build 9.1.33)
-*   `FVP_Base_AEMv8A-AEMv8A` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x4-A53x4` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x1-A53x1` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x2-A53x4` (Version 6.2, Build 0.8.6202)
-
-NOTE: The build numbers quoted above are those reported by launching the FVP
-with the `--version` parameter.
-
-NOTE: The software will not work on Version 1.0 of the Foundation FVP.
-The commands below would report an `unhandled argument` error in this case.
-
-NOTE: The Foundation FVP does not provide a debugger interface.
-
-Please refer to the FVP documentation for a detailed description of the model
-parameter options. A brief description of the important ones that affect the
-ARM Trusted Firmware and normal world software behavior is provided below.
-
-The Foundation FVP is a cut down version of the AArch64 Base FVP. It can be
-downloaded for free from [ARM's website][ARM FVP website].
-
-
-### Running on the Foundation FVP with reset to BL1 entrypoint
-
-The following `Foundation_Platform` parameters should be used to boot Linux with
-4 CPUs using the ARM Trusted Firmware.
-
-NOTE: Using the `--block-device` parameter is not necessary if a Linux RAM-disk
-file-system is used (see the "Obtaining a root file-system" section above).
-
-NOTE: The `--data="<path to FIP binary>"@0x8000000` parameter is used to load a
-Firmware Image Package at the start of NOR FLASH0 (see the "Building the
-Trusted Firmware" section above).
-
-    <path-to>/Foundation_Platform             \
-    --cores=4                                 \
-    --secure-memory                           \
-    --visualization                           \
-    --gicv3                                   \
-    --data="<path-to>/<bl1-binary>"@0x0       \
-    --data="<path-to>/<FIP-binary>"@0x8000000 \
-    --block-device="<path-to>/<file-system-image>"
-
-The default use-case for the Foundation FVP is to enable the GICv3 device in
-the model but use the GICv2 FDT, in order for Linux to drive the GIC in GICv2
-emulation mode.
-
-The memory mapped addresses `0x0` and `0x8000000` correspond to the start of
-trusted ROM and NOR FLASH0 respectively.
-
-### Notes regarding Base FVP configuration options
-
-Please refer to these notes in the subsequent "Running on the Base FVP"
-sections.
-
-1.  The `-C bp.flashloader0.fname` parameter is used to load a Firmware Image
-    Package at the start of NOR FLASH0 (see the "Building the Trusted Firmware"
-    section above).
-
-2.  Using `cache_state_modelled=1` makes booting very slow. The software will
-    still work (and run much faster) without this option but this will hide any
-    cache maintenance defects in the software.
-
-3.  Using the `-C bp.virtioblockdevice.image_path` parameter is not necessary
-    if a Linux RAM-disk file-system is used (see the "Obtaining a root
-    file-system" section above).
-
-4.  Setting the `-C bp.secure_memory` parameter to `1` is only supported on
-    Base FVP versions 5.4 and newer. Setting this parameter to `0` is also
-    supported. The `-C bp.tzc_400.diagnostics=1` parameter is optional. It
-    instructs the FVP to provide some helpful information if a secure memory
-    violation occurs.
-
-5.  This and the following notes only apply when the firmware is built with
-    the `RESET_TO_BL31` option.
-
-    The `--data="<path-to><bl31|bl32|bl33-binary>"@<base-address-of-binary>`
-    parameter is used to load bootloader images into Base FVP memory (see the
-    "Building the Trusted Firmware" section above). The base addresses used
-    should match the image base addresses in `platform_def.h` used while linking
-    the images. The BL3-2 image is only needed if BL3-1 has been built to expect
-    a Secure-EL1 Payload.
-
-6.  The `-C cluster<X>.cpu<Y>.RVBAR=@<base-address-of-bl31>` parameter, where
-    X and Y are the cluster and CPU numbers respectively, is used to set the
-    reset vector for each core.
-
-7.  Changing the default value of `FVP_SHARED_DATA_LOCATION` will also require
-    changing the value of
-    `--data="<path-to><bl31-binary>"@<base-address-of-bl31>` and
-    `-C cluster<X>.cpu<Y>.RVBAR=@<base-address-of-bl31>`, to the new value of
-    `BL31_BASE` in `platform_def.h`.
-
-8.  Changing the default value of `FVP_TSP_RAM_LOCATION` will also require
-    changing the value of
-    `--data="<path-to><bl32-binary>"@<base-address-of-bl32>` to the new value of
-    `BL32_BASE` in `platform_def.h`.
-
-
-### Running on the AEMv8 Base FVP with reset to BL1 entrypoint
-
-Please read "Notes regarding Base FVP configuration options" section above for
-information about some of the options to run the software.
-
-The following `FVP_Base_AEMv8A-AEMv8A` parameters should be used to boot Linux
-with 8 CPUs using the ARM Trusted Firmware.
-
-    <path-to>/FVP_Base_AEMv8A-AEMv8A                       \
-    -C pctl.startup=0.0.0.0                                \
-    -C bp.secure_memory=1                                  \
-    -C bp.tzc_400.diagnostics=1                            \
-    -C cluster0.NUM_CORES=4                                \
-    -C cluster1.NUM_CORES=4                                \
-    -C cache_state_modelled=1                              \
-    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>" \
-    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"      \
-    -C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"
-
-### Running on the Cortex-A57-A53 Base FVP with reset to BL1 entrypoint
-
-Please read "Notes regarding Base FVP configuration options" section above for
-information about some of the options to run the software.
-
-The following `FVP_Base_Cortex-A57x4-A53x4` model parameters should be used to
-boot Linux with 8 CPUs using the ARM Trusted Firmware.
-
-    <path-to>/FVP_Base_Cortex-A57x4-A53x4                  \
-    -C pctl.startup=0.0.0.0                                \
-    -C bp.secure_memory=1                                  \
-    -C bp.tzc_400.diagnostics=1                            \
-    -C cache_state_modelled=1                              \
-    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>" \
-    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"      \
-    -C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"
-
-### Running on the AEMv8 Base FVP with reset to BL3-1 entrypoint
-
-Please read "Notes regarding Base FVP configuration options" section above for
-information about some of the options to run the software.
-
-The following `FVP_Base_AEMv8A-AEMv8A` parameters should be used to boot Linux
-with 8 CPUs using the ARM Trusted Firmware.
-
-    <path-to>/FVP_Base_AEMv8A-AEMv8A                             \
-    -C pctl.startup=0.0.0.0                                      \
-    -C bp.secure_memory=1                                        \
-    -C bp.tzc_400.diagnostics=1                                  \
-    -C cluster0.NUM_CORES=4                                      \
-    -C cluster1.NUM_CORES=4                                      \
-    -C cache_state_modelled=1                                    \
-    -C cluster0.cpu0.RVBAR=0x04023000                            \
-    -C cluster0.cpu1.RVBAR=0x04023000                            \
-    -C cluster0.cpu2.RVBAR=0x04023000                            \
-    -C cluster0.cpu3.RVBAR=0x04023000                            \
-    -C cluster1.cpu0.RVBAR=0x04023000                            \
-    -C cluster1.cpu1.RVBAR=0x04023000                            \
-    -C cluster1.cpu2.RVBAR=0x04023000                            \
-    -C cluster1.cpu3.RVBAR=0x04023000                            \
-    --data cluster0.cpu0="<path-to>/<bl31-binary>"@0x04023000    \
-    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
-    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
-    -C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"
-
-### Running on the Cortex-A57-A53 Base FVP with reset to BL3-1 entrypoint
-
-Please read "Notes regarding Base FVP configuration options" section above for
-information about some of the options to run the software.
-
-The following `FVP_Base_Cortex-A57x4-A53x4` model parameters should be used to
-boot Linux with 8 CPUs using the ARM Trusted Firmware.
-
-    <path-to>/FVP_Base_Cortex-A57x4-A53x4                        \
-    -C pctl.startup=0.0.0.0                                      \
-    -C bp.secure_memory=1                                        \
-    -C bp.tzc_400.diagnostics=1                                  \
-    -C cache_state_modelled=1                                    \
-    -C cluster0.cpu0.RVBARADDR=0x04023000                        \
-    -C cluster0.cpu1.RVBARADDR=0x04023000                        \
-    -C cluster0.cpu2.RVBARADDR=0x04023000                        \
-    -C cluster0.cpu3.RVBARADDR=0x04023000                        \
-    -C cluster1.cpu0.RVBARADDR=0x04023000                        \
-    -C cluster1.cpu1.RVBARADDR=0x04023000                        \
-    -C cluster1.cpu2.RVBARADDR=0x04023000                        \
-    -C cluster1.cpu3.RVBARADDR=0x04023000                        \
-    --data cluster0.cpu0="<path-to>/<bl31-binary>"@0x04023000    \
-    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
-    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
-    -C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"
-
-### Configuring the GICv2 memory map
-
-The Base FVP models support GICv2 with the default model parameters at the
-following addresses. The Foundation FVP also supports these addresses when
-configured for GICv3 in GICv2 emulation mode.
-
-    GICv2 Distributor Interface     0x2f000000
-    GICv2 CPU Interface             0x2c000000
-    GICv2 Virtual CPU Interface     0x2c010000
-    GICv2 Hypervisor Interface      0x2c02f000
-
-The AEMv8 Base FVP can be configured to support GICv2 at addresses
-corresponding to the legacy (Versatile Express) memory map as follows. These are
-the default addresses when using the Foundation FVP in GICv2 mode.
-
-    GICv2 Distributor Interface     0x2c001000
-    GICv2 CPU Interface             0x2c002000
-    GICv2 Virtual CPU Interface     0x2c004000
-    GICv2 Hypervisor Interface      0x2c006000
-
-The choice of memory map is reflected in the build variant field (bits[15:12])
-in the `SYS_ID` register (Offset `0x0`) in the Versatile Express System
-registers memory map (`0x1c010000`).
-
-*   `SYS_ID.Build[15:12]`
-
-    `0x1` corresponds to the presence of the Base GIC memory map. This is the
-    default value on the Base FVPs.
-
-*   `SYS_ID.Build[15:12]`
-
-    `0x0` corresponds to the presence of the Legacy VE GIC memory map. This is
-    the default value on the Foundation FVP.
-
-This register can be configured as described in the following sections.
-
-NOTE: If the legacy VE GIC memory map is used, then the corresponding FDT and
-BL3-3 images should be used.
-
-#### Configuring AEMv8 Foundation FVP GIC for legacy VE memory map
-
-The following parameters configure the Foundation FVP to use GICv2 with the
-legacy VE memory map:
-
-    <path-to>/Foundation_Platform             \
-    --cores=4                                 \
-    --secure-memory                           \
-    --visualization                           \
-    --no-gicv3                                \
-    --data="<path-to>/<bl1-binary>"@0x0       \
-    --data="<path-to>/<FIP-binary>"@0x8000000 \
-    --block-device="<path-to>/<file-system-image>"
-
-Explicit configuration of the `SYS_ID` register is not required.
-
-#### Configuring AEMv8 Base FVP GIC for legacy VE memory map
-
-The following parameters configure the AEMv8 Base FVP to use GICv2 with the
-legacy VE memory map. They must be added to the parameters described in the
-"Running on the AEMv8 Base FVP" section above:
-
-    -C cluster0.gic.GICD-offset=0x1000                  \
-    -C cluster0.gic.GICC-offset=0x2000                  \
-    -C cluster0.gic.GICH-offset=0x4000                  \
-    -C cluster0.gic.GICH-other-CPU-offset=0x5000        \
-    -C cluster0.gic.GICV-offset=0x6000                  \
-    -C cluster0.gic.PERIPH-size=0x8000                  \
-    -C cluster1.gic.GICD-offset=0x1000                  \
-    -C cluster1.gic.GICC-offset=0x2000                  \
-    -C cluster1.gic.GICH-offset=0x4000                  \
-    -C cluster1.gic.GICH-other-CPU-offset=0x5000        \
-    -C cluster1.gic.GICV-offset=0x6000                  \
-    -C cluster1.gic.PERIPH-size=0x8000                  \
-    -C gic_distributor.GICD-alias=0x2c001000            \
-    -C gicv3.gicv2-only=1                               \
-    -C bp.variant=0x0
-
-The `bp.variant` parameter corresponds to the build variant field of the
-`SYS_ID` register.  Setting this to `0x0` allows the ARM Trusted Firmware to
-detect the legacy VE memory map while configuring the GIC.
-
-
-8.  Running the software on Juno
---------------------------------
-
-### Preparing Trusted Firmware images
-
-To execute the versions of software components on Juno referred to in this
-document, the latest [Juno Board Recovery Image] must be installed. If you
-have an earlier version installed or are unsure which version is installed,
-follow the recovery image update instructions in the [Juno Software Guide]
-on the [ARM Connected Community] website.
-
-The Juno platform requires a BL3-0 image to boot up. This image contains the
-runtime firmware that runs on the SCP (System Control Processor). This image is
-embedded within the [Juno Board Recovery Image] but can also be
-[downloaded directly][Juno SCP Firmware].
-
-Rebuild the Trusted Firmware specifying the BL3-0 image. Refer to the section
-"Building the Trusted Firmware". Alternatively, the FIP image can be updated
-manually with the BL3-0 image:
-
-    fip_create --dump --bl30 <path-to>/<bl30-binary> <path-to>/<FIP-binary>
-
-### Obtaining the Flattened Device Tree
-
-Juno's device tree blob is built along with the kernel. It is located in:
-
-    <path-to-linux>/arch/arm64/boot/dts/juno.dtb
-
-### Other Juno software information
-
-Please refer to the [Juno Software Guide] to:
-
-*   Deploy a root filesystem
-*   Install and run the Juno binaries on the board
-*   Obtain any other Juno software information
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved._
-
-
-[Firmware Design]:  ./firmware-design.md
-
-[ARM FVP website]:             http://www.arm.com/fvp
-[ARM Connected Community]:     http://community.arm.com
-[Juno Software Guide]:         http://community.arm.com/docs/DOC-8396
-[Juno Board Recovery Image]:   http://community.arm.com/servlet/JiveServlet/download/9427-1-15432/board_recovery_image_0.10.1.zip
-[Juno SCP Firmware]:           http://community.arm.com/servlet/JiveServlet/download/9427-1-15422/bl30.bin.zip
-[Linaro Toolchain]:            http://releases.linaro.org/14.07/components/toolchain/binaries/
-[EDK2]:                        http://github.com/tianocore/edk2
-[DS-5]:                        http://www.arm.com/products/tools/software-tools/ds-5/index.php
-[PolarSSL Repository]:         https://github.com/polarssl/polarssl.git
-[Trusted Board Boot]:          trusted-board-boot.md
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
new file mode 100644
index 0000000..4df7590
--- /dev/null
+++ b/docs/user-guide.rst
@@ -0,0 +1,1856 @@
+ARM Trusted Firmware User Guide
+===============================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+This document describes how to build ARM Trusted Firmware (TF) and run it with a
+tested set of other software components using defined configurations on the Juno
+ARM development platform and ARM Fixed Virtual Platform (FVP) models. It is
+possible to use other software components, configurations and platforms but that
+is outside the scope of this document.
+
+This document assumes that the reader has previous experience running a fully
+bootable Linux software stack on Juno or FVP using the prebuilt binaries and
+filesystems provided by `Linaro`_. Further information may be found in the
+`Linaro instructions`_. It also assumes that the user understands the role of
+the different software components required to boot a Linux system:
+
+-  Specific firmware images required by the platform (e.g. SCP firmware on Juno)
+-  Normal world bootloader (e.g. UEFI or U-Boot)
+-  Device tree
+-  Linux kernel image
+-  Root filesystem
+
+This document also assumes that the user is familiar with the `FVP models`_ and
+the different command line options available to launch the model.
+
+This document should be used in conjunction with the `Firmware Design`_.
+
+Host machine requirements
+-------------------------
+
+The minimum recommended machine specification for building the software and
+running the FVP models is a dual-core processor running at 2GHz with 12GB of
+RAM. For best performance, use a machine with a quad-core processor running at
+2.6GHz with 16GB of RAM.
+
+The software has been tested on Ubuntu 14.04 LTS (64-bit). Packages used for
+building the software were installed from that distribution unless otherwise
+specified.
+
+The software has also been built on Windows 7 Enterprise SP1, using CMD.EXE,
+Cygwin, and Msys (MinGW) shells, with version 5.3.1 of the GNU toolchain.
+
+Tools
+-----
+
+Install the required packages to build Trusted Firmware with the following
+command:
+
+::
+
+    sudo apt-get install build-essential gcc make git libssl-dev
+
+ARM TF has been tested with `Linaro Release 17.04`_.
+
+Download and install the AArch32 or AArch64 little-endian GCC cross compiler.
+The `Linaro Release Notes`_ documents which version of the compiler to use for a
+given Linaro Release. Also, these `Linaro instructions`_ provide further
+guidance and a script, which can be used to download Linaro deliverables
+automatically.
+
+Optionally, Trusted Firmware can be built using clang or ARM Compiler 6.
+See instructions below on how to switch the default compiler.
+
+In addition, the following optional packages and tools may be needed:
+
+-  ``device-tree-compiler`` package if you need to rebuild the Flattened Device
+   Tree (FDT) source files (``.dts`` files) provided with this software.
+
+-  For debugging, ARM `Development Studio 5 (DS-5)`_.
+
+-  To create and modify the diagram files included in the documentation, `Dia`_.
+   This tool can be found in most Linux distributions. Inkscape is needed to
+   generate the actual *.png files.
+
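+For example, on Ubuntu the optional device tree compiler can be installed in
+the same way as the required packages (other distributions provide an
+equivalent package):
+
+::
+
+    sudo apt-get install device-tree-compiler
+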
+Getting the Trusted Firmware source code
+----------------------------------------
+
+Download the Trusted Firmware source code from Github:
+
+::
+
+    git clone https://github.com/ARM-software/arm-trusted-firmware.git
+
+Building the Trusted Firmware
+-----------------------------
+
+-  Before building Trusted Firmware, the environment variable ``CROSS_COMPILE``
+   must point to the Linaro cross compiler.
+
+   For AArch64:
+
+   ::
+
+       export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-linux-gnu-
+
+   For AArch32:
+
+   ::
+
+       export CROSS_COMPILE=<path-to-aarch32-gcc>/bin/arm-linux-gnueabihf-
+
+   It is possible to build Trusted Firmware using clang or ARM Compiler 6.
+   To do so ``CC`` needs to point to the clang or armclang binary. Only the
+   compiler is switched; the assembler and linker need to be provided by
+   the GNU toolchain, thus ``CROSS_COMPILE`` should be set as described above.
+
+   ARM Compiler 6 will be selected when the base name of the path assigned
+   to ``CC`` matches the string 'armclang'.
+
+   For AArch64 using ARM Compiler 6:
+
+   ::
+
+       export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-linux-gnu-
+       make CC=<path-to-armclang>/bin/armclang PLAT=<platform> all
+
+   Clang will be selected when the base name of the path assigned to ``CC``
+   contains the string 'clang'. This is to allow both clang and clang-X.Y
+   to work.
+
+   For AArch64 using clang:
+
+   ::
+
+       export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-linux-gnu-
+       make CC=<path-to-clang>/bin/clang PLAT=<platform> all
+
+-  Change to the root directory of the Trusted Firmware source tree and build.
+
+   For AArch64:
+
+   ::
+
+       make PLAT=<platform> all
+
+   For AArch32:
+
+   ::
+
+       make PLAT=<platform> ARCH=aarch32 AARCH32_SP=sp_min all
+
+   Notes:
+
+   -  If ``PLAT`` is not specified, ``fvp`` is assumed by default. See the
+      `Summary of build options`_ for more information on available build
+      options.
+
+   -  (AArch32 only) Currently only ``PLAT=fvp`` is supported.
+
+   -  (AArch32 only) ``AARCH32_SP`` is the AArch32 EL3 Runtime Software and it
+      corresponds to the BL32 image. A minimal ``AARCH32_SP``, sp\_min, is
+      provided by ARM Trusted Firmware to demonstrate how PSCI Library can
+      be integrated with an AArch32 EL3 Runtime Software. Some AArch32 EL3
+      Runtime Software may include other runtime services, for example
+      Trusted OS services. A guide to integrate PSCI library with AArch32
+      EL3 Runtime Software can be found `here`_.
+
+   -  (AArch64 only) The TSP (Test Secure Payload), corresponding to the BL32
+      image, is not compiled in by default. Refer to the
+      `Building the Test Secure Payload`_ section below.
+
+   -  By default this produces a release version of the build. To produce a
+      debug version instead, refer to the "Debugging options" section below.
+
+   -  The build process creates products in a ``build`` directory tree, building
+      the objects and binaries for each boot loader stage in separate
+      sub-directories. The following boot loader binary files are created
+      from the corresponding ELF files:
+
+      -  ``build/<platform>/<build-type>/bl1.bin``
+      -  ``build/<platform>/<build-type>/bl2.bin``
+      -  ``build/<platform>/<build-type>/bl31.bin`` (AArch64 only)
+      -  ``build/<platform>/<build-type>/bl32.bin`` (mandatory for AArch32)
+
+      where ``<platform>`` is the name of the chosen platform and ``<build-type>``
+      is either ``debug`` or ``release``. The actual number of images might differ
+      depending on the platform.
+
+-  Build products for a specific build variant can be removed using:
+
+   ::
+
+       make DEBUG=<D> PLAT=<platform> clean
+
+   ... where ``<D>`` is ``0`` or ``1``, as specified when building.
+
+   The build tree can be removed completely using:
+
+   ::
+
+       make realclean
+
+Summary of build options
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+ARM Trusted Firmware build system supports the following build options. Unless
+mentioned otherwise, these options are expected to be specified at the build
+command line and are not to be modified in any component makefiles. Note that
+the build system doesn't track dependency for build options. Therefore, if any
+of the build options are changed from a previous build, a clean build must be
+performed.
+
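+For example, an illustrative sequence for switching a build option such as
+``DEBUG`` is to remove the previous build products first and then rebuild:
+
+::
+
+    make realclean
+    make PLAT=<platform> DEBUG=1 all
+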
+Common build options
+^^^^^^^^^^^^^^^^^^^^
+
+-  ``AARCH32_SP`` : Choose the AArch32 Secure Payload component to be built as
+   the BL32 image when ``ARCH=aarch32``. The value should be the path to the
+   directory containing the SP source, relative to ``bl32/``; the directory
+   is expected to contain a makefile called ``<aarch32_sp-value>.mk``.
+
+-  ``ARCH`` : Choose the target build architecture for ARM Trusted Firmware.
+   It can take either ``aarch64`` or ``aarch32`` as values. By default, it is
+   defined to ``aarch64``.
+
+-  ``ARM_ARCH_MAJOR``: The major version of ARM Architecture to target when
+   compiling ARM Trusted Firmware. Its value must be numeric, and defaults to 8.
+   See also, *ARMv8 Architecture Extensions* in `Firmware Design`_.
+
+-  ``ARM_ARCH_MINOR``: The minor version of ARM Architecture to target when
+   compiling ARM Trusted Firmware. Its value must be numeric, and defaults
+   to 0. See also, *ARMv8 Architecture Extensions* in `Firmware Design`_.
+
+-  ``ARM_GIC_ARCH``: Choice of ARM GIC architecture version used by the ARM
+   Legacy GIC driver for implementing the platform GIC API. This API is used
+   by the interrupt management framework. Default is 2 (that is, version 2.0).
+   This build option is deprecated.
+
+-  ``ARM_PLAT_MT``: This flag determines whether the ARM platform layer has to
+   cater for the multi-threading ``MT`` bit when accessing MPIDR. When this flag
+   is set, the functions which deal with MPIDR assume that the ``MT`` bit in
+   MPIDR is set and access the bit-fields in MPIDR accordingly. Default value of
+   this flag is 0. Note that this option is not used on FVP platforms.
+
+-  ``BL2``: This is an optional build option which specifies the path to BL2
+   image for the ``fip`` target. In this case, the BL2 in the ARM Trusted
+   Firmware will not be built.
+
+-  ``BL2U``: This is an optional build option which specifies the path to
+   BL2U image. In this case, the BL2U in the ARM Trusted Firmware will not
+   be built.
+
+-  ``BL31``: This is an optional build option which specifies the path to
+   BL31 image for the ``fip`` target. In this case, the BL31 in the ARM
+   Trusted Firmware will not be built.
+
+-  ``BL31_KEY``: This option is used when ``GENERATE_COT=1``. It specifies the
+   file that contains the BL31 private key in PEM format. If ``SAVE_KEYS=1``,
+   this file name will be used to save the key.
+
+-  ``BL32``: This is an optional build option which specifies the path to
+   BL32 image for the ``fip`` target. In this case, the BL32 in the ARM
+   Trusted Firmware will not be built.
+
+-  ``BL32_EXTRA1``: This is an optional build option which specifies the path to
+   Trusted OS Extra1 image for the ``fip`` target.
+
+-  ``BL32_EXTRA2``: This is an optional build option which specifies the path to
+   Trusted OS Extra2 image for the ``fip`` target.
+
+-  ``BL32_KEY``: This option is used when ``GENERATE_COT=1``. It specifies the
+   file that contains the BL32 private key in PEM format. If ``SAVE_KEYS=1``,
+   this file name will be used to save the key.
+
+-  ``BL33``: Path to BL33 image in the host file system. This is mandatory for
+   the ``fip`` target if the BL2 from ARM Trusted Firmware is used.
+
+-  ``BL33_KEY``: This option is used when ``GENERATE_COT=1``. It specifies the
+   file that contains the BL33 private key in PEM format. If ``SAVE_KEYS=1``,
+   this file name will be used to save the key.
+
+-  ``BUILD_MESSAGE_TIMESTAMP``: String used to identify the time and date of the
+   compilation of each build. It must be set to a C string (including quotes
+   where applicable). Defaults to a string that contains the time and date of
+   the compilation.
+
+-  ``BUILD_STRING``: Input string for VERSION\_STRING, which allows the TF build
+   to be uniquely identified. Defaults to the current git commit id.
+
+-  ``CFLAGS``: Extra user options appended on the compiler's command line in
+   addition to the options set by the build system.
+
+-  ``COLD_BOOT_SINGLE_CPU``: This option indicates whether the platform may
+   release several CPUs out of reset. It can take either 0 (several CPUs may be
+   brought up) or 1 (only one CPU will ever be brought up during cold reset).
+   Default is 0. If the platform always brings up a single CPU, there is no
+   need to distinguish between primary and secondary CPUs and the boot path can
+   be optimised. The ``plat_is_my_cpu_primary()`` and
+   ``plat_secondary_cold_boot_setup()`` platform porting interfaces do not need
+   to be implemented in this case.
+
+-  ``CRASH_REPORTING``: A non-zero value enables a console dump of processor
+   register state when an unexpected exception occurs during execution of
+   BL31. This option defaults to the value of ``DEBUG`` - i.e. by default
+   this is only enabled for a debug build of the firmware.
+
+-  ``CREATE_KEYS``: This option is used when ``GENERATE_COT=1``. It tells the
+   certificate generation tool to create new keys in case no valid keys are
+   present or specified. Allowed options are '0' or '1'. Default is '1'.
+
+-  ``CTX_INCLUDE_AARCH32_REGS`` : Boolean option that, when set to 1, will cause
+   the AArch32 system registers to be included when saving and restoring the
+   CPU context. The option must be set to 0 for AArch64-only platforms (that
+   is on hardware that does not implement AArch32, or at least not at EL1 and
+   higher ELs). Default value is 1.
+
+-  ``CTX_INCLUDE_FPREGS``: Boolean option that, when set to 1, will cause the FP
+   registers to be included when saving and restoring the CPU context. Default
+   is 0.
+
+-  ``DEBUG``: Chooses between a debug and release build. It can take either 0
+   (release) or 1 (debug) as values. 0 is the default.
+
+-  ``EL3_PAYLOAD_BASE``: This option enables booting an EL3 payload instead of
+   the normal boot flow. It must specify the entry point address of the EL3
+   payload. Please refer to the "Booting an EL3 payload" section for more
+   details.
+
+-  ``ENABLE_ASSERTIONS``: This option controls whether or not calls to ``assert()``
+   are compiled out. For debug builds, this option defaults to 1, and calls to
+   ``assert()`` are left in place. For release builds, this option defaults to 0
+   and calls to the ``assert()`` function are compiled out. This option can be set
+   independently of ``DEBUG``. It can also be used to hide any auxiliary code
+   that is only required for the assertion and does not fit in the assertion
+   itself.
+
+-  ``ENABLE_PMF``: Boolean option to enable support for optional Performance
+   Measurement Framework (PMF). Default is 0.
+
+-  ``ENABLE_PSCI_STAT``: Boolean option to enable support for optional PSCI
+   functions ``PSCI_STAT_RESIDENCY`` and ``PSCI_STAT_COUNT``. Default is 0.
+   In the absence of an alternate stat collection backend, ``ENABLE_PMF`` must
+   be enabled. If ``ENABLE_PMF`` is set, the residency statistics are tracked in
+   software.
+
+-  ``ENABLE_RUNTIME_INSTRUMENTATION``: Boolean option to enable runtime
+   instrumentation which injects timestamp collection points into
+   Trusted Firmware to allow runtime performance to be measured.
+   Currently, only PSCI is instrumented. Enabling this option enables
+   the ``ENABLE_PMF`` build option as well. Default is 0.
+
+-  ``ENABLE_SPE_FOR_LOWER_ELS`` : Boolean option to enable Statistical Profiling
+   extensions. This is an optional architectural feature available only for
+   AArch64 8.2 onwards. This option defaults to 1 but is automatically
+   disabled when the target architecture is AArch32 or AArch64 8.0/8.1.
+
+-  ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
+   checks in GCC. Allowed values are "all", "strong" and "0" (default).
+   "strong" is the recommended stack protection level if this feature is
+   desired. 0 disables the stack protection. For all values other than 0, the
+   ``plat_get_stack_protector_canary()`` platform hook needs to be implemented.
+   The value is passed as the last component of the option
+   ``-fstack-protector-$ENABLE_STACK_PROTECTOR``.
+
+-  ``ERROR_DEPRECATED``: This option decides whether to treat the usage of
+   deprecated platform APIs, helper functions or drivers within Trusted
+   Firmware as an error. It can take the value 1 (flag the use of deprecated
+   APIs as an error) or 0. The default is 0.
+
+-  ``FIP_NAME``: This is an optional build option which specifies the FIP
+   filename for the ``fip`` target. Default is ``fip.bin``.
+
+-  ``FWU_FIP_NAME``: This is an optional build option which specifies the FWU
+   FIP filename for the ``fwu_fip`` target. Default is ``fwu_fip.bin``.
+
+-  ``GENERATE_COT``: Boolean flag used to build and execute the ``cert_create``
+   tool to create certificates as per the Chain of Trust described in
+   `Trusted Board Boot`_. The build system then calls ``fiptool`` to
+   include the certificates in the FIP and FWU\_FIP. Default value is '0'.
+
+   Specify both ``TRUSTED_BOARD_BOOT=1`` and ``GENERATE_COT=1`` to include support
+   for the Trusted Board Boot feature in the BL1 and BL2 images, to generate
+   the corresponding certificates, and to include those certificates in the
+   FIP and FWU\_FIP.
+
+   Note that if ``TRUSTED_BOARD_BOOT=0`` and ``GENERATE_COT=1``, the BL1 and BL2
+   images will not include support for Trusted Board Boot. The FIP will still
+   include the corresponding certificates. This FIP can be used to verify the
+   Chain of Trust on the host machine through other mechanisms.
+
+   Note that if ``TRUSTED_BOARD_BOOT=1`` and ``GENERATE_COT=0``, the BL1 and BL2
+   images will include support for Trusted Board Boot, but the FIP and FWU\_FIP
+   will not include the corresponding certificates, causing a boot failure.
+
+-  ``GICV2_G0_FOR_EL3``: Unlike GICv3, the GICv2 architecture doesn't have
+   inherent support for specific EL3 type interrupts. Setting this build option
+   to ``1`` assumes GICv2 *Group 0* interrupts are expected to target EL3, both
+   by `platform abstraction layer`__ and `Interrupt Management Framework`__.
+   This allows GICv2 platforms to enable features requiring an EL3 interrupt type.
+   This also means that all GICv2 Group 0 interrupts are delivered to EL3, and
+   the Secure Payload interrupts need to be synchronously handed over to Secure
+   EL1 for handling. The default value of this option is ``0``, which means the
+   Group 0 interrupts are assumed to be handled by Secure EL1.
+
+   .. __: `platform-interrupt-controller-API.rst`
+   .. __: `interrupt-framework-design.rst`
+
+-  ``HANDLE_EA_EL3_FIRST``: When defined, External Aborts and SError Interrupts
+   will always be trapped in EL3, i.e. in BL31 at runtime.
+
+-  ``HW_ASSISTED_COHERENCY``: On most ARM systems to date, platform-specific
+   software operations are required for CPUs to enter and exit coherency.
+   However, there exist newer systems where CPUs' entry to and exit from
+   coherency is managed in hardware. Such systems require software to only
+   initiate the operations, and the rest is managed in hardware, minimizing
+   active software management. In such systems, this boolean option enables ARM
+   Trusted Firmware to carry out build and run-time optimizations during boot
+   and power management operations. This option defaults to 0 and if it is
+   enabled, then it implies ``WARMBOOT_ENABLE_DCACHE_EARLY`` is also enabled.
+
+-  ``JUNO_AARCH32_EL3_RUNTIME``: This build flag enables you to execute EL3
+   runtime software in AArch32 mode, which is required to run AArch32 on Juno.
+   By default this flag is set to '0'. Enabling this flag builds BL1 and BL2 in
+   AArch64 and facilitates the loading of ``SP_MIN`` and BL33 as AArch32 executable
+   images.
+
+-  ``KEY_ALG``: This build flag enables the user to select the algorithm to be
+   used for generating the PKCS keys and subsequent signing of the certificate.
+   It accepts three values: ``rsa``, ``rsa_1_5`` and ``ecdsa``. The ``rsa_1_5`` is
+   the legacy PKCS#1 RSA 1.5 algorithm which is not TBBR compliant and is
+   retained only for compatibility. The default value of this flag is ``rsa``
+   which is the TBBR compliant PKCS#1 RSA 2.1 scheme.
+
+-  ``LDFLAGS``: Extra user options appended to the linker's command line in
+   addition to the options set by the build system.
+
+-  ``LOAD_IMAGE_V2``: Boolean option to enable support for new version (v2) of
+   image loading, which provides more flexibility and scalability around what
+   images are loaded and executed during boot. Default is 0.
+   Note: ``TRUSTED_BOARD_BOOT`` is currently only supported for AArch64 when
+   ``LOAD_IMAGE_V2`` is enabled.
+
+-  ``LOG_LEVEL``: Chooses the log level, which controls the amount of console log
+   output compiled into the build. This should be one of the following:
+
+   ::
+
+       0  (LOG_LEVEL_NONE)
+       10 (LOG_LEVEL_ERROR)
+       20 (LOG_LEVEL_NOTICE)
+       30 (LOG_LEVEL_WARNING)
+       40 (LOG_LEVEL_INFO)
+       50 (LOG_LEVEL_VERBOSE)
+
+   All log output up to and including the log level is compiled into the build.
+   The default value is 40 in debug builds and 20 in release builds.
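+
+   For example, a hypothetical debug build with the most verbose logging might
+   be requested with:
+
+   ::
+
+       make PLAT=<platform> DEBUG=1 LOG_LEVEL=50 all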
+
+-  ``NON_TRUSTED_WORLD_KEY``: This option is used when ``GENERATE_COT=1``. It
+   specifies the file that contains the Non-Trusted World private key in PEM
+   format. If ``SAVE_KEYS=1``, this file name will be used to save the key.
+
+-  ``NS_BL2U``: Path to NS\_BL2U image in the host file system. This image is
+   optional. It is only needed if the platform makefile specifies that it
+   is required in order to build the ``fwu_fip`` target.
+
+-  ``NS_TIMER_SWITCH``: Enable save and restore for non-secure timer register
+   contents upon world switch. It can take either 0 (don't save and restore) or
+   1 (do save and restore). 0 is the default. An SPD may set this to 1 if it
+   wants the timer registers to be saved and restored.
+
+-  ``PL011_GENERIC_UART``: Boolean option to indicate to the PL011 driver that
+   the underlying hardware is not a full PL011 UART but a minimally compliant
+   generic UART, which is a subset of the PL011. The driver will not access
+   any register that is not part of the SBSA generic UART specification.
+   Default value is 0 (a full PL011 compliant UART is present).
+
+-  ``PLAT``: Choose a platform to build ARM Trusted Firmware for. The chosen
+   platform name must be a subdirectory of any depth under ``plat/``, and must
+   contain a platform makefile named ``platform.mk``. For example, to build ARM
+   Trusted Firmware for the ARM Juno board, select ``PLAT=juno``.
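+
+   For example, to build the TF images for the ARM Juno board:
+
+   ::
+
+       make PLAT=juno all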
+
+-  ``PRELOADED_BL33_BASE``: This option enables booting a preloaded BL33 image
+   instead of the normal boot flow. When defined, it must specify the entry
+   point address for the preloaded BL33 image. This option is incompatible with
+   ``EL3_PAYLOAD_BASE``. If both are defined, ``EL3_PAYLOAD_BASE`` has priority
+   over ``PRELOADED_BL33_BASE``.
+
+-  ``PROGRAMMABLE_RESET_ADDRESS``: This option indicates whether the reset
+   vector address can be programmed or is fixed on the platform. It can take
+   either 0 (fixed) or 1 (programmable). Default is 0. If the platform has a
+   programmable reset address, it is expected that a CPU will start executing
+   code directly at the right address, both on a cold and warm reset. In this
+   case, there is no need to identify the entrypoint on boot and the boot path
+   can be optimised. The ``plat_get_my_entrypoint()`` platform porting interface
+   does not need to be implemented in this case.
+
+-  ``PSCI_EXTENDED_STATE_ID``: As per the PSCI1.0 Specification, there are two
+   possible formats for the PSCI power-state parameter: the original and the
+   extended State-ID formats. If this flag is set to 1, it configures the
+   generic PSCI layer to use the extended format. The default value of this
+   flag is 0, which means the original power-state format is used by the PSCI
+   implementation. This flag should be specified by the platform makefile
+   and it governs the return value of the PSCI\_FEATURES API for the
+   CPU\_SUSPEND SMC function ID. When this option is enabled on ARM platforms,
+   the option ``ARM_RECOM_STATE_ID_ENC`` needs to be set to 1 as well.
+
+-  ``RESET_TO_BL31``: Enable BL31 entrypoint as the CPU reset vector instead
+   of the BL1 entrypoint. It can take the value 0 (CPU reset to BL1
+   entrypoint) or 1 (CPU reset to BL31 entrypoint).
+   The default value is 0.
+
+-  ``RESET_TO_SP_MIN``: SP\_MIN is the minimal AArch32 Secure Payload provided in
+   ARM Trusted Firmware. This flag configures SP\_MIN entrypoint as the CPU
+   reset vector instead of the BL1 entrypoint. It can take the value 0 (CPU
+   reset to BL1 entrypoint) or 1 (CPU reset to SP\_MIN entrypoint). The default
+   value is 0.
+
+-  ``ROT_KEY``: This option is used when ``GENERATE_COT=1``. It specifies the
+   file that contains the ROT private key in PEM format. If ``SAVE_KEYS=1``, this
+   file name will be used to save the key.
+
+-  ``SAVE_KEYS``: This option is used when ``GENERATE_COT=1``. It tells the
+   certificate generation tool to save the keys used to establish the Chain of
+   Trust. Allowed options are '0' or '1'. Default is '0' (do not save).
+
+-  ``SCP_BL2``: Path to SCP\_BL2 image in the host file system. This image is optional.
+   If a SCP\_BL2 image is present then this option must be passed for the ``fip``
+   target.
+
+-  ``SCP_BL2_KEY``: This option is used when ``GENERATE_COT=1``. It specifies the
+   file that contains the SCP\_BL2 private key in PEM format. If ``SAVE_KEYS=1``,
+   this file name will be used to save the key.
+
+-  ``SCP_BL2U``: Path to SCP\_BL2U image in the host file system. This image is
+   optional. It is only needed if the platform makefile specifies that it
+   is required in order to build the ``fwu_fip`` target.
+
+-  ``SEPARATE_CODE_AND_RODATA``: Whether code and read-only data should be
+   isolated on separate memory pages. This is a trade-off between security and
+   memory usage. See "Isolating code and read-only data on separate memory
+   pages" section in `Firmware Design`_. This flag is disabled by default and
+   affects all BL images.
+
+-  ``SPD``: Choose a Secure Payload Dispatcher component to be built into the
+   Trusted Firmware. This build option is only valid if ``ARCH=aarch64``. The
+   value should be the path to the directory containing the SPD source,
+   relative to ``services/spd/``; the directory is expected to
+   contain a makefile called ``<spd-value>.mk``.
+
+-  ``SPIN_ON_BL1_EXIT``: This option introduces an infinite loop in BL1. It can
+   take either 0 (no loop) or 1 (add a loop). 0 is the default. This loop stops
+   execution in BL1 just before handing over to BL31. At this point, all
+   firmware images have been loaded in memory, and the MMU and caches are
+   turned off. Refer to the "Debugging options" section for more details.
+
+-  ``SP_MIN_WITH_SECURE_FIQ``: Boolean flag to indicate that SP_MIN handles
+   secure interrupts (caught through the FIQ line). Platforms can enable
+   this directive if they need to handle such interrupts. When enabled,
+   FIQs are handled in monitor mode and the non-secure world is not allowed
+   to mask these events. Platforms that enable FIQ handling in SP_MIN shall
+   implement the API ``sp_min_plat_fiq_handler()``. The default value is 0.
+
+-  ``TRUSTED_BOARD_BOOT``: Boolean flag to include support for the Trusted Board
+   Boot feature. When set to '1', BL1 and BL2 images include support to load
+   and verify the certificates and images in a FIP, and BL1 includes support
+   for the Firmware Update. The default value is '0'. Generation and inclusion
+   of certificates in the FIP and FWU\_FIP depends upon the value of the
+   ``GENERATE_COT`` option.
+
+   Note: This option depends on ``CREATE_KEYS`` being enabled. If the keys
+   already exist on disk, they will be overwritten without further notice.
+
+-  ``TRUSTED_WORLD_KEY``: This option is used when ``GENERATE_COT=1``. It
+   specifies the file that contains the Trusted World private key in PEM
+   format. If ``SAVE_KEYS=1``, this file name will be used to save the key.
+
+-  ``TSP_INIT_ASYNC``: Choose BL32 initialization method as asynchronous or
+   synchronous, (see "Initializing a BL32 Image" section in
+   `Firmware Design`_). It can take the value 0 (BL32 is initialized using
+   synchronous method) or 1 (BL32 is initialized using asynchronous method).
+   Default is 0.
+
+-  ``TSP_NS_INTR_ASYNC_PREEMPT``: A non-zero value enables the interrupt
+   routing model which routes non-secure interrupts asynchronously from TSP
+   to EL3 causing immediate preemption of TSP. The EL3 is responsible
+   for saving and restoring the TSP context in this routing model. The
+   default routing model (when the value is 0) is to route non-secure
+   interrupts to TSP allowing it to save its context and hand over
+   synchronously to EL3 via an SMC.
+
+-  ``USE_COHERENT_MEM``: This flag determines whether to include the coherent
+   memory region in the BL memory map or not (see "Use of Coherent memory in
+   Trusted Firmware" section in `Firmware Design`_). It can take the value 1
+   (Coherent memory region is included) or 0 (Coherent memory region is
+   excluded). Default is 1.
+
+-  ``V``: Verbose build. If assigned anything other than 0, the build commands
+   are printed. Default is 0.
+
+-  ``VERSION_STRING``: String used in the log output for each TF image. Defaults
+   to a string formed by concatenating the version number, build type and build
+   string.
+
+-  ``WARMBOOT_ENABLE_DCACHE_EARLY`` : Boolean option to enable the D-cache early
+   on the CPU after warm boot. This is applicable for platforms which do not
+   require interconnect programming to enable cache coherency (e.g. single
+   cluster platforms). If this option is enabled, then the warm boot path
+   enables the D-cache immediately after enabling the MMU. This option defaults
+   to 0.
+
+ARM development platform specific build options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+-  ``ARM_BL31_IN_DRAM``: Boolean option to select loading of BL31 in TZC secured
+   DRAM. By default, BL31 is in the secure SRAM. Set this flag to 1 to load
+   BL31 in TZC secured DRAM. If TSP is present, then setting this option also
+   sets the TSP location to DRAM and ignores the ``ARM_TSP_RAM_LOCATION`` build
+   flag.
+
+-  ``ARM_BOARD_OPTIMISE_MEM``: Boolean option to enable or disable optimisation
+   of the memory reserved for each image. This affects the maximum size of each
+   BL image as well as the number of allocated memory regions and translation
+   tables. By default this flag is 0, which means it uses the default
+   unoptimised values for these macros. ARM development platforms that wish to
+   optimise memory usage need to set this flag to 1 and must override the
+   related macros.
+
+-  ``ARM_CONFIG_CNTACR``: boolean option to unlock access to the ``CNTBase<N>``
+   frame registers by setting the ``CNTCTLBase.CNTACR<N>`` register bits. The
+   frame number ``<N>`` is defined by ``PLAT_ARM_NSTIMER_FRAME_ID``, which should
+   match the frame used by the Non-Secure image (normally the Linux kernel).
+   Default is true (access to the frame is allowed).
+
+-  ``ARM_DISABLE_TRUSTED_WDOG``: boolean option to disable the Trusted Watchdog.
+   By default, ARM platforms use a watchdog to trigger a system reset in case
+   an error is encountered during the boot process (for example, when an image
+   could not be loaded or authenticated). The watchdog is enabled in the early
+   platform setup hook at BL1 and disabled in the BL1 prepare exit hook. The
+   Trusted Watchdog may be disabled at build time for testing or development
+   purposes.
+
+-  ``ARM_RECOM_STATE_ID_ENC``: The PSCI1.0 specification recommends an encoding
+   for the construction of composite state-ID in the power-state parameter.
+   The existing PSCI clients currently do not support this encoding of
+   State-ID yet. Hence this flag is used to configure whether to use the
+   recommended State-ID encoding or not. The default value of this flag is 0,
+   in which case the platform is configured to expect NULL in the State-ID
+   field of power-state parameter.
+
+-  ``ARM_ROTPK_LOCATION``: used when ``TRUSTED_BOARD_BOOT=1``. It specifies the
+   location of the ROTPK hash returned by the function ``plat_get_rotpk_info()``
+   for ARM platforms. Depending on the selected option, the proper private key
+   must be specified using the ``ROT_KEY`` option when building the Trusted
+   Firmware. This private key will be used by the certificate generation tool
+   to sign the BL2 and Trusted Key certificates. Available options for
+   ``ARM_ROTPK_LOCATION`` are:
+
+   -  ``regs`` : return the ROTPK hash stored in the Trusted root-key storage
+      registers. The private key corresponding to this ROTPK hash is not
+      currently available.
+   -  ``devel_rsa`` : return a development public key hash embedded in the BL1
+      and BL2 binaries. This hash has been obtained from the RSA public key
+      ``arm_rotpk_rsa.der``, located in ``plat/arm/board/common/rotpk``. To use
+      this option, ``arm_rotprivk_rsa.pem`` must be specified as ``ROT_KEY`` when
+      creating the certificates.
+   -  ``devel_ecdsa`` : return a development public key hash embedded in the BL1
+      and BL2 binaries. This hash has been obtained from the ECDSA public key
+      ``arm_rotpk_ecdsa.der``, located in ``plat/arm/board/common/rotpk``. To use
+      this option, ``arm_rotprivk_ecdsa.pem`` must be specified as ``ROT_KEY``
+      when creating the certificates.
+
+-  ``ARM_TSP_RAM_LOCATION``: location of the TSP binary. Options:
+
+   -  ``tsram`` : Trusted SRAM (default option when TBB is not enabled)
+   -  ``tdram`` : Trusted DRAM (if available)
+   -  ``dram``  : Secure region in DRAM (default option when TBB is enabled,
+                  configured by the TrustZone controller)
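+
+   As a sketch (assuming an FVP build with the TSP dispatcher), the TSP could
+   be placed in Trusted DRAM as follows:
+
+   ::
+
+       make PLAT=fvp SPD=tspd ARM_TSP_RAM_LOCATION=tdram all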
+
+-  ``ARM_XLAT_TABLES_LIB_V1``: boolean option to compile the Trusted Firmware
+   with version 1 of the translation tables library instead of version 2. It is
+   set to 0 by default, which selects version 2.
+
+-  ``ARM_CRYPTOCELL_INTEG`` : Boolean option to enable Trusted Firmware to invoke
+   ARM® TrustZone® CryptoCell functionality for Trusted Board Boot on capable
+   ARM platforms. If this option is specified, then the path to the CryptoCell
+   SBROM library must be specified via ``CCSBROM_LIB_PATH`` flag.
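+
+   A hypothetical invocation on a CryptoCell-capable ARM platform (the library
+   path below is a placeholder, not a real location) might look like:
+
+   ::
+
+       make PLAT=<platform> TRUSTED_BOARD_BOOT=1 ARM_CRYPTOCELL_INTEG=1 \
+       CCSBROM_LIB_PATH=<path-to>/<cryptocell-sbrom-library> all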
+
+For a better understanding of these options, the ARM development platform memory
+map is explained in the `Firmware Design`_.
+
+ARM CSS platform specific build options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+-  ``CSS_DETECT_PRE_1_7_0_SCP``: Boolean flag to detect SCP version
+   incompatibility. Version 1.7.0 of the SCP firmware made a non-backwards
+   compatible change to the MTL protocol, used for AP/SCP communication.
+   Trusted Firmware no longer supports earlier SCP versions. If this option is
+   set to 1 then Trusted Firmware will detect if an earlier version is in use.
+   Default is 1.
+
+-  ``CSS_LOAD_SCP_IMAGES``: Boolean flag, which when set, adds SCP\_BL2 and
+   SCP\_BL2U to the FIP and FWU\_FIP respectively, and enables them to be loaded
+   during boot. Default is 1.
+
+-  ``CSS_USE_SCMI_SDS_DRIVER``: Boolean flag which selects SCMI/SDS drivers
+   instead of SCPI/BOM driver for communicating with the SCP during power
+   management operations and for SCP RAM Firmware transfer. If this option
+   is set to 1, then SCMI/SDS drivers will be used. Default is 0.
+
+ARM FVP platform specific build options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+-  ``FVP_CLUSTER_COUNT`` : Configures the cluster count to be used to
+   build the topology tree within Trusted Firmware. By default, the
+   Trusted Firmware is configured for a dual cluster topology and this option
+   can be used to override the default value.
+
+-  ``FVP_INTERCONNECT_DRIVER``: Selects the interconnect driver to be built. The
+   default interconnect driver depends on the value of ``FVP_CLUSTER_COUNT`` as
+   explained in the options below:
+
+   -  ``FVP_CCI`` : The CCI driver is selected. This is the default
+      if 0 < ``FVP_CLUSTER_COUNT`` <= 2.
+   -  ``FVP_CCN`` : The CCN driver is selected. This is the default
+      if ``FVP_CLUSTER_COUNT`` > 2.
+
+-  ``FVP_MAX_PE_PER_CPU``: Sets the maximum number of PEs implemented on any CPU
+   in the system. This option defaults to 1. Note that the build option
+   ``ARM_PLAT_MT`` doesn't have any effect on FVP platforms.
+
+-  ``FVP_USE_GIC_DRIVER`` : Selects the GIC driver to be built. Options:
+
+   -  ``FVP_GIC600`` : The GIC600 implementation of GICv3 is selected
+   -  ``FVP_GICV2`` : The GICv2 only driver is selected
+   -  ``FVP_GICV3`` : The GICv3 only driver is selected (default option)
+   -  ``FVP_GICV3_LEGACY``: The Legacy GICv3 driver is selected (deprecated).
+      Note: If Trusted Firmware is compiled with this option on FVPs with
+      GICv3 hardware, then it configures the hardware to run in GICv2
+      emulation mode.
+
+-  ``FVP_USE_SP804_TIMER`` : Use the SP804 timer instead of the Generic Timer
+   for functions that wait for an arbitrary time length (udelay and mdelay).
+   The default value is 0.
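+
+As an illustration, some of the FVP-specific options above might be combined in
+a single invocation, for example (a hypothetical combination, not a
+requirement):
+
+::
+
+    make PLAT=fvp FVP_USE_GIC_DRIVER=FVP_GICV2 FVP_USE_SP804_TIMER=1 \
+    BL33=<path/to/bl33.bin> all fip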
+
+Debugging options
+~~~~~~~~~~~~~~~~~
+
+To compile a debug version and make the build more verbose use
+
+::
+
+    make PLAT=<platform> DEBUG=1 V=1 all
+
+AArch64 GCC uses DWARF version 4 debugging symbols by default. Some tools (for
+example DS-5) might not support this and may need an older version of DWARF
+symbols to be emitted by GCC. This can be achieved by using the
+``-gdwarf-<version>`` flag, with the version being set to 2 or 3. Setting the
+version to 2 is recommended for DS-5 versions older than 5.16.
+
+When debugging logic problems it might also be useful to disable all compiler
+optimizations by using ``-O0``.
+
+NOTE: Using ``-O0`` could cause output images to be larger and base addresses
+might need to be recalculated (see the **Memory layout on ARM development
+platforms** section in the `Firmware Design`_).
+
+Extra debug options can be passed to the build system by setting ``CFLAGS`` or
+``LDFLAGS``:
+
+.. code:: makefile
+
+    CFLAGS='-O0 -gdwarf-2'                                     \
+    make PLAT=<platform> DEBUG=1 V=1 all
+
+Note that using ``-Wl,`` style compilation driver options in ``CFLAGS`` will be
+ignored as the linker is called directly.
+
+It is also possible to introduce an infinite loop to help in debugging the
+post-BL2 phase of the Trusted Firmware. This can be done by rebuilding BL1 with
+the ``SPIN_ON_BL1_EXIT=1`` build flag. Refer to the `Summary of build options`_
+section. In this case, the developer may take control of the target using a
+debugger when indicated by the console output. When using DS-5, the following
+commands can be used:
+
+::
+
+    # Stop target execution
+    interrupt
+
+    #
+    # Prepare your debugging environment, e.g. set breakpoints
+    #
+
+    # Jump over the debug loop
+    set var $AARCH64::$Core::$PC = $AARCH64::$Core::$PC + 4
+
+    # Resume execution
+    continue
+
+Building the Test Secure Payload
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The TSP is coupled with a companion runtime service in the BL31 firmware,
+called the TSPD. Therefore, if you intend to use the TSP, the BL31 image
+must be recompiled as well. For more information on SPs and SPDs, see the
+`Secure-EL1 Payloads and Dispatchers`_ section in the `Firmware Design`_.
+
+First clean the Trusted Firmware build directory to get rid of any previous
+BL31 binary. Then to build the TSP image use:
+
+::
+
+    make PLAT=<platform> SPD=tspd all
+
+An additional boot loader binary file is created in the ``build`` directory:
+
+::
+
+    build/<platform>/<build-type>/bl32.bin
+
+Checking source code style
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When making changes to the source for submission to the project, the source
+must comply with the Linux coding style. To assist with this, the project
+Makefile contains two targets, both of which utilise the ``checkpatch.pl``
+script that ships with the Linux source tree.
+
+To check the entire source tree, you must first download a copy of
+``checkpatch.pl`` (or the full Linux source), set the ``CHECKPATCH`` environment
+variable to point to the script and build the ``checkcodebase`` target:
+
+::
+
+    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkcodebase
+
+To just check the style on the files that differ between your local branch and
+the remote master, use:
+
+::
+
+    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl checkpatch
+
+If you wish to check your patch against something other than the remote master,
+set the ``BASE_COMMIT`` variable to your desired branch. By default, ``BASE_COMMIT``
+is set to ``origin/master``.
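+
+For example, to check a patch against a hypothetical local branch named
+``integration`` instead of the remote master:
+
+::
+
+    make CHECKPATCH=<path-to-linux>/linux/scripts/checkpatch.pl \
+    BASE_COMMIT=integration checkpatch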
+
+Building and using the FIP tool
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Firmware Image Package (FIP) is a packaging format used by the Trusted Firmware
+project to package firmware images in a single binary. The number and type of
+images that should be packed in a FIP is platform specific and may include TF
+images and other firmware images required by the platform. For example, most
+platforms require a BL33 image which corresponds to the normal world bootloader
+(e.g. UEFI or U-Boot).
+
+The TF build system provides the make target ``fip`` to create a FIP file for the
+specified platform using the FIP creation tool included in the TF project.
+Examples below show how to build a FIP file for FVP, packaging TF images and a
+BL33 image.
+
+For AArch64:
+
+::
+
+    make PLAT=fvp BL33=<path/to/bl33.bin> fip
+
+For AArch32:
+
+::
+
+    make PLAT=fvp ARCH=aarch32 AARCH32_SP=sp_min BL33=<path/to/bl33.bin> fip
+
+Note that AArch32 support for a Normal world boot loader (BL33), such as
+U-Boot or UEFI, is not available upstream for FVP. Hence custom solutions are
+required to allow Linux to boot on FVP. These instructions assume that such a
+custom boot loader (BL33) is available.
+
+The resulting FIP may be found in:
+
+::
+
+    build/fvp/<build-type>/fip.bin
+
+For advanced operations on FIP files, it is also possible to build the tool
+independently and use it to create or modify FIPs. To do this, follow these
+steps:
+
+It is recommended to remove old artifacts before building the tool:
+
+::
+
+    make -C tools/fiptool clean
+
+Build the tool:
+
+::
+
+    make [DEBUG=1] [V=1] fiptool
+
+The tool binary can be located in:
+
+::
+
+    ./tools/fiptool/fiptool
+
+Invoking the tool with ``--help`` will print a help message with all available
+options.
+
+Example 1: create a new Firmware package ``fip.bin`` that contains BL2 and BL31:
+
+::
+
+    ./tools/fiptool/fiptool create \
+        --tb-fw build/<platform>/<build-type>/bl2.bin \
+        --soc-fw build/<platform>/<build-type>/bl31.bin \
+        fip.bin
+
+Example 2: view the contents of an existing Firmware package:
+
+::
+
+    ./tools/fiptool/fiptool info <path-to>/fip.bin
+
+Example 3: update the entries of an existing Firmware package:
+
+::
+
+    # Change the BL2 from Debug to Release version
+    ./tools/fiptool/fiptool update \
+        --tb-fw build/<platform>/release/bl2.bin \
+        build/<platform>/debug/fip.bin
+
+Example 4: unpack all entries from an existing Firmware package:
+
+::
+
+    # Images will be unpacked to the working directory
+    ./tools/fiptool/fiptool unpack <path-to>/fip.bin
+
+Example 5: remove an entry from an existing Firmware package:
+
+::
+
+    ./tools/fiptool/fiptool remove \
+        --tb-fw build/<platform>/debug/fip.bin
+
+Note that if the destination FIP file exists, the create, update and
+remove operations will automatically overwrite it.
+
+The unpack operation will fail if the images already exist at the
+destination. In that case, use ``-f`` or ``--force`` to continue.
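+
+For example, a forced unpack over any pre-existing images might look like:
+
+::
+
+    ./tools/fiptool/fiptool unpack --force <path-to>/fip.bin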
+
+More information about FIP can be found in the `Firmware Design`_ document.
+
+Migrating from fip\_create to fiptool
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The previous version of fiptool was called fip\_create. A compatibility script
+that emulates the basic functionality of the previous fip\_create is provided.
+However, users are strongly encouraged to migrate to fiptool.
+
+-  To create a new FIP file, replace "fip\_create" with "fiptool create".
+-  To update a FIP file, replace "fip\_create" with "fiptool update".
+-  To dump the contents of a FIP file, replace "fip\_create --dump"
+   with "fiptool info".
+
+Building FIP images with support for Trusted Board Boot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Trusted Board Boot primarily consists of the following two features:
+
+-  Image Authentication, described in `Trusted Board Boot`_, and
+-  Firmware Update, described in `Firmware Update`_
+
+The following steps should be followed to build FIP and (optionally) FWU\_FIP
+images with support for these features:
+
+#. Fulfill the dependencies of the ``mbedtls`` cryptographic and image parser
+   modules by checking out a recent version of the `mbed TLS Repository`_. It
+   is important to use a version that is compatible with TF and fixes any
+   known security vulnerabilities. See `mbed TLS Security Center`_ for more
+   information. The latest version of TF is tested with tag ``mbedtls-2.4.2``.
+
+   The ``drivers/auth/mbedtls/mbedtls_*.mk`` files contain the list of mbed TLS
+   source files the modules depend upon.
+   ``include/drivers/auth/mbedtls/mbedtls_config.h`` contains the configuration
+   options required to build the mbed TLS sources.
+
+   Note that the mbed TLS library is licensed under the Apache version 2.0
+   license. Using mbed TLS source code will affect the licensing of
+   Trusted Firmware binaries that are built using this library.
+
+#. To build the FIP image, ensure the following command line variables are set
+   while invoking ``make`` to build Trusted Firmware:
+
+   -  ``MBEDTLS_DIR=<path of the directory containing mbed TLS sources>``
+   -  ``TRUSTED_BOARD_BOOT=1``
+   -  ``GENERATE_COT=1``
+
+   In the case of ARM platforms, the location of the ROTPK hash must also be
+   specified at build time. Two locations are currently supported (see
+   ``ARM_ROTPK_LOCATION`` build option):
+
+   -  ``ARM_ROTPK_LOCATION=regs``: the ROTPK hash is obtained from the Trusted
+      root-key storage registers present in the platform. On Juno, these
+      registers are read-only. On FVP Base and Cortex models, the registers
+      are read-only, but the value can be specified using the command line
+      option ``bp.trusted_key_storage.public_key`` when launching the model.
+      On both Juno and FVP models, the default value corresponds to an
+      ECDSA-SECP256R1 public key hash, whose private part is not currently
+      available.
+
+   -  ``ARM_ROTPK_LOCATION=devel_rsa``: use the ROTPK hash that is hardcoded
+      in the ARM platform port. The private/public RSA key pair may be
+      found in ``plat/arm/board/common/rotpk``.
+
+   -  ``ARM_ROTPK_LOCATION=devel_ecdsa``: use the ROTPK hash that is hardcoded
+      in the ARM platform port. The private/public ECDSA key pair may be
+      found in ``plat/arm/board/common/rotpk``.
+
+   Example of command line using RSA development keys:
+
+   ::
+
+       MBEDTLS_DIR=<path of the directory containing mbed TLS sources> \
+       make PLAT=<platform> TRUSTED_BOARD_BOOT=1 GENERATE_COT=1        \
+       ARM_ROTPK_LOCATION=devel_rsa                                    \
+       ROT_KEY=plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem        \
+       BL33=<path-to>/<bl33_image>                                     \
+       all fip
+
+   The result of this build will be the bl1.bin and the fip.bin binaries. This
+   FIP will include the certificates corresponding to the Chain of Trust
+   described in the TBBR-client document. These certificates can also be found
+   in the output build directory.
+
+#. The optional FWU\_FIP contains any additional images to be loaded from
+   Non-Volatile storage during the `Firmware Update`_ process. To build the
+   FWU\_FIP, any FWU images required by the platform must be specified on the
+   command line. On ARM development platforms like Juno, these are:
+
+   -  NS\_BL2U. The AP non-secure Firmware Updater image.
+   -  SCP\_BL2U. The SCP Firmware Update Configuration image.
+
+   Example of Juno command line for generating both ``fwu`` and ``fwu_fip``
+   targets using RSA development keys:
+
+   ::
+
+       MBEDTLS_DIR=<path of the directory containing mbed TLS sources> \
+       make PLAT=juno TRUSTED_BOARD_BOOT=1 GENERATE_COT=1              \
+       ARM_ROTPK_LOCATION=devel_rsa                                    \
+       ROT_KEY=plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem        \
+       BL33=<path-to>/<bl33_image>                                     \
+       SCP_BL2=<path-to>/<scp_bl2_image>                               \
+       SCP_BL2U=<path-to>/<scp_bl2u_image>                             \
+       NS_BL2U=<path-to>/<ns_bl2u_image>                               \
+       all fip fwu_fip
+
+   Note: The BL2U image will be built by default and added to the FWU\_FIP.
+   The user may override this by adding ``BL2U=<path-to>/<bl2u_image>``
+   to the command line above.
+
+   Note: Building and installing the non-secure and SCP FWU images (NS\_BL1U,
+   NS\_BL2U and SCP\_BL2U) is outside the scope of this document.
+
+   The result of this build will be bl1.bin, fip.bin and fwu\_fip.bin binaries.
+   Both the FIP and FWU\_FIP will include the certificates corresponding to the
+   Chain of Trust described in the TBBR-client document. These certificates
+   can also be found in the output build directory.
+
+Building the Certificate Generation Tool
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``cert_create`` tool is built as part of the TF build process when the ``fip``
+make target is specified and TBB is enabled (as described in the previous
+section), but it can also be built separately with the following command:
+
+::
+
+    make PLAT=<platform> [DEBUG=1] [V=1] certtool
+
+For platforms that do not require their own IDs in certificate files,
+the generic ``cert_create`` tool can be built with the following command:
+
+::
+
+    make USE_TBBR_DEFS=1 [DEBUG=1] [V=1] certtool
+
+``DEBUG=1`` builds the tool in debug mode. ``V=1`` makes the build process more
+verbose. The following command should be used to obtain help about the tool:
+
+::
+
+    ./tools/cert_create/cert_create -h
+
+Building a FIP for Juno and FVP
+-------------------------------
+
+This section provides Juno and FVP specific instructions to build Trusted
+Firmware, obtain the additional required firmware, and pack it all together in
+a single FIP binary. It assumes that a `Linaro Release`_ has been installed.
+
+Note: Pre-built binaries for AArch32 are available from Linaro Release 16.12
+onwards. Before that release, pre-built binaries are only available for AArch64.
+
+Note: follow the full instructions for one platform before switching to a
+different one. Mixing instructions for different platforms may result in
+corrupted binaries.
+
+#. Clean the working directory
+
+   ::
+
+       make realclean
+
+#. Obtain SCP\_BL2 (Juno) and BL33 (all platforms)
+
+   Use the fiptool to extract the SCP\_BL2 and BL33 images from the FIP
+   package included in the Linaro release:
+
+   ::
+
+       # Build the fiptool
+       make [DEBUG=1] [V=1] fiptool
+
+       # Unpack firmware images from Linaro FIP
+       ./tools/fiptool/fiptool unpack \
+            <path/to/linaro/release>/fip.bin
+
+   The unpack operation will result in a set of binary images extracted to the
+   current working directory. The SCP\_BL2 image corresponds to
+   ``scp-fw.bin`` and BL33 corresponds to ``nt-fw.bin``.
+
+   Note: the fiptool will complain if the images to be unpacked already
+   exist in the current directory. If that is the case, either delete those
+   files or use the ``--force`` option to overwrite.
+
+   Note: for AArch32, the instructions below assume that ``nt-fw.bin`` is a
+   custom Normal world boot loader that supports AArch32.
+
+#. Build TF images and create a new FIP for FVP
+
+   ::
+
+       # AArch64
+       make PLAT=fvp BL33=nt-fw.bin all fip
+
+       # AArch32
+       make PLAT=fvp ARCH=aarch32 AARCH32_SP=sp_min BL33=nt-fw.bin all fip
+
+#. Build TF images and create a new FIP for Juno
+
+   For AArch64:
+
+   Building for AArch64 on Juno simply requires the addition of ``SCP_BL2``
+   as a build parameter.
+
+   ::
+
+       make PLAT=juno all fip \
+       BL33=<path-to-juno-oe-uboot>/SOFTWARE/bl33-uboot.bin \
+       SCP_BL2=<path-to-juno-busybox-uboot>/SOFTWARE/scp_bl2.bin
+
+   For AArch32:
+
+   Hardware restrictions on Juno prevent cold reset into AArch32 execution mode,
+   therefore BL1 and BL2 must be compiled for AArch64, and BL32 is compiled
+   separately for AArch32.
+
+   -  Before building BL32, the environment variable ``CROSS_COMPILE`` must point
+      to the AArch32 Linaro cross compiler.
+
+      ::
+
+          export CROSS_COMPILE=<path-to-aarch32-gcc>/bin/arm-linux-gnueabihf-
+
+   -  Build BL32 in AArch32.
+
+      ::
+
+          make ARCH=aarch32 PLAT=juno AARCH32_SP=sp_min \
+          RESET_TO_SP_MIN=1 JUNO_AARCH32_EL3_RUNTIME=1 bl32
+
+   -  Before building BL1 and BL2, the environment variable ``CROSS_COMPILE``
+      must point to the AArch64 Linaro cross compiler.
+
+      ::
+
+          export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-linux-gnu-
+
+   -  The following parameters should be used to build BL1 and BL2 in AArch64
+      and point to the BL32 file.
+
+      ::
+
+          make ARCH=aarch64 PLAT=juno LOAD_IMAGE_V2=1 JUNO_AARCH32_EL3_RUNTIME=1 \
+          BL33=<path-to-juno32-oe-uboot>/SOFTWARE/bl33-uboot.bin \
+          SCP_BL2=<path-to-juno32-oe-uboot>/SOFTWARE/scp_bl2.bin SPD=tspd \
+          BL32=<path-to-bl32>/bl32.bin all fip
+
+The resulting BL1 and FIP images may be found in:
+
+::
+
+    # Juno
+    ./build/juno/release/bl1.bin
+    ./build/juno/release/fip.bin
+
+    # FVP
+    ./build/fvp/release/bl1.bin
+    ./build/fvp/release/fip.bin
+
+
+Booting Firmware Update images
+-------------------------------------
+
+When Firmware Update (FWU) is enabled, there are at least two new images
+that have to be loaded: the Non-Secure FWU ROM (NS\_BL1U) and the
+FWU FIP.
+
+Juno
+~~~~
+
+The new images must be programmed in flash memory by adding
+entries in the ``SITE1/HBI0262x/images.txt`` configuration file
+on the Juno SD card (where ``x`` depends on the revision of the Juno board).
+Refer to the `Juno Getting Started Guide`_, section 2.3 "Flash memory
+programming" for more information. Users should ensure that these entries do
+not overlap with any other entries in the file.
+
+::
+
+	NOR10UPDATE: AUTO                       ;Image Update:NONE/AUTO/FORCE
+	NOR10ADDRESS: 0x00400000                ;Image Flash Address [ns_bl2u_base_address]
+	NOR10FILE: \SOFTWARE\fwu_fip.bin        ;Image File Name
+	NOR10LOAD: 00000000                     ;Image Load Address
+	NOR10ENTRY: 00000000                    ;Image Entry Point
+
+	NOR11UPDATE: AUTO                       ;Image Update:NONE/AUTO/FORCE
+	NOR11ADDRESS: 0x03EB8000                ;Image Flash Address [ns_bl1u_base_address]
+	NOR11FILE: \SOFTWARE\ns_bl1u.bin        ;Image File Name
+	NOR11LOAD: 00000000                     ;Image Load Address
+	NOR11ENTRY: 00000000                    ;Image Entry Point
+
+The address ``ns_bl1u_base_address`` is the value of ``NS_BL1U_BASE`` - 0x8000000.
+In the same way, the address ``ns_bl2u_base_address`` is the value of
+``NS_BL2U_BASE`` - 0x8000000.
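+
+As a worked example, assuming ``NS_BL1U_BASE`` is ``0x0BEB8000`` (the value
+used for NS\_BL1U in the FVP parameters below), the corresponding flash
+address would be:
+
+::
+
+    ns_bl1u_base_address = 0x0BEB8000 - 0x8000000 = 0x03EB8000
+
+which matches the ``NOR11ADDRESS`` entry shown above.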
+
+FVP
+~~~
+
+The additional fip images must be loaded with:
+
+::
+
+    --data cluster0.cpu0="<path_to>/ns_bl1u.bin"@0x0beb8000	[ns_bl1u_base_address]
+    --data cluster0.cpu0="<path_to>/fwu_fip.bin"@0x08400000	[ns_bl2u_base_address]
+
+The address ``ns_bl1u_base_address`` is the value of ``NS_BL1U_BASE``.
+In the same way, the address ``ns_bl2u_base_address`` is the value of
+``NS_BL2U_BASE``.
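+
+As a sketch, these options are simply appended to one of the Base FVP command
+lines shown later in this document, for example (all paths are placeholders):
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                            \
+    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>"      \
+    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"           \
+    --data cluster0.cpu0="<path_to>/ns_bl1u.bin"@0x0beb8000     \
+    --data cluster0.cpu0="<path_to>/fwu_fip.bin"@0x08400000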
+
+
+EL3 payloads alternative boot flow
+----------------------------------
+
+On a pre-production system, the ability to execute arbitrary, bare-metal code at
+the highest exception level may be required. It allows full, direct access to
+the hardware, for example to run silicon soak tests.
+
+Although it is possible to implement some baremetal secure firmware from
+scratch, this is a complex task on some platforms, depending on the level of
+configuration required to put the system in the expected state.
+
+Rather than booting a baremetal application, a possible compromise is to boot
+``EL3 payloads`` through the Trusted Firmware instead. This is implemented as an
+alternative boot flow, where a modified BL2 boots an EL3 payload, instead of
+loading the other BL images and passing control to BL31. It reduces the
+complexity of developing EL3 baremetal code by:
+
+-  putting the system into a known architectural state;
+-  taking care of platform secure world initialization;
+-  loading the SCP\_BL2 image if required by the platform.
+
+When booting an EL3 payload on ARM standard platforms, the configuration of the
+TrustZone controller is simplified such that only region 0 is enabled and is
+configured to permit secure access only. This gives full access to the whole
+DRAM to the EL3 payload.
+
+The system is left in the same state as when entering BL31 in the default boot
+flow. In particular:
+
+-  Running in EL3;
+-  Current state is AArch64;
+-  Little-endian data access;
+-  All exceptions disabled;
+-  MMU disabled;
+-  Caches disabled.
+
+Booting an EL3 payload
+~~~~~~~~~~~~~~~~~~~~~~
+
+The EL3 payload image is a standalone image and is not part of the FIP. It is
+not loaded by the Trusted Firmware. Therefore, there are 2 possible scenarios:
+
+-  The EL3 payload may reside in non-volatile memory (NVM) and execute in
+   place. In this case, booting it is just a matter of specifying the right
+   address in NVM through ``EL3_PAYLOAD_BASE`` when building the TF.
+
+-  The EL3 payload needs to be loaded in volatile memory (e.g. DRAM) at
+   run-time.
+
+To help in the latter scenario, the ``SPIN_ON_BL1_EXIT=1`` build option can be
+used. The infinite loop that it introduces in BL1 stops execution at the right
+moment for a debugger to take control of the target and load the payload (for
+example, over JTAG).
+
+It is expected that this loading method will work in most cases, as a debugger
+connection is usually available in a pre-production system. The user is free to
+use any other platform-specific mechanism to load the EL3 payload, though.
+
+Booting an EL3 payload on FVP
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The EL3 payloads boot flow requires the CPU's mailbox to be cleared at reset for
+the secondary CPUs' holding pen to work properly. Unfortunately, its reset value
+is undefined on the FVP platform and the FVP platform code doesn't clear it.
+Therefore, one must modify the way the model is normally invoked in order to
+clear the mailbox at start-up.
+
+One way to do that is to create an 8-byte file containing all zero bytes using
+the following command:
+
+::
+
+    dd if=/dev/zero of=mailbox.dat bs=1 count=8
+
+and pre-load it into the FVP memory at the mailbox address (i.e. ``0x04000000``)
+using the following model parameters:
+
+::
+
+    --data cluster0.cpu0=mailbox.dat@0x04000000   [Base FVPs]
+    --data=mailbox.dat@0x04000000                 [Foundation FVP]
+
+To provide the model with the EL3 payload image, the following methods may be
+used:
+
+#. If the EL3 payload is able to execute in place, it may be programmed into
+   flash memory. On Base Cortex and AEM FVPs, the following model parameter
+   loads it at the base address of the NOR FLASH1 (the NOR FLASH0 is already
+   used for the FIP):
+
+   ::
+
+       -C bp.flashloader1.fname="/path/to/el3-payload"
+
+   On Foundation FVP, there is no flash loader component and the EL3 payload
+   may be programmed anywhere in flash using method 3 below.
+
+#. When using the ``SPIN_ON_BL1_EXIT=1`` loading method, the following DS-5
+   command may be used to load the EL3 payload ELF image over JTAG:
+
+   ::
+
+       load /path/to/el3-payload.elf
+
+#. The EL3 payload may be pre-loaded in volatile memory using the following
+   model parameters:
+
+   ::
+
+       --data cluster0.cpu0="/path/to/el3-payload"@address  [Base FVPs]
+       --data="/path/to/el3-payload"@address                [Foundation FVP]
+
+   The address provided to the FVP must match the ``EL3_PAYLOAD_BASE`` address
+   used when building the Trusted Firmware.
+
+Booting an EL3 payload on Juno
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the EL3 payload is able to execute in place, it may be programmed in flash
+memory by adding an entry in the ``SITE1/HBI0262x/images.txt`` configuration file
+on the Juno SD card (where ``x`` depends on the revision of the Juno board).
+Refer to the `Juno Getting Started Guide`_, section 2.3 "Flash memory
+programming" for more information.
+
+Alternatively, the same DS-5 command mentioned in the FVP section above can
+be used to load the EL3 payload's ELF file over JTAG on Juno.
+
+Preloaded BL33 alternative boot flow
+------------------------------------
+
+Some platforms have the ability to preload BL33 into memory instead of relying
+on Trusted Firmware to load it. This may simplify packaging of the normal world
+code and improve performance in a development environment. When secure world
+cold boot is complete, Trusted Firmware simply jumps to a BL33 base address
+provided at build time.
+
+For this option to be used, the ``PRELOADED_BL33_BASE`` build option has to be
+used when compiling the Trusted Firmware. For example, the following command
+will create a FIP without a BL33 and prepare to jump to a BL33 image loaded at
+address 0x80000000:
+
+::
+
+    make PRELOADED_BL33_BASE=0x80000000 PLAT=fvp all fip
+
+Boot of a preloaded bootwrapped kernel image on Base FVP
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following example uses the AArch64 boot wrapper. This simplifies normal
+world booting while also making use of TF features. It can be obtained from its
+repository with:
+
+::
+
+    git clone git://git.kernel.org/pub/scm/linux/kernel/git/mark/boot-wrapper-aarch64.git
+
+After compiling it, an ELF file is generated. It can be loaded with the
+following command:
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A              \
+        -C bp.secureflashloader.fname=bl1.bin     \
+        -C bp.flashloader0.fname=fip.bin          \
+        -a cluster0.cpu0=<bootwrapped-kernel.elf> \
+        --start cluster0.cpu0=0x0
+
+The ``-a cluster0.cpu0=<bootwrapped-kernel.elf>`` option loads the ELF file. It
+also sets the PC register to the ELF entry point address, which is not the
+desired behaviour, so the ``--start cluster0.cpu0=0x0`` option forces the PC back
+to 0x0 (the BL1 entry point address) on CPU #0. The ``PRELOADED_BL33_BASE`` define
+used when compiling the FIP must match the ELF entry point.
+
+Boot of a preloaded bootwrapped kernel image on Juno
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The procedure to obtain and compile the boot wrapper is very similar to the case
+of the FVP. The execution must be stopped at the end of bl2\_main(), and the
+loading method explained above in the EL3 payload boot flow section may be used
+to load the ELF file over JTAG on Juno.
+
+Running the software on FVP
+---------------------------
+
+The latest version of the AArch64 build of ARM Trusted Firmware has been tested
+on the following ARM FVPs (64-bit host machine only).
+
+NOTE: Unless otherwise stated, the model version is Version 11.1 Build 11.1.22.
+
+-  ``Foundation_Platform``
+-  ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.7, Build 0.8.8702)
+-  ``FVP_Base_Cortex-A35x4``
+-  ``FVP_Base_Cortex-A53x4``
+-  ``FVP_Base_Cortex-A57x4-A53x4``
+-  ``FVP_Base_Cortex-A57x4``
+-  ``FVP_Base_Cortex-A72x4-A53x4``
+-  ``FVP_Base_Cortex-A72x4``
+-  ``FVP_Base_Cortex-A73x4-A53x4``
+-  ``FVP_Base_Cortex-A73x4``
+
+The latest version of the AArch32 build of ARM Trusted Firmware has been tested
+on the following ARM FVPs (64-bit host machine only).
+
+-  ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.7, Build 0.8.8702)
+-  ``FVP_Base_Cortex-A32x4``
+
+NOTE: The build numbers quoted above are those reported by launching the FVP
+with the ``--version`` parameter.
+
+NOTE: Linaro provides a ramdisk image in prebuilt FVP configurations and full
+file systems that can be downloaded separately. To run an FVP with a virtio
+file system image, an additional FVP configuration option
+``-C bp.virtioblockdevice.image_path="<path-to>/<file-system-image>"`` can be
+used.
+
+NOTE: The software will not work on Version 1.0 of the Foundation FVP.
+The commands below would report an ``unhandled argument`` error in this case.
+
+NOTE: FVPs can be launched with ``--cadi-server`` option such that a
+CADI-compliant debugger (for example, ARM DS-5) can connect to and control its
+execution.
+
+NOTE: Since FVP model Version 11.0 Build 11.0.34 and Version 8.5 Build 0.8.5202
+the internal synchronisation timings changed compared to older versions of the
+models. The models can be launched with ``-Q 100`` option if they are required
+to match the run time characteristics of the older versions.
+
+The Foundation FVP is a cut-down version of the AArch64 Base FVP. It can be
+downloaded for free from `ARM's website`_.
+
+The Cortex-A models listed above are also available to download from
+`ARM's website`_.
+
+Please refer to the FVP documentation for a detailed description of the model
+parameter options. A brief description of the important ones that affect the ARM
+Trusted Firmware and normal world software behavior is provided below.
+
+Obtaining the Flattened Device Trees
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Depending on the FVP configuration and Linux configuration used, different
+FDT files are required. FDTs for the Foundation and Base FVPs can be found in
+the Trusted Firmware source directory under ``fdts/``. The Foundation FVP has a
+subset of the Base FVP components. For example, the Foundation FVP lacks CLCD
+and MMC support, and has only one CPU cluster.
+
+Note: It is not recommended to use the FDTs built along with the kernel because
+not all of the FDTs listed below are available there.
+
+-  ``fvp-base-gicv2-psci.dtb``
+
+   For use with both AEMv8 and Cortex-A57-A53 Base FVPs with
+   Base memory map configuration.
+
+-  ``fvp-base-gicv2-psci-aarch32.dtb``
+
+   For use with AEMv8 and Cortex-A32 Base FVPs running Linux in AArch32 state
+   with Base memory map configuration.
+
+-  ``fvp-base-gicv3-psci.dtb``
+
+   (Default) For use with both AEMv8 and Cortex-A57-A53 Base FVPs with Base
+   memory map configuration and Linux GICv3 support.
+
+-  ``fvp-base-gicv3-psci-aarch32.dtb``
+
+   For use with AEMv8 and Cortex-A32 Base FVPs running Linux in AArch32 state
+   with Base memory map configuration and Linux GICv3 support.
+
+-  ``fvp-foundation-gicv2-psci.dtb``
+
+   For use with Foundation FVP with Base memory map configuration.
+
+-  ``fvp-foundation-gicv3-psci.dtb``
+
+   (Default) For use with Foundation FVP with Base memory map configuration
+   and Linux GICv3 support.
+
+Running on the Foundation FVP with reset to BL1 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``Foundation_Platform`` parameters should be used to boot Linux with
+4 CPUs using the AArch64 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/Foundation_Platform                   \
+    --cores=4                                       \
+    --secure-memory                                 \
+    --visualization                                 \
+    --gicv3                                         \
+    --data="<path-to>/<bl1-binary>"@0x0             \
+    --data="<path-to>/<FIP-binary>"@0x08000000      \
+    --data="<path-to>/<fdt>"@0x82000000             \
+    --data="<path-to>/<kernel-binary>"@0x80080000   \
+    --data="<path-to>/<ramdisk-binary>"@0x84000000
+
+Notes:
+
+-  BL1 is loaded at the start of the Trusted ROM.
+-  The Firmware Image Package is loaded at the start of NOR FLASH0.
+-  The Linux kernel image and device tree are loaded in DRAM.
+-  The default use-case for the Foundation FVP is to use the ``--gicv3`` option
+   and enable the GICv3 device in the model. Note that without this option,
+   the Foundation FVP defaults to the legacy (Versatile Express) memory map,
+   which is not supported by ARM Trusted Firmware.
+
+Running on the AEMv8 Base FVP with reset to BL1 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_AEMv8A-AEMv8A`` parameters should be used to boot Linux
+with 8 CPUs using the AArch64 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                            \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C bp.tzc_400.diagnostics=1                                 \
+    -C cluster0.NUM_CORES=4                                     \
+    -C cluster1.NUM_CORES=4                                     \
+    -C cache_state_modelled=1                                   \
+    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>"      \
+    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"           \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running on the AEMv8 Base FVP (AArch32) with reset to BL1 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_AEMv8A-AEMv8A`` parameters should be used to boot Linux
+with 8 CPUs using the AArch32 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                            \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C bp.tzc_400.diagnostics=1                                 \
+    -C cluster0.NUM_CORES=4                                     \
+    -C cluster1.NUM_CORES=4                                     \
+    -C cache_state_modelled=1                                   \
+    -C cluster0.cpu0.CONFIG64=0                                 \
+    -C cluster0.cpu1.CONFIG64=0                                 \
+    -C cluster0.cpu2.CONFIG64=0                                 \
+    -C cluster0.cpu3.CONFIG64=0                                 \
+    -C cluster1.cpu0.CONFIG64=0                                 \
+    -C cluster1.cpu1.CONFIG64=0                                 \
+    -C cluster1.cpu2.CONFIG64=0                                 \
+    -C cluster1.cpu3.CONFIG64=0                                 \
+    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>"      \
+    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"           \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running on the Cortex-A57-A53 Base FVP with reset to BL1 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_Cortex-A57x4-A53x4`` model parameters should be used to
+boot Linux with 8 CPUs using the AArch64 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_Cortex-A57x4-A53x4                       \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C bp.tzc_400.diagnostics=1                                 \
+    -C cache_state_modelled=1                                   \
+    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>"      \
+    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"           \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running on the Cortex-A32 Base FVP (AArch32) with reset to BL1 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_Cortex-A32x4`` model parameters should be used to
+boot Linux with 4 CPUs using the AArch32 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_Cortex-A32x4                             \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C bp.tzc_400.diagnostics=1                                 \
+    -C cache_state_modelled=1                                   \
+    -C bp.secureflashloader.fname="<path-to>/<bl1-binary>"      \
+    -C bp.flashloader0.fname="<path-to>/<FIP-binary>"           \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running on the AEMv8 Base FVP with reset to BL31 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_AEMv8A-AEMv8A`` parameters should be used to boot Linux
+with 8 CPUs using the AArch64 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                             \
+    -C pctl.startup=0.0.0.0                                      \
+    -C bp.secure_memory=1                                        \
+    -C bp.tzc_400.diagnostics=1                                  \
+    -C cluster0.NUM_CORES=4                                      \
+    -C cluster1.NUM_CORES=4                                      \
+    -C cache_state_modelled=1                                    \
+    -C cluster0.cpu0.RVBAR=0x04020000                            \
+    -C cluster0.cpu1.RVBAR=0x04020000                            \
+    -C cluster0.cpu2.RVBAR=0x04020000                            \
+    -C cluster0.cpu3.RVBAR=0x04020000                            \
+    -C cluster1.cpu0.RVBAR=0x04020000                            \
+    -C cluster1.cpu1.RVBAR=0x04020000                            \
+    -C cluster1.cpu2.RVBAR=0x04020000                            \
+    -C cluster1.cpu3.RVBAR=0x04020000                            \
+    --data cluster0.cpu0="<path-to>/<bl31-binary>"@0x04020000    \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
+    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000            \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000  \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Notes:
+
+-  Since a FIP is not loaded when using BL31 as reset entrypoint, the
+   ``--data="<path-to><bl31|bl32|bl33-binary>"@<base-address-of-binary>``
+   parameter is needed to load the individual bootloader images in memory.
+   The BL32 image is only needed if BL31 has been built to expect a Secure-EL1
+   Payload.
+
+-  The ``-C cluster<X>.cpu<Y>.RVBAR=<base-address-of-bl31>`` parameter, where
+   X and Y are the cluster and CPU numbers respectively, is used to set the
+   reset vector for each core.
+
+-  Changing the default value of ``ARM_TSP_RAM_LOCATION`` will also require
+   changing the value of
+   ``--data="<path-to><bl32-binary>"@<base-address-of-bl32>`` to the new value of
+   ``BL32_BASE``.
+
+Running on the AEMv8 Base FVP (AArch32) with reset to SP\_MIN entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_AEMv8A-AEMv8A`` parameters should be used to boot Linux
+with 8 CPUs using the AArch32 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_AEMv8A-AEMv8A                             \
+    -C pctl.startup=0.0.0.0                                      \
+    -C bp.secure_memory=1                                        \
+    -C bp.tzc_400.diagnostics=1                                  \
+    -C cluster0.NUM_CORES=4                                      \
+    -C cluster1.NUM_CORES=4                                      \
+    -C cache_state_modelled=1                                    \
+    -C cluster0.cpu0.CONFIG64=0                                  \
+    -C cluster0.cpu1.CONFIG64=0                                  \
+    -C cluster0.cpu2.CONFIG64=0                                  \
+    -C cluster0.cpu3.CONFIG64=0                                  \
+    -C cluster1.cpu0.CONFIG64=0                                  \
+    -C cluster1.cpu1.CONFIG64=0                                  \
+    -C cluster1.cpu2.CONFIG64=0                                  \
+    -C cluster1.cpu3.CONFIG64=0                                  \
+    -C cluster0.cpu0.RVBAR=0x04001000                            \
+    -C cluster0.cpu1.RVBAR=0x04001000                            \
+    -C cluster0.cpu2.RVBAR=0x04001000                            \
+    -C cluster0.cpu3.RVBAR=0x04001000                            \
+    -C cluster1.cpu0.RVBAR=0x04001000                            \
+    -C cluster1.cpu1.RVBAR=0x04001000                            \
+    -C cluster1.cpu2.RVBAR=0x04001000                            \
+    -C cluster1.cpu3.RVBAR=0x04001000                            \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
+    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000            \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000  \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Note: The load address of ``<bl32-binary>`` depends on the value of
+``BL32_BASE``. It should also match the address programmed into the RVBAR
+register.
+
+Running on the Cortex-A57-A53 Base FVP with reset to BL31 entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_Cortex-A57x4-A53x4`` model parameters should be used to
+boot Linux with 8 CPUs using the AArch64 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_Cortex-A57x4-A53x4                        \
+    -C pctl.startup=0.0.0.0                                      \
+    -C bp.secure_memory=1                                        \
+    -C bp.tzc_400.diagnostics=1                                  \
+    -C cache_state_modelled=1                                    \
+    -C cluster0.cpu0.RVBARADDR=0x04020000                        \
+    -C cluster0.cpu1.RVBARADDR=0x04020000                        \
+    -C cluster0.cpu2.RVBARADDR=0x04020000                        \
+    -C cluster0.cpu3.RVBARADDR=0x04020000                        \
+    -C cluster1.cpu0.RVBARADDR=0x04020000                        \
+    -C cluster1.cpu1.RVBARADDR=0x04020000                        \
+    -C cluster1.cpu2.RVBARADDR=0x04020000                        \
+    -C cluster1.cpu3.RVBARADDR=0x04020000                        \
+    --data cluster0.cpu0="<path-to>/<bl31-binary>"@0x04020000    \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000    \
+    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000    \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000            \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000  \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running on the Cortex-A32 Base FVP (AArch32) with reset to SP\_MIN entrypoint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ``FVP_Base_Cortex-A32x4`` model parameters should be used to
+boot Linux with 4 CPUs using the AArch32 build of ARM Trusted Firmware.
+
+::
+
+    <path-to>/FVP_Base_Cortex-A32x4                             \
+    -C pctl.startup=0.0.0.0                                     \
+    -C bp.secure_memory=1                                       \
+    -C bp.tzc_400.diagnostics=1                                 \
+    -C cache_state_modelled=1                                   \
+    -C cluster0.cpu0.RVBARADDR=0x04001000                       \
+    -C cluster0.cpu1.RVBARADDR=0x04001000                       \
+    -C cluster0.cpu2.RVBARADDR=0x04001000                       \
+    -C cluster0.cpu3.RVBARADDR=0x04001000                       \
+    --data cluster0.cpu0="<path-to>/<bl32-binary>"@0x04001000   \
+    --data cluster0.cpu0="<path-to>/<bl33-binary>"@0x88000000   \
+    --data cluster0.cpu0="<path-to>/<fdt>"@0x82000000           \
+    --data cluster0.cpu0="<path-to>/<kernel-binary>"@0x80080000 \
+    --data cluster0.cpu0="<path-to>/<ramdisk>"@0x84000000
+
+Running the software on Juno
+----------------------------
+
+This version of the ARM Trusted Firmware has been tested on variants r0, r1 and
+r2 of Juno.
+
+To execute the software stack on Juno, the version of the Juno board recovery
+image indicated in the `Linaro Release Notes`_ must be installed. If you have an
+earlier version installed or are unsure which version is installed, please
+re-install the recovery image by following the
+`Instructions for using Linaro's deliverables on Juno`_.
+
+Preparing Trusted Firmware images
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After building Trusted Firmware, the files ``bl1.bin`` and ``fip.bin`` must be
+copied to the ``SOFTWARE/`` directory of the Juno SD card.
+
+Other Juno software information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Please visit the `ARM Platforms Portal`_ to get support and obtain any other Juno
+software information. Please also refer to the `Juno Getting Started Guide`_ to
+get more detailed information about the Juno ARM development platform and how to
+configure it.
+
+Testing SYSTEM SUSPEND on Juno
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SYSTEM SUSPEND is a PSCI API which can be used to implement system suspend to
+RAM. For more details refer to section 5.16 of `PSCI`_. To test system suspend
+on Juno, issue the following commands at the Linux shell prompt:
+
+::
+
+    echo +10 > /sys/class/rtc/rtc0/wakealarm
+    echo -n mem > /sys/power/state
+
+The Juno board should suspend to RAM and then wake up after 10 seconds due to
+the wake-up interrupt from the RTC.
+
+--------------
+
+*Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _Linaro: `Linaro Release Notes`_
+.. _Linaro Release: `Linaro Release Notes`_
+.. _Linaro Release Notes: https://community.arm.com/tools/dev-platforms/b/documents/posts/linaro-release-notes-deprecated
+.. _Linaro Release 17.04: https://community.arm.com/tools/dev-platforms/b/documents/posts/linaro-release-notes-deprecated#LinaroRelease17.04
+.. _Linaro instructions: https://community.arm.com/dev-platforms/b/documents/posts/instructions-for-using-the-linaro-software-deliverables
+.. _Instructions for using Linaro's deliverables on Juno: https://community.arm.com/dev-platforms/b/documents/posts/using-linaros-deliverables-on-juno
+.. _ARM Platforms Portal: https://community.arm.com/dev-platforms/
+.. _Development Studio 5 (DS-5): http://www.arm.com/products/tools/software-tools/ds-5/index.php
+.. _Dia: https://wiki.gnome.org/Apps/Dia/Download
+.. _here: psci-lib-integration-guide.rst
+.. _Trusted Board Boot: trusted-board-boot.rst
+.. _Secure-EL1 Payloads and Dispatchers: firmware-design.rst#user-content-secure-el1-payloads-and-dispatchers
+.. _Firmware Update: firmware-update.rst
+.. _Firmware Design: firmware-design.rst
+.. _mbed TLS Repository: https://github.com/ARMmbed/mbedtls.git
+.. _mbed TLS Security Center: https://tls.mbed.org/security
+.. _ARM's website: `FVP models`_
+.. _FVP models: https://developer.arm.com/products/system-design/fixed-virtual-platforms
+.. _Juno Getting Started Guide: http://infocenter.arm.com/help/topic/com.arm.doc.dui0928e/DUI0928E_juno_arm_development_platform_gsg.pdf
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
diff --git a/docs/xlat-tables-lib-v2-design.rst b/docs/xlat-tables-lib-v2-design.rst
new file mode 100644
index 0000000..07cbf86
--- /dev/null
+++ b/docs/xlat-tables-lib-v2-design.rst
@@ -0,0 +1,417 @@
+Translation Tables Library Design
+=================================
+
+
+.. section-numbering::
+    :suffix: .
+
+.. contents::
+
+
+This document describes the design of the translation tables library (version 2)
+used by the ARM Trusted Firmware. This library provides APIs to create page
+tables based on a description of the memory layout, as well as to set up the
+system registers related to the Memory Management Unit (MMU) and to perform the
+required Translation Lookaside Buffer (TLB) maintenance operations.
+
+More specifically, some use cases that this library aims to support are:
+
+#. Statically allocate translation tables and populate them (at run-time) based
+   on a description of the memory layout. The memory layout is typically
+   provided by the platform port as a list of memory regions;
+
+#. Support for generating translation tables pertaining to a different
+   translation regime than the exception level the library code is executing at;
+
+#. Support for dynamic mapping and unmapping of regions, even while the MMU is
+   on. This can be used to temporarily map some memory regions and unmap them
+   later on when no longer needed;
+
+#. Support for non-identity virtual to physical mappings to compress the virtual
+   address space;
+
+#. Support for changing memory attributes of memory regions at run-time.
+
+
+About version 1 and version 2
+-----------------------------
+
+This document focuses on version 2 of the library, whose sources are available
+in the `lib/xlat\_tables\_v2`_ directory. Version 1 of the library can still be
+found in the `lib/xlat\_tables`_ directory, but it is less flexible and doesn't
+support dynamic mapping. Although potential bug fixes will be applied to both
+versions, future feature enhancements will focus on version 2 and might not be
+back-ported to version 1. Therefore, it is recommended to use version 2,
+especially for new platform ports.
+
+However, please note that version 2 is still in active development and is not
+considered stable yet. Hence, compatibility breaks might be introduced.
+
+From this point onwards, this document will implicitly refer to version 2 of the
+library.
+
+
+Design concepts and interfaces
+------------------------------
+
+This section presents some of the key concepts and data structures used in the
+translation tables library.
+
+`mmap` regions
+~~~~~~~~~~~~~~
+
+An ``mmap_region`` is an abstract, concise way to represent a memory region to
+map. It is one of the key interfaces to the library. It is identified by:
+
+- its physical base address;
+- its virtual base address;
+- its size;
+- its attributes;
+- its mapping granularity (optional).
+
+See the ``struct mmap_region`` type in `xlat\_tables\_v2.h`_.
+
+The user usually provides a list of such mmap regions to map and lets the
+library transpose that into a set of translation tables. As a result, the library
+might create new translation tables, update or split existing ones.
+
+The region attributes specify the type of memory (for example device or cached
+normal memory) as well as the memory access permissions (read-only or
+read-write, executable or not, secure or non-secure, and so on). In the case of
+the EL1&0 translation regime, the attributes also specify whether the region is
+a User region (EL0) or Privileged region (EL1). See the ``mmap_attr_t``
+enumeration type in `xlat\_tables\_v2.h`_. Note that for the EL1&0 translation
+regime the Execute Never attribute is set simultaneously for both EL1 and EL0.
+
+The granularity controls the translation table level to go down to when mapping
+the region. For example, assuming the MMU has been configured to use a 4KB
+granule size, the library might map a 2MB memory region using either of the two
+following options:
+
+- using a single level-2 translation table entry;
+- using a level-2 intermediate entry to a level-3 translation table (which
+  contains 512 entries, each mapping 4KB).
+
+The first solution potentially requires fewer translation tables, hence
+potentially less memory. However, if part of this 2MB region is later remapped
+with different memory attributes, the library might need to split the existing
+page tables to refine the mappings. If a single level-2 entry has been used
+here, a level-3 table will need to be allocated on the fly and the level-2
+modified to point to this new level-3 table. This has a performance cost at
+run-time.
+
+If the user knows upfront that such a remapping operation is likely to happen
+then they might enforce a 4KB mapping granularity for this 2MB region from the
+beginning; remapping some of these 4KB pages on the fly then becomes a
+lightweight operation.
+
+The region's granularity is an optional field; if it is not specified the
+library will choose the mapping granularity for this region as it sees fit (more
+details can be found in `The memory mapping algorithm`_ section below).
+
+Translation Context
+~~~~~~~~~~~~~~~~~~~
+
+The library can create or modify translation tables pertaining to a different
+translation regime than the exception level the library code is executing at.
+For example, the library might be used by EL3 software (for instance BL31) to
+create translation tables pertaining to the S-EL1&0 translation regime.
+
+This flexibility comes from the use of *translation contexts*. A *translation
+context* constitutes the superset of information used by the library to track
+the status of a set of translation tables for a given translation regime.
+
+The library internally allocates a default translation context, which pertains
+to the translation regime of the current exception level. Additional contexts
+may be explicitly allocated and initialized using the
+``REGISTER_XLAT_CONTEXT()`` macro. Separate APIs are provided to act either on
+the default translation context or on an alternative one.
+
+To register a translation context, the user must provide the library with the
+following information:
+
+* A name.
+
+  The resulting translation context variable is named after this, with
+  ``_xlat_ctx`` appended. For example, if the macro name parameter is
+  ``foo``, the context variable name will be ``foo_xlat_ctx``.
+
+* The maximum number of `mmap` regions to map.
+
+  Should account for both static and dynamic regions, if applicable.
+
+* The number of sub-translation tables to allocate.
+
+  Number of translation tables to statically allocate for this context,
+  excluding the initial lookup level translation table, which is always
+  allocated. For example, if the initial lookup level is 1, this parameter would
+  specify the number of level-2 and level-3 translation tables to pre-allocate
+  for this context.
+
+* The size of the virtual address space.
+
+  Size in bytes of the virtual address space to map using this context. This
+  will incidentally determine the number of entries in the initial lookup level
+  translation table: the library will allocate as many entries as are required
+  to map the entire virtual address space.
+
+* The size of the physical address space.
+
+  Size in bytes of the physical address space to map using this context.
+
+The default translation context is internally initialized using information
+coming (for the most part) from platform-specific defines:
+
+- name: hard-coded to ``tf``; hence the name of the default context variable is
+  ``tf_xlat_ctx``;
+- number of `mmap` regions: ``MAX_MMAP_REGIONS``;
+- number of sub-translation tables: ``MAX_XLAT_TABLES``;
+- size of the virtual address space: ``PLAT_VIRT_ADDR_SPACE_SIZE``;
+- size of the physical address space: ``PLAT_PHY_ADDR_SPACE_SIZE``.
+
+Please refer to the `Porting Guide`_ for more details about these macros.
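+
+As an illustration, registering an additional context for the S-EL1&0
+translation regime might look like the following sketch. It assumes that the
+macro takes its parameters in the order listed above; the exact signature is
+defined in `xlat\_tables\_v2.h`_ and the values used here are purely
+hypothetical.
+
+::
+
+    #include <xlat_tables_v2.h>
+
+    /*
+     * Hypothetical context named "sel1", able to hold up to 8 mmap regions,
+     * with 4 statically allocated sub-translation tables, covering 4GB of
+     * virtual and physical address space. This allocates a context variable
+     * called sel1_xlat_ctx.
+     */
+    REGISTER_XLAT_CONTEXT(sel1, 8, 4, (1ULL << 32), (1ULL << 32));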
+
+
+Static and dynamic memory regions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The library optionally supports dynamic memory mapping. This feature may be
+enabled using the ``PLAT_XLAT_TABLES_DYNAMIC`` platform build flag.
+
+When dynamic memory mapping is enabled, the library categorises mmap regions as
+*static* or *dynamic*.
+
+- *Static regions* are fixed for the lifetime of the system. They can only be
+  added early on, before the translation tables are created and populated. They
+  cannot be removed afterwards.
+
+- *Dynamic regions* can be added or removed any time.
+
+When the dynamic memory mapping feature is disabled, only static regions exist.
+
+The dynamic memory mapping feature may be used to map and unmap transient memory
+areas. This is useful when the user needs to access some memory for a fixed
+period of time, after which the memory may be discarded and reclaimed. For
+example, a memory region that is only required at boot time while the system is
+initializing, or to temporarily share a memory buffer between the normal world
+and trusted world. Note that it is up to the caller to ensure that these regions
+are not accessed concurrently while the regions are being added or removed.
+
+Although this feature provides some level of dynamic memory allocation, this
+does not allow dynamically allocating an arbitrary amount of memory at an
+arbitrary memory location. The user is still required to declare at compile-time
+the limits of these allocations; the library will deny any mapping request that
+does not fit within this pre-allocated pool of memory.
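+
+As a sketch of how this feature might be used (assuming ``xlat_tables_v2.h``
+is included and the dynamic region APIs described in the next section are
+available; the addresses and attributes below are purely illustrative):
+
+::
+
+    /* Temporarily map a 4KB buffer shared with the normal world. */
+    int ret = mmap_add_dynamic_region(0x88000000,  /* physical base address */
+                                      0x88000000,  /* virtual base address  */
+                                      PAGE_SIZE,
+                                      MT_MEMORY | MT_RW | MT_NS);
+    if (ret == 0) {
+        /* ... access the buffer ... */
+
+        /* Unmap the buffer once it is no longer needed. */
+        mmap_remove_dynamic_region(0x88000000, PAGE_SIZE);
+    }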
+
+
+Library APIs
+------------
+
+The external APIs exposed by this library are declared and documented in the
+`xlat\_tables\_v2.h`_ header file. This should be the reference point for
+getting information about the usage of the different APIs this library
+provides. This section just provides some extra details and clarifications.
+
+Although the ``mmap_region`` structure is a publicly visible type, it is not
+recommended to populate these structures by hand. Instead, wherever APIs expect
+function arguments of type ``mmap_region_t``, these should be constructed using
+the ``MAP_REGION*()`` family of helper macros. This is to limit the risk of
+compatibility breaks, should the ``mmap_region`` structure type evolve in the
+future.
+
+The ``MAP_REGION()`` and ``MAP_REGION_FLAT()`` macros do not allow specifying a
+mapping granularity, which leaves the library implementation free to choose
+it. However, in cases where a specific granularity is required, the
+``MAP_REGION2()`` macro might be used instead.
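+
+For example, a static memory layout might be described as in the following
+sketch. The addresses, sizes and attribute combinations are hypothetical and
+only serve to illustrate the macros; the exact macro parameters are documented
+in `xlat\_tables\_v2.h`_.
+
+::
+
+    static const mmap_region_t plat_regions[] = {
+        /* Flat (identity) mapping; the library chooses the granularity. */
+        MAP_REGION_FLAT(0x04000000, 0x00100000,
+                        MT_MEMORY | MT_RW | MT_SECURE),
+        /* Identity mapping with an enforced 4KB granularity, because parts
+         * of this region are expected to be remapped later on. */
+        MAP_REGION2(0x80000000, 0x80000000, 0x00200000,
+                    MT_MEMORY | MT_RO | MT_SECURE, PAGE_SIZE),
+        {0}
+    };
+
+    mmap_add(plat_regions);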
+
+As explained earlier in this document, when the dynamic mapping feature is
+disabled, there is no notion of dynamic regions. Conceptually, there are only
+static regions. For this reason (and to retain backward compatibility with the
+version 1 of the library), the APIs that map static regions do not embed the
+word *static* in their function names (for example ``mmap_add_region()``), in
+contrast with the dynamic regions APIs (for example
+``mmap_add_dynamic_region()``).
+
+Although the definition of static and dynamic regions is not based on the state
+of the MMU, the two are still related in some way. Static regions can only be
+added before ``init_xlat_tables()`` is called and ``init_xlat_tables()`` must be
+called while the MMU is still off. As a result, static regions cannot be added
+once the MMU has been enabled. Dynamic regions can be added with the MMU on or
+off. In practice, the usual call flow would look like this:
+
+#. The MMU is initially off.
+
+#. Add some static regions, add some dynamic regions.
+
+#. Initialize translation tables based on the list of mmap regions (using one of
+   the ``init_xlat_tables*()`` APIs).
+
+#. At this point, it is no longer possible to add static regions. Dynamic
+   regions can still be added or removed.
+
+#. Enable the MMU.
+
+#. Dynamic regions can continue to be added or removed.
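+
+Putting this flow together, a minimal boot-time sequence might look like the
+following sketch. It assumes an EL3 image, reuses the hypothetical
+``plat_regions`` array from the previous example, and uses made-up
+``SCRATCH_PA``/``SCRATCH_VA`` values for the dynamic region.
+
+::
+
+    /* 1. The MMU is off; register the static layout. */
+    mmap_add(plat_regions);
+
+    /* 2. Optionally add a dynamic region before the tables are created. */
+    mmap_add_dynamic_region(SCRATCH_PA, SCRATCH_VA, PAGE_SIZE,
+                            MT_MEMORY | MT_RW | MT_SECURE);
+
+    /* 3. Populate the translation tables from the mmap regions list. */
+    init_xlat_tables();
+
+    /* 4. No more static regions from this point on; turn the MMU on. */
+    enable_mmu_el3(0);
+
+    /* 5. Dynamic regions can still be added or removed. */
+    mmap_remove_dynamic_region(SCRATCH_VA, PAGE_SIZE);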
+
+Because static regions are added early on at boot time and are all in the
+control of the platform initialization code, the ``mmap_add*()`` family of APIs
+are not expected to fail. They do not return any error code.
+
+Nonetheless, these APIs will check upfront whether the region can be
+successfully added before updating the translation context structure. If the
+library detects that there is insufficient memory to meet the request, or that
+the new region will overlap another one in an invalid way, or if any other
+unexpected error is encountered, they will print an error message on the UART.
+Additionally, when asserts are enabled (typically in debug builds), an assertion
+will be triggered. Otherwise, the function call will just return straight away,
+without adding the offending memory region.
+
+
+Library limitations
+-------------------
+
+Dynamic regions are not allowed to overlap each other. Static regions are
+allowed to overlap as long as one of them is fully contained inside the other
+one. This is allowed for backwards compatibility with the previous behaviour in
+version 1 of the library.
+
+
+Implementation details
+----------------------
+
+Code structure
+~~~~~~~~~~~~~~
+
+The library is divided into 2 modules:
+
+The core module
+    Provides the main functionality of the library.
+
+    See `xlat\_tables\_internal.c`_.
+
+The architectural module
+    Provides functions that are dependent on the current execution state
+    (AArch32/AArch64), such as the functions used for TLB invalidation or MMU
+    setup.
+
+    See `aarch32/xlat\_tables\_arch.c`_ and `aarch64/xlat\_tables\_arch.c`_.
+
+Core module
+~~~~~~~~~~~
+
+From mmap regions to translation tables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All the APIs in this module work on a translation context. The translation
+context contains the list of ``mmap_region``, which holds the information of all
+the regions that are mapped at any given time. Whenever there is a request to
+map (resp. unmap) a memory region, it is added to (resp. removed from) the
+``mmap_region`` list.
+
+The mmap regions list is a conceptual way to represent the memory layout. At
+some point, the library has to convert this information into actual translation
+tables to program into the MMU.
+
+Before the ``init_xlat_tables()`` API is called, the library only acts on the
+mmap regions list. Adding a static or dynamic region at this point through one
+of the ``mmap_add*()`` APIs does not affect the translation tables in any way;
+they only get registered in the internal mmap region list. It is only when the
+user calls ``init_xlat_tables()`` that the translation tables are populated
+in memory based on the list of mmap regions registered so far. This is an
+optimization that allows creation of the initial set of translation tables in
+one go, rather than having to edit them every time while the MMU is disabled.
+
+After the ``init_xlat_tables()`` API has been called, only dynamic regions can
+be added. Changes to the translation tables (as well as the mmap regions list)
+will take effect immediately.
+
+The memory mapping algorithm
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The mapping function is implemented as a recursive algorithm. It is however
+bound by the level of depth of the translation tables (the ARMv8-A architecture
+allows up to 4 lookup levels).
+
+By default [#granularity-ref]_, the algorithm will attempt to minimize the
+number of translation tables created to satisfy the user's request. It will
+favour mapping a region using the biggest possible blocks, only creating a
+sub-table if it is strictly necessary. This is to reduce the memory footprint of
+the firmware.
+
+The most common reason for needing a sub-table is when a specific mapping
+requires a finer granularity. Misaligned regions also require a finer
+granularity than the user may have originally expected, using a lot more
+memory than anticipated. The reason is that all levels of translation are
+restricted to address translations of the same granularity as the size of the
+blocks of that level.  For example, for a 4 KiB page size, a level 2 block entry
+can only translate up to a granularity of 2 MiB. If the Physical Address is not
+aligned to 2 MiB then additional level 3 tables are also needed.
+
+Note that not every translation level allows any type of descriptor. Depending
+on the page size, levels 0 and 1 of translation may only allow table
+descriptors. If a block entry could describe a translation but that level does
+not allow block descriptors, a table descriptor will have to be used instead,
+as well as additional tables at the next level.
+
+|Alignment Example|
+
+The mmap regions are sorted in a way that simplifies the code that maps
+them. Even though this ordering is only strictly needed for overlapping static
+regions, it must also be applied for dynamic regions to maintain a consistent
+order of all regions at all times. As each new region is mapped, existing
+entries in the translation tables are checked to ensure consistency. Please
+refer to the comments in the source code of the core module for more details
+about the sorting algorithm in use.
+
+.. [#granularity-ref] That is, when mmap regions do not enforce their mapping
+                      granularity.
+
+TLB maintenance operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The library takes care of performing TLB maintenance operations when required.
+For example, when the user requests removing a dynamic region, the library
+invalidates all TLB entries associated to that region to ensure that these
+changes are visible to subsequent execution, including speculative execution,
+that uses the changed translation table entries.
+
+A counter-example is the initialization of translation tables. In this case,
+explicit TLB maintenance is not required. The ARMv8-A architecture guarantees
+that all TLBs are disabled from reset and their contents have no effect on
+address translation at reset [#tlb-reset-ref]_. Therefore, the TLB invalidation
+is deferred to the ``enable_mmu*()`` family of functions, just before the MMU is
+turned on.
+
+TLB invalidation is not required when adding dynamic regions either. Dynamic
+regions are not allowed to overlap an existing memory region. Therefore, if the
+dynamic mapping request is deemed legitimate, it automatically concerns memory
+that was not mapped in this translation regime and the library will have
+initialized its corresponding translation table entry to an invalid
+descriptor. Given that the TLBs are not architecturally permitted to hold any
+invalid translation table entry [#tlb-no-invalid-entry]_, this means that this
+mapping cannot be cached in the TLBs.
+
+.. [#tlb-reset-ref] See section D4.8 `Translation Lookaside Buffers (TLBs)`, subsection `TLB behavior at reset` in ARMv8-A, rev B.a.
+
+.. [#tlb-no-invalid-entry] See section D4.9.1 `General TLB maintenance requirements` in ARMv8-A, rev B.a.
+
+Architectural module
+~~~~~~~~~~~~~~~~~~~~
+
+This module contains functions that have different implementations for AArch32
+and AArch64. For example, it provides APIs to perform TLB maintenance
+operations, enable the MMU or calculate the Physical Address Space size. These
+functions do not need a translation context to work on.
+
+--------------
+
+*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _lib/xlat\_tables\_v2: ../lib/xlat_tables_v2
+.. _lib/xlat\_tables: ../lib/xlat_tables
+.. _xlat\_tables\_v2.h: ../include/lib/xlat_tables/xlat_tables_v2.h
+.. _xlat\_tables\_internal.c: ../lib/xlat_tables_v2/xlat_tables_internal.c
+.. _aarch32/xlat\_tables\_arch.c: ../lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+.. _aarch64/xlat\_tables\_arch.c: ../lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+.. _Porting Guide: porting-guide.rst
+.. |Alignment Example| image:: ./diagrams/xlat_align.png?raw=true
diff --git a/drivers/arm/cci/cci.c b/drivers/arm/cci/cci.c
new file mode 100644
index 0000000..e156838
--- /dev/null
+++ b/drivers/arm/cci/cci.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <cci.h>
+#include <debug.h>
+#include <mmio.h>
+#include <stdint.h>
+
+#define MAKE_CCI_PART_NUMBER(hi, lo)	((hi << 8) | lo)
+#define CCI_PART_LO_MASK		0xff
+#define CCI_PART_HI_MASK		0xf
+
+/* CCI part number codes read from Peripheral ID registers 0 and 1 */
+#define CCI400_PART_NUM		0x420
+#define CCI500_PART_NUM		0x422
+#define CCI550_PART_NUM		0x423
+
+#define CCI400_SLAVE_PORTS	5
+#define CCI500_SLAVE_PORTS	7
+#define CCI550_SLAVE_PORTS	7
+
+static uintptr_t cci_base;
+static const int *cci_slave_if_map;
+
+#if ENABLE_ASSERTIONS
+static unsigned int max_master_id;
+static int cci_num_slave_ports;
+
+static int validate_cci_map(const int *map)
+{
+	unsigned int valid_cci_map = 0;
+	int slave_if_id;
+	int i;
+
+	/* Validate the map */
+	for (i = 0; i <= max_master_id; i++) {
+		slave_if_id = map[i];
+
+		if (slave_if_id < 0)
+			continue;
+
+		if (slave_if_id >= cci_num_slave_ports) {
+			ERROR("Slave interface ID is invalid\n");
+			return 0;
+		}
+
+		if (valid_cci_map & (1 << slave_if_id)) {
+			ERROR("Multiple masters are assigned same slave interface ID\n");
+			return 0;
+		}
+		valid_cci_map |= 1 << slave_if_id;
+	}
+
+	if (!valid_cci_map) {
+		ERROR("No master is assigned a valid slave interface\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Read CCI part number from Peripheral ID registers
+ */
+static unsigned int read_cci_part_number(uintptr_t base)
+{
+	unsigned int part_lo, part_hi;
+
+	part_lo = mmio_read_32(base + PERIPHERAL_ID0) & CCI_PART_LO_MASK;
+	part_hi = mmio_read_32(base + PERIPHERAL_ID1) & CCI_PART_HI_MASK;
+
+	return MAKE_CCI_PART_NUMBER(part_hi, part_lo);
+}
+
+/*
+ * Identify a CCI device, and return the number of slaves. Return -1 for an
+ * unidentified device.
+ */
+static int get_slave_ports(unsigned int part_num)
+{
+	/* Macro to match CCI products */
+#define RET_ON_MATCH(product) \
+	case CCI ## product ## _PART_NUM: \
+		return CCI ## product ## _SLAVE_PORTS
+
+	switch (part_num) {
+
+	RET_ON_MATCH(400);
+	RET_ON_MATCH(500);
+	RET_ON_MATCH(550);
+
+	default:
+		return -1;
+	}
+
+#undef RET_ON_MATCH
+}
+#endif /* ENABLE_ASSERTIONS */
+
+void cci_init(uintptr_t base, const int *map, unsigned int num_cci_masters)
+{
+	assert(map);
+	assert(base);
+
+	cci_base = base;
+	cci_slave_if_map = map;
+
+#if ENABLE_ASSERTIONS
+	/*
+	 * Master IDs are assigned from zero, so in an array of size n
+	 * the max master ID is (n - 1).
+	 */
+	max_master_id = num_cci_masters - 1;
+	cci_num_slave_ports = get_slave_ports(read_cci_part_number(base));
+#endif
+	assert(cci_num_slave_ports >= 0);
+
+	assert(validate_cci_map(map));
+}
+
+void cci_enable_snoop_dvm_reqs(unsigned int master_id)
+{
+	int slave_if_id = cci_slave_if_map[master_id];
+
+	assert(master_id <= max_master_id);
+	assert((slave_if_id < cci_num_slave_ports) && (slave_if_id >= 0));
+	assert(cci_base);
+
+	/*
+	 * Enable Snoops and DVM messages, no need for Read/Modify/Write as
+	 * rest of bits are write ignore
+	 */
+	mmio_write_32(cci_base +
+		      SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG,
+		      DVM_EN_BIT | SNOOP_EN_BIT);
+
+	/* Wait for the dust to settle down */
+	while (mmio_read_32(cci_base + STATUS_REG) & CHANGE_PENDING_BIT)
+		;
+}
+
+void cci_disable_snoop_dvm_reqs(unsigned int master_id)
+{
+	int slave_if_id = cci_slave_if_map[master_id];
+
+	assert(master_id <= max_master_id);
+	assert((slave_if_id < cci_num_slave_ports) && (slave_if_id >= 0));
+	assert(cci_base);
+
+	/*
+	 * Disable Snoops and DVM messages, no need for Read/Modify/Write as
+	 * rest of bits are write ignore.
+	 */
+	mmio_write_32(cci_base +
+		      SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG,
+		      ~(DVM_EN_BIT | SNOOP_EN_BIT));
+
+	/* Wait for the dust to settle down */
+	while (mmio_read_32(cci_base + STATUS_REG) & CHANGE_PENDING_BIT)
+		;
+}
+
diff --git a/drivers/arm/cci400/cci400.c b/drivers/arm/cci400/cci400.c
index 6a8737a..402e5e1 100644
--- a/drivers/arm/cci400/cci400.c
+++ b/drivers/arm/cci400/cci400.c
@@ -1,59 +1,40 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <assert.h>
 #include <cci400.h>
+#include <debug.h>
 #include <mmio.h>
+#include <stdint.h>
 
 #define MAX_CLUSTERS		2
 
-static unsigned long cci_base_addr;
+static uintptr_t cci_base_addr;
 static unsigned int cci_cluster_ix_to_iface[MAX_CLUSTERS];
 
 
-void cci_init(unsigned long cci_base,
+void cci_init(uintptr_t cci_base,
 		int slave_iface3_cluster_ix,
 		int slave_iface4_cluster_ix)
 {
 	/*
 	 * Check the passed arguments are valid. The cluster indices must be
 	 * less than MAX_CLUSTERS, not the same as each other and at least one
-	 * of them must be refer to a valid cluster index.
+	 * of them must refer to a valid cluster index.
 	 */
 	assert(cci_base);
 	assert(slave_iface3_cluster_ix < MAX_CLUSTERS);
 	assert(slave_iface4_cluster_ix < MAX_CLUSTERS);
 	assert(slave_iface3_cluster_ix != slave_iface4_cluster_ix);
 	assert((slave_iface3_cluster_ix >= 0) ||
-		(slave_iface3_cluster_ix >= 0));
+		(slave_iface4_cluster_ix >= 0));
+
+	WARN("Please migrate to the common CCI driver, this driver will be" \
+		" deprecated in the future\n");
 
 	cci_base_addr = cci_base;
 	if (slave_iface3_cluster_ix >= 0)
diff --git a/drivers/arm/ccn/ccn.c b/drivers/arm/ccn/ccn.c
new file mode 100644
index 0000000..afb7d9d
--- /dev/null
+++ b/drivers/arm/ccn/ccn.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <ccn.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include "ccn_private.h"
+
+static const ccn_desc_t *ccn_plat_desc;
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+DEFINE_BAKERY_LOCK(ccn_lock);
+#endif
+
+/*******************************************************************************
+ * This function takes the base address of the CCN's programmer's view (PV), a
+ * region ID of one of the 256 regions (0-255) and a register offset within the
+ * region. It converts the first two parameters into a base address and uses it
+ * to read the register at the offset.
+ ******************************************************************************/
+static inline unsigned long long ccn_reg_read(uintptr_t periphbase,
+			     unsigned int region_id,
+			     unsigned int register_offset)
+{
+	uintptr_t region_base;
+
+	assert(periphbase);
+	assert(region_id < REGION_ID_LIMIT);
+
+	region_base = periphbase + region_id_to_base(region_id);
+	return mmio_read_64(region_base + register_offset);
+}
+
+/*******************************************************************************
+ * This function takes the base address of the CCN's programmer's view (PV), a
+ * region ID of one of the 256 regions (0-255), a register offset within the
+ * region and a value. It converts the first two parameters into a base address
+ * and uses it to write the value in the register at the offset.
+ ******************************************************************************/
+static inline void ccn_reg_write(uintptr_t periphbase,
+			  unsigned int region_id,
+			  unsigned int register_offset,
+			  unsigned long long value)
+{
+	uintptr_t region_base;
+
+	assert(periphbase);
+	assert(region_id < REGION_ID_LIMIT);
+
+	region_base = periphbase + region_id_to_base(region_id);
+	mmio_write_64(region_base + register_offset, value);
+}
+
+#if ENABLE_ASSERTIONS
+
+typedef struct rn_info {
+		unsigned char node_desc[MAX_RN_NODES];
+	} rn_info_t;
+
+/*******************************************************************************
+ * This function takes the base address of the CCN's programmer's view (PV) and
+ * the node ID of a Request Node (RN-D or RN-I). It returns the maximum number
+ * of master interfaces resident on that node. This number is equal to the least
+ * significant two bits of the node type ID + 1.
+ ******************************************************************************/
+static unsigned int ccn_get_rni_mcount(uintptr_t periphbase,
+				       unsigned int rn_id)
+{
+	unsigned int rn_type_id;
+
+	/* Use the node id to find the type of RN-I/D node */
+	rn_type_id = get_node_type(ccn_reg_read(periphbase,
+						rn_id + RNI_REGION_ID_START,
+						REGION_ID_OFFSET));
+
+	/* Return the number of master interfaces based on node type */
+	return rn_type_id_to_master_cnt(rn_type_id);
+}
+
+/*******************************************************************************
+ * This function reads the CCN registers to find the following information about
+ * the ACE/ACELite/ACELite+DVM/CHI interfaces resident on the various types of
+ * Request Nodes (RN-Fs, RN-Is and RN-Ds) in the system:
+ *
+ * 1. The total number of such interfaces that this CCN IP supports. This is the
+ *    cumulative number of interfaces across all Request node types. It is
+ *    passed back as the return value of this function.
+ *
+ * 2. The maximum number of interfaces of a type resident on a Request node of
+ *    one of the three types. This information is populated in the 'info'
+ *    array provided by the caller as described next.
+ *
+ *    The array has 64 entries. Each entry corresponds to a Request node. The
+ *    Miscellaneous node's programmer's view has RN-F, RN-I and RN-D ID
+ *    registers. For each RN-I and RN-D ID indicated as being present in these
+ *    registers, its identification register (offset 0xFF00) is read. This
+ *    register specifies the maximum number of master interfaces the node
+ *    supports. For RN-Fs it is assumed that there can be only a single fully
+ *    coherent master resident on each node. The counts for each type of node
+ *    are used to populate the array entry at the index corresponding to the node
+ *    ID i.e. rn_info[node ID] = <number of master interfaces>
+ ******************************************************************************/
+static unsigned int ccn_get_rn_master_info(uintptr_t periphbase,
+					   rn_info_t *info)
+{
+	unsigned int num_masters = 0;
+	rn_types_t rn_type;
+
+	assert (info);
+
+	for (rn_type = RN_TYPE_RNF; rn_type < NUM_RN_TYPES; rn_type++) {
+		unsigned int mn_reg_off, node_id;
+		unsigned long long rn_bitmap;
+
+		/*
+		 * RN-F, RN-I, RN-D node registers in the MN region occupy
+		 * contiguous 16 byte apart offsets.
+		 */
+		mn_reg_off = MN_RNF_NODEID_OFFSET + (rn_type << 4);
+		rn_bitmap = ccn_reg_read(periphbase, MN_REGION_ID, mn_reg_off);
+
+		FOR_EACH_PRESENT_NODE_ID(node_id, rn_bitmap) {
+			unsigned int node_mcount;
+
+			/*
+			 * A RN-F does not have a node type since it does not
+			 * export a programmer's interface. It can only have a
+			 * single fully coherent master residing on it. If the
+			 * offset of the MN(Miscellaneous Node) register points
+			 * to a RN-I/D node then the master count is set to the
+			 * maximum number of master interfaces that can possibly
+			 * reside on the node.
+			 */
+			node_mcount = (mn_reg_off == MN_RNF_NODEID_OFFSET ? 1 :
+				       ccn_get_rni_mcount(periphbase, node_id));
+
+			/*
+			 * Use this value to increment the maximum possible
+			 * master interfaces in the system.
+			 */
+			num_masters += node_mcount;
+
+			/*
+			 * Update the entry in 'info' for this node ID with
+			 * the maximum number of masters that can sit on
+			 * it. This information will be used to validate the
+			 * node information passed by the platform later.
+			 */
+			info->node_desc[node_id] = node_mcount;
+		}
+	}
+
+	return num_masters;
+}
+
+/*******************************************************************************
+ * This function validates parameters passed by the platform (in a debug build).
+ * It collects information about the maximum number of master interfaces that:
+ * a) the CCN IP can accommodate and
+ * b) can exist on each Request node.
+ * It compares this with the information provided by the platform to determine
+ * the validity of the latter.
+ ******************************************************************************/
+static void ccn_validate_plat_params(const ccn_desc_t *plat_desc)
+{
+	unsigned int master_id, num_rn_masters;
+	rn_info_t info = { {0} };
+
+	assert(plat_desc);
+	assert(plat_desc->periphbase);
+	assert(plat_desc->master_to_rn_id_map);
+	assert(plat_desc->num_masters);
+	assert(plat_desc->num_masters < CCN_MAX_RN_MASTERS);
+
+	/*
+	 * Find the number and properties of fully coherent, IO coherent and IO
+	 * coherent + DVM master interfaces
+	 */
+	num_rn_masters = ccn_get_rn_master_info(plat_desc->periphbase, &info);
+	assert(plat_desc->num_masters < num_rn_masters);
+
+	/*
+	 * Iterate through the Request nodes specified by the platform.
+	 * Decrement the count of the masters in the 'info' array for each
+	 * Request node encountered. If the count would drop below 0 then the
+	 * platform's view of this aspect of CCN configuration is incorrect.
+	 */
+	for (master_id = 0; master_id < plat_desc->num_masters; master_id++) {
+		unsigned int node_id;
+
+		node_id = plat_desc->master_to_rn_id_map[master_id];
+		assert(node_id < MAX_RN_NODES);
+		assert(info.node_desc[node_id]);
+		info.node_desc[node_id]--;
+	}
+}
+#endif /* ENABLE_ASSERTIONS */
+
+/*******************************************************************************
+ * This function validates parameters passed by the platform (in a debug build)
+ * and initialises its internal data structures. A lock is required to prevent
+ * simultaneous CCN operations at runtime (BL31 only) that add and remove
+ * Request nodes from coherency.
+ ******************************************************************************/
+void ccn_init(const ccn_desc_t *plat_desc)
+{
+#if ENABLE_ASSERTIONS
+	ccn_validate_plat_params(plat_desc);
+#endif
+
+	ccn_plat_desc = plat_desc;
+}
+
+/*******************************************************************************
+ * This function converts a bit map of master interface IDs to a bit map of the
+ * Request node IDs that they reside on.
+ ******************************************************************************/
+static unsigned long long ccn_master_to_rn_id_map(unsigned long long master_map)
+{
+	unsigned long long rn_id_map = 0;
+	unsigned int node_id, iface_id;
+
+	assert(master_map);
+	assert(ccn_plat_desc);
+
+	FOR_EACH_PRESENT_MASTER_INTERFACE(iface_id, master_map) {
+		assert(iface_id < ccn_plat_desc->num_masters);
+
+		/* Convert the master ID into the node ID */
+		node_id = ccn_plat_desc->master_to_rn_id_map[iface_id];
+
+		/* Set the bit corresponding to this node ID */
+		rn_id_map |= (1ULL << node_id);
+	}
+
+	return rn_id_map;
+}
+
+/*******************************************************************************
+ * This function executes the necessary operations to add or remove Request node
+ * IDs specified in the 'rn_id_map' bitmap from the snoop/DVM domains specified
+ * in the 'hn_id_map'. The 'region_id' specifies the ID of the first HN-F/MN
+ * on which the operation should be performed. 'op_reg_offset' specifies the
+ * type of operation (add/remove). 'stat_reg_offset' specifies the register
+ * which should be polled to determine if the operation has completed or not.
+ ******************************************************************************/
+static void ccn_snoop_dvm_do_op(unsigned long long rn_id_map,
+				unsigned long long hn_id_map,
+				unsigned int region_id,
+				unsigned int op_reg_offset,
+				unsigned int stat_reg_offset)
+{
+	unsigned int start_region_id;
+
+	assert(ccn_plat_desc);
+	assert(ccn_plat_desc->periphbase);
+
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+	bakery_lock_get(&ccn_lock);
+#endif
+	start_region_id = region_id;
+	FOR_EACH_PRESENT_REGION_ID(start_region_id, hn_id_map) {
+		ccn_reg_write(ccn_plat_desc->periphbase,
+			      start_region_id,
+			      op_reg_offset,
+			      rn_id_map);
+	}
+
+	start_region_id = region_id;
+
+	FOR_EACH_PRESENT_REGION_ID(start_region_id, hn_id_map) {
+		WAIT_FOR_DOMAIN_CTRL_OP_COMPLETION(start_region_id,
+						   stat_reg_offset,
+						   op_reg_offset,
+						   rn_id_map);
+	}
+
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+	bakery_lock_release(&ccn_lock);
+#endif
+}
+
+/*******************************************************************************
+ * The following functions provide the boot and runtime API to the platform for
+ * adding and removing master interfaces from the snoop/DVM domains. A bitmap of
+ * master interfaces IDs is passed as a parameter. It is converted into a bitmap
+ * of Request node IDs using the mapping provided by the platform while
+ * initialising the driver.
+ * For example, consider a dual cluster system where the clusters have values 0
+ * & 1 in the affinity level 1 field of their respective MPIDRs. While
+ * initialising this driver, the platform provides the mapping between each
+ * cluster and the corresponding Request node. To add or remove a cluster from
+ * the snoop and dvm domain, the bit position corresponding to the cluster ID
+ * should be set in the 'master_iface_map' i.e. to remove both clusters the
+ * bitmap would equal 0x3 (i.e. bits 0 and 1 set).
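+ *
+ * For instance (with illustrative values), both clusters of the dual cluster
+ * system above could be added to the snoop and DVM domains at boot by calling
+ * ccn_enter_snoop_dvm_domain(0x3) (bits 0 and 1 set, one per cluster), and
+ * cluster 0 alone could later be removed with ccn_exit_snoop_dvm_domain(0x1).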
+ ******************************************************************************/
+void ccn_enter_snoop_dvm_domain(unsigned long long master_iface_map)
+{
+	unsigned long long rn_id_map;
+
+	rn_id_map = ccn_master_to_rn_id_map(master_iface_map);
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_HN_NODEID_MAP(ccn_plat_desc->periphbase,
+						  MN_HNF_NODEID_OFFSET),
+			    HNF_REGION_ID_START,
+			    HNF_SDC_SET_OFFSET,
+			    HNF_SDC_STAT_OFFSET);
+
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_MN_NODEID_MAP(ccn_plat_desc->periphbase),
+			    MN_REGION_ID,
+			    MN_DDC_SET_OFFSET,
+			    MN_DDC_STAT_OFFSET);
+}
+
+void ccn_exit_snoop_dvm_domain(unsigned long long master_iface_map)
+{
+	unsigned long long rn_id_map;
+
+	rn_id_map = ccn_master_to_rn_id_map(master_iface_map);
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_HN_NODEID_MAP(ccn_plat_desc->periphbase,
+						  MN_HNF_NODEID_OFFSET),
+			    HNF_REGION_ID_START,
+			    HNF_SDC_CLR_OFFSET,
+			    HNF_SDC_STAT_OFFSET);
+
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_MN_NODEID_MAP(ccn_plat_desc->periphbase),
+			    MN_REGION_ID,
+			    MN_DDC_CLR_OFFSET,
+			    MN_DDC_STAT_OFFSET);
+}
+
+void ccn_enter_dvm_domain(unsigned long long master_iface_map)
+{
+	unsigned long long rn_id_map;
+
+	rn_id_map = ccn_master_to_rn_id_map(master_iface_map);
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_MN_NODEID_MAP(ccn_plat_desc->periphbase),
+			    MN_REGION_ID,
+			    MN_DDC_SET_OFFSET,
+			    MN_DDC_STAT_OFFSET);
+}
+
+void ccn_exit_dvm_domain(unsigned long long master_iface_map)
+{
+	unsigned long long rn_id_map;
+
+	rn_id_map = ccn_master_to_rn_id_map(master_iface_map);
+	ccn_snoop_dvm_do_op(rn_id_map,
+			    CCN_GET_MN_NODEID_MAP(ccn_plat_desc->periphbase),
+			    MN_REGION_ID,
+			    MN_DDC_CLR_OFFSET,
+			    MN_DDC_STAT_OFFSET);
+}
+
+/*******************************************************************************
+ * This function returns the run mode of all the L3 cache partitions in the
+ * system. The state is expected to be one of NO_L3, SF_ONLY, L3_HAM or
+ * L3_FAM. Instead of comparing the states reported by all HN-Fs, the state of
+ * the first present HN-F node is reported. Since the driver does not export an
+ * interface to program them separately, there is no reason to perform this
+ * check. An HN-F could report that the L3 cache is transitioning from one mode
+ * to another e.g. HNF_PM_NOL3_2_SFONLY. In this case, the function waits for
+ * the transition to complete and reports the final state.
+ ******************************************************************************/
+unsigned int ccn_get_l3_run_mode(void)
+{
+	unsigned long long hnf_pstate_stat;
+
+	assert(ccn_plat_desc);
+	assert(ccn_plat_desc->periphbase);
+
+	/*
+	 * Wait for an L3 cache partition to enter any run mode. The pstate
+	 * parameter is read from an HN-F P-state status register. A non-zero
+	 * value in bits[1:0] means that the cache is transitioning to a run
+	 * mode.
+	 */
+	do {
+		hnf_pstate_stat = ccn_reg_read(ccn_plat_desc->periphbase,
+					       HNF_REGION_ID_START,
+					       HNF_PSTATE_STAT_OFFSET);
+	} while (hnf_pstate_stat & 0x3);
+
+	return PSTATE_TO_RUN_MODE(hnf_pstate_stat);
+}
+
+/*******************************************************************************
+ * This function sets the run mode of all the L3 cache partitions in the
+ * system to one of NO_L3, SF_ONLY, L3_HAM or L3_FAM depending upon the state
+ * specified by the 'mode' argument.
+ ******************************************************************************/
+void ccn_set_l3_run_mode(unsigned int mode)
+{
+	unsigned long long mn_hnf_id_map, hnf_pstate_stat;
+	unsigned int region_id;
+
+	assert(ccn_plat_desc);
+	assert(ccn_plat_desc->periphbase);
+	assert(mode <= CCN_L3_RUN_MODE_FAM);
+
+	mn_hnf_id_map = ccn_reg_read(ccn_plat_desc->periphbase,
+				     MN_REGION_ID,
+				     MN_HNF_NODEID_OFFSET);
+	region_id = HNF_REGION_ID_START;
+
+	/* Program the desired run mode */
+	FOR_EACH_PRESENT_REGION_ID(region_id, mn_hnf_id_map) {
+		ccn_reg_write(ccn_plat_desc->periphbase,
+			      region_id,
+			      HNF_PSTATE_REQ_OFFSET,
+			      mode);
+	}
+
+	/* Wait for the caches to transition to the run mode */
+	region_id = HNF_REGION_ID_START;
+	FOR_EACH_PRESENT_REGION_ID(region_id, mn_hnf_id_map) {
+		/*
+		 * Wait for an L3 cache partition to enter a target run
+		 * mode. The pstate parameter is read from an HN-F P-state
+		 * status register.
+		 */
+		do {
+			hnf_pstate_stat = ccn_reg_read(ccn_plat_desc->periphbase,
+					       region_id,
+					       HNF_PSTATE_STAT_OFFSET);
+		} while (((hnf_pstate_stat & HNF_PSTATE_MASK) >> 2) != mode);
+	}
+}
+
+/*******************************************************************************
+ * This function configures the system address map and provides an option to
+ * enable the 3SN striping mode of Slave node operation. The Slave node IDs and
+ * the Top Address bit1 and bit0 are provided as parameters to this function.
+ * This configuration is needed only if the network contains a single SN-F or
+ * 3 SN-Fs and must be completed before the first request by the system to
+ * normal memory.
+ ******************************************************************************/
+void ccn_program_sys_addrmap(unsigned int sn0_id,
+		 unsigned int sn1_id,
+		 unsigned int sn2_id,
+		 unsigned int top_addr_bit0,
+		 unsigned int top_addr_bit1,
+		 unsigned char three_sn_en)
+{
+	unsigned long long mn_hnf_id_map, hnf_sam_ctrl_value;
+	unsigned int region_id;
+
+	assert(ccn_plat_desc);
+	assert(ccn_plat_desc->periphbase);
+
+	mn_hnf_id_map = ccn_reg_read(ccn_plat_desc->periphbase,
+				     MN_REGION_ID,
+				     MN_HNF_NODEID_OFFSET);
+	region_id = HNF_REGION_ID_START;
+	hnf_sam_ctrl_value = MAKE_HNF_SAM_CTRL_VALUE(sn0_id,
+						     sn1_id,
+						     sn2_id,
+						     top_addr_bit0,
+						     top_addr_bit1,
+						     three_sn_en);
+
+	FOR_EACH_PRESENT_REGION_ID(region_id, mn_hnf_id_map) {
+
+		/* Program the SAM control register */
+		ccn_reg_write(ccn_plat_desc->periphbase,
+			      region_id,
+			      HNF_SAM_CTRL_OFFSET,
+			      hnf_sam_ctrl_value);
+	}
+
+}
+
+/*******************************************************************************
+ * This function returns the part0 id from the peripheralID 0 register
+ * in CCN. This id can be used to distinguish the CCN variant present in the
+ * system.
+ ******************************************************************************/
+int ccn_get_part0_id(uintptr_t periphbase)
+{
+	assert(periphbase);
+	return (int)(mmio_read_64(periphbase
+			+ MN_PERIPH_ID_0_1_OFFSET) & 0xFF);
+}
diff --git a/drivers/arm/ccn/ccn_private.h b/drivers/arm/ccn/ccn_private.h
new file mode 100644
index 0000000..c17c274
--- /dev/null
+++ b/drivers/arm/ccn/ccn_private.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CCN_PRIVATE_H__
+#define __CCN_PRIVATE_H__
+
+/*
+ * A CCN implementation can have a maximum of 64 Request nodes with node IDs
+ * from 0-63. These IDs are split across the three types of Request nodes
+ * i.e. RN-F, RN-D and RN-I.
+ */
+#define MAX_RN_NODES		64
+
+/* Enum used to loop through the 3 types of Request nodes */
+typedef enum rn_types {
+	RN_TYPE_RNF = 0,
+	RN_TYPE_RNI,
+	RN_TYPE_RND,
+	NUM_RN_TYPES
+} rn_types_t;
+
+/* Macro to convert a region id to its base address */
+#define region_id_to_base(id)	((id) << 16)
+
+/*
+ * Macro to calculate the number of master interfaces resident on a RN-I/RN-D.
+ * Value of first two bits of the RN-I/D node type + 1 == Maximum number of
+ * ACE-Lite or ACE-Lite+DVM interfaces supported on this node. E.g.
+ *
+ * 0x14 : RN-I with 1 ACE-Lite interface
+ * 0x15 : RN-I with 2 ACE-Lite interfaces
+ * 0x16 : RN-I with 3 ACE-Lite interfaces
+ */
+#define rn_type_id_to_master_cnt(id)	(((id) & 0x3) + 1)
+
+/*
+ * Constants used to identify a region in the programmer's view. These are
+ * common for all regions.
+ */
+#define REGION_ID_LIMIT		256
+#define REGION_ID_OFFSET	0xFF00
+
+#define REGION_NODE_ID_SHIFT	8
+#define REGION_NODE_ID_MASK	0x7f
+#define get_node_id(id_reg)	(((id_reg) >> REGION_NODE_ID_SHIFT) \
+				 & REGION_NODE_ID_MASK)
+
+#define REGION_NODE_TYPE_SHIFT	0
+#define REGION_NODE_TYPE_MASK	0x1f
+#define get_node_type(id_reg)	(((id_reg) >> REGION_NODE_TYPE_SHIFT) \
+				 & REGION_NODE_TYPE_MASK)
+
+/* Common offsets of registers to enter or exit a snoop/dvm domain */
+#define DOMAIN_CTRL_STAT_OFFSET	0x0200
+#define DOMAIN_CTRL_SET_OFFSET	0x0210
+#define DOMAIN_CTRL_CLR_OFFSET	0x0220
+
+/*
+ * This macro is used to determine if an operation to add or remove a Request
+ * node from the snoop/dvm domain has completed. 'rn_id_map' is a bit map of
+ * nodes. It was used to program the SET or CLEAR control register. The type of
+ * register is specified by 'op_reg_offset'. 'status_reg' is the bit map of
+ * nodes currently present in the snoop/dvm domain. 'rn_id_map' and 'status_reg'
+ * are logically ANDed and the result is stored back in 'status_reg'. There
+ * are two outcomes of this operation:
+ *
+ * 1. If the DOMAIN_CTRL_SET_OFFSET register was programmed, then the set bits in
+ *    'rn_id_map' should appear in 'status_reg' when the operation completes. So
+ *    after the AND operation, at some point of time 'status_reg' should equal
+ *    'rn_id_map'.
+ *
+ * 2. If the DOMAIN_CTRL_CLR_OFFSET register was programmed, then the set bits in
+ *    'rn_id_map' should disappear in 'status_reg' when the operation
+ *    completes. So after the AND operation, at some point of time 'status_reg'
+ *    should equal 0.
+ */
+#define WAIT_FOR_DOMAIN_CTRL_OP_COMPLETION(region_id, stat_reg_offset,		\
+					   op_reg_offset, rn_id_map)		\
+	{									\
+		unsigned long long status_reg;						\
+		do {								\
+			status_reg = ccn_reg_read((ccn_plat_desc->periphbase),	\
+						  (region_id),			\
+						  (stat_reg_offset));		\
+			status_reg &= (rn_id_map);				\
+		} while ((op_reg_offset) == DOMAIN_CTRL_SET_OFFSET ?		\
+			 (rn_id_map) != status_reg : status_reg);		\
+	}
+
+/*
+ * Region ID of the Miscellaneous Node is always 0 as it is located at the base of
+ * the programmer's view.
+ */
+#define MN_REGION_ID		0
+
+#define MN_REGION_ID_START	0
+#define DEBUG_REGION_ID_START	1
+#define HNI_REGION_ID_START	8
+#define SBSX_REGION_ID_START	16
+#define HNF_REGION_ID_START	32
+#define XP_REGION_ID_START	64
+#define RNI_REGION_ID_START	128
+
+/* Selected register offsets from the base of a HNF region */
+#define HNF_CFG_CTRL_OFFSET	0x0000
+#define HNF_SAM_CTRL_OFFSET	0x0008
+#define HNF_PSTATE_REQ_OFFSET	0x0010
+#define HNF_PSTATE_STAT_OFFSET	0x0018
+#define HNF_SDC_STAT_OFFSET	DOMAIN_CTRL_STAT_OFFSET
+#define HNF_SDC_SET_OFFSET	DOMAIN_CTRL_SET_OFFSET
+#define HNF_SDC_CLR_OFFSET	DOMAIN_CTRL_CLR_OFFSET
+#define HNF_AUX_CTRL_OFFSET	0x0500
+
+/* Selected register offsets from the base of a MN region */
+#define MN_SAR_OFFSET		0x0000
+#define MN_RNF_NODEID_OFFSET	0x0180
+#define MN_RNI_NODEID_OFFSET	0x0190
+#define MN_RND_NODEID_OFFSET	0x01A0
+#define MN_HNF_NODEID_OFFSET	0x01B0
+#define MN_HNI_NODEID_OFFSET	0x01C0
+#define MN_SN_NODEID_OFFSET	0x01D0
+#define MN_DDC_STAT_OFFSET	DOMAIN_CTRL_STAT_OFFSET
+#define MN_DDC_SET_OFFSET	DOMAIN_CTRL_SET_OFFSET
+#define MN_DDC_CLR_OFFSET	DOMAIN_CTRL_CLR_OFFSET
+#define MN_PERIPH_ID_0_1_OFFSET	0xFE0
+#define MN_ID_OFFSET		REGION_ID_OFFSET
+
+/* HNF System Address Map register bit masks and shifts */
+#define HNF_SAM_CTRL_SN_ID_MASK		0x7f
+#define HNF_SAM_CTRL_SN0_ID_SHIFT	0
+#define HNF_SAM_CTRL_SN1_ID_SHIFT	8
+#define HNF_SAM_CTRL_SN2_ID_SHIFT	16
+
+#define HNF_SAM_CTRL_TAB0_MASK		ULL(0x3f)
+#define HNF_SAM_CTRL_TAB0_SHIFT		48
+#define HNF_SAM_CTRL_TAB1_MASK		ULL(0x3f)
+#define HNF_SAM_CTRL_TAB1_SHIFT		56
+
+#define HNF_SAM_CTRL_3SN_ENB_SHIFT	32
+#define HNF_SAM_CTRL_3SN_ENB_MASK	ULL(0x01)
+
+/*
+ * Macro to create a value suitable for programming into a HNF SAM Control
+ * register for enabling 3SN striping.
+ */
+#define MAKE_HNF_SAM_CTRL_VALUE(sn0, sn1, sn2, tab0, tab1, three_sn_en)     \
+	((((sn0) & HNF_SAM_CTRL_SN_ID_MASK) << HNF_SAM_CTRL_SN0_ID_SHIFT) | \
+	 (((sn1) & HNF_SAM_CTRL_SN_ID_MASK) << HNF_SAM_CTRL_SN1_ID_SHIFT) | \
+	 (((sn2) & HNF_SAM_CTRL_SN_ID_MASK) << HNF_SAM_CTRL_SN2_ID_SHIFT) | \
+	 (((tab0) & HNF_SAM_CTRL_TAB0_MASK) << HNF_SAM_CTRL_TAB0_SHIFT)   | \
+	 (((tab1) & HNF_SAM_CTRL_TAB1_MASK) << HNF_SAM_CTRL_TAB1_SHIFT)   | \
+	 (((three_sn_en) & HNF_SAM_CTRL_3SN_ENB_MASK) << HNF_SAM_CTRL_3SN_ENB_SHIFT))
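+
+/*
+ * Worked example (illustrative only): with SN-F node IDs 4, 8 and 12, both
+ * TAB fields left at 0 and 3SN striping enabled,
+ *
+ *   MAKE_HNF_SAM_CTRL_VALUE(4, 8, 12, 0, 0, 1)
+ *
+ * evaluates to (4 << 0) | (8 << 8) | (12 << 16) | (1ULL << 32),
+ * i.e. 0x1000C0804.
+ */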
+
+/* Mask to read the power state value from an HN-F P-state register */
+#define HNF_PSTATE_MASK		0xf
+
+/* Macro to extract the run mode from a p-state value */
+#define PSTATE_TO_RUN_MODE(pstate)	(((pstate) & HNF_PSTATE_MASK) >> 2)
+
+/*
+ * Helper macro that iterates through a given bit map. In each iteration,
+ * it returns the position of the next set bit.
+ * It can be used by other utility macros to iterate through all nodes
+ * or masters given a bit map of them.
+ */
+#define FOR_EACH_BIT(bit_pos, bit_map)			\
+	for (bit_pos = __builtin_ctzll(bit_map);	\
+	     bit_map;					\
+	     bit_map &= ~(1ULL << (bit_pos)),		\
+	     bit_pos = __builtin_ctzll(bit_map))
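+
+/*
+ * Illustrative usage sketch (not part of the upstream driver): for a bitmap
+ * with bits 0, 2 and 4 set, the loop below visits bit positions 0, 2 and 4
+ * in that order. 'process_node' stands in for any hypothetical per-node
+ * action:
+ *
+ *   unsigned int pos;
+ *   unsigned long long map = 0x15;
+ *
+ *   FOR_EACH_BIT(pos, map)
+ *       process_node(pos);
+ *
+ * Note that the macro consumes the bitmap: each visited bit is cleared from
+ * 'map', so pass a copy if the original value is needed afterwards.
+ */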
+
+/*
+ * Utility macro that iterates through a bit map of node IDs. In each
+ * iteration, it returns the ID of the next present node in the bit map. Node
+ * ID of a present node == position of its set bit == number of zero bits
+ * trailing (i.e. less significant than) that bit.
+ */
+#define FOR_EACH_PRESENT_NODE_ID(node_id, bit_map)	\
+		FOR_EACH_BIT(node_id, bit_map)
+
+/*
+ * Helper function to return number of set bits in bitmap
+ */
+static inline unsigned int count_set_bits(unsigned long long bitmap)
+{
+	unsigned int count = 0;
+
+	for (; bitmap; bitmap &= bitmap - 1)
+		++count;
+
+	return count;
+}
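+
+/*
+ * Example (illustrative): count_set_bits(0xf0ULL) returns 4. Each iteration
+ * of 'bitmap &= bitmap - 1' clears the lowest set bit, so the loop runs once
+ * per set bit.
+ */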
+
+/*
+ * Utility macro that iterates through a bit map of node IDs. In each iteration,
+ * it returns the ID of the next present region corresponding to a node present
+ * in the bit map. The region ID of a present node lies between the passed
+ * region ID and the passed region ID plus the number of set bits in the
+ * bitmap, i.e. the number of present nodes.
+ */
+#define FOR_EACH_PRESENT_REGION_ID(region_id, bit_map)				\
+	for (unsigned long long region_id_limit = count_set_bits(bit_map)	\
+							+ region_id;		\
+	    region_id < region_id_limit;					\
+	    region_id++)
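+
+/*
+ * Example (illustrative): if the HN-F presence bitmap read from the MN has
+ * four bits set, then starting from HNF_REGION_ID_START (32) the loop yields
+ * region IDs 32, 33, 34 and 35, one per present HN-F.
+ */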
+
+/*
+ * Same macro as FOR_EACH_PRESENT_NODE_ID, but renamed to indicate that it
+ * traverses a bit map of master interfaces.
+ */
+#define FOR_EACH_PRESENT_MASTER_INTERFACE(iface_id, bit_map)	\
+			FOR_EACH_BIT(iface_id, bit_map)
+
+/*
+ * Macro that returns the node id bit map for the Miscellaneous Node
+ */
+#define CCN_GET_MN_NODEID_MAP(periphbase)				\
+	(1 << get_node_id(ccn_reg_read(periphbase, MN_REGION_ID,	\
+						REGION_ID_OFFSET)))
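+
+/*
+ * Example (illustrative): if the Miscellaneous Node reports node ID 9 in the
+ * ID register at REGION_ID_OFFSET, this macro evaluates to 1 << 9 = 0x200,
+ * i.e. a single-bit map identifying the MN.
+ */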
+
+/*
+ * This macro returns the bitmap of Home nodes on the basis of the
+ * 'mn_hn_id_reg_offset' parameter from the Miscellaneous node's (MN)
+ * programmer's view. The MN has a register which carries the bitmap of present
+ * Home nodes of each type i.e. HN-Fs, HN-Is & HN-Ds.
+ */
+#define CCN_GET_HN_NODEID_MAP(periphbase, mn_hn_id_reg_offset)		\
+	ccn_reg_read(periphbase, MN_REGION_ID, mn_hn_id_reg_offset)
+
+#endif /* __CCN_PRIVATE_H__ */
diff --git a/drivers/arm/gic/arm_gic.c b/drivers/arm/gic/arm_gic.c
index 58fbc89..e040e0a 100644
--- a/drivers/arm/gic/arm_gic.c
+++ b/drivers/arm/gic/arm_gic.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -47,9 +23,9 @@
 	(GIC_HIGHEST_NS_PRIORITY << 16) | \
 	(GIC_HIGHEST_NS_PRIORITY << 24))
 
-static unsigned int g_gicc_base;
-static unsigned int g_gicd_base;
-static unsigned long g_gicr_base;
+static uintptr_t g_gicc_base;
+static uintptr_t g_gicd_base;
+static uintptr_t g_gicr_base;
 static const unsigned int *g_irq_sec_ptr;
 static unsigned int g_num_irqs;
 
@@ -62,7 +38,7 @@
  ******************************************************************************/
 static void gicv3_cpuif_setup(void)
 {
-	unsigned int scr_val, val;
+	unsigned int val;
 	uintptr_t base;
 
 	/*
@@ -93,35 +69,9 @@
 	while (val & WAKER_CA)
 		val = gicr_read_waker(base);
 
-	/*
-	 * We need to set SCR_EL3.NS in order to see GICv3 non-secure state.
-	 * Restore SCR_EL3.NS again before exit.
-	 */
-	scr_val = read_scr();
-	write_scr(scr_val | SCR_NS_BIT);
-	isb();	/* ensure NS=1 takes effect before accessing ICC_SRE_EL2 */
-
-	/*
-	 * By default EL2 and NS-EL1 software should be able to enable GICv3
-	 * System register access without any configuration at EL3. But it turns
-	 * out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So
-	 * we need to set it here again. In order to do that we need to enable
-	 * register access. We leave it enabled as it should be fine and might
-	 * prevent problems with later software trying to access GIC System
-	 * Registers.
-	 */
 	val = read_icc_sre_el3();
 	write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);
-
-	val = read_icc_sre_el2();
-	write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE);
-
-	write_icc_pmr_el1(GIC_PRI_MASK);
-	isb();	/* commit ICC_* changes before setting NS=0 */
-
-	/* Restore SCR_EL3 */
-	write_scr(scr_val);
-	isb();	/* ensure NS=0 takes effect immediately */
+	isb();
 }
 
 /*******************************************************************************
@@ -219,13 +169,10 @@
  ******************************************************************************/
 void arm_gic_pcpu_distif_setup(void)
 {
-	unsigned int index, irq_num;
+	unsigned int index, irq_num, sec_ppi_sgi_mask;
 
 	assert(g_gicd_base);
 
-	/* Mark all 32 SGI+PPI interrupts as Group 1 (non-secure) */
-	gicd_write_igroupr(g_gicd_base, 0, ~0);
-
 	/* Setup PPI priorities doing four at a time */
 	for (index = 0; index < 32; index += 4) {
 		gicd_write_ipriorityr(g_gicd_base, index,
@@ -233,16 +180,29 @@
 	}
 
 	assert(g_irq_sec_ptr);
+	sec_ppi_sgi_mask = 0;
+
+	/* Ensure all SGIs and PPIs are Group0 to begin with */
+	gicd_write_igroupr(g_gicd_base, 0, 0);
+
 	for (index = 0; index < g_num_irqs; index++) {
 		irq_num = g_irq_sec_ptr[index];
 		if (irq_num < MIN_SPI_ID) {
 			/* We have an SGI or a PPI */
-			gicd_clr_igroupr(g_gicd_base, irq_num);
+			sec_ppi_sgi_mask |= 1U << irq_num;
 			gicd_set_ipriorityr(g_gicd_base, irq_num,
 				GIC_HIGHEST_SEC_PRIORITY);
 			gicd_set_isenabler(g_gicd_base, irq_num);
 		}
 	}
+
+	/*
+	 * Invert the bitmask to create a mask for non-secure PPIs and
+	 * SGIs. Program the GICD_IGROUPR0 with this bit mask. This write will
+	 * update the GICR_IGROUPR0 as well in case we are running on a GICv3
+	 * system. This is critical if GICD_CTLR.ARE_NS=1.
+	 */
+	gicd_write_igroupr(g_gicd_base, 0, ~sec_ppi_sgi_mask);
 }
 
 /*******************************************************************************
@@ -317,12 +277,11 @@
 /*******************************************************************************
  * Initialize the ARM GIC driver with the provided platform inputs
 ******************************************************************************/
-void arm_gic_init(unsigned int gicc_base,
-		unsigned int gicd_base,
-		unsigned long gicr_base,
-		const unsigned int *irq_sec_ptr,
-		unsigned int num_irqs
-		)
+void arm_gic_init(uintptr_t gicc_base,
+		  uintptr_t gicd_base,
+		  uintptr_t gicr_base,
+		  const unsigned int *irq_sec_ptr,
+		  unsigned int num_irqs)
 {
 	unsigned int val;
 
@@ -395,7 +354,7 @@
 	uint32_t id;
 
 	assert(g_gicc_base);
-	id = gicc_read_hppir(g_gicc_base);
+	id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
 
 	/* Assume that all secure interrupts are S-EL1 interrupts */
 	if (id < 1022)
@@ -417,7 +376,7 @@
 	uint32_t id;
 
 	assert(g_gicc_base);
-	id = gicc_read_hppir(g_gicc_base);
+	id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
 
 	if (id < 1022)
 		return id;
@@ -429,7 +388,7 @@
 	 * Find out which non-secure interrupt it is under the assumption that
 	 * the GICC_CTLR.AckCtl bit is 0.
 	 */
-	return gicc_read_ahppir(g_gicc_base);
+	return gicc_read_ahppir(g_gicc_base) & INT_ID_MASK;
 }
 
 /*******************************************************************************
diff --git a/drivers/arm/gic/common/gic_common.c b/drivers/arm/gic/common/gic_common.c
new file mode 100644
index 0000000..d523772
--- /dev/null
+++ b/drivers/arm/gic/common/gic_common.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <gic_common.h>
+#include <mmio.h>
+#include "gic_common_private.h"
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading entire registers
+ ******************************************************************************/
+/*
+ * Accessor to read the GIC Distributor IGROUPR corresponding to the interrupt
+ * `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_igroupr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> IGROUPR_SHIFT;
+	return mmio_read_32(base + GICD_IGROUPR + (n << 2));
+}
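+
+/*
+ * Example of the indexing scheme shared by these accessors (illustrative
+ * only): each GICD_IGROUPR register covers 32 interrupt IDs, so for id = 45
+ * the accessor above computes n = 45 >> IGROUPR_SHIFT = 1 and reads the
+ * 32-bit register at byte offset GICD_IGROUPR + 4, which holds the group
+ * bits for interrupt IDs 32-63.
+ */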
+
+/*
+ * Accessor to read the GIC Distributor ISENABLER corresponding to the
+ * interrupt `id`, 32 interrupt ids at a time.
+ */
+unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ISENABLER_SHIFT;
+	return mmio_read_32(base + GICD_ISENABLER + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ICENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_icenabler(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ICENABLER_SHIFT;
+	return mmio_read_32(base + GICD_ICENABLER + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ISPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ISPENDR_SHIFT;
+	return mmio_read_32(base + GICD_ISPENDR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ICPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_icpendr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ICPENDR_SHIFT;
+	return mmio_read_32(base + GICD_ICPENDR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ISACTIVER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_isactiver(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ISACTIVER_SHIFT;
+	return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ICACTIVER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_icactiver(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ICACTIVER_SHIFT;
+	return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor IPRIORITYR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> IPRIORITYR_SHIFT;
+	return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor ICFGR corresponding to the
+ * interrupt `id`, 16 interrupt IDs at a time.
+ */
+unsigned int gicd_read_icfgr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ICFGR_SHIFT;
+	return mmio_read_32(base + GICD_ICFGR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor NSACR corresponding to the
+ * interrupt `id`, 16 interrupt IDs at a time.
+ */
+unsigned int gicd_read_nsacr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> NSACR_SHIFT;
+	return mmio_read_32(base + GICD_NSACR + (n << 2));
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for writing entire registers
+ ******************************************************************************/
+/*
+ * Accessor to write the GIC Distributor IGROUPR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_igroupr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> IGROUPR_SHIFT;
+	mmio_write_32(base + GICD_IGROUPR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ISENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_isenabler(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ISENABLER_SHIFT;
+	mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICENABLER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_icenabler(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ICENABLER_SHIFT;
+	mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ISPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_ispendr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ISPENDR_SHIFT;
+	mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICPENDR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_icpendr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ICPENDR_SHIFT;
+	mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ISACTIVER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_isactiver(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ISACTIVER_SHIFT;
+	mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICACTIVER corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_icactiver(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ICACTIVER_SHIFT;
+	mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor IPRIORITYR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+void gicd_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> IPRIORITYR_SHIFT;
+	mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor ICFGR corresponding to the
+ * interrupt `id`, 16 interrupt IDs at a time.
+ */
+void gicd_write_icfgr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ICFGR_SHIFT;
+	mmio_write_32(base + GICD_ICFGR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor NSACR corresponding to the
+ * interrupt `id`, 16 interrupt IDs at a time.
+ */
+void gicd_write_nsacr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> NSACR_SHIFT;
+	mmio_write_32(base + GICD_NSACR + (n << 2), val);
+}
+
+/*******************************************************************************
+ * GIC Distributor functions for accessing the GIC registers
+ * corresponding to a single interrupt ID. These functions use bitwise
+ * operations or appropriate register accesses to modify or return
+ * the bit-field corresponding to the single interrupt ID.
+ ******************************************************************************/
+unsigned int gicd_get_igroupr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igroupr(base, id);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+void gicd_set_igroupr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igroupr(base, id);
+
+	gicd_write_igroupr(base, id, reg_val | (1 << bit_num));
+}
+
+void gicd_clr_igroupr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igroupr(base, id);
+
+	gicd_write_igroupr(base, id, reg_val & ~(1 << bit_num));
+}
+
+void gicd_set_isenabler(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
+
+	gicd_write_isenabler(base, id, (1 << bit_num));
+}
+
+void gicd_set_icenabler(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
+
+	gicd_write_icenabler(base, id, (1 << bit_num));
+}
+
+void gicd_set_ispendr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
+
+	gicd_write_ispendr(base, id, (1 << bit_num));
+}
+
+void gicd_set_icpendr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
+
+	gicd_write_icpendr(base, id, (1 << bit_num));
+}
+
+unsigned int gicd_get_isactiver(uintptr_t base, unsigned int id)
+{
+	unsigned int bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_isactiver(base, id);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+void gicd_set_isactiver(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
+
+	gicd_write_isactiver(base, id, (1 << bit_num));
+}
+
+void gicd_set_icactiver(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
+
+	gicd_write_icactiver(base, id, (1 << bit_num));
+}
+
+void gicd_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri)
+{
+	mmio_write_8(base + GICD_IPRIORITYR + id, pri & GIC_PRI_MASK);
+}
+
+void gicd_set_icfgr(uintptr_t base, unsigned int id, unsigned int cfg)
+{
+	unsigned bit_num = id & ((1 << ICFGR_SHIFT) - 1);
+	uint32_t reg_val = gicd_read_icfgr(base, id);
+
+	/* Clear the field, and insert required configuration */
+	reg_val &= ~(GIC_CFG_MASK << bit_num);
+	reg_val |= ((cfg & GIC_CFG_MASK) << bit_num);
+
+	gicd_write_icfgr(base, id, reg_val);
+}
diff --git a/drivers/arm/gic/common/gic_common_private.h b/drivers/arm/gic/common/gic_common_private.h
new file mode 100644
index 0000000..2021f9a
--- /dev/null
+++ b/drivers/arm/gic/common/gic_common_private.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GIC_COMMON_PRIVATE_H_
+#define GIC_COMMON_PRIVATE_H_
+
+#include <gic_common.h>
+#include <mmio.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * GIC Distributor interface register accessors that are common to GICv3 & GICv2
+ ******************************************************************************/
+static inline unsigned int gicd_read_ctlr(uintptr_t base)
+{
+	return mmio_read_32(base + GICD_CTLR);
+}
+
+static inline unsigned int gicd_read_typer(uintptr_t base)
+{
+	return mmio_read_32(base + GICD_TYPER);
+}
+
+static inline unsigned int gicd_read_iidr(uintptr_t base)
+{
+	return mmio_read_32(base + GICD_IIDR);
+}
+
+static inline void gicd_write_ctlr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICD_CTLR, val);
+}
+
+/*******************************************************************************
+ * GIC Distributor function prototypes for accessing entire registers.
+ * Note: The raw register values correspond to multiple interrupt IDs and
+ * the number of interrupt IDs involved depends on the register accessed.
+ ******************************************************************************/
+unsigned int gicd_read_igroupr(uintptr_t base, unsigned int id);
+unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id);
+unsigned int gicd_read_icenabler(uintptr_t base, unsigned int id);
+unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id);
+unsigned int gicd_read_icpendr(uintptr_t base, unsigned int id);
+unsigned int gicd_read_isactiver(uintptr_t base, unsigned int id);
+unsigned int gicd_read_icactiver(uintptr_t base, unsigned int id);
+unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int id);
+unsigned int gicd_read_icfgr(uintptr_t base, unsigned int id);
+unsigned int gicd_read_nsacr(uintptr_t base, unsigned int id);
+void gicd_write_igroupr(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_isenabler(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_icenabler(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_ispendr(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_icpendr(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_isactiver(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_icactiver(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_icfgr(uintptr_t base, unsigned int id, unsigned int val);
+void gicd_write_nsacr(uintptr_t base, unsigned int id, unsigned int val);
+
+/*******************************************************************************
+ * GIC Distributor function prototypes for accessing the GIC registers
+ * corresponding to a single interrupt ID. These functions use bitwise
+ * operations or appropriate register accesses to modify or return
+ * the bit-field corresponding to the single interrupt ID.
+ ******************************************************************************/
+unsigned int gicd_get_igroupr(uintptr_t base, unsigned int id);
+void gicd_set_igroupr(uintptr_t base, unsigned int id);
+void gicd_clr_igroupr(uintptr_t base, unsigned int id);
+void gicd_set_isenabler(uintptr_t base, unsigned int id);
+void gicd_set_icenabler(uintptr_t base, unsigned int id);
+void gicd_set_ispendr(uintptr_t base, unsigned int id);
+void gicd_set_icpendr(uintptr_t base, unsigned int id);
+unsigned int gicd_get_isactiver(uintptr_t base, unsigned int id);
+void gicd_set_isactiver(uintptr_t base, unsigned int id);
+void gicd_set_icactiver(uintptr_t base, unsigned int id);
+void gicd_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri);
+void gicd_set_icfgr(uintptr_t base, unsigned int id, unsigned int cfg);
+
+#endif /* GIC_COMMON_PRIVATE_H_ */
diff --git a/drivers/arm/gic/gic_v2.c b/drivers/arm/gic/gic_v2.c
index 41603a9..29c79e0 100644
--- a/drivers/arm/gic/gic_v2.c
+++ b/drivers/arm/gic/gic_v2.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -38,73 +14,73 @@
  * GIC Distributor interface accessors for reading entire registers
  ******************************************************************************/
 
-unsigned int gicd_read_igroupr(unsigned int base, unsigned int id)
+unsigned int gicd_read_igroupr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> IGROUPR_SHIFT;
 	return mmio_read_32(base + GICD_IGROUPR + (n << 2));
 }
 
-unsigned int gicd_read_isenabler(unsigned int base, unsigned int id)
+unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ISENABLER_SHIFT;
 	return mmio_read_32(base + GICD_ISENABLER + (n << 2));
 }
 
-unsigned int gicd_read_icenabler(unsigned int base, unsigned int id)
+unsigned int gicd_read_icenabler(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ICENABLER_SHIFT;
 	return mmio_read_32(base + GICD_ICENABLER + (n << 2));
 }
 
-unsigned int gicd_read_ispendr(unsigned int base, unsigned int id)
+unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ISPENDR_SHIFT;
 	return mmio_read_32(base + GICD_ISPENDR + (n << 2));
 }
 
-unsigned int gicd_read_icpendr(unsigned int base, unsigned int id)
+unsigned int gicd_read_icpendr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ICPENDR_SHIFT;
 	return mmio_read_32(base + GICD_ICPENDR + (n << 2));
 }
 
-unsigned int gicd_read_isactiver(unsigned int base, unsigned int id)
+unsigned int gicd_read_isactiver(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ISACTIVER_SHIFT;
 	return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
 }
 
-unsigned int gicd_read_icactiver(unsigned int base, unsigned int id)
+unsigned int gicd_read_icactiver(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ICACTIVER_SHIFT;
 	return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
 }
 
-unsigned int gicd_read_ipriorityr(unsigned int base, unsigned int id)
+unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> IPRIORITYR_SHIFT;
 	return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
 }
 
-unsigned int gicd_read_itargetsr(unsigned int base, unsigned int id)
+unsigned int gicd_read_itargetsr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ITARGETSR_SHIFT;
 	return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
 }
 
-unsigned int gicd_read_icfgr(unsigned int base, unsigned int id)
+unsigned int gicd_read_icfgr(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> ICFGR_SHIFT;
 	return mmio_read_32(base + GICD_ICFGR + (n << 2));
 }
 
-unsigned int gicd_read_cpendsgir(unsigned int base, unsigned int id)
+unsigned int gicd_read_cpendsgir(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> CPENDSGIR_SHIFT;
 	return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
 }
 
-unsigned int gicd_read_spendsgir(unsigned int base, unsigned int id)
+unsigned int gicd_read_spendsgir(uintptr_t base, unsigned int id)
 {
 	unsigned n = id >> SPENDSGIR_SHIFT;
 	return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
@@ -114,73 +90,73 @@
  * GIC Distributor interface accessors for writing entire registers
  ******************************************************************************/
 
-void gicd_write_igroupr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_igroupr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> IGROUPR_SHIFT;
 	mmio_write_32(base + GICD_IGROUPR + (n << 2), val);
 }
 
-void gicd_write_isenabler(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_isenabler(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ISENABLER_SHIFT;
 	mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
 }
 
-void gicd_write_icenabler(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_icenabler(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ICENABLER_SHIFT;
 	mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
 }
 
-void gicd_write_ispendr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_ispendr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ISPENDR_SHIFT;
 	mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
 }
 
-void gicd_write_icpendr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_icpendr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ICPENDR_SHIFT;
 	mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
 }
 
-void gicd_write_isactiver(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_isactiver(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ISACTIVER_SHIFT;
 	mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
 }
 
-void gicd_write_icactiver(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_icactiver(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ICACTIVER_SHIFT;
 	mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
 }
 
-void gicd_write_ipriorityr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> IPRIORITYR_SHIFT;
 	mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
 }
 
-void gicd_write_itargetsr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_itargetsr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ITARGETSR_SHIFT;
 	mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
 }
 
-void gicd_write_icfgr(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_icfgr(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> ICFGR_SHIFT;
 	mmio_write_32(base + GICD_ICFGR + (n << 2), val);
 }
 
-void gicd_write_cpendsgir(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_cpendsgir(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> CPENDSGIR_SHIFT;
 	mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
 }
 
-void gicd_write_spendsgir(unsigned int base, unsigned int id, unsigned int val)
+void gicd_write_spendsgir(uintptr_t base, unsigned int id, unsigned int val)
 {
 	unsigned n = id >> SPENDSGIR_SHIFT;
 	mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
@@ -189,7 +165,7 @@
 /*******************************************************************************
  * GIC Distributor interface accessors for individual interrupt manipulation
  ******************************************************************************/
-unsigned int gicd_get_igroupr(unsigned int base, unsigned int id)
+unsigned int gicd_get_igroupr(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
 	unsigned int reg_val = gicd_read_igroupr(base, id);
@@ -197,7 +173,7 @@
 	return (reg_val >> bit_num) & 0x1;
 }
 
-void gicd_set_igroupr(unsigned int base, unsigned int id)
+void gicd_set_igroupr(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
 	unsigned int reg_val = gicd_read_igroupr(base, id);
@@ -205,7 +181,7 @@
 	gicd_write_igroupr(base, id, reg_val | (1 << bit_num));
 }
 
-void gicd_clr_igroupr(unsigned int base, unsigned int id)
+void gicd_clr_igroupr(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
 	unsigned int reg_val = gicd_read_igroupr(base, id);
@@ -213,42 +189,42 @@
 	gicd_write_igroupr(base, id, reg_val & ~(1 << bit_num));
 }
 
-void gicd_set_isenabler(unsigned int base, unsigned int id)
+void gicd_set_isenabler(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
 
 	gicd_write_isenabler(base, id, (1 << bit_num));
 }
 
-void gicd_set_icenabler(unsigned int base, unsigned int id)
+void gicd_set_icenabler(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
 
 	gicd_write_icenabler(base, id, (1 << bit_num));
 }
 
-void gicd_set_ispendr(unsigned int base, unsigned int id)
+void gicd_set_ispendr(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
 
 	gicd_write_ispendr(base, id, (1 << bit_num));
 }
 
-void gicd_set_icpendr(unsigned int base, unsigned int id)
+void gicd_set_icpendr(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
 
 	gicd_write_icpendr(base, id, (1 << bit_num));
 }
 
-void gicd_set_isactiver(unsigned int base, unsigned int id)
+void gicd_set_isactiver(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
 
 	gicd_write_isactiver(base, id, (1 << bit_num));
 }
 
-void gicd_set_icactiver(unsigned int base, unsigned int id)
+void gicd_set_icactiver(uintptr_t base, unsigned int id)
 {
 	unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
 
@@ -259,12 +235,8 @@
  * Make sure that the interrupt's group is set before expecting
  * this function to do its job correctly.
  */
-void gicd_set_ipriorityr(unsigned int base, unsigned int id, unsigned int pri)
+void gicd_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri)
 {
-	unsigned int reg = base + GICD_IPRIORITYR + (id & ~3);
-	unsigned int shift = (id & 3) << 3;
-	unsigned int reg_val = mmio_read_32(reg);
-
 	/*
 	 * Enforce ARM recommendation to manage priority values such
 	 * that group1 interrupts always have a lower priority than
@@ -278,17 +250,12 @@
 		pri >= GIC_HIGHEST_SEC_PRIORITY &&
 			pri <= GIC_LOWEST_SEC_PRIORITY);
 
-	reg_val &= ~(GIC_PRI_MASK << shift);
-	reg_val |= (pri & GIC_PRI_MASK) << shift;
-	mmio_write_32(reg, reg_val);
+	mmio_write_8(base + GICD_IPRIORITYR + id, pri & GIC_PRI_MASK);
 }
 
-void gicd_set_itargetsr(unsigned int base, unsigned int id, unsigned int target)
+void gicd_set_itargetsr(uintptr_t base, unsigned int id, unsigned int target)
 {
-	unsigned byte_off = id & ((1 << ITARGETSR_SHIFT) - 1);
-	unsigned int reg_val = gicd_read_itargetsr(base, id);
-
-	gicd_write_itargetsr(base, id, reg_val | (target << (byte_off << 3)));
+	mmio_write_8(base + GICD_ITARGETSR + id, target & GIC_TARGET_CPU_MASK);
 }
 
 /*******************************************************************************
diff --git a/drivers/arm/gic/gic_v3.c b/drivers/arm/gic/gic_v3.c
index f429662..3e802fd 100644
--- a/drivers/arm/gic/gic_v3.c
+++ b/drivers/arm/gic/gic_v3.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -60,8 +36,8 @@
 			/* Disable this print for now as it appears every time
 			 * when using PSCI CPU_SUSPEND.
 			 * TODO: Print this only the first time for each CPU.
-			 * INFO("GICv3 - Found RDIST for MPIDR(0x%lx) at 0x%lx\n",
-			 *	mpidr, addr);
+			 * INFO("GICv3 - Found RDIST for MPIDR(0x%lx) at %p\n",
+			 *	mpidr, (void *) addr);
 			 */
 			return addr;
 		}
diff --git a/drivers/arm/gic/v2/gicv2_helpers.c b/drivers/arm/gic/v2/gicv2_helpers.c
new file mode 100644
index 0000000..0df50fb
--- /dev/null
+++ b/drivers/arm/gic/v2/gicv2_helpers.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include "../common/gic_common_private.h"
+#include "gicv2_private.h"
+
+/*
+ * Accessor to read the GIC Distributor ITARGETSR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+unsigned int gicd_read_itargetsr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> ITARGETSR_SHIFT;
+	return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor CPENDSGIR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+unsigned int gicd_read_cpendsgir(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> CPENDSGIR_SHIFT;
+	return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
+}
+
+/*
+ * Accessor to read the GIC Distributor SPENDSGIR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+unsigned int gicd_read_spendsgir(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> SPENDSGIR_SHIFT;
+	return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
+}
+
+/*
+ * Accessor to write the GIC Distributor ITARGETSR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+void gicd_write_itargetsr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> ITARGETSR_SHIFT;
+	mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor CPENDSGIR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+void gicd_write_cpendsgir(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> CPENDSGIR_SHIFT;
+	mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
+}
+
+/*
+ * Accessor to write the GIC Distributor SPENDSGIR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+void gicd_write_spendsgir(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> SPENDSGIR_SHIFT;
+	mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
+}
+
+/*******************************************************************************
+ * Get the current CPU bit mask from GICD_ITARGETSR0
+ ******************************************************************************/
+unsigned int gicv2_get_cpuif_id(uintptr_t base)
+{
+	unsigned int val;
+
+	val = gicd_read_itargetsr(base, 0);
+	return val & GIC_TARGET_CPU_MASK;
+}
+
+/*******************************************************************************
+ * Helper function to configure the default attributes of SPIs.
+ ******************************************************************************/
+void gicv2_spis_configure_defaults(uintptr_t gicd_base)
+{
+	unsigned int index, num_ints;
+
+	num_ints = gicd_read_typer(gicd_base);
+	num_ints &= TYPER_IT_LINES_NO_MASK;
+	num_ints = (num_ints + 1) << 5;
+
+	/*
+	 * Treat all SPIs as G1NS by default. The number of interrupts is
+	 * calculated as 32 * (IT_LINES + 1). We do 32 at a time.
+	 */
+	for (index = MIN_SPI_ID; index < num_ints; index += 32)
+		gicd_write_igroupr(gicd_base, index, ~0U);
+
+	/* Setup the default SPI priorities doing four at a time */
+	for (index = MIN_SPI_ID; index < num_ints; index += 4)
+		gicd_write_ipriorityr(gicd_base,
+				      index,
+				      GICD_IPRIORITYR_DEF_VAL);
+
+	/* Treat all SPIs as level triggered by default, 16 at a time */
+	for (index = MIN_SPI_ID; index < num_ints; index += 16)
+		gicd_write_icfgr(gicd_base, index, 0);
+}
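+
+/*
+ * Example (illustrative): if GICD_TYPER.ITLinesNumber reads as 5, the
+ * function above computes num_ints = (5 + 1) << 5 = 192, i.e. the
+ * distributor implements interrupt IDs 0-191 and the loops walk the SPI
+ * range 32-191.
+ */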
+
+#if !ERROR_DEPRECATED
+/*******************************************************************************
+ * Helper function to configure secure G0 SPIs.
+ ******************************************************************************/
+void gicv2_secure_spis_configure(uintptr_t gicd_base,
+				     unsigned int num_ints,
+				     const unsigned int *sec_intr_list)
+{
+	unsigned int index, irq_num;
+
+	/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
+	assert(num_ints ? (uintptr_t)sec_intr_list : 1);
+
+	for (index = 0; index < num_ints; index++) {
+		irq_num = sec_intr_list[index];
+		if (irq_num >= MIN_SPI_ID) {
+			/* Configure this interrupt as a secure interrupt */
+			gicd_clr_igroupr(gicd_base, irq_num);
+
+			/* Set the priority of this interrupt */
+			gicd_set_ipriorityr(gicd_base,
+					      irq_num,
+					      GIC_HIGHEST_SEC_PRIORITY);
+
+			/* Target the secure interrupts to primary CPU */
+			gicd_set_itargetsr(gicd_base, irq_num,
+					gicv2_get_cpuif_id(gicd_base));
+
+			/* Enable this interrupt */
+			gicd_set_isenabler(gicd_base, irq_num);
+		}
+	}
+
+}
+#endif
+
+/*******************************************************************************
+ * Helper function to configure properties of secure G0 SPIs.
+ ******************************************************************************/
+void gicv2_secure_spis_configure_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num)
+{
+	unsigned int i;
+	const interrupt_prop_t *prop_desc;
+
+	/* Make sure there's a valid property array */
+	assert(interrupt_props_num != 0 ? (uintptr_t) interrupt_props : 1);
+
+	for (i = 0; i < interrupt_props_num; i++) {
+		prop_desc = &interrupt_props[i];
+
+		if (prop_desc->intr_num < MIN_SPI_ID)
+			continue;
+
+		/* Configure this interrupt as a secure interrupt */
+		assert(prop_desc->intr_grp == GICV2_INTR_GROUP0);
+		gicd_clr_igroupr(gicd_base, prop_desc->intr_num);
+
+		/* Set the priority of this interrupt */
+		gicd_set_ipriorityr(gicd_base, prop_desc->intr_num,
+				prop_desc->intr_pri);
+
+		/* Target the secure interrupts to primary CPU */
+		gicd_set_itargetsr(gicd_base, prop_desc->intr_num,
+				gicv2_get_cpuif_id(gicd_base));
+
+		/* Set interrupt configuration */
+		gicd_set_icfgr(gicd_base, prop_desc->intr_num,
+				prop_desc->intr_cfg);
+
+		/* Enable this interrupt */
+		gicd_set_isenabler(gicd_base, prop_desc->intr_num);
+	}
+}
+
+#if !ERROR_DEPRECATED
+/*******************************************************************************
+ * Helper function to configure secure G0 SGIs and PPIs.
+ ******************************************************************************/
+void gicv2_secure_ppi_sgi_setup(uintptr_t gicd_base,
+					unsigned int num_ints,
+					const unsigned int *sec_intr_list)
+{
+	unsigned int index, irq_num, sec_ppi_sgi_mask = 0;
+
+	/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
+	assert(num_ints ? (uintptr_t)sec_intr_list : 1);
+
+	/*
+	 * Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
+	 * more scalable approach as it avoids clearing the enable bits in the
+	 * GICD_CTLR.
+	 */
+	gicd_write_icenabler(gicd_base, 0, ~0);
+
+	/* Setup the default PPI/SGI priorities doing four at a time */
+	for (index = 0; index < MIN_SPI_ID; index += 4)
+		gicd_write_ipriorityr(gicd_base,
+				      index,
+				      GICD_IPRIORITYR_DEF_VAL);
+
+	for (index = 0; index < num_ints; index++) {
+		irq_num = sec_intr_list[index];
+		if (irq_num < MIN_SPI_ID) {
+			/* We have an SGI or a PPI. They are Group0 at reset */
+			sec_ppi_sgi_mask |= 1U << irq_num;
+
+			/* Set the priority of this interrupt */
+			gicd_set_ipriorityr(gicd_base,
+					    irq_num,
+					    GIC_HIGHEST_SEC_PRIORITY);
+		}
+	}
+
+	/*
+	 * Invert the bitmask to create a mask for non-secure PPIs and
+	 * SGIs. Program the GICD_IGROUPR0 with this bit mask.
+	 */
+	gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
+
+	/* Enable the Group 0 SGIs and PPIs */
+	gicd_write_isenabler(gicd_base, 0, sec_ppi_sgi_mask);
+}
+#endif
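+
+/*
+ * Worked example of the sec_ppi_sgi_mask handling used by the SGI/PPI setup
+ * functions in this file (illustrative): if the secure interrupt list
+ * contains PPI 29 and SGI 8, the mask ends up as (1 << 29) | (1 << 8), i.e.
+ * 0x20000100. Writing ~0x20000100 to GICD_IGROUPR0 keeps those two
+ * interrupts in Group 0 (secure) while marking all remaining SGIs and PPIs
+ * as Group 1 (non-secure), and writing 0x20000100 to GICD_ISENABLER0 then
+ * enables only the secure ones.
+ */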
+
+/*******************************************************************************
+ * Helper function to configure properties of secure G0 SGIs and PPIs.
+ ******************************************************************************/
+void gicv2_secure_ppi_sgi_setup_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num)
+{
+	unsigned int i;
+	uint32_t sec_ppi_sgi_mask = 0;
+	const interrupt_prop_t *prop_desc;
+
+	/* Make sure there's a valid property array */
+	assert(interrupt_props_num != 0 ? (uintptr_t) interrupt_props : 1);
+
+	/*
+	 * Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
+	 * more scalable approach as it avoids clearing the enable bits in the
+	 * GICD_CTLR.
+	 */
+	gicd_write_icenabler(gicd_base, 0, ~0);
+
+	/* Setup the default PPI/SGI priorities doing four at a time */
+	for (i = 0; i < MIN_SPI_ID; i += 4)
+		gicd_write_ipriorityr(gicd_base, i, GICD_IPRIORITYR_DEF_VAL);
+
+	for (i = 0; i < interrupt_props_num; i++) {
+		prop_desc = &interrupt_props[i];
+
+		if (prop_desc->intr_num >= MIN_SPI_ID)
+			continue;
+
+		/* Configure this interrupt as a secure interrupt */
+		assert(prop_desc->intr_grp == GICV2_INTR_GROUP0);
+
+		/*
+		 * Set interrupt configuration for PPIs. Configuration for SGIs
+		 * is ignored.
+		 */
+		if ((prop_desc->intr_num >= MIN_PPI_ID) &&
+				(prop_desc->intr_num < MIN_SPI_ID)) {
+			gicd_set_icfgr(gicd_base, prop_desc->intr_num,
+					prop_desc->intr_cfg);
+		}
+
+		/* We have an SGI or a PPI. They are Group0 at reset */
+		sec_ppi_sgi_mask |= (1u << prop_desc->intr_num);
+
+		/* Set the priority of this interrupt */
+		gicd_set_ipriorityr(gicd_base, prop_desc->intr_num,
+				prop_desc->intr_pri);
+	}
+
+	/*
+	 * Invert the bitmask to create a mask for non-secure PPIs and SGIs.
+	 * Program the GICD_IGROUPR0 with this bit mask.
+	 */
+	gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
+
+	/* Enable the Group 0 SGIs and PPIs */
+	gicd_write_isenabler(gicd_base, 0, sec_ppi_sgi_mask);
+}
diff --git a/drivers/arm/gic/v2/gicv2_main.c b/drivers/arm/gic/v2/gicv2_main.c
new file mode 100644
index 0000000..25296a6
--- /dev/null
+++ b/drivers/arm/gic/v2/gicv2_main.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <interrupt_props.h>
+#include <spinlock.h>
+#include "../common/gic_common_private.h"
+#include "gicv2_private.h"
+
+static const gicv2_driver_data_t *driver_data;
+
+/*
+ * Spinlock to guard registers needing read-modify-write. APIs protected by this
+ * spinlock are used either at boot time (when only a single CPU is active), or
+ * when the system is fully coherent.
+ */
+spinlock_t gic_lock;
+
+/*******************************************************************************
+ * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
+ * and set the priority mask register to allow all interrupts to trickle in.
+ ******************************************************************************/
+void gicv2_cpuif_enable(void)
+{
+	unsigned int val;
+
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	/*
+	 * Enable the Group 0 interrupts, FIQEn and disable Group 0/1
+	 * bypass.
+	 */
+	val = CTLR_ENABLE_G0_BIT | FIQ_EN_BIT | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+
+	/* Program the idle priority in the PMR */
+	gicc_write_pmr(driver_data->gicc_base, GIC_PRI_MASK);
+	gicc_write_ctlr(driver_data->gicc_base, val);
+}
+
+/*******************************************************************************
+ * Place the cpu interface in a state where it can never make a cpu exit wfi
+ * as a result of an asserted interrupt. This is critical for powering down a cpu.
+ ******************************************************************************/
+void gicv2_cpuif_disable(void)
+{
+	unsigned int val;
+
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	/* Disable secure, non-secure interrupts and disable their bypass */
+	val = gicc_read_ctlr(driver_data->gicc_base);
+	val &= ~(CTLR_ENABLE_G0_BIT | CTLR_ENABLE_G1_BIT);
+	val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
+	gicc_write_ctlr(driver_data->gicc_base, val);
+}
+
+/*******************************************************************************
+ * Per cpu gic distributor setup which will be done by all cpus after a cold
+ * boot/hotplug. This marks out the secure SGIs and PPIs & enables them.
+ ******************************************************************************/
+void gicv2_pcpu_distif_init(void)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+#if !ERROR_DEPRECATED
+	if (driver_data->interrupt_props != NULL) {
+#endif
+		gicv2_secure_ppi_sgi_setup_props(driver_data->gicd_base,
+				driver_data->interrupt_props,
+				driver_data->interrupt_props_num);
+#if !ERROR_DEPRECATED
+	} else {
+		assert(driver_data->g0_interrupt_array);
+		gicv2_secure_ppi_sgi_setup(driver_data->gicd_base,
+				driver_data->g0_interrupt_num,
+				driver_data->g0_interrupt_array);
+	}
+#endif
+}
+
+/*******************************************************************************
+ * Global gic distributor init which will be done by the primary cpu after a
+ * cold boot. It marks out the secure SPIs and enables them. It
+ * then enables the secure GIC distributor interface.
+ ******************************************************************************/
+void gicv2_distif_init(void)
+{
+	unsigned int ctlr;
+
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+	/* Disable the distributor before going further */
+	ctlr = gicd_read_ctlr(driver_data->gicd_base);
+	gicd_write_ctlr(driver_data->gicd_base,
+			ctlr & ~(CTLR_ENABLE_G0_BIT | CTLR_ENABLE_G1_BIT));
+
+	/* Set the default attribute of all SPIs */
+	gicv2_spis_configure_defaults(driver_data->gicd_base);
+
+#if !ERROR_DEPRECATED
+	if (driver_data->interrupt_props != NULL) {
+#endif
+		gicv2_secure_spis_configure_props(driver_data->gicd_base,
+				driver_data->interrupt_props,
+				driver_data->interrupt_props_num);
+#if !ERROR_DEPRECATED
+	} else {
+		assert(driver_data->g0_interrupt_array);
+
+		/* Configure the G0 SPIs */
+		gicv2_secure_spis_configure(driver_data->gicd_base,
+				driver_data->g0_interrupt_num,
+				driver_data->g0_interrupt_array);
+	}
+#endif
+
+	/* Re-enable the secure SPIs now that they have been configured */
+	gicd_write_ctlr(driver_data->gicd_base, ctlr | CTLR_ENABLE_G0_BIT);
+}
+
+/*******************************************************************************
+ * Initialize the ARM GICv2 driver with the provided platform inputs
+ ******************************************************************************/
+void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data)
+{
+	unsigned int gic_version;
+	assert(plat_driver_data);
+	assert(plat_driver_data->gicd_base);
+	assert(plat_driver_data->gicc_base);
+
+#if !ERROR_DEPRECATED
+	if (plat_driver_data->interrupt_props == NULL) {
+		/* Interrupt properties array size must be 0 */
+		assert(plat_driver_data->interrupt_props_num == 0);
+
+		/* The platform should provide a list of secure interrupts */
+		assert(plat_driver_data->g0_interrupt_array);
+
+		/*
+		 * If there are no interrupts of a particular type, then the
+		 * number of interrupts of that type should be 0 and vice-versa.
+		 */
+		assert(plat_driver_data->g0_interrupt_array ?
+				plat_driver_data->g0_interrupt_num :
+				plat_driver_data->g0_interrupt_num == 0);
+	}
+#else
+	assert(plat_driver_data->interrupt_props != NULL);
+	assert(plat_driver_data->interrupt_props_num > 0);
+#endif
+
+	/* Ensure that this is a GICv2 system */
+	gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
+	gic_version = (gic_version >> PIDR2_ARCH_REV_SHIFT)
+					& PIDR2_ARCH_REV_MASK;
+	assert(gic_version == ARCH_REV_GICV2);
+
+	driver_data = plat_driver_data;
+
+	/*
+	 * The GIC driver data is initialized by the primary CPU with caches
+	 * enabled. When the secondary CPU boots up, it initializes the
+	 * GICC/GICR interface with the caches disabled. Hence flush the
+	 * driver_data to ensure coherency. This is not required if the
+	 * platform has HW_ASSISTED_COHERENCY enabled.
+	 */
+#if !HW_ASSISTED_COHERENCY
+	flush_dcache_range((uintptr_t) &driver_data, sizeof(driver_data));
+	flush_dcache_range((uintptr_t) driver_data, sizeof(*driver_data));
+#endif
+	INFO("ARM GICv2 driver initialized\n");
+}
+
+/******************************************************************************
+ * This function returns whether FIQ is enabled in the GIC CPU interface.
+ *****************************************************************************/
+unsigned int gicv2_is_fiq_enabled(void)
+{
+	unsigned int gicc_ctlr;
+
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	gicc_ctlr = gicc_read_ctlr(driver_data->gicc_base);
+	return (gicc_ctlr >> FIQ_EN_SHIFT) & 0x1;
+}
+
+/*******************************************************************************
+ * This function returns the type of the highest priority pending interrupt at
+ * the GIC cpu interface. The return value can be one of the following:
+ *   PENDING_G1_INTID   : The interrupt type is non secure Group 1.
+ *   0 - 1019           : The interrupt type is secure Group 0.
+ *   GIC_SPURIOUS_INTERRUPT : there is no pending interrupt with
+ *                            sufficient priority to be signaled
+ ******************************************************************************/
+unsigned int gicv2_get_pending_interrupt_type(void)
+{
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	return gicc_read_hppir(driver_data->gicc_base) & INT_ID_MASK;
+}
+
+/*******************************************************************************
+ * This function returns the id of the highest priority pending interrupt at
+ * the GIC cpu interface. GIC_SPURIOUS_INTERRUPT is returned when there is no
+ * interrupt pending.
+ ******************************************************************************/
+unsigned int gicv2_get_pending_interrupt_id(void)
+{
+	unsigned int id;
+
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	id = gicc_read_hppir(driver_data->gicc_base) & INT_ID_MASK;
+
+	/*
+	 * Find out which non-secure interrupt it is under the assumption that
+	 * the GICC_CTLR.AckCtl bit is 0.
+	 */
+	if (id == PENDING_G1_INTID)
+		id = gicc_read_ahppir(driver_data->gicc_base) & INT_ID_MASK;
+
+	return id;
+}
+
+/*******************************************************************************
+ * This function reads the GIC cpu interface Interrupt Acknowledge register
+ * to start handling the pending secure Group 0 interrupt. It returns the
+ * contents of the IAR.
+ ******************************************************************************/
+unsigned int gicv2_acknowledge_interrupt(void)
+{
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	return gicc_read_IAR(driver_data->gicc_base);
+}
+
+/*******************************************************************************
+ * This function writes the GIC cpu interface End Of Interrupt register with
+ * the passed value to finish handling the active secure group 0 interrupt.
+ ******************************************************************************/
+void gicv2_end_of_interrupt(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	gicc_write_EOIR(driver_data->gicc_base, id);
+}
+
+/*******************************************************************************
+ * This function returns the type of the interrupt id depending upon the group
+ * this interrupt has been configured under by the interrupt controller i.e.
+ * group0 secure or group1 non secure. It returns zero for a Group 0 secure
+ * interrupt and one for a Group 1 non secure interrupt.
+ ******************************************************************************/
+unsigned int gicv2_get_interrupt_group(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+	return gicd_get_igroupr(driver_data->gicd_base, id);
+}
+
+/*******************************************************************************
+ * This function returns the priority of the interrupt the processor is
+ * currently servicing.
+ ******************************************************************************/
+unsigned int gicv2_get_running_priority(void)
+{
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	return gicc_read_rpr(driver_data->gicc_base);
+}
+
+/*******************************************************************************
+ * This function sets the GICv2 target mask pattern for the current PE. The PE
+ * target mask is used to translate linear PE index (returned by platform core
+ * position) to a bit mask used when targeting interrupts to a PE, viz. when
+ * raising SGIs and routing SPIs.
+ ******************************************************************************/
+void gicv2_set_pe_target_mask(unsigned int proc_num)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(driver_data->target_masks);
+	assert(proc_num < GICV2_MAX_TARGET_PE);
+	assert(proc_num < driver_data->target_masks_num);
+
+	/* Return if the target mask is already populated */
+	if (driver_data->target_masks[proc_num])
+		return;
+
+	/* Read target register corresponding to this CPU */
+	driver_data->target_masks[proc_num] =
+		gicv2_get_cpuif_id(driver_data->gicd_base);
+}
+
+/*******************************************************************************
+ * This function returns the active status of the interrupt (either because the
+ * state is active, or active and pending).
+ ******************************************************************************/
+unsigned int gicv2_get_interrupt_active(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(id <= MAX_SPI_ID);
+
+	return gicd_get_isactiver(driver_data->gicd_base, id);
+}
+
+/*******************************************************************************
+ * This function enables the interrupt identified by id.
+ ******************************************************************************/
+void gicv2_enable_interrupt(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(id <= MAX_SPI_ID);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before enabling interrupt.
+	 */
+	dsbishst();
+	gicd_set_isenabler(driver_data->gicd_base, id);
+}
+
+/*******************************************************************************
+ * This function disables the interrupt identified by id.
+ ******************************************************************************/
+void gicv2_disable_interrupt(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(id <= MAX_SPI_ID);
+
+	/*
+	 * Disable interrupt, and ensure that any shared variable updates
+	 * depending on out of band interrupt trigger are observed afterwards.
+	 */
+	gicd_set_icenabler(driver_data->gicd_base, id);
+	dsbishst();
+}
+
+/*******************************************************************************
+ * This function sets the interrupt priority as supplied for the given interrupt
+ * id.
+ ******************************************************************************/
+void gicv2_set_interrupt_priority(unsigned int id, unsigned int priority)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(id <= MAX_SPI_ID);
+
+	gicd_set_ipriorityr(driver_data->gicd_base, id, priority);
+}
+
+/*******************************************************************************
+ * This function assigns the group for the interrupt identified by id. The
+ * group can be any of GICV2_INTR_GROUP*.
+ ******************************************************************************/
+void gicv2_set_interrupt_type(unsigned int id, unsigned int type)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+	assert(id <= MAX_SPI_ID);
+
+	/* Serialize read-modify-write to Distributor registers */
+	spin_lock(&gic_lock);
+	switch (type) {
+	case GICV2_INTR_GROUP1:
+		gicd_set_igroupr(driver_data->gicd_base, id);
+		break;
+	case GICV2_INTR_GROUP0:
+		gicd_clr_igroupr(driver_data->gicd_base, id);
+		break;
+	default:
+		assert(0);
+	}
+	spin_unlock(&gic_lock);
+}
+
+/*******************************************************************************
+ * This function raises the specified SGI to requested targets.
+ *
+ * The proc_num parameter must be the linear index of the target PE in the
+ * system.
+ ******************************************************************************/
+void gicv2_raise_sgi(int sgi_num, int proc_num)
+{
+	unsigned int sgir_val, target;
+
+	assert(driver_data);
+	assert(proc_num < GICV2_MAX_TARGET_PE);
+	assert(driver_data->gicd_base);
+
+	/*
+	 * Target masks array must have been supplied, and the core position
+	 * should be valid.
+	 */
+	assert(driver_data->target_masks);
+	assert(proc_num < driver_data->target_masks_num);
+
+	/* Don't raise SGI if the mask hasn't been populated */
+	target = driver_data->target_masks[proc_num];
+	assert(target != 0);
+
+	sgir_val = GICV2_SGIR_VALUE(SGIR_TGT_SPECIFIC, target, sgi_num);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before raising SGI.
+	 */
+	dsbishst();
+	gicd_write_sgir(driver_data->gicd_base, sgir_val);
+}
+
+/*******************************************************************************
+ * This function sets the interrupt routing for the given SPI interrupt id.
+ * The routing is specified via the proc_num parameter, which is the linear
+ * index of the PE to target the SPI to. When proc_num < 0, the SPI may target
+ * all PEs.
+ ******************************************************************************/
+void gicv2_set_spi_routing(unsigned int id, int proc_num)
+{
+	int target;
+
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+	assert(id >= MIN_SPI_ID && id <= MAX_SPI_ID);
+
+	/*
+	 * Target masks array must have been supplied, and the core position
+	 * should be valid.
+	 */
+	assert(driver_data->target_masks);
+	assert(proc_num < GICV2_MAX_TARGET_PE);
+	assert(proc_num < driver_data->target_masks_num);
+
+	if (proc_num < 0) {
+		/* Target all PEs */
+		target = GIC_TARGET_CPU_MASK;
+	} else {
+		/* Don't route interrupt if the mask hasn't been populated */
+		target = driver_data->target_masks[proc_num];
+		assert(target != 0);
+	}
+
+	gicd_set_itargetsr(driver_data->gicd_base, id, target);
+}
+
+/*******************************************************************************
+ * This function clears the pending status of an interrupt identified by id.
+ ******************************************************************************/
+void gicv2_clear_interrupt_pending(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+	/* SGIs can't be cleared pending */
+	assert(id >= MIN_PPI_ID);
+
+	/*
+	 * Clear pending interrupt, and ensure that any shared variable updates
+	 * depending on out of band interrupt trigger are observed afterwards.
+	 */
+	gicd_set_icpendr(driver_data->gicd_base, id);
+	dsbishst();
+}
+
+/*******************************************************************************
+ * This function sets the pending status of an interrupt identified by id.
+ ******************************************************************************/
+void gicv2_set_interrupt_pending(unsigned int id)
+{
+	assert(driver_data);
+	assert(driver_data->gicd_base);
+
+	/* SGIs can't be set pending via the Distributor */
+	assert(id >= MIN_PPI_ID);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before setting interrupt pending.
+	 */
+	dsbishst();
+	gicd_set_ispendr(driver_data->gicd_base, id);
+}
+
+/*******************************************************************************
+ * This function sets the PMR register with the supplied value. Returns the
+ * original PMR.
+ ******************************************************************************/
+unsigned int gicv2_set_pmr(unsigned int mask)
+{
+	unsigned int old_mask;
+
+	assert(driver_data);
+	assert(driver_data->gicc_base);
+
+	old_mask = gicc_read_pmr(driver_data->gicc_base);
+
+	/*
+	 * Order memory updates w.r.t. PMR write, and ensure they're visible
+	 * before potential out of band interrupt trigger because of PMR update.
+	 */
+	dmbishst();
+	gicc_write_pmr(driver_data->gicc_base, mask);
+	dsbishst();
+
+	return old_mask;
+}
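
For illustration, the PMR helper above is normally paired with a later restore of the value it returns. A minimal sketch of that pattern follows; plat_run_with_pmr() and its callback are hypothetical and not part of this patch.

    /* Hypothetical helper: mask all interrupts whose priority value is
     * numerically greater than or equal to 'pri', run a critical section,
     * then restore the previous mask. Uses only gicv2_set_pmr() above. */
    static void plat_run_with_pmr(unsigned int pri, void (*critical_fn)(void))
    {
        unsigned int old_mask;

        old_mask = gicv2_set_pmr(pri);  /* returns the original GICC_PMR */
        critical_fn();                  /* only higher-priority interrupts are signalled */
        gicv2_set_pmr(old_mask);        /* restore the saved mask */
    }
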
diff --git a/drivers/arm/gic/v2/gicv2_private.h b/drivers/arm/gic/v2/gicv2_private.h
new file mode 100644
index 0000000..25600de
--- /dev/null
+++ b/drivers/arm/gic/v2/gicv2_private.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GICV2_PRIVATE_H__
+#define __GICV2_PRIVATE_H__
+
+#include <gicv2.h>
+#include <mmio.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Private function prototypes
+ ******************************************************************************/
+void gicv2_spis_configure_defaults(uintptr_t gicd_base);
+#if !ERROR_DEPRECATED
+void gicv2_secure_spis_configure(uintptr_t gicd_base,
+				     unsigned int num_ints,
+				     const unsigned int *sec_intr_list);
+void gicv2_secure_ppi_sgi_setup(uintptr_t gicd_base,
+					unsigned int num_ints,
+					const unsigned int *sec_intr_list);
+#endif
+void gicv2_secure_spis_configure_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num);
+void gicv2_secure_ppi_sgi_setup_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num);
+unsigned int gicv2_get_cpuif_id(uintptr_t base);
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading entire registers
+ ******************************************************************************/
+static inline unsigned int gicd_read_pidr2(uintptr_t base)
+{
+	return mmio_read_32(base + GICD_PIDR2_GICV2);
+}
+
+/*******************************************************************************
+ * GIC Distributor interface accessors for reading and writing registers
+ ******************************************************************************/
+static inline unsigned int gicd_get_itargetsr(uintptr_t base, unsigned int id)
+{
+	return mmio_read_8(base + GICD_ITARGETSR + id);
+}
+
+static inline void gicd_set_itargetsr(uintptr_t base, unsigned int id,
+		unsigned int target)
+{
+	mmio_write_8(base + GICD_ITARGETSR + id, target & GIC_TARGET_CPU_MASK);
+}
+
+static inline void gicd_write_sgir(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICD_SGIR, val);
+}
+
+/*******************************************************************************
+ * GIC CPU interface accessors for reading entire registers
+ ******************************************************************************/
+
+static inline unsigned int gicc_read_ctlr(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_CTLR);
+}
+
+static inline unsigned int gicc_read_pmr(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_PMR);
+}
+
+static inline unsigned int gicc_read_BPR(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_BPR);
+}
+
+static inline unsigned int gicc_read_IAR(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_IAR);
+}
+
+static inline unsigned int gicc_read_EOIR(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_EOIR);
+}
+
+static inline unsigned int gicc_read_hppir(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_HPPIR);
+}
+
+static inline unsigned int gicc_read_ahppir(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_AHPPIR);
+}
+
+static inline unsigned int gicc_read_dir(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_DIR);
+}
+
+static inline unsigned int gicc_read_iidr(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_IIDR);
+}
+
+static inline unsigned int gicc_read_rpr(uintptr_t base)
+{
+	return mmio_read_32(base + GICC_RPR);
+}
+
+/*******************************************************************************
+ * GIC CPU interface accessors for writing entire registers
+ ******************************************************************************/
+
+static inline void gicc_write_ctlr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_CTLR, val);
+}
+
+static inline void gicc_write_pmr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_PMR, val);
+}
+
+static inline void gicc_write_BPR(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_BPR, val);
+}
+
+
+static inline void gicc_write_IAR(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_IAR, val);
+}
+
+static inline void gicc_write_EOIR(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_EOIR, val);
+}
+
+static inline void gicc_write_hppir(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_HPPIR, val);
+}
+
+static inline void gicc_write_dir(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICC_DIR, val);
+}
+
+#endif /* __GICV2_PRIVATE_H__ */
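
To show how the raw CPU interface accessors in this header combine, the sketch below walks one acknowledge/handle/EOI cycle; it is illustrative only, and the driver exposes the same sequence through gicv2_acknowledge_interrupt() and gicv2_end_of_interrupt() in gicv2_main.c.

    /* Illustrative Group 0 interrupt handling cycle built on the accessors
     * above; 'handler' is supplied by the (hypothetical) caller. */
    static void plat_handle_one_interrupt(uintptr_t gicc_base,
                                          void (*handler)(unsigned int))
    {
        unsigned int raw, intid;

        raw = gicc_read_IAR(gicc_base);     /* acknowledge the interrupt */
        intid = raw & INT_ID_MASK;
        if (intid == GIC_SPURIOUS_INTERRUPT)
            return;                         /* nothing pending at sufficient priority */

        handler(intid);
        gicc_write_EOIR(gicc_base, raw);    /* EOI with the value read from the IAR */
    }
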
diff --git a/drivers/arm/gic/v3/arm_gicv3_common.c b/drivers/arm/gic/v3/arm_gicv3_common.c
new file mode 100644
index 0000000..8d552ca
--- /dev/null
+++ b/drivers/arm/gic/v3/arm_gicv3_common.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Driver for implementation defined features that are identical in ARM GICv3
+ * implementations (GIC-500 and GIC-600 for now). This driver only overrides
+ * APIs that are different from the generic ones in the GICv3 driver.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <gicv3.h>
+
+#include "gicv3_private.h"
+#include "arm_gicv3_common.h"
+
+/*
+ * Flush the internal GIC cache of the LPIs pending tables to memory before
+ * saving the state of the Redistributor. This is required before powering off
+ * the GIC when the pending status must be preserved.
+ * `rdist_proc_num` is the processor number corresponding to the Redistributor of the
+ * current CPU.
+ */
+void arm_gicv3_distif_pre_save(unsigned int rdist_proc_num)
+{
+	uintptr_t gicr_base = 0;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	/*
+	 * The GICR_WAKER.Sleep bit should be set only when both
+	 * GICR_WAKER.ChildrenAsleep and GICR_WAKER.ProcessorSleep are set on
+	 * all the Redistributors.
+	 */
+	for (unsigned int i = 0; i < gicv3_driver_data->rdistif_num; i++) {
+		gicr_base = gicv3_driver_data->rdistif_base_addrs[i];
+		assert(gicr_base);
+		assert(gicr_read_waker(gicr_base) & WAKER_CA_BIT);
+		assert(gicr_read_waker(gicr_base) & WAKER_PS_BIT);
+	}
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[rdist_proc_num];
+	/*
+	 * According to the TRM, there is only one instance of the
+	 * GICR_WAKER.Sleep and GICR_WAKER.Quiescent bits that can be accessed
+	 * through any of the Redistributors.
+	 */
+
+	/*
+	 * Set GICR_WAKER.Sleep
+	 * After this point, the system must be configured so that the
+	 * wake_request signals for the right cores are asserted when a wakeup
+	 * interrupt is detected. The GIC will not be able to do that anymore
+	 * when the GICR_WAKER.Sleep bit is set to 1.
+	 */
+	gicr_write_waker(gicr_base, gicr_read_waker(gicr_base) | WAKER_SL_BIT);
+
+	/* Wait until the GICR_WAKER.Quiescent bit is set */
+	while (!(gicr_read_waker(gicr_base) & WAKER_QSC_BIT))
+		;
+}
+
+/*
+ * Allow the LPIs pending state to be read back from the tables in memory after
+ * having restored the state of the GIC Redistributor.
+ */
+void arm_gicv3_distif_post_restore(unsigned int rdist_proc_num)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	/*
+	 * According to the TRM, there is only one instance of the
+	 * GICR_WAKER.Sleep and GICR_WAKER.Quiescent bits that can be accessed
+	 * through any of the Redistributors.
+	 */
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[rdist_proc_num];
+	assert(gicr_base);
+
+	/*
+	 * Writes to the GICR_WAKER.Sleep bit are ignored if the
+	 * GICR_WAKER.Quiescent bit is not set. On the power-on path we are
+	 * coming out of sleep, so Quiescent should already be set; assert
+	 * just in case.
+	 */
+	assert(gicr_read_waker(gicr_base) & WAKER_QSC_BIT);
+
+	/* Clear GICR_WAKER.Sleep */
+	gicr_write_waker(gicr_base, gicr_read_waker(gicr_base) & ~WAKER_SL_BIT);
+
+	/*
+	 * We don't know whether the effect of clearing the GICR_WAKER.Sleep
+	 * bit is instantaneous, so wait until the interface is no longer
+	 * Quiescent.
+	 */
+	while (gicr_read_waker(gicr_base) & WAKER_QSC_BIT)
+		;
+}
+
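For context, the pre-save hook above is meant to run on the last core going down, after every CPU interface has been disabled and each Redistributor has been marked asleep. Below is a minimal sketch of where it sits in a platform's system-suspend path; plat_gicv3_save() is a hypothetical platform function.

    /* Hypothetical platform hook run during system suspend, once all other
     * cores are down and their GICR_WAKER ProcessorSleep/ChildrenAsleep bits
     * are set, and before the GIC power domain is switched off. */
    static void plat_gicv3_save(unsigned int proc_num)
    {
        /* Flush the LPI pending tables and put the GIC to sleep */
        arm_gicv3_distif_pre_save(proc_num);

        /* Distributor/Redistributor register context would then be saved,
         * e.g. with gicv3_rdistif_save(), before removing power. */
    }
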
diff --git a/drivers/arm/gic/v3/gic500.c b/drivers/arm/gic/v3/gic500.c
new file mode 100644
index 0000000..f03e33f
--- /dev/null
+++ b/drivers/arm/gic/v3/gic500.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Driver for GIC500-specific features. This driver only overrides APIs that
+ * are different from the generic ones in the GICv3 driver.
+ */
+#include "gicv3_private.h"
+
+void gicv3_distif_pre_save(unsigned int proc_num)
+{
+	arm_gicv3_distif_pre_save(proc_num);
+}
+
+void gicv3_distif_post_restore(unsigned int proc_num)
+{
+	arm_gicv3_distif_post_restore(proc_num);
+}
+
diff --git a/drivers/arm/gic/v3/gic600.c b/drivers/arm/gic/v3/gic600.c
new file mode 100644
index 0000000..eb4fc54
--- /dev/null
+++ b/drivers/arm/gic/v3/gic600.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Driver for GIC600-specific features. This driver only overrides APIs that
+ * are different from the generic ones in the GICv3 driver.
+ *
+ * GIC600 supports independent power-gating of its Redistributor interfaces.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <gicv3.h>
+
+#include "gicv3_private.h"
+
+/* GIC600-specific register offsets */
+#define GICR_PWRR	0x24
+
+/* GICR_PWRR fields */
+#define PWRR_RDPD_SHIFT		0
+#define PWRR_RDGPD_SHIFT	2
+#define PWRR_RDGPO_SHIFT	3
+
+#define PWRR_RDGPD	(1 << PWRR_RDGPD_SHIFT)
+#define PWRR_RDGPO	(1 << PWRR_RDGPO_SHIFT)
+
+/* Values to write to GICR_PWRR to power the Redistributor on or off */
+#define PWRR_ON		(0 << PWRR_RDPD_SHIFT)
+#define PWRR_OFF	(1 << PWRR_RDPD_SHIFT)
+
+/* GIC600-specific accessor functions */
+static void gicr_write_pwrr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_PWRR, val);
+}
+
+static uint32_t gicr_read_pwrr(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_PWRR);
+}
+
+static int gicr_group_powering_down(uint32_t pwrr)
+{
+	/*
+	 * Return whether a Redistributor group power-down operation is in
+	 * transit, i.e. it has been requested but has not completed yet.
+	 */
+	return ((pwrr & PWRR_RDGPD) && !(pwrr & PWRR_RDGPO));
+}
+
+static void gic600_pwr_on(uintptr_t base)
+{
+	/* Power on redistributor */
+	gicr_write_pwrr(base, PWRR_ON);
+
+	/* Wait until the power on state is reflected */
+	while (gicr_read_pwrr(base) & PWRR_RDGPO)
+		;
+}
+
+static void gic600_pwr_off(uintptr_t base)
+{
+	/* Power off redistributor */
+	gicr_write_pwrr(base, PWRR_OFF);
+
+	/*
+	 * If this is the last man, turning this redistributor frame off will
+	 * result in the group itself being powered off. In that case, wait
+	 * while the transition is in progress; the wait also ends if the
+	 * transition has been aborted for any reason.
+	 */
+	if (gicr_read_pwrr(base) & PWRR_RDGPD) {
+		while (gicr_group_powering_down(gicr_read_pwrr(base)))
+			;
+	}
+}
+
+void gicv3_distif_pre_save(unsigned int proc_num)
+{
+	arm_gicv3_distif_pre_save(proc_num);
+}
+
+void gicv3_distif_post_restore(unsigned int proc_num)
+{
+	arm_gicv3_distif_post_restore(proc_num);
+}
+
+/*
+ * Power off GIC600 redistributor
+ */
+void gicv3_rdistif_off(unsigned int proc_num)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+	assert(gicr_base);
+
+	/* Attempt to power redistributor off */
+	gic600_pwr_off(gicr_base);
+}
+
+/*
+ * Power on GIC600 redistributor
+ */
+void gicv3_rdistif_on(unsigned int proc_num)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+	assert(gicr_base);
+
+	/* Power redistributor on */
+	gic600_pwr_on(gicr_base);
+}
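
For context, the Redistributor power helpers above typically slot into the per-CPU power sequence as sketched below; the plat_* hook names are hypothetical, while the gicv3_* calls are the driver functions added by this patch.

    /* Hypothetical per-CPU power hooks on a GIC600-based platform. */
    static void plat_cpu_off(unsigned int proc_num)
    {
        gicv3_cpuif_disable(proc_num);  /* quiesce the CPU interface first */
        gicv3_rdistif_off(proc_num);    /* then power-gate this Redistributor frame */
    }

    static void plat_cpu_on_finish(unsigned int proc_num)
    {
        /* gicv3_rdistif_init() powers the frame back on via gicv3_rdistif_on()
         * before reprogramming the SGI/PPI configuration. */
        gicv3_rdistif_init(proc_num);
        gicv3_cpuif_enable(proc_num);
    }
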
diff --git a/drivers/arm/gic/v3/gicv3_helpers.c b/drivers/arm/gic/v3/gicv3_helpers.c
new file mode 100644
index 0000000..2522695
--- /dev/null
+++ b/drivers/arm/gic/v3/gicv3_helpers.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include "../common/gic_common_private.h"
+#include "gicv3_private.h"
+
+/*
+ * Accessor to read the GIC Distributor IGRPMODR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+unsigned int gicd_read_igrpmodr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> IGRPMODR_SHIFT;
+	return mmio_read_32(base + GICD_IGRPMODR + (n << 2));
+}
+
+/*
+ * Accessor to write the GIC Distributor IGRPMODR corresponding to the
+ * interrupt `id`, 32 interrupt IDs at a time.
+ */
+void gicd_write_igrpmodr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> IGRPMODR_SHIFT;
+	mmio_write_32(base + GICD_IGRPMODR + (n << 2), val);
+}
+
+/*
+ * Accessor to get the bit corresponding to interrupt ID
+ * in GIC Distributor IGRPMODR.
+ */
+unsigned int gicd_get_igrpmodr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igrpmodr(base, id);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID
+ * in GIC Distributor IGRPMODR.
+ */
+void gicd_set_igrpmodr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igrpmodr(base, id);
+
+	gicd_write_igrpmodr(base, id, reg_val | (1 << bit_num));
+}
+
+/*
+ * Accessor to clear the bit corresponding to interrupt ID
+ * in GIC Distributor IGRPMODR.
+ */
+void gicd_clr_igrpmodr(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicd_read_igrpmodr(base, id);
+
+	gicd_write_igrpmodr(base, id, reg_val & ~(1 << bit_num));
+}
+
+/*
+ * Accessor to read the GIC Re-distributor IPRIORITYR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+unsigned int gicr_read_ipriorityr(uintptr_t base, unsigned int id)
+{
+	unsigned n = id >> IPRIORITYR_SHIFT;
+	return mmio_read_32(base + GICR_IPRIORITYR + (n << 2));
+}
+
+/*
+ * Accessor to write the GIC Re-distributor IPRIORITYR corresponding to the
+ * interrupt `id`, 4 interrupt IDs at a time.
+ */
+void gicr_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
+{
+	unsigned n = id >> IPRIORITYR_SHIFT;
+	mmio_write_32(base + GICR_IPRIORITYR + (n << 2), val);
+}
+
+/*
+ * Accessor to get the bit corresponding to interrupt ID
+ * from GIC Re-distributor IGROUPR0.
+ */
+unsigned int gicr_get_igroupr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igroupr0(base);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID
+ * in GIC Re-distributor IGROUPR0.
+ */
+void gicr_set_igroupr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igroupr0(base);
+
+	gicr_write_igroupr0(base, reg_val | (1 << bit_num));
+}
+
+/*
+ * Accessor to clear the bit corresponding to interrupt ID
+ * in GIC Re-distributor IGROUPR0.
+ */
+void gicr_clr_igroupr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igroupr0(base);
+
+	gicr_write_igroupr0(base, reg_val & ~(1 << bit_num));
+}
+
+/*
+ * Accessor to get the bit corresponding to interrupt ID
+ * from GIC Re-distributor IGRPMODR0.
+ */
+unsigned int gicr_get_igrpmodr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igrpmodr0(base);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID
+ * in GIC Re-distributor IGRPMODR0.
+ */
+void gicr_set_igrpmodr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igrpmodr0(base);
+
+	gicr_write_igrpmodr0(base, reg_val | (1 << bit_num));
+}
+
+/*
+ * Accessor to clear the bit corresponding to interrupt ID
+ * in GIC Re-distributor IGRPMODR0.
+ */
+void gicr_clr_igrpmodr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << IGRPMODR_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_igrpmodr0(base);
+
+	gicr_write_igrpmodr0(base, reg_val & ~(1 << bit_num));
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID
+ * in GIC Re-distributor ISENABLER0.
+ */
+void gicr_set_isenabler0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
+
+	gicr_write_isenabler0(base, (1 << bit_num));
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID in GIC Re-distributor
+ * ICENABLER0.
+ */
+void gicr_set_icenabler0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
+
+	gicr_write_icenabler0(base, (1 << bit_num));
+}
+
+/*
+ * Accessor to get the bit corresponding to interrupt ID in GIC Re-distributor
+ * ISACTIVER0.
+ */
+unsigned int gicr_get_isactiver0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
+	unsigned int reg_val = gicr_read_isactiver0(base);
+
+	return (reg_val >> bit_num) & 0x1;
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID in GIC Re-distributor
+ * ICPENDR0, which clears the interrupt's pending state.
+ */
+void gicr_set_icpendr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
+
+	gicr_write_icpendr0(base, (1 << bit_num));
+}
+
+/*
+ * Accessor to set the bit corresponding to interrupt ID in GIC Re-distributor
+ * ISPENDR0.
+ */
+void gicr_set_ispendr0(uintptr_t base, unsigned int id)
+{
+	unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
+
+	gicr_write_ispendr0(base, (1 << bit_num));
+}
+
+/*
+ * Accessor to set the byte corresponding to interrupt ID
+ * in GIC Re-distributor IPRIORITYR.
+ */
+void gicr_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri)
+{
+	mmio_write_8(base + GICR_IPRIORITYR + id, pri & GIC_PRI_MASK);
+}
+
+/*
+ * Accessor to set the bit fields corresponding to interrupt ID
+ * in GIC Re-distributor ICFGR0.
+ */
+void gicr_set_icfgr0(uintptr_t base, unsigned int id, unsigned int cfg)
+{
+	/* Each interrupt has a 2-bit configuration field in ICFGR */
+	unsigned bit_num = (id & ((1 << ICFGR_SHIFT) - 1)) << 1;
+	uint32_t reg_val = gicr_read_icfgr0(base);
+
+	/* Clear the field, and insert required configuration */
+	reg_val &= ~(GIC_CFG_MASK << bit_num);
+	reg_val |= ((cfg & GIC_CFG_MASK) << bit_num);
+
+	gicr_write_icfgr0(base, reg_val);
+}
+
+/*
+ * Accessor to set the bit fields corresponding to interrupt ID
+ * in GIC Re-distributor ICFGR1.
+ */
+void gicr_set_icfgr1(uintptr_t base, unsigned int id, unsigned int cfg)
+{
+	/* Each interrupt has a 2-bit configuration field in ICFGR */
+	unsigned bit_num = (id & ((1 << ICFGR_SHIFT) - 1)) << 1;
+	uint32_t reg_val = gicr_read_icfgr1(base);
+
+	/* Clear the field, and insert required configuration */
+	reg_val &= ~(GIC_CFG_MASK << bit_num);
+	reg_val |= ((cfg & GIC_CFG_MASK) << bit_num);
+
+	gicr_write_icfgr1(base, reg_val);
+}
+
+/******************************************************************************
+ * This function marks the core as awake in the re-distributor and
+ * ensures that the interface is active.
+ *****************************************************************************/
+void gicv3_rdistif_mark_core_awake(uintptr_t gicr_base)
+{
+	/*
+	 * The WAKER_PS_BIT should be changed to 0
+	 * only when WAKER_CA_BIT is 1.
+	 */
+	assert(gicr_read_waker(gicr_base) & WAKER_CA_BIT);
+
+	/* Mark the connected core as awake */
+	gicr_write_waker(gicr_base, gicr_read_waker(gicr_base) & ~WAKER_PS_BIT);
+
+	/* Wait till the WAKER_CA_BIT changes to 0 */
+	while (gicr_read_waker(gicr_base) & WAKER_CA_BIT)
+		;
+}
+
+
+/******************************************************************************
+ * This function marks the core as asleep in the re-distributor and ensures
+ * that the interface is quiescent.
+ *****************************************************************************/
+void gicv3_rdistif_mark_core_asleep(uintptr_t gicr_base)
+{
+	/* Mark the connected core as asleep */
+	gicr_write_waker(gicr_base, gicr_read_waker(gicr_base) | WAKER_PS_BIT);
+
+	/* Wait till the WAKER_CA_BIT changes to 1 */
+	while (!(gicr_read_waker(gicr_base) & WAKER_CA_BIT))
+		;
+}
+
+
+/*******************************************************************************
+ * This function probes the Redistributor frames when the driver is initialised
+ * and saves their base addresses. These base addresses are used later to
+ * initialise each Redistributor interface.
+ ******************************************************************************/
+void gicv3_rdistif_base_addrs_probe(uintptr_t *rdistif_base_addrs,
+					unsigned int rdistif_num,
+					uintptr_t gicr_base,
+					mpidr_hash_fn mpidr_to_core_pos)
+{
+	u_register_t mpidr;
+	unsigned int proc_num;
+	unsigned long long typer_val;
+	uintptr_t rdistif_base = gicr_base;
+
+	assert(rdistif_base_addrs);
+
+	/*
+	 * Iterate over the Redistributor frames. Store the base address of each
+	 * frame in the platform provided array. Use the "Processor Number"
+	 * field to index into the array if the platform has not provided a hash
+	 * function to convert an MPIDR (obtained from the "Affinity Value"
+	 * field) into a linear index.
+	 */
+	do {
+		typer_val = gicr_read_typer(rdistif_base);
+		if (mpidr_to_core_pos) {
+			mpidr = mpidr_from_gicr_typer(typer_val);
+			proc_num = mpidr_to_core_pos(mpidr);
+		} else {
+			proc_num = (typer_val >> TYPER_PROC_NUM_SHIFT) &
+				TYPER_PROC_NUM_MASK;
+		}
+		assert(proc_num < rdistif_num);
+		rdistif_base_addrs[proc_num] = rdistif_base;
+		rdistif_base += (1 << GICR_PCPUBASE_SHIFT);
+	} while (!(typer_val & TYPER_LAST_BIT));
+}
+
+/*******************************************************************************
+ * Helper function to configure the default attributes of SPIs.
+ ******************************************************************************/
+void gicv3_spis_configure_defaults(uintptr_t gicd_base)
+{
+	unsigned int index, num_ints;
+
+	num_ints = gicd_read_typer(gicd_base);
+	num_ints &= TYPER_IT_LINES_NO_MASK;
+	num_ints = (num_ints + 1) << 5;
+
+	/*
+	 * Treat all SPIs as G1NS by default. The number of interrupts is
+	 * calculated as 32 * (IT_LINES + 1). We do 32 at a time.
+	 */
+	for (index = MIN_SPI_ID; index < num_ints; index += 32)
+		gicd_write_igroupr(gicd_base, index, ~0U);
+
+	/* Setup the default SPI priorities doing four at a time */
+	for (index = MIN_SPI_ID; index < num_ints; index += 4)
+		gicd_write_ipriorityr(gicd_base,
+				      index,
+				      GICD_IPRIORITYR_DEF_VAL);
+
+	/*
+	 * Treat all SPIs as level triggered by default, write 16 at
+	 * a time
+	 */
+	for (index = MIN_SPI_ID; index < num_ints; index += 16)
+		gicd_write_icfgr(gicd_base, index, 0);
+}
+
+#if !ERROR_DEPRECATED
+/*******************************************************************************
+ * Helper function to configure secure G0 and G1S SPIs.
+ ******************************************************************************/
+void gicv3_secure_spis_configure(uintptr_t gicd_base,
+				     unsigned int num_ints,
+				     const unsigned int *sec_intr_list,
+				     unsigned int int_grp)
+{
+	unsigned int index, irq_num;
+	unsigned long long gic_affinity_val;
+
+	assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0));
+	/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
+	assert(num_ints ? (uintptr_t)sec_intr_list : 1);
+
+	for (index = 0; index < num_ints; index++) {
+		irq_num = sec_intr_list[index];
+		if (irq_num >= MIN_SPI_ID) {
+
+			/* Configure this interrupt as a secure interrupt */
+			gicd_clr_igroupr(gicd_base, irq_num);
+
+			/* Configure this interrupt as G0 or a G1S interrupt */
+			if (int_grp == INTR_GROUP1S)
+				gicd_set_igrpmodr(gicd_base, irq_num);
+			else
+				gicd_clr_igrpmodr(gicd_base, irq_num);
+
+			/* Set the priority of this interrupt */
+			gicd_set_ipriorityr(gicd_base,
+					      irq_num,
+					      GIC_HIGHEST_SEC_PRIORITY);
+
+			/* Target SPIs to the primary CPU */
+			gic_affinity_val =
+				gicd_irouter_val_from_mpidr(read_mpidr(), 0);
+			gicd_write_irouter(gicd_base,
+					   irq_num,
+					   gic_affinity_val);
+
+			/* Enable this interrupt */
+			gicd_set_isenabler(gicd_base, irq_num);
+		}
+	}
+
+}
+#endif
+
+/*******************************************************************************
+ * Helper function to configure properties of secure SPIs
+ ******************************************************************************/
+unsigned int gicv3_secure_spis_configure_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num)
+{
+	unsigned int i;
+	const interrupt_prop_t *current_prop;
+	unsigned long long gic_affinity_val;
+	unsigned int ctlr_enable = 0;
+
+	/* Make sure there's a valid property array */
+	assert(interrupt_props != NULL);
+	assert(interrupt_props_num > 0);
+
+	for (i = 0; i < interrupt_props_num; i++) {
+		current_prop = &interrupt_props[i];
+
+		if (current_prop->intr_num < MIN_SPI_ID)
+			continue;
+
+		/* Configure this interrupt as a secure interrupt */
+		gicd_clr_igroupr(gicd_base, current_prop->intr_num);
+
+		/* Configure this interrupt as G0 or a G1S interrupt */
+		assert((current_prop->intr_grp == INTR_GROUP0) ||
+				(current_prop->intr_grp == INTR_GROUP1S));
+		if (current_prop->intr_grp == INTR_GROUP1S) {
+			gicd_set_igrpmodr(gicd_base, current_prop->intr_num);
+			ctlr_enable |= CTLR_ENABLE_G1S_BIT;
+		} else {
+			gicd_clr_igrpmodr(gicd_base, current_prop->intr_num);
+			ctlr_enable |= CTLR_ENABLE_G0_BIT;
+		}
+
+		/* Set interrupt configuration */
+		gicd_set_icfgr(gicd_base, current_prop->intr_num,
+				current_prop->intr_cfg);
+
+		/* Set the priority of this interrupt */
+		gicd_set_ipriorityr(gicd_base, current_prop->intr_num,
+				current_prop->intr_pri);
+
+		/* Target SPIs to the primary CPU */
+		gic_affinity_val = gicd_irouter_val_from_mpidr(read_mpidr(), 0);
+		gicd_write_irouter(gicd_base, current_prop->intr_num,
+				gic_affinity_val);
+
+		/* Enable this interrupt */
+		gicd_set_isenabler(gicd_base, current_prop->intr_num);
+	}
+
+	return ctlr_enable;
+}
+
+/*******************************************************************************
+ * Helper function to configure the default attributes of SGIs and PPIs.
+ ******************************************************************************/
+void gicv3_ppi_sgi_configure_defaults(uintptr_t gicr_base)
+{
+	unsigned int index;
+
+	/*
+	 * Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
+	 * more scalable approach as it avoids clearing the enable bits in the
+	 * GICD_CTLR
+	 */
+	gicr_write_icenabler0(gicr_base, ~0);
+	gicr_wait_for_pending_write(gicr_base);
+
+	/* Treat all SGIs/PPIs as G1NS by default. */
+	gicr_write_igroupr0(gicr_base, ~0U);
+
+	/* Setup the default PPI/SGI priorities doing four at a time */
+	for (index = 0; index < MIN_SPI_ID; index += 4)
+		gicr_write_ipriorityr(gicr_base,
+				      index,
+				      GICD_IPRIORITYR_DEF_VAL);
+
+	/* Configure all PPIs as level triggered by default */
+	gicr_write_icfgr1(gicr_base, 0);
+}
+
+#if !ERROR_DEPRECATED
+/*******************************************************************************
+ * Helper function to configure secure G0 and G1S SGIs and PPIs.
+ ******************************************************************************/
+void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
+					unsigned int num_ints,
+					const unsigned int *sec_intr_list,
+					unsigned int int_grp)
+{
+	unsigned int index, irq_num;
+
+	assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0));
+	/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
+	assert(num_ints ? (uintptr_t)sec_intr_list : 1);
+
+	for (index = 0; index < num_ints; index++) {
+		irq_num = sec_intr_list[index];
+		if (irq_num < MIN_SPI_ID) {
+
+			/* Configure this interrupt as a secure interrupt */
+			gicr_clr_igroupr0(gicr_base, irq_num);
+
+			/* Configure this interrupt as G0 or a G1S interrupt */
+			if (int_grp == INTR_GROUP1S)
+				gicr_set_igrpmodr0(gicr_base, irq_num);
+			else
+				gicr_clr_igrpmodr0(gicr_base, irq_num);
+
+			/* Set the priority of this interrupt */
+			gicr_set_ipriorityr(gicr_base,
+					    irq_num,
+					    GIC_HIGHEST_SEC_PRIORITY);
+
+			/* Enable this interrupt */
+			gicr_set_isenabler0(gicr_base, irq_num);
+		}
+	}
+}
+#endif
+
+/*******************************************************************************
+ * Helper function to configure properties of secure G0 and G1S PPIs and SGIs.
+ ******************************************************************************/
+void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num)
+{
+	unsigned int i;
+	const interrupt_prop_t *current_prop;
+
+	/* Make sure there's a valid property array */
+	assert(interrupt_props != NULL);
+	assert(interrupt_props_num > 0);
+
+	for (i = 0; i < interrupt_props_num; i++) {
+		current_prop = &interrupt_props[i];
+
+		if (current_prop->intr_num >= MIN_SPI_ID)
+			continue;
+
+		/* Configure this interrupt as a secure interrupt */
+		gicr_clr_igroupr0(gicr_base, current_prop->intr_num);
+
+		/* Configure this interrupt as G0 or a G1S interrupt */
+		assert((current_prop->intr_grp == INTR_GROUP0) ||
+				(current_prop->intr_grp == INTR_GROUP1S));
+		if (current_prop->intr_grp == INTR_GROUP1S)
+			gicr_set_igrpmodr0(gicr_base, current_prop->intr_num);
+		else
+			gicr_clr_igrpmodr0(gicr_base, current_prop->intr_num);
+
+		/* Set the priority of this interrupt */
+		gicr_set_ipriorityr(gicr_base, current_prop->intr_num,
+				current_prop->intr_pri);
+
+		/*
+		 * Set interrupt configuration for PPIs. Configuration for SGIs
+		 * is ignored.
+		 */
+		if ((current_prop->intr_num >= MIN_PPI_ID) &&
+				(current_prop->intr_num < MIN_SPI_ID)) {
+			gicr_set_icfgr1(gicr_base, current_prop->intr_num,
+					current_prop->intr_cfg);
+		}
+
+		/* Enable this interrupt */
+		gicr_set_isenabler0(gicr_base, current_prop->intr_num);
+	}
+}
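
The *_configure_props() helpers above consume a platform-supplied array of interrupt_prop_t entries. A minimal example of such an array is sketched below; the interrupt numbers are made up, and the GIC_INTR_CFG_* trigger constants are assumed to come from gic_common.h.

    /* Illustrative interrupt properties array, as a platform might pass it
     * to gicv3_secure_spis_configure_props() and
     * gicv3_secure_ppi_sgi_configure_props(). */
    static const interrupt_prop_t plat_interrupt_props[] = {
        /* Secure SGI 8: Group 0, edge triggered */
        { .intr_num = 8,  .intr_pri = GIC_HIGHEST_SEC_PRIORITY,
          .intr_grp = INTR_GROUP0,  .intr_cfg = GIC_INTR_CFG_EDGE },
        /* Secure SPI 40: Group 1 Secure, level triggered */
        { .intr_num = 40, .intr_pri = GIC_HIGHEST_SEC_PRIORITY,
          .intr_grp = INTR_GROUP1S, .intr_cfg = GIC_INTR_CFG_LEVEL },
    };
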
diff --git a/drivers/arm/gic/v3/gicv3_main.c b/drivers/arm/gic/v3/gicv3_main.c
new file mode 100644
index 0000000..8c4f508
--- /dev/null
+++ b/drivers/arm/gic/v3/gicv3_main.c
@@ -0,0 +1,1147 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gicv3.h>
+#include <interrupt_props.h>
+#include <spinlock.h>
+#include "gicv3_private.h"
+
+const gicv3_driver_data_t *gicv3_driver_data;
+static unsigned int gicv2_compat;
+
+/*
+ * Spinlock to guard registers needing read-modify-write. APIs protected by this
+ * spinlock are used either at boot time (when only a single CPU is active), or
+ * when the system is fully coherent.
+ */
+spinlock_t gic_lock;
+
+/*
+ * Redistributor power operations are weakly bound so that they can be
+ * overridden
+ */
+#pragma weak gicv3_rdistif_off
+#pragma weak gicv3_rdistif_on
+
+
+/* Helper macros to save and restore GICD registers to and from the context */
+#define RESTORE_GICD_REGS(base, ctx, intr_num, reg, REG)		\
+	do {								\
+		for (unsigned int int_id = MIN_SPI_ID; int_id < intr_num; \
+				int_id += (1 << REG##_SHIFT)) {		\
+			gicd_write_##reg(base, int_id,			\
+				ctx->gicd_##reg[(int_id - MIN_SPI_ID) >> REG##_SHIFT]); \
+		}							\
+	} while (0)
+
+#define SAVE_GICD_REGS(base, ctx, intr_num, reg, REG)			\
+	do {								\
+		for (unsigned int int_id = MIN_SPI_ID; int_id < intr_num; \
+				int_id += (1 << REG##_SHIFT)) {		\
+			ctx->gicd_##reg[(int_id - MIN_SPI_ID) >> REG##_SHIFT] =\
+					gicd_read_##reg(base, int_id);	\
+		}							\
+	} while (0)
+
+
+/*******************************************************************************
+ * This function initialises the ARM GICv3 driver in EL3 with provided platform
+ * inputs.
+ ******************************************************************************/
+void gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data)
+{
+	unsigned int gic_version;
+
+	assert(plat_driver_data);
+	assert(plat_driver_data->gicd_base);
+	assert(plat_driver_data->gicr_base);
+	assert(plat_driver_data->rdistif_num);
+	assert(plat_driver_data->rdistif_base_addrs);
+
+	assert(IS_IN_EL3());
+
+#if !ERROR_DEPRECATED
+	if (plat_driver_data->interrupt_props == NULL) {
+		/* Interrupt properties array size must be 0 */
+		assert(plat_driver_data->interrupt_props_num == 0);
+
+		/*
+		 * The platform should provide a list of at least one type of
+		 * interrupt.
+		 */
+		assert(plat_driver_data->g0_interrupt_array ||
+				plat_driver_data->g1s_interrupt_array);
+
+		/*
+		 * If there are no interrupts of a particular type, then the
+		 * number of interrupts of that type should be 0 and vice-versa.
+		 */
+		assert(plat_driver_data->g0_interrupt_array ?
+				plat_driver_data->g0_interrupt_num :
+				plat_driver_data->g0_interrupt_num == 0);
+		assert(plat_driver_data->g1s_interrupt_array ?
+				plat_driver_data->g1s_interrupt_num :
+				plat_driver_data->g1s_interrupt_num == 0);
+	}
+#else
+	assert(plat_driver_data->interrupt_props != NULL);
+	assert(plat_driver_data->interrupt_props_num > 0);
+#endif
+
+	/* Check for system register support */
+#ifdef AARCH32
+	assert(read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT));
+#else
+	assert(read_id_aa64pfr0_el1() &
+			(ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT));
+#endif /* AARCH32 */
+
+	/* The GIC version should be 3.0 */
+	gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
+	gic_version >>=	PIDR2_ARCH_REV_SHIFT;
+	gic_version &= PIDR2_ARCH_REV_MASK;
+	assert(gic_version == ARCH_REV_GICV3);
+
+	/*
+	 * Find out whether the GIC supports the GICv2 compatibility mode. The
+	 * ARE_S bit resets to 0 if supported
+	 */
+	gicv2_compat = gicd_read_ctlr(plat_driver_data->gicd_base);
+	gicv2_compat >>= CTLR_ARE_S_SHIFT;
+	gicv2_compat = !(gicv2_compat & CTLR_ARE_S_MASK);
+
+	/*
+	 * Find the base address of each implemented Redistributor interface.
+	 * The number of interfaces should be equal to the number of CPUs in the
+	 * system. The memory for saving these addresses has to be allocated by
+	 * the platform port
+	 */
+	gicv3_rdistif_base_addrs_probe(plat_driver_data->rdistif_base_addrs,
+					   plat_driver_data->rdistif_num,
+					   plat_driver_data->gicr_base,
+					   plat_driver_data->mpidr_to_core_pos);
+
+	gicv3_driver_data = plat_driver_data;
+
+	/*
+	 * The GIC driver data is initialized by the primary CPU with caches
+	 * enabled. When the secondary CPU boots up, it initializes the
+	 * GICC/GICR interface with the caches disabled. Hence flush the
+	 * driver data to ensure coherency. This is not required if the
+	 * platform has HW_ASSISTED_COHERENCY enabled.
+	 */
+#if !HW_ASSISTED_COHERENCY
+	flush_dcache_range((uintptr_t) &gicv3_driver_data,
+			sizeof(gicv3_driver_data));
+	flush_dcache_range((uintptr_t) gicv3_driver_data,
+			sizeof(*gicv3_driver_data));
+#endif
+
+	INFO("GICv3 %s legacy support detected."
+			" ARM GICV3 driver initialized in EL3\n",
+			gicv2_compat ? "with" : "without");
+}
+
+/*******************************************************************************
+ * This function initialises the GIC distributor interface based upon the data
+ * provided by the platform while initialising the driver.
+ ******************************************************************************/
+void gicv3_distif_init(void)
+{
+	unsigned int bitmap = 0;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+
+	assert(IS_IN_EL3());
+
+	/*
+	 * Clear the "enable" bits for G0/G1S/G1NS interrupts before configuring
+	 * the ARE_S bit. The Distributor might generate a system error
+	 * otherwise.
+	 */
+	gicd_clr_ctlr(gicv3_driver_data->gicd_base,
+		      CTLR_ENABLE_G0_BIT |
+		      CTLR_ENABLE_G1S_BIT |
+		      CTLR_ENABLE_G1NS_BIT,
+		      RWP_TRUE);
+
+	/* Set the ARE_S and ARE_NS bit now that interrupts have been disabled */
+	gicd_set_ctlr(gicv3_driver_data->gicd_base,
+			CTLR_ARE_S_BIT | CTLR_ARE_NS_BIT, RWP_TRUE);
+
+	/* Set the default attribute of all SPIs */
+	gicv3_spis_configure_defaults(gicv3_driver_data->gicd_base);
+
+#if !ERROR_DEPRECATED
+	if (gicv3_driver_data->interrupt_props != NULL) {
+#endif
+		bitmap = gicv3_secure_spis_configure_props(
+				gicv3_driver_data->gicd_base,
+				gicv3_driver_data->interrupt_props,
+				gicv3_driver_data->interrupt_props_num);
+#if !ERROR_DEPRECATED
+	} else {
+		assert(gicv3_driver_data->g1s_interrupt_array ||
+				gicv3_driver_data->g0_interrupt_array);
+
+		/* Configure the G1S SPIs */
+		if (gicv3_driver_data->g1s_interrupt_array) {
+			gicv3_secure_spis_configure(gicv3_driver_data->gicd_base,
+					gicv3_driver_data->g1s_interrupt_num,
+					gicv3_driver_data->g1s_interrupt_array,
+					INTR_GROUP1S);
+			bitmap |= CTLR_ENABLE_G1S_BIT;
+		}
+
+		/* Configure the G0 SPIs */
+		if (gicv3_driver_data->g0_interrupt_array) {
+			gicv3_secure_spis_configure(gicv3_driver_data->gicd_base,
+					gicv3_driver_data->g0_interrupt_num,
+					gicv3_driver_data->g0_interrupt_array,
+					INTR_GROUP0);
+			bitmap |= CTLR_ENABLE_G0_BIT;
+		}
+	}
+#endif
+
+	/* Enable the secure SPIs now that they have been configured */
+	gicd_set_ctlr(gicv3_driver_data->gicd_base, bitmap, RWP_TRUE);
+}
+
+/*******************************************************************************
+ * This function initialises the GIC Redistributor interface of the calling CPU
+ * (identified by the 'proc_num' parameter) based upon the data provided by the
+ * platform while initialising the driver.
+ ******************************************************************************/
+void gicv3_rdistif_init(unsigned int proc_num)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(gicv3_driver_data->gicd_base);
+	assert(gicd_read_ctlr(gicv3_driver_data->gicd_base) & CTLR_ARE_S_BIT);
+
+	assert(IS_IN_EL3());
+
+	/* Power on redistributor */
+	gicv3_rdistif_on(proc_num);
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+
+	/* Set the default attribute of all SGIs and PPIs */
+	gicv3_ppi_sgi_configure_defaults(gicr_base);
+
+#if !ERROR_DEPRECATED
+	if (gicv3_driver_data->interrupt_props != NULL) {
+#endif
+		gicv3_secure_ppi_sgi_configure_props(gicr_base,
+				gicv3_driver_data->interrupt_props,
+				gicv3_driver_data->interrupt_props_num);
+#if !ERROR_DEPRECATED
+	} else {
+		assert(gicv3_driver_data->g1s_interrupt_array ||
+		       gicv3_driver_data->g0_interrupt_array);
+
+		/* Configure the G1S SGIs/PPIs */
+		if (gicv3_driver_data->g1s_interrupt_array) {
+			gicv3_secure_ppi_sgi_configure(gicr_base,
+					gicv3_driver_data->g1s_interrupt_num,
+					gicv3_driver_data->g1s_interrupt_array,
+					INTR_GROUP1S);
+		}
+
+		/* Configure the G0 SGIs/PPIs */
+		if (gicv3_driver_data->g0_interrupt_array) {
+			gicv3_secure_ppi_sgi_configure(gicr_base,
+					gicv3_driver_data->g0_interrupt_num,
+					gicv3_driver_data->g0_interrupt_array,
+					INTR_GROUP0);
+		}
+	}
+#endif
+}
+
+/*******************************************************************************
+ * Functions to perform power operations on GIC Redistributor
+ ******************************************************************************/
+void gicv3_rdistif_off(unsigned int proc_num)
+{
+	return;
+}
+
+void gicv3_rdistif_on(unsigned int proc_num)
+{
+	return;
+}
+
+/*******************************************************************************
+ * This function enables the GIC CPU interface of the calling CPU using only
+ * system register accesses.
+ ******************************************************************************/
+void gicv3_cpuif_enable(unsigned int proc_num)
+{
+	uintptr_t gicr_base;
+	unsigned int scr_el3;
+	unsigned int icc_sre_el3;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(IS_IN_EL3());
+
+	/* Mark the connected core as awake */
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+	gicv3_rdistif_mark_core_awake(gicr_base);
+
+	/* Disable the legacy interrupt bypass */
+	icc_sre_el3 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT;
+
+	/*
+	 * Enable system register access for EL3 and allow lower exception
+	 * levels to configure the same for themselves. If the legacy mode is
+	 * not supported, the SRE bit is RAO/WI
+	 */
+	icc_sre_el3 |= (ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT);
+	write_icc_sre_el3(read_icc_sre_el3() | icc_sre_el3);
+
+	scr_el3 = read_scr_el3();
+
+	/*
+	 * Switch to NS state to write Non secure ICC_SRE_EL1 and
+	 * ICC_SRE_EL2 registers.
+	 */
+	write_scr_el3(scr_el3 | SCR_NS_BIT);
+	isb();
+
+	write_icc_sre_el2(read_icc_sre_el2() | icc_sre_el3);
+	write_icc_sre_el1(ICC_SRE_SRE_BIT);
+	isb();
+
+	/* Switch to secure state. */
+	write_scr_el3(scr_el3 & (~SCR_NS_BIT));
+	isb();
+
+	/* Program the idle priority in the PMR */
+	write_icc_pmr_el1(GIC_PRI_MASK);
+
+	/* Enable Group0 interrupts */
+	write_icc_igrpen0_el1(IGRPEN1_EL1_ENABLE_G0_BIT);
+
+	/* Enable Group1 Secure interrupts */
+	write_icc_igrpen1_el3(read_icc_igrpen1_el3() |
+				IGRPEN1_EL3_ENABLE_G1S_BIT);
+
+	/* Write the secure ICC_SRE_EL1 register */
+	write_icc_sre_el1(ICC_SRE_SRE_BIT);
+	isb();
+}
+
+/*******************************************************************************
+ * This function disables the GIC CPU interface of the calling CPU using
+ * only system register accesses.
+ ******************************************************************************/
+void gicv3_cpuif_disable(unsigned int proc_num)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	assert(IS_IN_EL3());
+
+	/* Disable legacy interrupt bypass */
+	write_icc_sre_el3(read_icc_sre_el3() |
+			  (ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT));
+
+	/* Disable Group0 interrupts */
+	write_icc_igrpen0_el1(read_icc_igrpen0_el1() &
+			      ~IGRPEN1_EL1_ENABLE_G0_BIT);
+
+	/* Disable Group1 Secure and Non-Secure interrupts */
+	write_icc_igrpen1_el3(read_icc_igrpen1_el3() &
+			      ~(IGRPEN1_EL3_ENABLE_G1NS_BIT |
+			      IGRPEN1_EL3_ENABLE_G1S_BIT));
+
+	/* Synchronise accesses to group enable registers */
+	isb();
+
+	/* Mark the connected core as asleep */
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+	gicv3_rdistif_mark_core_asleep(gicr_base);
+}
+
+/*******************************************************************************
+ * This function returns the id of the highest priority pending interrupt at
+ * the GIC cpu interface.
+ ******************************************************************************/
+unsigned int gicv3_get_pending_interrupt_id(void)
+{
+	unsigned int id;
+
+	assert(IS_IN_EL3());
+	id = read_icc_hppir0_el1() & HPPIR0_EL1_INTID_MASK;
+
+	/*
+	 * If the ID is a special identifier corresponding to a G1S or G1NS
+	 * interrupt, then read the highest priority pending Group 1 interrupt.
+	 */
+	if ((id == PENDING_G1S_INTID) || (id == PENDING_G1NS_INTID))
+		return read_icc_hppir1_el1() & HPPIR1_EL1_INTID_MASK;
+
+	return id;
+}
+
+/*******************************************************************************
+ * This function returns the type of the highest priority pending interrupt at
+ * the GIC cpu interface. The return values can be one of the following :
+ *   PENDING_G1S_INTID  : The interrupt type is secure Group 1.
+ *   PENDING_G1NS_INTID : The interrupt type is non secure Group 1.
+ *   0 - 1019           : The interrupt type is secure Group 0.
+ *   GIC_SPURIOUS_INTERRUPT : there is no pending interrupt with
+ *                            sufficient priority to be signaled
+ ******************************************************************************/
+unsigned int gicv3_get_pending_interrupt_type(void)
+{
+	assert(IS_IN_EL3());
+	return read_icc_hppir0_el1() & HPPIR0_EL1_INTID_MASK;
+}
+
+/*******************************************************************************
+ * This function returns the type of the interrupt id depending upon the group
+ * this interrupt has been configured under by the interrupt controller i.e.
+ * group0 or group1 Secure / Non Secure. The return value can be one of the
+ * following :
+ *    INTR_GROUP0  : The interrupt type is a Secure Group 0 interrupt.
+ *    INTR_GROUP1S : The interrupt type is a Secure Group 1 interrupt.
+ *    INTR_GROUP1NS: The interrupt type is a Non-secure Group 1 interrupt.
+ ******************************************************************************/
+unsigned int gicv3_get_interrupt_type(unsigned int id,
+					  unsigned int proc_num)
+{
+	unsigned int igroup, grpmodr;
+	uintptr_t gicr_base;
+
+	assert(IS_IN_EL3());
+	assert(gicv3_driver_data);
+
+	/* Ensure the parameters are valid */
+	assert(id < PENDING_G1S_INTID || id >= MIN_LPI_ID);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+
+	/* All LPI interrupts are Group 1 non secure */
+	if (id >= MIN_LPI_ID)
+		return INTR_GROUP1NS;
+
+	if (id < MIN_SPI_ID) {
+		assert(gicv3_driver_data->rdistif_base_addrs);
+		gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+		igroup = gicr_get_igroupr0(gicr_base, id);
+		grpmodr = gicr_get_igrpmodr0(gicr_base, id);
+	} else {
+		assert(gicv3_driver_data->gicd_base);
+		igroup = gicd_get_igroupr(gicv3_driver_data->gicd_base, id);
+		grpmodr = gicd_get_igrpmodr(gicv3_driver_data->gicd_base, id);
+	}
+
+	/*
+	 * If the IGROUP bit is set, then it is a Group 1 Non-secure
+	 * interrupt.
+	 */
+	if (igroup)
+		return INTR_GROUP1NS;
+
+	/* If the GRPMOD bit is set, then it is a Group 1 Secure interrupt */
+	if (grpmodr)
+		return INTR_GROUP1S;
+
+	/* Else it is a Group 0 Secure interrupt */
+	return INTR_GROUP0;
+}
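+
+/*
+ * Summary of the decode implemented above, in terms of the register bits as
+ * read back from the GICD/GICR:
+ *
+ *	IGROUP	GRPMOD	Returned group
+ *	  0	  0	INTR_GROUP0   (Secure Group 0)
+ *	  0	  1	INTR_GROUP1S  (Secure Group 1)
+ *	  1	  -	INTR_GROUP1NS (Non-secure Group 1; GRPMOD ignored here)
+ */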
+
+/*****************************************************************************
+ * Function to save and disable the GIC ITS register context. The power
+ * management of the GIC ITS is implementation-defined and this function
+ * doesn't save any memory structures required to support the ITS. As the
+ * sequence to save that state is implementation-defined, it must be executed
+ * in platform-specific code. Calling this function alone and then powering
+ * down the GIC and ITS without that platform-specific handling will corrupt
+ * the ITS state.
+ *
+ * This function must be invoked after the GIC CPU interface is disabled.
+ *****************************************************************************/
+void gicv3_its_save_disable(uintptr_t gits_base, gicv3_its_ctx_t * const its_ctx)
+{
+	int i;
+
+	assert(gicv3_driver_data);
+	assert(IS_IN_EL3());
+	assert(its_ctx);
+	assert(gits_base);
+
+	its_ctx->gits_ctlr = gits_read_ctlr(gits_base);
+
+	/* Disable the ITS */
+	gits_write_ctlr(gits_base, its_ctx->gits_ctlr &
+					(~GITS_CTLR_ENABLED_BIT));
+
+	/* Wait for quiescent state */
+	gits_wait_for_quiescent_bit(gits_base);
+
+	its_ctx->gits_cbaser = gits_read_cbaser(gits_base);
+	its_ctx->gits_cwriter = gits_read_cwriter(gits_base);
+
+	for (i = 0; i < ARRAY_SIZE(its_ctx->gits_baser); i++)
+		its_ctx->gits_baser[i] = gits_read_baser(gits_base, i);
+}
+
+/*****************************************************************************
+ * Function to restore the GIC ITS register context. The power
+ * management of the GIC ITS is implementation-defined and this function
+ * doesn't restore any memory structures required to support the ITS. The
+ * assumption is that these structures are in memory and are retained during
+ * system suspend.
+ *
+ * This must be invoked before the GIC CPU interface is enabled.
+ *****************************************************************************/
+void gicv3_its_restore(uintptr_t gits_base, const gicv3_its_ctx_t * const its_ctx)
+{
+	int i;
+
+	assert(gicv3_driver_data);
+	assert(IS_IN_EL3());
+	assert(its_ctx);
+	assert(gits_base);
+
+	/* Assert that the GITS is disabled and quiescent */
+	assert((gits_read_ctlr(gits_base) & GITS_CTLR_ENABLED_BIT) == 0);
+	assert((gits_read_ctlr(gits_base) & GITS_CTLR_QUIESCENT_BIT) != 0);
+
+	gits_write_cbaser(gits_base, its_ctx->gits_cbaser);
+	gits_write_cwriter(gits_base, its_ctx->gits_cwriter);
+
+	for (i = 0; i < ARRAY_SIZE(its_ctx->gits_baser); i++)
+		gits_write_baser(gits_base, i, its_ctx->gits_baser[i]);
+
+	/* Restore the ITS CTLR but leave the ITS disabled */
+	gits_write_ctlr(gits_base, its_ctx->gits_ctlr &
+			(~GITS_CTLR_ENABLED_BIT));
+}
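+
+/*
+ * Illustrative suspend/resume ordering for the two ITS helpers above (a
+ * sketch only; the IMP DEF ITS state handling mentioned in their headers
+ * remains the platform's responsibility). PLAT_GITS_BASE is a hypothetical
+ * platform constant.
+ *
+ *	static gicv3_its_ctx_t its_ctx;
+ *
+ *	// Suspend path: the GIC CPU interface is already disabled
+ *	gicv3_its_save_disable(PLAT_GITS_BASE, &its_ctx);
+ *
+ *	// Resume path: before the GIC CPU interface is re-enabled
+ *	gicv3_its_restore(PLAT_GITS_BASE, &its_ctx);
+ */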
+
+/*****************************************************************************
+ * Function to save the GIC Redistributor register context. This function
+ * must be invoked after CPU interface disable and prior to Distributor save.
+ *****************************************************************************/
+void gicv3_rdistif_save(unsigned int proc_num, gicv3_redist_ctx_t * const rdist_ctx)
+{
+	uintptr_t gicr_base;
+	unsigned int int_id;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(IS_IN_EL3());
+	assert(rdist_ctx);
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+
+	/*
+	 * Wait for any write to GICR_CTLR to complete before trying to save any
+	 * state.
+	 */
+	gicr_wait_for_pending_write(gicr_base);
+
+	rdist_ctx->gicr_ctlr = gicr_read_ctlr(gicr_base);
+
+	rdist_ctx->gicr_propbaser = gicr_read_propbaser(gicr_base);
+	rdist_ctx->gicr_pendbaser = gicr_read_pendbaser(gicr_base);
+
+	rdist_ctx->gicr_igroupr0 = gicr_read_igroupr0(gicr_base);
+	rdist_ctx->gicr_isenabler0 = gicr_read_isenabler0(gicr_base);
+	rdist_ctx->gicr_ispendr0 = gicr_read_ispendr0(gicr_base);
+	rdist_ctx->gicr_isactiver0 = gicr_read_isactiver0(gicr_base);
+	rdist_ctx->gicr_icfgr0 = gicr_read_icfgr0(gicr_base);
+	rdist_ctx->gicr_icfgr1 = gicr_read_icfgr1(gicr_base);
+	rdist_ctx->gicr_igrpmodr0 = gicr_read_igrpmodr0(gicr_base);
+	rdist_ctx->gicr_nsacr = gicr_read_nsacr(gicr_base);
+	for (int_id = MIN_SGI_ID; int_id < TOTAL_PCPU_INTR_NUM;
+			int_id += (1 << IPRIORITYR_SHIFT)) {
+		rdist_ctx->gicr_ipriorityr[(int_id - MIN_SGI_ID) >> IPRIORITYR_SHIFT] =
+				gicr_read_ipriorityr(gicr_base, int_id);
+	}
+
+	/*
+	 * Call the pre-save hook that implements the IMP DEF sequence that may
+	 * be required on some GIC implementations. As this may need to access
+	 * the Redistributor registers, we pass it proc_num.
+	 */
+	gicv3_distif_pre_save(proc_num);
+}
+
+/*****************************************************************************
+ * Function to restore the GIC Redistributor register context. We disable
+ * LPI and per-cpu interrupts before we start restore of the Redistributor.
+ * This function must be invoked after Distributor restore but prior to
+ * CPU interface enable. The pending and active interrupts are restored
+ * after the interrupts are fully configured and enabled.
+ *****************************************************************************/
+void gicv3_rdistif_init_restore(unsigned int proc_num,
+				const gicv3_redist_ctx_t * const rdist_ctx)
+{
+	uintptr_t gicr_base;
+	unsigned int int_id;
+
+	assert(gicv3_driver_data);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(IS_IN_EL3());
+	assert(rdist_ctx);
+
+	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+
+	/* Power on redistributor */
+	gicv3_rdistif_on(proc_num);
+
+	/*
+	 * Call the post-restore hook that implements the IMP DEF sequence that
+	 * may be required on some GIC implementations. As this may need to
+	 * access the Redistributor registers, we pass it proc_num.
+	 */
+	gicv3_distif_post_restore(proc_num);
+
+	/*
+	 * Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
+	 * more scalable approach as it avoids clearing the enable bits in the
+	 * GICD_CTLR
+	 */
+	gicr_write_icenabler0(gicr_base, ~0);
+	/* Wait for pending writes to GICR_ICENABLER */
+	gicr_wait_for_pending_write(gicr_base);
+
+	/*
+	 * Disable the LPIs to avoid unpredictable behavior when writing to
+	 * GICR_PROPBASER and GICR_PENDBASER.
+	 */
+	gicr_write_ctlr(gicr_base,
+			rdist_ctx->gicr_ctlr & ~(GICR_CTLR_EN_LPIS_BIT));
+
+	/* Restore registers' content */
+	gicr_write_propbaser(gicr_base, rdist_ctx->gicr_propbaser);
+	gicr_write_pendbaser(gicr_base, rdist_ctx->gicr_pendbaser);
+
+	gicr_write_igroupr0(gicr_base, rdist_ctx->gicr_igroupr0);
+
+	for (int_id = MIN_SGI_ID; int_id < TOTAL_PCPU_INTR_NUM;
+			int_id += (1 << IPRIORITYR_SHIFT)) {
+		gicr_write_ipriorityr(gicr_base, int_id,
+		rdist_ctx->gicr_ipriorityr[
+				(int_id - MIN_SGI_ID) >> IPRIORITYR_SHIFT]);
+	}
+
+	gicr_write_icfgr0(gicr_base, rdist_ctx->gicr_icfgr0);
+	gicr_write_icfgr1(gicr_base, rdist_ctx->gicr_icfgr1);
+	gicr_write_igrpmodr0(gicr_base, rdist_ctx->gicr_igrpmodr0);
+	gicr_write_nsacr(gicr_base, rdist_ctx->gicr_nsacr);
+
+	/* Restore after group and priorities are set */
+	gicr_write_ispendr0(gicr_base, rdist_ctx->gicr_ispendr0);
+	gicr_write_isactiver0(gicr_base, rdist_ctx->gicr_isactiver0);
+
+	/*
+	 * Wait for all writes to the Distributor to complete before enabling
+	 * the SGI and PPIs.
+	 */
+	gicr_wait_for_upstream_pending_write(gicr_base);
+	gicr_write_isenabler0(gicr_base, rdist_ctx->gicr_isenabler0);
+
+	/*
+	 * Restore GICR_CTLR.Enable_LPIs bit and wait for pending writes in case
+	 * the first write to GICR_CTLR was still in flight (this write only
+	 * restores GICR_CTLR.Enable_LPIs and no waiting is required for this
+	 * bit).
+	 */
+	gicr_write_ctlr(gicr_base, rdist_ctx->gicr_ctlr);
+	gicr_wait_for_pending_write(gicr_base);
+}
+
+/*****************************************************************************
+ * Function to save the GIC Distributor register context. This function
+ * must be invoked after CPU interface disable and Redistributor save.
+ *****************************************************************************/
+void gicv3_distif_save(gicv3_dist_ctx_t * const dist_ctx)
+{
+	unsigned int num_ints;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(IS_IN_EL3());
+	assert(dist_ctx);
+
+	uintptr_t gicd_base = gicv3_driver_data->gicd_base;
+
+	num_ints = gicd_read_typer(gicd_base);
+	num_ints &= TYPER_IT_LINES_NO_MASK;
+	num_ints = (num_ints + 1) << 5;
+
+	assert(num_ints <= MAX_SPI_ID + 1);
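+
+	/*
+	 * Example of the TYPER decode above: ITLinesNumber == 5 gives
+	 * num_ints = (5 + 1) * 32 = 192, so SPIs with INTIDs 32 - 191 are
+	 * saved below.
+	 */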
+
+	/* Wait for pending write to complete */
+	gicd_wait_for_pending_write(gicd_base);
+
+	/* Save the GICD_CTLR */
+	dist_ctx->gicd_ctlr = gicd_read_ctlr(gicd_base);
+
+	/* Save GICD_IGROUPR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, igroupr, IGROUPR);
+
+	/* Save GICD_ISENABLER for INT_IDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, isenabler, ISENABLER);
+
+	/* Save GICD_ISPENDR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, ispendr, ISPENDR);
+
+	/* Save GICD_ISACTIVER for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, isactiver, ISACTIVER);
+
+	/* Save GICD_IPRIORITYR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, ipriorityr, IPRIORITYR);
+
+	/* Save GICD_ICFGR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, icfgr, ICFGR);
+
+	/* Save GICD_IGRPMODR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, igrpmodr, IGRPMODR);
+
+	/* Save GICD_NSACR for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, nsacr, NSACR);
+
+	/* Save GICD_IROUTER for INTIDs 32 - 1020 */
+	SAVE_GICD_REGS(gicd_base, dist_ctx, num_ints, irouter, IROUTER);
+
+	/*
+	 * GICD_ITARGETSR<n> and GICD_SPENDSGIR<n> are RAZ/WI when
+	 * GICD_CTLR.ARE_(S|NS) bits are set which is the case for our GICv3
+	 * driver.
+	 */
+}
+
+/*****************************************************************************
+ * Function to restore the GIC Distributor register context. We disable G0, G1S
+ * and G1NS interrupt groups before we start restore of the Distributor. This
+ * function must be invoked prior to Redistributor restore and CPU interface
+ * enable. The pending and active interrupts are restored after the interrupts
+ * are fully configured and enabled.
+ *****************************************************************************/
+void gicv3_distif_init_restore(const gicv3_dist_ctx_t * const dist_ctx)
+{
+	unsigned int num_ints = 0;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(IS_IN_EL3());
+	assert(dist_ctx);
+
+	uintptr_t gicd_base = gicv3_driver_data->gicd_base;
+
+	/*
+	 * Clear the "enable" bits for G0/G1S/G1NS interrupts before configuring
+	 * the ARE_S bit. The Distributor might generate a system error
+	 * otherwise.
+	 */
+	gicd_clr_ctlr(gicd_base,
+		      CTLR_ENABLE_G0_BIT |
+		      CTLR_ENABLE_G1S_BIT |
+		      CTLR_ENABLE_G1NS_BIT,
+		      RWP_TRUE);
+
+	/* Set the ARE_S and ARE_NS bits now that interrupts have been disabled */
+	gicd_set_ctlr(gicd_base, CTLR_ARE_S_BIT | CTLR_ARE_NS_BIT, RWP_TRUE);
+
+	num_ints = gicd_read_typer(gicd_base);
+	num_ints &= TYPER_IT_LINES_NO_MASK;
+	num_ints = (num_ints + 1) << 5;
+
+	assert(num_ints <= MAX_SPI_ID + 1);
+
+	/* Restore GICD_IGROUPR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, igroupr, IGROUPR);
+
+	/* Restore GICD_IPRIORITYR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, ipriorityr, IPRIORITYR);
+
+	/* Restore GICD_ICFGR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, icfgr, ICFGR);
+
+	/* Restore GICD_IGRPMODR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, igrpmodr, IGRPMODR);
+
+	/* Restore GICD_NSACR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, nsacr, NSACR);
+
+	/* Restore GICD_IROUTER for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, irouter, IROUTER);
+
+	/*
+	 * Restore ISENABLER, ISPENDR and ISACTIVER after the interrupts are
+	 * configured.
+	 */
+
+	/* Restore GICD_ISENABLER for INT_IDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, isenabler, ISENABLER);
+
+	/* Restore GICD_ISPENDR for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, ispendr, ISPENDR);
+
+	/* Restore GICD_ISACTIVER for INTIDs 32 - 1020 */
+	RESTORE_GICD_REGS(gicd_base, dist_ctx, num_ints, isactiver, ISACTIVER);
+
+	/* Restore the GICD_CTLR */
+	gicd_write_ctlr(gicd_base, dist_ctx->gicd_ctlr);
+	gicd_wait_for_pending_write(gicd_base);
+}
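+
+/*
+ * Illustrative system-suspend ordering for the save/restore helpers above,
+ * following the constraints stated in their headers (a sketch; the contexts
+ * and proc_num are assumed to be provided by the platform, and
+ * gicv3_cpuif_disable/enable are assumed to be this driver's CPU interface
+ * entry points):
+ *
+ *	// Save path
+ *	gicv3_cpuif_disable(proc_num);
+ *	gicv3_rdistif_save(proc_num, &rdist_ctx);
+ *	gicv3_distif_save(&dist_ctx);
+ *
+ *	// Restore path
+ *	gicv3_distif_init_restore(&dist_ctx);
+ *	gicv3_rdistif_init_restore(proc_num, &rdist_ctx);
+ *	gicv3_cpuif_enable(proc_num);
+ */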
+
+/*******************************************************************************
+ * This function gets the priority of the interrupt the processor is currently
+ * servicing.
+ ******************************************************************************/
+unsigned int gicv3_get_running_priority(void)
+{
+	return read_icc_rpr_el1();
+}
+
+/*******************************************************************************
+ * This function checks if the interrupt identified by id is active (whether the
+ * state is either active, or active and pending). The proc_num is used if the
+ * interrupt is an SGI or PPI, and selects the corresponding Redistributor
+ * interface.
+ ******************************************************************************/
+unsigned int gicv3_get_interrupt_active(unsigned int id, unsigned int proc_num)
+{
+	unsigned int value;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(id <= MAX_SPI_ID);
+
+	if (id < MIN_SPI_ID) {
+		/* For SGIs and PPIs */
+		value = gicr_get_isactiver0(
+				gicv3_driver_data->rdistif_base_addrs[proc_num], id);
+	} else {
+		value = gicd_get_isactiver(gicv3_driver_data->gicd_base, id);
+	}
+
+	return value;
+}
+
+/*******************************************************************************
+ * This function enables the interrupt identified by id. The proc_num
+ * is used if the interrupt is SGI or PPI, and programs the corresponding
+ * Redistributor interface.
+ ******************************************************************************/
+void gicv3_enable_interrupt(unsigned int id, unsigned int proc_num)
+{
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(id <= MAX_SPI_ID);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before enabling interrupt.
+	 */
+	dsbishst();
+	if (id < MIN_SPI_ID) {
+		/* For SGIs and PPIs */
+		gicr_set_isenabler0(
+				gicv3_driver_data->rdistif_base_addrs[proc_num],
+				id);
+	} else {
+		gicd_set_isenabler(gicv3_driver_data->gicd_base, id);
+	}
+}
+
+/*******************************************************************************
+ * This function disables the interrupt identified by id. The proc_num
+ * is used if the interrupt is SGI or PPI, and programs the corresponding
+ * Redistributor interface.
+ ******************************************************************************/
+void gicv3_disable_interrupt(unsigned int id, unsigned int proc_num)
+{
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(id <= MAX_SPI_ID);
+
+	/*
+	 * Disable interrupt, and ensure that any shared variable updates
+	 * depending on out of band interrupt trigger are observed afterwards.
+	 */
+	if (id < MIN_SPI_ID) {
+		/* For SGIs and PPIs */
+		gicr_set_icenabler0(
+				gicv3_driver_data->rdistif_base_addrs[proc_num],
+				id);
+
+		/* Write to clear enable requires waiting for pending writes */
+		gicr_wait_for_pending_write(
+				gicv3_driver_data->rdistif_base_addrs[proc_num]);
+	} else {
+		gicd_set_icenabler(gicv3_driver_data->gicd_base, id);
+
+		/* Write to clear enable requires waiting for pending writes */
+		gicd_wait_for_pending_write(gicv3_driver_data->gicd_base);
+	}
+
+	dsbishst();
+}
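+
+/*
+ * Typical producer pattern for the enable/disable pair above (illustrative;
+ * shared_flag is a hypothetical variable). The dsbishst() inside the helper
+ * guarantees that updates made before the call are visible before the
+ * interrupt can be taken:
+ *
+ *	shared_flag = 1;
+ *	gicv3_enable_interrupt(id, proc_num);
+ */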
+
+/*******************************************************************************
+ * This function sets the interrupt priority as supplied for the given interrupt
+ * id.
+ ******************************************************************************/
+void gicv3_set_interrupt_priority(unsigned int id, unsigned int proc_num,
+		unsigned int priority)
+{
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+	assert(id <= MAX_SPI_ID);
+
+	if (id < MIN_SPI_ID) {
+		gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+		gicr_set_ipriorityr(gicr_base, id, priority);
+	} else {
+		gicd_set_ipriorityr(gicv3_driver_data->gicd_base, id, priority);
+	}
+}
+
+/*******************************************************************************
+ * This function assigns group for the interrupt identified by id. The proc_num
+ * is used if the interrupt is SGI or PPI, and programs the corresponding
+ * Redistributor interface. The group can be any of the INTR_GROUP* values.
+ ******************************************************************************/
+void gicv3_set_interrupt_type(unsigned int id, unsigned int proc_num,
+		unsigned int type)
+{
+	unsigned int igroup = 0, grpmod = 0;
+	uintptr_t gicr_base;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	switch (type) {
+	case INTR_GROUP1S:
+		igroup = 0;
+		grpmod = 1;
+		break;
+	case INTR_GROUP0:
+		igroup = 0;
+		grpmod = 0;
+		break;
+	case INTR_GROUP1NS:
+		igroup = 1;
+		grpmod = 0;
+		break;
+	default:
+		assert(0);
+	}
+
+	if (id < MIN_SPI_ID) {
+		gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
+		if (igroup)
+			gicr_set_igroupr0(gicr_base, id);
+		else
+			gicr_clr_igroupr0(gicr_base, id);
+
+		if (grpmod)
+			gicr_set_igrpmodr0(gicr_base, id);
+		else
+			gicr_clr_igrpmodr0(gicr_base, id);
+	} else {
+		/* Serialize read-modify-write to Distributor registers */
+		spin_lock(&gic_lock);
+		if (igroup)
+			gicd_set_igroupr(gicv3_driver_data->gicd_base, id);
+		else
+			gicd_clr_igroupr(gicv3_driver_data->gicd_base, id);
+
+		if (grpmod)
+			gicd_set_igrpmodr(gicv3_driver_data->gicd_base, id);
+		else
+			gicd_clr_igrpmodr(gicv3_driver_data->gicd_base, id);
+		spin_unlock(&gic_lock);
+	}
+}
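+
+/*
+ * Examples (illustrative), assuming the usual plat_my_core_pos() helper for
+ * the calling core's Redistributor index:
+ *
+ *	// SPI 40 as Non-secure Group 1 (Distributor path)
+ *	gicv3_set_interrupt_type(40, plat_my_core_pos(), INTR_GROUP1NS);
+ *
+ *	// PPI 29 as Secure Group 0 on the calling core (Redistributor path)
+ *	gicv3_set_interrupt_type(29, plat_my_core_pos(), INTR_GROUP0);
+ */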
+
+/*******************************************************************************
+ * This function raises the specified Secure Group 0 SGI.
+ *
+ * The target parameter must be a valid MPIDR in the system.
+ ******************************************************************************/
+void gicv3_raise_secure_g0_sgi(int sgi_num, u_register_t target)
+{
+	unsigned int tgt, aff3, aff2, aff1, aff0;
+	uint64_t sgi_val;
+
+	/* Verify interrupt number is in the SGI range */
+	assert((sgi_num >= MIN_SGI_ID) && (sgi_num < MIN_PPI_ID));
+
+	/* Extract affinity fields from target */
+	aff0 = MPIDR_AFFLVL0_VAL(target);
+	aff1 = MPIDR_AFFLVL1_VAL(target);
+	aff2 = MPIDR_AFFLVL2_VAL(target);
+	aff3 = MPIDR_AFFLVL3_VAL(target);
+
+	/*
+	 * Make target list from affinity 0, and ensure GICv3 SGI can target
+	 * this PE.
+	 */
+	assert(aff0 < GICV3_MAX_SGI_TARGETS);
+	tgt = BIT(aff0);
+
+	/* Raise SGI to PE specified by its affinity */
+	sgi_val = GICV3_SGIR_VALUE(aff3, aff2, aff1, sgi_num, SGIR_IRM_TO_AFF,
+			tgt);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before raising SGI.
+	 */
+	dsbishst();
+	write_icc_sgi0r_el1(sgi_val);
+	isb();
+}
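+
+/*
+ * Example (illustrative): raise Secure SGI 6 to the calling PE, assuming the
+ * usual read_mpidr() helper for the caller's own MPIDR.
+ *
+ *	gicv3_raise_secure_g0_sgi(6, read_mpidr());
+ */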
+
+/*******************************************************************************
+ * This function sets the interrupt routing for the given SPI interrupt id.
+ * The interrupt routing is specified in routing mode and mpidr.
+ *
+ * The routing mode can be either of:
+ *  - GICV3_IRM_ANY
+ *  - GICV3_IRM_PE
+ *
+ * The mpidr is the affinity of the PE to which the interrupt will be routed,
+ * and is ignored for routing mode GICV3_IRM_ANY.
+ ******************************************************************************/
+void gicv3_set_spi_routing(unsigned int id, unsigned int irm, u_register_t mpidr)
+{
+	unsigned long long aff;
+	uint64_t router;
+
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+
+	assert((irm == GICV3_IRM_ANY) || (irm == GICV3_IRM_PE));
+	assert(id >= MIN_SPI_ID && id <= MAX_SPI_ID);
+
+	aff = gicd_irouter_val_from_mpidr(mpidr, irm);
+	gicd_write_irouter(gicv3_driver_data->gicd_base, id, aff);
+
+	/*
+	 * In implementations that do not support 1 of N distribution of SPIs,
+	 * IRM might be RAZ/WI. Read back and verify IRM bit.
+	 */
+	if (irm == GICV3_IRM_ANY) {
+		router = gicd_read_irouter(gicv3_driver_data->gicd_base, id);
+		if (!((router >> IROUTER_IRM_SHIFT) & IROUTER_IRM_MASK)) {
+			ERROR("GICv3 implementation doesn't support routing ANY\n");
+			panic();
+		}
+	}
+}
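+
+/*
+ * Examples (illustrative): route SPI 40 to any participating PE, or pin it to
+ * the calling PE (read_mpidr() is assumed as the usual MPIDR helper; the
+ * mpidr argument is ignored for GICV3_IRM_ANY).
+ *
+ *	gicv3_set_spi_routing(40, GICV3_IRM_ANY, 0);
+ *	gicv3_set_spi_routing(40, GICV3_IRM_PE, read_mpidr());
+ */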
+
+/*******************************************************************************
+ * This function clears the pending status of an interrupt identified by id.
+ * The proc_num is used if the interrupt is SGI or PPI, and programs the
+ * corresponding Redistributor interface.
+ ******************************************************************************/
+void gicv3_clear_interrupt_pending(unsigned int id, unsigned int proc_num)
+{
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	/*
+	 * Clear pending interrupt, and ensure that any shared variable updates
+	 * depending on out of band interrupt trigger are observed afterwards.
+	 */
+	if (id < MIN_SPI_ID) {
+		/* For SGIs and PPIs */
+		gicr_set_icpendr0(gicv3_driver_data->rdistif_base_addrs[proc_num],
+				id);
+	} else {
+		gicd_set_icpendr(gicv3_driver_data->gicd_base, id);
+	}
+	dsbishst();
+}
+
+/*******************************************************************************
+ * This function sets the pending status of an interrupt identified by id.
+ * The proc_num is used if the interrupt is SGI or PPI and programs the
+ * corresponding Redistributor interface.
+ ******************************************************************************/
+void gicv3_set_interrupt_pending(unsigned int id, unsigned int proc_num)
+{
+	assert(gicv3_driver_data);
+	assert(gicv3_driver_data->gicd_base);
+	assert(proc_num < gicv3_driver_data->rdistif_num);
+	assert(gicv3_driver_data->rdistif_base_addrs);
+
+	/*
+	 * Ensure that any shared variable updates depending on out of band
+	 * interrupt trigger are observed before setting interrupt pending.
+	 */
+	dsbishst();
+	if (id < MIN_SPI_ID) {
+		/* For SGIs and PPIs */
+		gicr_set_ispendr0(gicv3_driver_data->rdistif_base_addrs[proc_num],
+				id);
+	} else {
+		gicd_set_ispendr(gicv3_driver_data->gicd_base, id);
+	}
+}
+
+/*******************************************************************************
+ * This function sets the PMR register with the supplied value. Returns the
+ * original PMR.
+ ******************************************************************************/
+unsigned int gicv3_set_pmr(unsigned int mask)
+{
+	unsigned int old_mask;
+
+	old_mask = read_icc_pmr_el1();
+
+	/*
+	 * Order memory updates w.r.t. PMR write, and ensure they're visible
+	 * before potential out of band interrupt trigger because of PMR update.
+	 * PMR system register writes are self-synchronizing, so no ISB required
+	 * thereafter.
+	 */
+	dsbishst();
+	write_icc_pmr_el1(mask);
+
+	return old_mask;
+}
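+
+/*
+ * Example (illustrative): mask lower-priority interrupts around a critical
+ * section and then restore the previous mask (new_mask is whatever priority
+ * threshold the caller chooses). Only interrupts with a priority value
+ * numerically lower than the programmed mask are signalled.
+ *
+ *	unsigned int old_mask = gicv3_set_pmr(new_mask);
+ *	// ... critical section ...
+ *	(void)gicv3_set_pmr(old_mask);
+ */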
diff --git a/drivers/arm/gic/v3/gicv3_private.h b/drivers/arm/gic/v3/gicv3_private.h
new file mode 100644
index 0000000..a5093d0
--- /dev/null
+++ b/drivers/arm/gic/v3/gicv3_private.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GICV3_PRIVATE_H__
+#define __GICV3_PRIVATE_H__
+
+#include <assert.h>
+#include <gic_common.h>
+#include <gicv3.h>
+#include <mmio.h>
+#include <stdint.h>
+#include "../common/gic_common_private.h"
+
+/*******************************************************************************
+ * GICv3 private macro definitions
+ ******************************************************************************/
+
+/* Constants to indicate the status of the RWP bit */
+#define RWP_TRUE		1
+#define RWP_FALSE		0
+
+/*
+ * Macro to convert an mpidr to a value suitable for programming into a
+ * GICD_IROUTER. Bits[31:24] in the MPIDR are cleared as they are not relevant
+ * to GICv3.
+ */
+#define gicd_irouter_val_from_mpidr(mpidr, irm)		\
+	((mpidr & ~(0xff << 24)) |			\
+	 (irm & IROUTER_IRM_MASK) << IROUTER_IRM_SHIFT)
+
+/*
+ * Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
+ * are zeroes.
+ */
+#ifdef AARCH32
+#define mpidr_from_gicr_typer(typer_val)	(((typer_val) >> 32) & 0xffffff)
+#else
+#define mpidr_from_gicr_typer(typer_val)				 \
+	(((((typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
+	 (((typer_val) >> 32) & 0xffffff))
+#endif
+
+/*******************************************************************************
+ * GICv3 private global variables declarations
+ ******************************************************************************/
+extern const gicv3_driver_data_t *gicv3_driver_data;
+
+/*******************************************************************************
+ * Private GICv3 function prototypes for accessing entire registers.
+ * Note: The raw register values correspond to multiple interrupt IDs and
+ * the number of interrupt IDs involved depends on the register accessed.
+ ******************************************************************************/
+unsigned int gicd_read_igrpmodr(uintptr_t base, unsigned int id);
+unsigned int gicr_read_ipriorityr(uintptr_t base, unsigned int id);
+void gicd_write_igrpmodr(uintptr_t base, unsigned int id, unsigned int val);
+void gicr_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val);
+
+/*******************************************************************************
+ * Private GICv3 function prototypes for accessing the GIC registers
+ * corresponding to a single interrupt ID. These functions use bitwise
+ * operations or appropriate register accesses to modify or return
+ * the bit-field corresponding to the single interrupt ID.
+ ******************************************************************************/
+unsigned int gicd_get_igrpmodr(uintptr_t base, unsigned int id);
+unsigned int gicr_get_igrpmodr0(uintptr_t base, unsigned int id);
+unsigned int gicr_get_igroupr0(uintptr_t base, unsigned int id);
+unsigned int gicr_get_isactiver0(uintptr_t base, unsigned int id);
+void gicd_set_igrpmodr(uintptr_t base, unsigned int id);
+void gicr_set_igrpmodr0(uintptr_t base, unsigned int id);
+void gicr_set_isenabler0(uintptr_t base, unsigned int id);
+void gicr_set_icenabler0(uintptr_t base, unsigned int id);
+void gicr_set_ispendr0(uintptr_t base, unsigned int id);
+void gicr_set_icpendr0(uintptr_t base, unsigned int id);
+void gicr_set_igroupr0(uintptr_t base, unsigned int id);
+void gicd_clr_igrpmodr(uintptr_t base, unsigned int id);
+void gicr_clr_igrpmodr0(uintptr_t base, unsigned int id);
+void gicr_clr_igroupr0(uintptr_t base, unsigned int id);
+void gicr_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri);
+
+/*******************************************************************************
+ * Private GICv3 helper function prototypes
+ ******************************************************************************/
+void gicv3_spis_configure_defaults(uintptr_t gicd_base);
+void gicv3_ppi_sgi_configure_defaults(uintptr_t gicr_base);
+#if !ERROR_DEPRECATED
+void gicv3_secure_spis_configure(uintptr_t gicd_base,
+				     unsigned int num_ints,
+				     const unsigned int *sec_intr_list,
+				     unsigned int int_grp);
+void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
+					unsigned int num_ints,
+					const unsigned int *sec_intr_list,
+					unsigned int int_grp);
+#endif
+void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num);
+unsigned int gicv3_secure_spis_configure_props(uintptr_t gicd_base,
+		const interrupt_prop_t *interrupt_props,
+		unsigned int interrupt_props_num);
+void gicv3_rdistif_base_addrs_probe(uintptr_t *rdistif_base_addrs,
+					unsigned int rdistif_num,
+					uintptr_t gicr_base,
+					mpidr_hash_fn mpidr_to_core_pos);
+void gicv3_rdistif_mark_core_awake(uintptr_t gicr_base);
+void gicv3_rdistif_mark_core_asleep(uintptr_t gicr_base);
+
+/*******************************************************************************
+ * GIC Distributor interface accessors
+ ******************************************************************************/
+/*
+ * Wait for updates to:
+ * GICD_CTLR[2:0] - the Group Enables
+ * GICD_CTLR[5:4] - the ARE bits
+ * GICD_ICENABLERn - the clearing of enable state for SPIs
+ */
+static inline void gicd_wait_for_pending_write(uintptr_t gicd_base)
+{
+	while (gicd_read_ctlr(gicd_base) & GICD_CTLR_RWP_BIT)
+		;
+}
+
+static inline unsigned int gicd_read_pidr2(uintptr_t base)
+{
+	return mmio_read_32(base + GICD_PIDR2_GICV3);
+}
+
+static inline unsigned long long gicd_read_irouter(uintptr_t base, unsigned int id)
+{
+	assert(id >= MIN_SPI_ID);
+	return mmio_read_64(base + GICD_IROUTER + (id << 3));
+}
+
+static inline void gicd_write_irouter(uintptr_t base,
+				      unsigned int id,
+				      unsigned long long affinity)
+{
+	assert(id >= MIN_SPI_ID);
+	mmio_write_64(base + GICD_IROUTER + (id << 3), affinity);
+}
+
+static inline void gicd_clr_ctlr(uintptr_t base,
+				 unsigned int bitmap,
+				 unsigned int rwp)
+{
+	gicd_write_ctlr(base, gicd_read_ctlr(base) & ~bitmap);
+	if (rwp)
+		gicd_wait_for_pending_write(base);
+}
+
+static inline void gicd_set_ctlr(uintptr_t base,
+				 unsigned int bitmap,
+				 unsigned int rwp)
+{
+	gicd_write_ctlr(base, gicd_read_ctlr(base) | bitmap);
+	if (rwp)
+		gicd_wait_for_pending_write(base);
+}
+
+/*******************************************************************************
+ * GIC Redistributor interface accessors
+ ******************************************************************************/
+static inline unsigned long long gicr_read_ctlr(uintptr_t base)
+{
+	return mmio_read_64(base + GICR_CTLR);
+}
+
+static inline void gicr_write_ctlr(uintptr_t base, uint64_t val)
+{
+	mmio_write_64(base + GICR_CTLR, val);
+}
+
+static inline unsigned long long gicr_read_typer(uintptr_t base)
+{
+	return mmio_read_64(base + GICR_TYPER);
+}
+
+static inline unsigned int gicr_read_waker(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_WAKER);
+}
+
+static inline void gicr_write_waker(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_WAKER, val);
+}
+
+/*
+ * Wait for updates to:
+ * GICR_ICENABLER0
+ * GICR_CTLR.DPG1S
+ * GICR_CTLR.DPG1NS
+ * GICR_CTLR.DPG0
+ */
+static inline void gicr_wait_for_pending_write(uintptr_t gicr_base)
+{
+	while (gicr_read_ctlr(gicr_base) & GICR_CTLR_RWP_BIT)
+		;
+}
+
+static inline void gicr_wait_for_upstream_pending_write(uintptr_t gicr_base)
+{
+	while (gicr_read_ctlr(gicr_base) & GICR_CTLR_UWP_BIT)
+		;
+}
+
+/* Private implementation of Distributor power control hooks */
+void arm_gicv3_distif_pre_save(unsigned int rdist_proc_num);
+void arm_gicv3_distif_post_restore(unsigned int rdist_proc_num);
+
+/*******************************************************************************
+ * GIC Re-distributor functions for accessing entire registers.
+ * Note: The raw register values correspond to multiple interrupt IDs and
+ * the number of interrupt IDs involved depends on the register accessed.
+ ******************************************************************************/
+static inline unsigned int gicr_read_icenabler0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ICENABLER0);
+}
+
+static inline void gicr_write_icenabler0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ICENABLER0, val);
+}
+
+static inline unsigned int gicr_read_isenabler0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ISENABLER0);
+}
+
+static inline void gicr_write_icpendr0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ICPENDR0, val);
+}
+
+static inline void gicr_write_isenabler0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ISENABLER0, val);
+}
+
+static inline unsigned int gicr_read_igroupr0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_IGROUPR0);
+}
+
+static inline unsigned int gicr_read_ispendr0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ISPENDR0);
+}
+
+static inline void gicr_write_ispendr0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ISPENDR0, val);
+}
+
+static inline void gicr_write_igroupr0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_IGROUPR0, val);
+}
+
+static inline unsigned int gicr_read_igrpmodr0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_IGRPMODR0);
+}
+
+static inline void gicr_write_igrpmodr0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_IGRPMODR0, val);
+}
+
+static inline unsigned int gicr_read_nsacr(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_NSACR);
+}
+
+static inline void gicr_write_nsacr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_NSACR, val);
+}
+
+static inline unsigned int gicr_read_isactiver0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ISACTIVER0);
+}
+
+static inline void gicr_write_isactiver0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ISACTIVER0, val);
+}
+
+static inline unsigned int gicr_read_icfgr0(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ICFGR0);
+}
+
+static inline unsigned int gicr_read_icfgr1(uintptr_t base)
+{
+	return mmio_read_32(base + GICR_ICFGR1);
+}
+
+static inline void gicr_write_icfgr0(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ICFGR0, val);
+}
+
+static inline void gicr_write_icfgr1(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GICR_ICFGR1, val);
+}
+
+static inline unsigned long long gicr_read_propbaser(uintptr_t base)
+{
+	return mmio_read_64(base + GICR_PROPBASER);
+}
+
+static inline void gicr_write_propbaser(uintptr_t base, unsigned long long val)
+{
+	mmio_write_64(base + GICR_PROPBASER, val);
+}
+
+static inline unsigned long long gicr_read_pendbaser(uintptr_t base)
+{
+	return mmio_read_64(base + GICR_PENDBASER);
+}
+
+static inline void gicr_write_pendbaser(uintptr_t base, unsigned long long val)
+{
+	mmio_write_64(base + GICR_PENDBASER, val);
+}
+
+/*******************************************************************************
+ * GIC ITS functions to read and write entire ITS registers.
+ ******************************************************************************/
+static inline uint32_t gits_read_ctlr(uintptr_t base)
+{
+	return mmio_read_32(base + GITS_CTLR);
+}
+
+static inline void gits_write_ctlr(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GITS_CTLR, val);
+}
+
+static inline uint64_t gits_read_cbaser(uintptr_t base)
+{
+	return mmio_read_64(base + GITS_CBASER);
+}
+
+static inline void gits_write_cbaser(uintptr_t base, uint64_t val)
+{
+	mmio_write_64(base + GITS_CBASER, val);
+}
+
+static inline uint64_t gits_read_cwriter(uintptr_t base)
+{
+	return mmio_read_64(base + GITS_CWRITER);
+}
+
+static inline void gits_write_cwriter(uintptr_t base, uint64_t val)
+{
+	mmio_write_64(base + GITS_CWRITER, val);
+}
+
+static inline uint64_t gits_read_baser(uintptr_t base, unsigned int its_table_id)
+{
+	assert(its_table_id < 8);
+	return mmio_read_64(base + GITS_BASER + (8 * its_table_id));
+}
+
+static inline void gits_write_baser(uintptr_t base, unsigned int its_table_id, uint64_t val)
+{
+	assert(its_table_id < 8);
+	mmio_write_64(base + GITS_BASER + (8 * its_table_id), val);
+}
+
+/*
+ * Wait for Quiescent bit when GIC ITS is disabled
+ */
+static inline void gits_wait_for_quiescent_bit(uintptr_t gits_base)
+{
+	assert(!(gits_read_ctlr(gits_base) & GITS_CTLR_ENABLED_BIT));
+	while ((gits_read_ctlr(gits_base) & GITS_CTLR_QUIESCENT_BIT) == 0)
+		;
+}
+
+
+#endif /* __GICV3_PRIVATE_H__ */
diff --git a/drivers/arm/gpio/gpio.c b/drivers/arm/gpio/gpio.c
deleted file mode 100644
index a38db94..0000000
--- a/drivers/arm/gpio/gpio.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * GPIO driver for PL061
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <console.h>
-#include <debug.h>
-#include <errno.h>
-#include <gpio.h>
-#include <mmio.h>
-
-#define MAX_GPIO_DEVICES	32
-#define GPIOS_PER_DEV		8
-#define GPIO_DIR		0x400
-
-#define BIT(nr)			(1UL << (nr))
-
-struct gpio_device_t {
-	unsigned int	base[MAX_GPIO_DEVICES];
-	unsigned int	count;
-};
-
-static struct gpio_device_t gpio_dev;
-
-/* return 0 for failure */
-static unsigned int find_gc_base(unsigned int gpio)
-{
-	int gc;
-
-	gc = gpio / GPIOS_PER_DEV;
-	if (gc >= gpio_dev.count)
-		return 0;
-	return gpio_dev.base[gc];
-}
-
-int gpio_direction_input(unsigned int gpio)
-{
-	unsigned int gc_base, offset, data;
-
-	gc_base = find_gc_base(gpio);
-	if (!gc_base)
-		return -EINVAL;
-	offset = gpio % GPIOS_PER_DEV;
-
-	data = mmio_read_8(gc_base + GPIO_DIR);
-	data &= ~(1 << offset);
-	mmio_write_8(gc_base + GPIO_DIR, data);
-	return 0;
-}
-
-int gpio_direction_output(unsigned int gpio)
-{
-	unsigned int gc_base, offset, data;
-
-	gc_base = find_gc_base(gpio);
-	if (!gc_base)
-		return -EINVAL;
-	offset = gpio % 8;
-
-	data = mmio_read_8(gc_base + GPIO_DIR);
-	data |= 1 << offset;
-	mmio_write_8(gc_base + GPIO_DIR, data);
-	return 0;
-}
-
-int gpio_get_value(unsigned int gpio)
-{
-	unsigned int gc_base, offset;
-
-	gc_base = find_gc_base(gpio);
-	if (!gc_base)
-		return -EINVAL;
-	offset = gpio % 8;
-
-	return !!mmio_read_8(gc_base + (BIT(offset + 2)));
-}
-
-int gpio_set_value(unsigned int gpio, unsigned int value)
-{
-	unsigned int gc_base, offset;
-
-	gc_base = find_gc_base(gpio);
-	if (!gc_base)
-		return -EINVAL;
-	offset = gpio % 8;
-	mmio_write_8(gc_base + (BIT(offset + 2)), !!value << offset); 
-	return 0;
-}
-
-int gpio_register_device(unsigned int base)
-{
-	int i;
-	if (gpio_dev.count > MAX_GPIO_DEVICES)
-		return -EINVAL;
-	for (i = 0; i < gpio_dev.count; i++) {
-		if (gpio_dev.base[i] == base) {
-			WARN("%s: duplicated gpio base\n", __func__);
-			return -EINVAL;
-		}
-	}
-	gpio_dev.base[gpio_dev.count] = base;
-	gpio_dev.count++;
-	return 0;
-}
diff --git a/drivers/arm/pl011/aarch32/pl011_console.S b/drivers/arm/pl011/aarch32/pl011_console.S
new file mode 100644
index 0000000..3718fff
--- /dev/null
+++ b/drivers/arm/pl011/aarch32/pl011_console.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+
+/*
+ * Pull in generic functions to provide backwards compatibility for
+ * platform makefiles
+ */
+#include "../../../console/aarch32/console.S"
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : r1, r2, r3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	core_init_fail
+#if !PL011_GENERIC_UART
+	/* Check baud rate and uart clock for sanity */
+	cmp	r1, #0
+	beq	core_init_fail
+	cmp	r2, #0
+	beq	core_init_fail
+	/* Disable the UART before initialization */
+	ldr	r3, [r0, #UARTCR]
+	bic	r3, r3, #PL011_UARTCR_UARTEN
+	str	r3, [r0, #UARTCR]
+	/* Program the baudrate */
+	/* Divisor =  (Uart clock * 4) / baudrate */
+	lsl	r1, r1, #2
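+	/*
+	 * Worked example (illustrative): with a 24 MHz UART clock at
+	 * 115200 baud, Divisor = (24000000 * 4) / 115200 = 833, so
+	 * IBRD = 833 >> 6 = 13 and FBRD = 833 & 0x3F = 1.
+	 */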
+	udiv	r2, r1, r2
+	/* IBRD = Divisor >> 6 */
+	lsr	r1, r2, #6
+	/* Write the IBRD */
+	str	r1, [r0, #UARTIBRD]
+	/* FBRD = Divisor & 0x3F */
+	and	r1, r2, #0x3f
+	/* Write the FBRD */
+	str	r1, [r0, #UARTFBRD]
+	mov	r1, #PL011_LINE_CONTROL
+	str	r1, [r0, #UARTLCR_H]
+	/* Clear any pending errors */
+	mov	r1, #0
+	str	r1, [r0, #UARTECR]
+	/* Enable tx, rx, and uart overall */
+	ldr	r1, =(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
+	str	r1, [r0, #UARTCR]
+#endif
+	mov	r0, #1
+	bx	lr
+core_init_fail:
+	mov	r0, #0
+	bx	lr
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : r0 - character to be printed
+	 *      r1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cmp	r1, #0
+	beq	putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	r0, #0xA
+	bne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	r2, [r1, #UARTFR]
+	tst	r2, #PL011_UARTFR_TXFF
+	bne	1b
+	mov	r2, #0xD
+	str	r2, [r1, #UARTDR]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	r2, [r1, #UARTFR]
+	tst	r2, #PL011_UARTFR_TXFF
+	bne	2b
+	str	r0, [r1, #UARTDR]
+	bx	lr
+putc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : r0 - console base address
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cmp	r0, #0
+	beq	getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	r1, [r0, #UARTFR]
+	tst	r1, #PL011_UARTFR_RXFE
+	bne	1b
+	ldr	r1, [r0, #UARTDR]
+	mov	r0, r1
+	bx	lr
+getc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : r0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	cmp	r0, #0
+	beq	flush_error
+
+1:
+	/* Loop while the transmit FIFO is busy */
+	ldr	r1, [r0, #UARTFR]
+	tst	r1, #PL011_UARTFR_BUSY
+	bne	1b
+
+	mov	r0, #0
+	bx	lr
+flush_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_flush
diff --git a/drivers/arm/pl011/aarch64/pl011_console.S b/drivers/arm/pl011/aarch64/pl011_console.S
new file mode 100644
index 0000000..8b15d56
--- /dev/null
+++ b/drivers/arm/pl011/aarch64/pl011_console.S
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <pl011.h>
+
+/*
+ * Pull in generic functions to provide backwards compatibility for
+ * platform makefiles
+ */
+#include "../../../console/aarch64/console.S"
+
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2, x3, x4
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+#if !PL011_GENERIC_UART
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+	/* Disable uart before programming */
+	ldr	w3, [x0, #UARTCR]
+	mov	w4, #PL011_UARTCR_UARTEN
+	bic	w3, w3, w4
+	str	w3, [x0, #UARTCR]
+	/* Program the baudrate */
+	/* Divisor =  (Uart clock * 4) / baudrate */
+	lsl	w1, w1, #2
+	udiv	w2, w1, w2
+	/* IBRD = Divisor >> 6 */
+	lsr	w1, w2, #6
+	/* Write the IBRD */
+	str	w1, [x0, #UARTIBRD]
+	/* FBRD = Divisor & 0x3F */
+	and	w1, w2, #0x3f
+	/* Write the FBRD */
+	str	w1, [x0, #UARTFBRD]
+	mov	w1, #PL011_LINE_CONTROL
+	str	w1, [x0, #UARTLCR_H]
+	/* Clear any pending errors */
+	str	wzr, [x0, #UARTECR]
+	/* Enable tx, rx, and uart overall */
+	mov	w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
+	str	w1, [x0, #UARTCR]
+#endif
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #UARTFR]
+	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 1b
+	mov	w2, #0xD
+	str	w2, [x1, #UARTDR]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #UARTFR]
+	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 2b
+	str	w0, [x1, #UARTDR]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	w1, [x0, #UARTFR]
+	tbnz	w1, #PL011_UARTFR_RXFE_BIT, 1b
+	ldr	w1, [x0, #UARTDR]
+	mov	w0, w1
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	cbz	x0, flush_error
+
+1:
+	/* Loop until the transmit FIFO is empty */
+	ldr	w1, [x0, #UARTFR]
+	tbnz	w1, #PL011_UARTFR_BUSY_BIT, 1b
+
+	mov	w0, #0
+	ret
+flush_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_flush
diff --git a/drivers/arm/pl011/pl011_console.S b/drivers/arm/pl011/pl011_console.S
index 5ff1582..1789f15 100644
--- a/drivers/arm/pl011/pl011_console.S
+++ b/drivers/arm/pl011/pl011_console.S
@@ -1,178 +1,9 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <arch.h>
-#include <asm_macros.S>
-#include <pl011.h>
 
-	.globl	console_init
-	.globl	console_putc
-	.globl	console_core_init
-	.globl	console_core_putc
-	.globl	console_getc
-
-	/*
-	 *  The console base is in the data section and not in .bss
-	 *  even though it is zero-init. In particular, this allows
-	 *  the console functions to start using this variable before
-	 *  the runtime memory is initialized for images which do not
-	 *  need to copy the .data section from ROM to RAM.
-	 */
-.section .data.console_base ; .align 3
-	console_base: .quad 0x0
-
-	/* -----------------------------------------------
-	 * int console_init(unsigned long base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. It saves
-	 * the console base to the data section.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * out: return 1 on success.
-	 * Clobber list : x1 - x3
-	 * -----------------------------------------------
-	 */
-func console_init
-	adrp	x3, console_base
-	str	x0, [x3, :lo12:console_base]
-	b	console_core_init
-
-	/* -----------------------------------------------
-	 * int console_core_init(unsigned long base_addr,
-	 * unsigned int uart_clk, unsigned int baud_rate)
-	 * Function to initialize the console without a
-	 * C Runtime to print debug information. This
-	 * function will be accessed by console_init and
-	 * crash reporting.
-	 * In: x0 - console base address
-	 *     w1 - Uart clock in Hz
-	 *     w2 - Baud rate
-	 * Out: return 1 on success
-	 * Clobber list : x1, x2
-	 * -----------------------------------------------
-	 */
-func console_core_init
-	/* Check the input base address */
-	cbz	x0, init_fail
-	/* Check baud rate and uart clock for sanity */
-	cbz	w1, init_fail
-	cbz	w2, init_fail
-	/* Program the baudrate */
-	/* Divisor =  (Uart clock * 4) / baudrate */
-	lsl	w1, w1, #2
-	udiv	w2, w1, w2
-	/* IBRD = Divisor >> 6 */
-	lsr	w1, w2, #6
-	/* Write the IBRD */
-	str	w1, [x0, #UARTIBRD]
-	/* FBRD = Divisor & 0x3F */
-	and	w1, w2, #0x3f
-	/* Write the FBRD */
-	str	w1, [x0, #UARTFBRD]
-	mov	w1, #PL011_LINE_CONTROL
-	str	w1, [x0, #UARTLCR_H]
-	/* Clear any pending errors */
-	str	wzr, [x0, #UARTECR]
-	/* Enable tx, rx, and uart overall */
-	mov	w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
-	str	w1, [x0, #UARTCR]
-	mov	w0, #1
-init_fail:
-	ret
-
-	/* ---------------------------------------------
-	 * int console_putc(int c)
-	 * Function to output a character over the
-	 * console. It returns the character printed on
-	 * success or -1 on error.
-	 * In : x0 - character to be printed
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x1, x2
-	 * ---------------------------------------------
-	 */
-func console_putc
-	adrp	x2, console_base
-	ldr	x1, [x2, :lo12:console_base]
-	b	console_core_putc
-
-	/* --------------------------------------------------------
-	 * int console_core_putc(int c, unsigned int base_addr)
-	 * Function to output a character over the console. It
-	 * returns the character printed on success or -1 on error.
-	 * In : w0 - character to be printed
-	 *      x1 - console base address
-	 * Out : return -1 on error else return character.
-	 * Clobber list : x2
-	 * --------------------------------------------------------
-	 */
-func console_core_putc
-	/* Check the input parameter */
-	cbz	x1, putc_error
-	/* Prepend '\r' to '\n' */
-	cmp	w0, #0xA
-	b.ne	2f
-1:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #UARTFR]
-	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 1b
-	mov	w2, #0xD
-	str	w2, [x1, #UARTDR]
-2:
-	/* Check if the transmit FIFO is full */
-	ldr	w2, [x1, #UARTFR]
-	tbnz	w2, #PL011_UARTFR_TXFF_BIT, 2b
-	str	w0, [x1, #UARTDR]
-	ret
-putc_error:
-	mov	w0, #-1
-	ret
-
-	/* ---------------------------------------------
-	 * int console_getc(void)
-	 * Function to get a character from the console.
-	 * It returns the character grabbed on success
-	 * or -1 on error.
-	 * Clobber list : x0, x1
-	 * ---------------------------------------------
-	 */
-func console_getc
-	adrp	x0, console_base
-	ldr	x1, [x0, :lo12:console_base]
-	cbz	x1, getc_error
-1:
-	/* Check if the receive FIFO is empty */
-	ldr	w0, [x1, #UARTFR]
-	tbnz	w0, #PL011_UARTFR_RXFE_BIT, 1b
-	ldr	w0, [x1, #UARTDR]
-	ret
-getc_error:
-	mov	w0, #-1
-	ret
+#if !ERROR_DEPRECATED
+#include "./aarch64/pl011_console.S"
+#endif
diff --git a/drivers/arm/pl061/pl061_gpio.c b/drivers/arm/pl061/pl061_gpio.c
new file mode 100644
index 0000000..af980e5
--- /dev/null
+++ b/drivers/arm/pl061/pl061_gpio.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * ARM PL061 GPIO Driver.
+ * Reference to ARM DDI 0190B document.
+ *
+ */
+
+#include <assert.h>
+#include <cassert.h>
+#include <debug.h>
+#include <errno.h>
+#include <gpio.h>
+#include <mmio.h>
+#include <pl061_gpio.h>
+#include <utils.h>
+
+#if !PLAT_PL061_MAX_GPIOS
+# define PLAT_PL061_MAX_GPIOS	32
+#endif	/* PLAT_PL061_MAX_GPIOS */
+
+CASSERT(PLAT_PL061_MAX_GPIOS > 0, assert_plat_pl061_max_gpios);
+
+#define MAX_GPIO_DEVICES	((PLAT_PL061_MAX_GPIOS +		\
+				 (GPIOS_PER_PL061 - 1)) / GPIOS_PER_PL061)
+
+#define PL061_GPIO_DIR		0x400
+
+#define GPIOS_PER_PL061		8
+
+static int pl061_get_direction(int gpio);
+static void pl061_set_direction(int gpio, int direction);
+static int pl061_get_value(int gpio);
+static void pl061_set_value(int gpio, int value);
+
+static uintptr_t pl061_reg_base[MAX_GPIO_DEVICES];
+
+static const gpio_ops_t pl061_gpio_ops = {
+	.get_direction	= pl061_get_direction,
+	.set_direction	= pl061_set_direction,
+	.get_value	= pl061_get_value,
+	.set_value	= pl061_set_value,
+};
+
+static int pl061_get_direction(int gpio)
+{
+	uintptr_t base_addr;
+	unsigned int data, offset;
+
+	assert((gpio >= 0) && (gpio < PLAT_PL061_MAX_GPIOS));
+
+	base_addr = pl061_reg_base[gpio / GPIOS_PER_PL061];
+	offset = gpio % GPIOS_PER_PL061;
+	data = mmio_read_8(base_addr + PL061_GPIO_DIR);
+	if (data & BIT(offset))
+		return GPIO_DIR_OUT;
+	return GPIO_DIR_IN;
+}
+
+static void pl061_set_direction(int gpio, int direction)
+{
+	uintptr_t base_addr;
+	unsigned int data, offset;
+
+	assert((gpio >= 0) && (gpio < PLAT_PL061_MAX_GPIOS));
+
+	base_addr = pl061_reg_base[gpio / GPIOS_PER_PL061];
+	offset = gpio % GPIOS_PER_PL061;
+	if (direction == GPIO_DIR_OUT) {
+		data = mmio_read_8(base_addr + PL061_GPIO_DIR) | BIT(offset);
+		mmio_write_8(base_addr + PL061_GPIO_DIR, data);
+	} else {
+		data = mmio_read_8(base_addr + PL061_GPIO_DIR) & ~BIT(offset);
+		mmio_write_8(base_addr + PL061_GPIO_DIR, data);
+	}
+}
+
+/*
+ * The offset of GPIODATA register is 0.
+ * The values read from GPIODATA are determined for each bit, by the mask bit
+ * derived from the address used to access the data register, PADDR[9:2].
+ * Bits that are 1 in the address mask cause the corresponding bits in GPIODATA
+ * to be read, and bits that are 0 in the address mask cause the corresponding
+ * bits in GPIODATA to be read as 0, regardless of their value.
+ */
+static int pl061_get_value(int gpio)
+{
+	uintptr_t base_addr;
+	unsigned int offset;
+
+	assert((gpio >= 0) && (gpio < PLAT_PL061_MAX_GPIOS));
+
+	base_addr = pl061_reg_base[gpio / GPIOS_PER_PL061];
+	offset = gpio % GPIOS_PER_PL061;
+	if (mmio_read_8(base_addr + BIT(offset + 2)))
+		return GPIO_LEVEL_HIGH;
+	return GPIO_LEVEL_LOW;
+}
+
+/*
+ * In order to write GPIODATA, the corresponding bits in the mask, resulting
+ * from the address bus, PADDR[9:2], must be HIGH. Otherwise the bit values
+ * remain unchanged by the write.
+ */
+static void pl061_set_value(int gpio, int value)
+{
+	uintptr_t base_addr;
+	int offset;
+
+	assert((gpio >= 0) && (gpio < PLAT_PL061_MAX_GPIOS));
+
+	base_addr = pl061_reg_base[gpio / GPIOS_PER_PL061];
+	offset = gpio % GPIOS_PER_PL061;
+	if (value == GPIO_LEVEL_HIGH)
+		mmio_write_8(base_addr + BIT(offset + 2), BIT(offset));
+	else
+		mmio_write_8(base_addr + BIT(offset + 2), 0);
+}
+
+
+/*
+ * Register a PL061 GPIO controller instance with its base address and its
+ * device index, which selects the block of GPIO numbers it serves.
+ * This function is called after pl061_gpio_init().
+ */
+void pl061_gpio_register(uintptr_t base_addr, int gpio_dev)
+{
+	assert((gpio_dev >= 0) && (gpio_dev < MAX_GPIO_DEVICES));
+
+	pl061_reg_base[gpio_dev] = base_addr;
+}
+
+/*
+ * Initialize the PL061 GPIO driver by registering its ops with the GPIO framework.
+ */
+void pl061_gpio_init(void)
+{
+	gpio_init(&pl061_gpio_ops);
+}
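For reference, a minimal platform-side usage sketch of this driver (not part of the patch). It assumes the generic gpio_set_direction()/gpio_set_value() helpers declared in gpio.h; PL061_GPIO0_BASE and plat_gpio_setup() are hypothetical names:

#include <gpio.h>
#include <pl061_gpio.h>

#define PL061_GPIO0_BASE	0x1c010000	/* hypothetical base address */

void plat_gpio_setup(void)
{
	/* Register the common GPIO ops, then the controller instance. */
	pl061_gpio_init();
	pl061_gpio_register(PL061_GPIO0_BASE, 0);	/* device 0 -> GPIOs 0-7 */

	/* Drive pin 3 of the first controller high. */
	gpio_set_direction(3, GPIO_DIR_OUT);
	gpio_set_value(3, GPIO_LEVEL_HIGH);
}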
diff --git a/drivers/arm/smmu/smmu_v3.c b/drivers/arm/smmu/smmu_v3.c
new file mode 100644
index 0000000..cfe8c2a
--- /dev/null
+++ b/drivers/arm/smmu/smmu_v3.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <smmu_v3.h>
+
+/* Test for pending invalidate */
+#define INVAL_PENDING(base)	\
+	((smmuv3_read_s_init(base) & SMMU_S_INIT_INV_ALL_MASK) != 0)
+
+static inline uint32_t smmuv3_read_s_idr1(uintptr_t base)
+{
+	return mmio_read_32(base + SMMU_S_IDR1);
+}
+
+static inline uint32_t smmuv3_read_s_init(uintptr_t base)
+{
+	return mmio_read_32(base + SMMU_S_INIT);
+}
+
+static inline void smmuv3_write_s_init(uintptr_t base, uint32_t value)
+{
+	mmio_write_32(base + SMMU_S_INIT, value);
+}
+
+/*
+ * Initialize the SMMU by invalidating all secure caches and TLBs.
+ *
+ * Returns 0 on success, and -1 on failure.
+ */
+int smmuv3_init(uintptr_t smmu_base)
+{
+	uint32_t idr1_reg;
+
+	/*
+	 * Invalidation of secure caches and TLBs is required only if the SMMU
+	 * supports the secure state. If not, it is implementation defined how
+	 * accesses to the SMMU_S_INIT register behave.
+	 */
+	idr1_reg = smmuv3_read_s_idr1(smmu_base);
+	if (!((idr1_reg >> SMMU_S_IDR1_SECURE_IMPL_SHIFT) &
+			SMMU_S_IDR1_SECURE_IMPL_MASK)) {
+		return -1;
+	}
+
+	/* Initiate invalidation, and wait for it to finish */
+	smmuv3_write_s_init(smmu_base, SMMU_S_INIT_INV_ALL_MASK);
+	while (INVAL_PENDING(smmu_base))
+		;
+
+	return 0;
+}
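A usage sketch (not part of the patch): platform initialization code invalidates the secure caches and TLBs once, before any secure translations are relied upon. PLAT_SMMUV3_BASE and plat_smmu_setup() are hypothetical names:

#include <debug.h>
#include <smmu_v3.h>

#define PLAT_SMMUV3_BASE	0x2b400000	/* hypothetical */

void plat_smmu_setup(void)
{
	/* smmuv3_init() returns -1 if the SMMU does not implement the secure state. */
	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0)
		WARN("SMMUv3: secure state not implemented, skipping invalidation\n");
}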
diff --git a/drivers/arm/sp804/sp804_delay_timer.c b/drivers/arm/sp804/sp804_delay_timer.c
new file mode 100644
index 0000000..8f1c5c5
--- /dev/null
+++ b/drivers/arm/sp804/sp804_delay_timer.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <delay_timer.h>
+#include <mmio.h>
+
+uintptr_t sp804_base_addr;
+
+#define SP804_TIMER1_LOAD	(sp804_base_addr + 0x000)
+#define SP804_TIMER1_VALUE	(sp804_base_addr + 0x004)
+#define SP804_TIMER1_CONTROL	(sp804_base_addr + 0x008)
+#define SP804_TIMER1_BGLOAD	(sp804_base_addr + 0x018)
+
+#define TIMER_CTRL_ONESHOT	(1 << 0)
+#define TIMER_CTRL_32BIT	(1 << 1)
+#define TIMER_CTRL_DIV1		(0 << 2)
+#define TIMER_CTRL_DIV16	(1 << 2)
+#define TIMER_CTRL_DIV256	(2 << 2)
+#define TIMER_CTRL_IE		(1 << 5)
+#define TIMER_CTRL_PERIODIC	(1 << 6)
+#define TIMER_CTRL_ENABLE	(1 << 7)
+
+/********************************************************************
+ * Return the current value of the SP804 timer (used by the generic delay timer)
+ ********************************************************************/
+uint32_t sp804_get_timer_value(void)
+{
+	return mmio_read_32(SP804_TIMER1_VALUE);
+}
+
+/********************************************************************
+ * Initialize the 1st timer in the SP804 dual timer with a base
+ * address and the platform's timer ops
+ ********************************************************************/
+void sp804_timer_ops_init(uintptr_t base_addr, const timer_ops_t *ops)
+{
+	assert(base_addr != 0);
+	assert(ops != 0 && ops->get_timer_value == sp804_get_timer_value);
+
+	sp804_base_addr = base_addr;
+	timer_init(ops);
+
+	/* disable timer1 */
+	mmio_write_32(SP804_TIMER1_CONTROL, 0);
+	mmio_write_32(SP804_TIMER1_LOAD, UINT32_MAX);
+	mmio_write_32(SP804_TIMER1_VALUE, UINT32_MAX);
+
+	/* enable as a free running 32-bit counter */
+	mmio_write_32(SP804_TIMER1_CONTROL,
+			TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE);
+}
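A usage sketch (not part of the patch), assuming the timer_ops_t fields declared in delay_timer.h (get_timer_value, clk_mult, clk_div); the base address and the 35 MHz timer clock are hypothetical platform values:

#include <delay_timer.h>
#include <sp804_delay_timer.h>

#define PLAT_SP804_BASE		0x1c110000	/* hypothetical */

static const timer_ops_t plat_sp804_ops = {
	.get_timer_value	= sp804_get_timer_value,
	.clk_mult		= 1,
	.clk_div		= 35,	/* 35 ticks per microsecond @ 35 MHz */
};

void plat_delay_timer_init(void)
{
	sp804_timer_ops_init(PLAT_SP804_BASE, &plat_sp804_ops);

	/* The generic delay API (udelay/mdelay) is now usable. */
	mdelay(1);
}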
diff --git a/drivers/arm/sp805/sp805.c b/drivers/arm/sp805/sp805.c
new file mode 100644
index 0000000..98df7e2
--- /dev/null
+++ b/drivers/arm/sp805/sp805.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <sp805.h>
+#include <stdint.h>
+
+/* Inline register access functions */
+
+static inline void sp805_write_wdog_load(uintptr_t base, unsigned long value)
+{
+	mmio_write_32(base + SP805_WDOG_LOAD_OFF, value);
+}
+
+static inline void sp805_write_wdog_ctrl(uintptr_t base, unsigned long value)
+{
+	mmio_write_32(base + SP805_WDOG_CTR_OFF, value);
+}
+
+static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
+{
+	mmio_write_32(base + SP805_WDOG_LOCK_OFF, value);
+}
+
+
+/* Public API implementation */
+
+void sp805_start(uintptr_t base, unsigned long ticks)
+{
+	sp805_write_wdog_load(base, ticks);
+	sp805_write_wdog_ctrl(base, SP805_CTR_RESEN | SP805_CTR_INTEN);
+	/* Lock registers access */
+	sp805_write_wdog_lock(base, 0);
+}
+
+void sp805_stop(uintptr_t base)
+{
+	sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
+	sp805_write_wdog_ctrl(base, 0);
+}
+
+void sp805_refresh(uintptr_t base, unsigned long ticks)
+{
+	sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
+	sp805_write_wdog_load(base, ticks);
+	sp805_write_wdog_lock(base, 0);
+}
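A usage sketch (not part of the patch); the base address and tick count are hypothetical, and the tick period depends on the platform's watchdog clock:

#include <sp805.h>

#define PLAT_SP805_BASE		0x1c0f0000		/* hypothetical */
#define PLAT_WDOG_TICKS		(30U * 32768U)		/* ~30 s @ 32.768 kHz */

void plat_watchdog_start(void)
{
	/* Arm the watchdog; a reset is triggered if it is not refreshed in time. */
	sp805_start(PLAT_SP805_BASE, PLAT_WDOG_TICKS);
}

void plat_watchdog_kick(void)
{
	sp805_refresh(PLAT_SP805_BASE, PLAT_WDOG_TICKS);
}

void plat_watchdog_stop(void)
{
	sp805_stop(PLAT_SP805_BASE);
}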
diff --git a/drivers/arm/tzc/tzc400.c b/drivers/arm/tzc/tzc400.c
new file mode 100644
index 0000000..b817487
--- /dev/null
+++ b/drivers/arm/tzc/tzc400.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <stddef.h>
+#include <tzc400.h>
+#include "tzc_common_private.h"
+
+/*
+ * Macros which will be used by common core functions.
+ */
+#define TZC_400_REGION_BASE_LOW_0_OFFSET	0x100
+#define TZC_400_REGION_BASE_HIGH_0_OFFSET	0x104
+#define TZC_400_REGION_TOP_LOW_0_OFFSET		0x108
+#define TZC_400_REGION_TOP_HIGH_0_OFFSET	0x10c
+#define TZC_400_REGION_ATTR_0_OFFSET		0x110
+#define TZC_400_REGION_ID_ACCESS_0_OFFSET	0x114
+
+/*
+ * Implementation defined values used to validate inputs later.
+ * Filters : max of 4 ; 0 to 3
+ * Regions : max of 9 ; 0 to 8
+ * Address width : Values between 32 to 64
+ */
+typedef struct tzc400_instance {
+	uintptr_t base;
+	uint8_t addr_width;
+	uint8_t num_filters;
+	uint8_t num_regions;
+} tzc400_instance_t;
+
+tzc400_instance_t tzc400;
+
+static inline unsigned int _tzc400_read_build_config(uintptr_t base)
+{
+	return mmio_read_32(base + BUILD_CONFIG_OFF);
+}
+
+static inline unsigned int _tzc400_read_gate_keeper(uintptr_t base)
+{
+	return mmio_read_32(base + GATE_KEEPER_OFF);
+}
+
+static inline void _tzc400_write_gate_keeper(uintptr_t base, unsigned int val)
+{
+	mmio_write_32(base + GATE_KEEPER_OFF, val);
+}
+
+/*
+ * Get the open status information for all filter units.
+ */
+#define get_gate_keeper_os(base)	((_tzc400_read_gate_keeper(base) >>	\
+					GATE_KEEPER_OS_SHIFT) &		\
+					GATE_KEEPER_OS_MASK)
+
+
+/* Define common core functions used across different TZC peripherals. */
+DEFINE_TZC_COMMON_WRITE_ACTION(400, 400)
+DEFINE_TZC_COMMON_WRITE_REGION_BASE(400, 400)
+DEFINE_TZC_COMMON_WRITE_REGION_TOP(400, 400)
+DEFINE_TZC_COMMON_WRITE_REGION_ATTRIBUTES(400, 400)
+DEFINE_TZC_COMMON_WRITE_REGION_ID_ACCESS(400, 400)
+DEFINE_TZC_COMMON_CONFIGURE_REGION0(400)
+DEFINE_TZC_COMMON_CONFIGURE_REGION(400)
+
+static unsigned int _tzc400_get_gate_keeper(uintptr_t base,
+				unsigned int filter)
+{
+	unsigned int open_status;
+
+	open_status = get_gate_keeper_os(base);
+
+	return (open_status >> filter) & GATE_KEEPER_FILTER_MASK;
+}
+
+/* This function is not MP safe. */
+static void _tzc400_set_gate_keeper(uintptr_t base,
+				unsigned int filter,
+				int val)
+{
+	unsigned int open_status;
+
+	/* Upper half is current state. Lower half is requested state. */
+	open_status = get_gate_keeper_os(base);
+
+	if (val)
+		open_status |=  (1 << filter);
+	else
+		open_status &= ~(1 << filter);
+
+	_tzc400_write_gate_keeper(base, (open_status & GATE_KEEPER_OR_MASK) <<
+			      GATE_KEEPER_OR_SHIFT);
+
+	/* Wait here until we see the change reflected in the TZC status. */
+	while ((get_gate_keeper_os(base)) != open_status)
+		;
+}
+
+void tzc400_set_action(tzc_action_t action)
+{
+	assert(tzc400.base);
+	assert(action <= TZC_ACTION_ERR_INT);
+
+	/*
+	 * - Currently no handler is provided to trap an error via interrupt
+	 *   or exception.
+	 * - The interrupt action has not been tested.
+	 */
+	_tzc400_write_action(tzc400.base, action);
+}
+
+void tzc400_init(uintptr_t base)
+{
+#if DEBUG
+	unsigned int tzc400_id;
+#endif
+	unsigned int tzc400_build;
+
+	assert(base);
+	tzc400.base = base;
+
+#if DEBUG
+	tzc400_id = _tzc_read_peripheral_id(base);
+	if (tzc400_id != TZC_400_PERIPHERAL_ID) {
+		ERROR("TZC-400 : Wrong device ID (0x%x).\n", tzc400_id);
+		panic();
+	}
+#endif
+
+	/* Save values we will use later. */
+	tzc400_build = _tzc400_read_build_config(tzc400.base);
+	tzc400.num_filters = ((tzc400_build >> BUILD_CONFIG_NF_SHIFT) &
+			   BUILD_CONFIG_NF_MASK) + 1;
+	tzc400.addr_width  = ((tzc400_build >> BUILD_CONFIG_AW_SHIFT) &
+			   BUILD_CONFIG_AW_MASK) + 1;
+	tzc400.num_regions = ((tzc400_build >> BUILD_CONFIG_NR_SHIFT) &
+			   BUILD_CONFIG_NR_MASK) + 1;
+}
+
+/*
+ * `tzc400_configure_region0` is used to program region 0 into the TrustZone
+ * controller. Region 0 covers the whole address space that is not mapped
+ * to any other region, and is enabled on all filters; this cannot be
+ * changed. This function only changes the access permissions.
+ */
+void tzc400_configure_region0(tzc_region_attributes_t sec_attr,
+			   unsigned int ns_device_access)
+{
+	assert(tzc400.base);
+	assert(sec_attr <= TZC_REGION_S_RDWR);
+
+	_tzc400_configure_region0(tzc400.base, sec_attr, ns_device_access);
+}
+
+/*
+ * `tzc400_configure_region` is used to program regions into the TrustZone
+ * controller. A region can be associated with more than one filter. The
+ * associated filters are passed in as a bitmap (bit0 = filter0).
+ * NOTE:
+ * Region 0 is special; it is preferable to use tzc400_configure_region0
+ * for this region (see comment for that function).
+ */
+void tzc400_configure_region(unsigned int filters,
+			  int region,
+			  unsigned long long region_base,
+			  unsigned long long region_top,
+			  tzc_region_attributes_t sec_attr,
+			  unsigned int nsaid_permissions)
+{
+	assert(tzc400.base);
+
+	/* Do range checks on filters and regions. */
+	assert(((filters >> tzc400.num_filters) == 0) &&
+	       (region >= 0) && (region < tzc400.num_regions));
+
+	/*
+	 * Do an address range check based on the TZC configuration. A 64-bit
+	 * address is the maximum supported and the expected case.
+	 */
+	assert(((region_top <= _tzc_get_max_top_addr(tzc400.addr_width)) &&
+		(region_base < region_top)));
+
+	/* region_base and (region_top + 1) must be 4KB aligned */
+	assert(((region_base | (region_top + 1)) & (4096 - 1)) == 0);
+
+	assert(sec_attr <= TZC_REGION_S_RDWR);
+
+	_tzc400_configure_region(tzc400.base, filters, region, region_base,
+						region_top,
+						sec_attr, nsaid_permissions);
+}
+
+void tzc400_enable_filters(void)
+{
+	unsigned int state;
+	unsigned int filter;
+
+	assert(tzc400.base);
+
+	for (filter = 0; filter < tzc400.num_filters; filter++) {
+		state = _tzc400_get_gate_keeper(tzc400.base, filter);
+		if (state) {
+			/* The TZC filter is already configured. Changing the
+			 * programmer's view in an active system can cause
+			 * unpredictable behavior, so panic for now rather
+			 * than try to determine whether this is safe in this
+			 * instance. See:
+			 * http://infocenter.arm.com/help/index.jsp?\
+			 * topic=/com.arm.doc.ddi0504c/CJHHECBF.html */
+			ERROR("TZC-400 : Filter %d Gatekeeper already"
+				" enabled.\n", filter);
+			panic();
+		}
+		_tzc400_set_gate_keeper(tzc400.base, filter, 1);
+	}
+}
+
+void tzc400_disable_filters(void)
+{
+	unsigned int filter;
+
+	assert(tzc400.base);
+
+	/*
+	 * We don't do the same state check as above as the Gatekeepers are
+	 * disabled after reset.
+	 */
+	for (filter = 0; filter < tzc400.num_filters; filter++)
+		_tzc400_set_gate_keeper(tzc400.base, filter, 0);
+}
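A typical call sequence for this driver (not part of the patch). The controller base address, the DRAM region bounds and the filter/NSAID values are hypothetical platform choices; the region bounds must satisfy the 4KB alignment checked above:

#include <tzc400.h>

#define PLAT_TZC400_BASE	0x2a4a0000	/* hypothetical */

void plat_security_setup(void)
{
	tzc400_init(PLAT_TZC400_BASE);

	/* Program the regions with the filters disabled. */
	tzc400_disable_filters();

	/* Region 0: no non-secure access by default. */
	tzc400_configure_region0(TZC_REGION_S_NONE, 0);

	/* Region 1 on filter 0: secure-only DRAM carve-out. */
	tzc400_configure_region(0x1, 1,
				0xff000000ULL, 0xffffffffULL,
				TZC_REGION_S_RDWR, 0);

	tzc400_set_action(TZC_ACTION_ERR);
	tzc400_enable_filters();
}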
diff --git a/drivers/arm/tzc/tzc_common_private.h b/drivers/arm/tzc/tzc_common_private.h
new file mode 100644
index 0000000..f092cf1
--- /dev/null
+++ b/drivers/arm/tzc/tzc_common_private.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TZC_COMMON_PRIVATE_H__
+#define __TZC_COMMON_PRIVATE_H__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <tzc_common.h>
+
+#define DEFINE_TZC_COMMON_WRITE_ACTION(fn_name, macro_name)		\
+	static inline void _tzc##fn_name##_write_action(		\
+					uintptr_t base,			\
+					tzc_action_t action)		\
+	{								\
+		mmio_write_32(base + TZC_##macro_name##_ACTION_OFF,	\
+			action);					\
+	}
+
+#define DEFINE_TZC_COMMON_WRITE_REGION_BASE(fn_name, macro_name)	\
+	static inline void _tzc##fn_name##_write_region_base(		\
+					uintptr_t base,			\
+					int region_no,			\
+					unsigned long long region_base)	\
+	{								\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET(				\
+				TZC_##macro_name##_REGION_SIZE,		\
+				region_no) +				\
+			TZC_##macro_name##_REGION_BASE_LOW_0_OFFSET,	\
+			(uint32_t)region_base);				\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET(				\
+				TZC_##macro_name##_REGION_SIZE,		\
+				region_no) +				\
+			TZC_##macro_name##_REGION_BASE_HIGH_0_OFFSET,	\
+			(uint32_t)(region_base >> 32));			\
+	}
+
+#define DEFINE_TZC_COMMON_WRITE_REGION_TOP(fn_name, macro_name)		\
+	static inline void _tzc##fn_name##_write_region_top(		\
+					uintptr_t base,			\
+					int region_no,			\
+					unsigned long long region_top)	\
+	{								\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET				\
+				(TZC_##macro_name##_REGION_SIZE,	\
+				region_no) +				\
+			TZC_##macro_name##_REGION_TOP_LOW_0_OFFSET,	\
+			(uint32_t)region_top);			\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET(				\
+				TZC_##macro_name##_REGION_SIZE,		\
+				region_no) +				\
+			TZC_##macro_name##_REGION_TOP_HIGH_0_OFFSET,	\
+			(uint32_t)(region_top >> 32));		\
+	}
+
+#define DEFINE_TZC_COMMON_WRITE_REGION_ATTRIBUTES(fn_name, macro_name)	\
+	static inline void _tzc##fn_name##_write_region_attributes(	\
+						uintptr_t base,		\
+						int region_no,		\
+						unsigned int attr)	\
+	{								\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET(				\
+				TZC_##macro_name##_REGION_SIZE,		\
+				region_no) +				\
+			TZC_##macro_name##_REGION_ATTR_0_OFFSET,	\
+			attr);						\
+	}
+
+#define DEFINE_TZC_COMMON_WRITE_REGION_ID_ACCESS(fn_name, macro_name)	\
+	static inline void _tzc##fn_name##_write_region_id_access(	\
+						uintptr_t base,		\
+						int region_no,		\
+						unsigned int val)	\
+	{								\
+		mmio_write_32(base +					\
+			TZC_REGION_OFFSET(				\
+				TZC_##macro_name##_REGION_SIZE,		\
+				region_no) +				\
+			TZC_##macro_name##_REGION_ID_ACCESS_0_OFFSET,	\
+			val);						\
+	}
+
+/*
+ * Used to program the region 0 ATTRIBUTES and ACCESS registers.
+ */
+#define DEFINE_TZC_COMMON_CONFIGURE_REGION0(fn_name)			\
+	void _tzc##fn_name##_configure_region0(uintptr_t base,		\
+			   tzc_region_attributes_t sec_attr,		\
+			   unsigned int ns_device_access)		\
+	{								\
+		assert(base);						\
+		VERBOSE("TrustZone : Configuring region 0 "		\
+			"(TZC Interface Base=%p sec_attr=0x%x,"		\
+			" ns_devs=0x%x)\n", (void *)base,		\
+			sec_attr, ns_device_access);			\
+									\
+		/* Set secure attributes on region 0 */			\
+		_tzc##fn_name##_write_region_attributes(base, 0,	\
+			sec_attr << TZC_REGION_ATTR_SEC_SHIFT);		\
+									\
+		/***************************************************/	\
+		/* Specify which non-secure devices have permission*/	\
+		/* to access region 0.				   */	\
+		/***************************************************/	\
+		_tzc##fn_name##_write_region_id_access(base,		\
+						0,			\
+						ns_device_access);	\
+	}
+
+/*
+ * Used to program a region (1 to 8) in the TrustZone controller.
+ * NOTE:
+ * Region 0 is special; it is preferable to use
+ * ##fn_name##_configure_region0 for this region (see comment for
+ * that function).
+ */
+#define DEFINE_TZC_COMMON_CONFIGURE_REGION(fn_name)			\
+	void _tzc##fn_name##_configure_region(uintptr_t base,		\
+				unsigned int filters,			\
+				int region_no,				\
+				unsigned long long region_base,		\
+				unsigned long long region_top,		\
+				tzc_region_attributes_t sec_attr,	\
+				unsigned int nsaid_permissions)	\
+	{								\
+		assert(base);						\
+		VERBOSE("TrustZone : Configuring region "		\
+			"(TZC Interface Base: %p, region_no = %d)"	\
+			"...\n", (void *)base, region_no);		\
+		VERBOSE("TrustZone : ... base = %llx, top = %llx,"	\
+			"\n", region_base, region_top);\
+		VERBOSE("TrustZone : ... sec_attr = 0x%x,"		\
+			" ns_devs = 0x%x)\n",				\
+			sec_attr, nsaid_permissions);			\
+									\
+		/***************************************************/	\
+		/* Inputs look ok, start programming registers.    */	\
+		/* All the address registers are 32 bits wide and  */	\
+		/* have a LOW and HIGH				   */	\
+		/* component used to construct an address up to a  */	\
+		/* 64bit.					   */	\
+		/***************************************************/	\
+		_tzc##fn_name##_write_region_base(base,			\
+					region_no, region_base);	\
+		_tzc##fn_name##_write_region_top(base,			\
+					region_no, region_top);		\
+									\
+		/* Enable filter to the region and set secure attributes */\
+		_tzc##fn_name##_write_region_attributes(base,		\
+				region_no,				\
+				(sec_attr << TZC_REGION_ATTR_SEC_SHIFT) |\
+				(filters << TZC_REGION_ATTR_F_EN_SHIFT));\
+									\
+		/***************************************************/	\
+		/* Specify which non-secure devices have permission*/	\
+		/* to access this region.			   */	\
+		/***************************************************/	\
+		_tzc##fn_name##_write_region_id_access(base,		\
+						region_no,		\
+						nsaid_permissions);	\
+	}
+
+#if ENABLE_ASSERTIONS
+
+static inline unsigned int _tzc_read_peripheral_id(uintptr_t base)
+{
+	unsigned int id;
+
+	id = mmio_read_32(base + PID0_OFF);
+	/* Masks DESC part in PID1 */
+	id |= ((mmio_read_32(base + PID1_OFF) & 0xF) << 8);
+
+	return id;
+}
+
+#ifdef AARCH32
+static inline unsigned long long _tzc_get_max_top_addr(int addr_width)
+{
+	/*
+	 * Assume at least 32 bit wide address and initialize the max.
+	 * This function doesn't use 64-bit integer arithmetic to avoid
+	 * having to implement additional compiler library functions.
+	 */
+	unsigned long long addr_mask = 0xFFFFFFFF;
+	uint32_t *addr_ptr = (uint32_t *)&addr_mask;
+
+	assert(addr_width >= 32);
+
+	/* This logic works only on little-endian platforms */
+	assert((read_sctlr() & SCTLR_EE_BIT) == 0);
+
+	/*
+	 * If required address width is greater than 32, populate the higher
+	 * 32 bits of the 64 bit field with the max address.
+	 */
+	if (addr_width > 32)
+		*(addr_ptr + 1) = ((1 << (addr_width - 32)) - 1);
+
+	return addr_mask;
+}
+#else
+#define _tzc_get_max_top_addr(addr_width)\
+	(UINT64_MAX >> (64 - (addr_width)))
+#endif /* AARCH32 */
+
+#endif /* ENABLE_ASSERTIONS */
+
+#endif /* __TZC_COMMON_PRIVATE_H__ */
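For clarity (not part of the patch), this is roughly what DEFINE_TZC_COMMON_WRITE_ACTION(400, 400) expands to when instantiated in tzc400.c: fn_name is spliced into the helper's name, and macro_name selects the peripheral-specific register offset, which is expected to come from that peripheral's header.

/* Expansion sketch of DEFINE_TZC_COMMON_WRITE_ACTION(400, 400) */
static inline void _tzc400_write_action(uintptr_t base, tzc_action_t action)
{
	mmio_write_32(base + TZC_400_ACTION_OFF, action);
}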
diff --git a/drivers/arm/tzc/tzc_dmc500.c b/drivers/arm/tzc/tzc_dmc500.c
new file mode 100644
index 0000000..7350b2c
--- /dev/null
+++ b/drivers/arm/tzc/tzc_dmc500.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <tzc_dmc500.h>
+#include "tzc_common.h"
+#include "tzc_common_private.h"
+
+/*
+ * Macros which will be used by common core functions.
+ */
+#define TZC_DMC500_REGION_BASE_LOW_0_OFFSET		0x054
+#define TZC_DMC500_REGION_BASE_HIGH_0_OFFSET		0x058
+#define TZC_DMC500_REGION_TOP_LOW_0_OFFSET		0x05C
+#define TZC_DMC500_REGION_TOP_HIGH_0_OFFSET		0x060
+#define TZC_DMC500_REGION_ATTR_0_OFFSET			0x064
+#define TZC_DMC500_REGION_ID_ACCESS_0_OFFSET		0x068
+
+#define TZC_DMC500_ACTION_OFF				0x50
+
+/* Pointer to the tzc_dmc500_driver_data structure populated by the platform */
+static const tzc_dmc500_driver_data_t *g_driver_data;
+
+#define verify_region_attr(region, attr)	\
+		((g_conf_regions[(region)].sec_attr ==			\
+			((attr) >> TZC_REGION_ATTR_SEC_SHIFT))		\
+		&& ((attr) & (0x1 << TZC_REGION_ATTR_F_EN_SHIFT)))
+
+/*
+ * Structure for configured regions attributes in DMC500.
+ */
+typedef struct tzc_dmc500_regions {
+	tzc_region_attributes_t sec_attr;
+	int is_enabled;
+} tzc_dmc500_regions_t;
+
+/*
+ * Array storing the attributes of the configured regions. This array
+ * will be used by the `tzc_dmc500_verify_complete` to verify the flush
+ * completion.
+ */
+static tzc_dmc500_regions_t g_conf_regions[MAX_REGION_VAL + 1];
+
+/* Helper Macros for making the code readable */
+#define DMC_INST_BASE_ADDR(instance) (g_driver_data->dmc_base[instance])
+#define DMC_INST_SI_BASE(instance, interface) \
+		(DMC_INST_BASE_ADDR(instance) + IFACE_OFFSET(interface))
+
+DEFINE_TZC_COMMON_WRITE_ACTION(_dmc500, DMC500)
+DEFINE_TZC_COMMON_WRITE_REGION_BASE(_dmc500, DMC500)
+DEFINE_TZC_COMMON_WRITE_REGION_TOP(_dmc500, DMC500)
+DEFINE_TZC_COMMON_WRITE_REGION_ATTRIBUTES(_dmc500, DMC500)
+DEFINE_TZC_COMMON_WRITE_REGION_ID_ACCESS(_dmc500, DMC500)
+
+DEFINE_TZC_COMMON_CONFIGURE_REGION0(_dmc500)
+DEFINE_TZC_COMMON_CONFIGURE_REGION(_dmc500)
+
+static inline unsigned int _tzc_dmc500_read_region_attr_0(
+					uintptr_t dmc_si_base,
+					int region_no)
+{
+	return mmio_read_32(dmc_si_base +
+			TZC_REGION_OFFSET(TZC_DMC500_REGION_SIZE, region_no) +
+			TZC_DMC500_REGION_ATTR_0_OFFSET);
+}
+
+static inline void _tzc_dmc500_write_flush_control(uintptr_t dmc_si_base)
+{
+	mmio_write_32(dmc_si_base + SI_FLUSH_CTRL_OFFSET, 1);
+}
+
+/*
+ * Sets the Flush controls for all the DMC Instances and System Interfaces.
+ * This initiates the flush of configuration settings from the shadow
+ * registers to the actual configuration registers. The caller should poll the
+ * changed registers to confirm that the update has completed.
+ */
+void tzc_dmc500_config_complete(void)
+{
+	int dmc_inst, sys_if;
+
+	assert(g_driver_data);
+
+	for (dmc_inst = 0; dmc_inst < g_driver_data->dmc_count; dmc_inst++) {
+		assert(DMC_INST_BASE_ADDR(dmc_inst));
+		for (sys_if = 0; sys_if < MAX_SYS_IF_COUNT; sys_if++)
+			_tzc_dmc500_write_flush_control(
+					DMC_INST_SI_BASE(dmc_inst, sys_if));
+	}
+}
+
+/*
+ * This function reads back the secure attributes from the configuration
+ * register for each DMC Instance and System Interface and compares it with
+ * the configured value. The successful verification of the region attributes
+ * confirms that the flush operation has completed.
+ * If the verification fails, the caller is expected to invoke this API again
+ * until it succeeds.
+ * Returns 0 on success and 1 on failure.
+ */
+int tzc_dmc500_verify_complete(void)
+{
+	int dmc_inst, sys_if, region_no;
+	unsigned int attr;
+
+	assert(g_driver_data);
+	/* Region 0 must be configured */
+	assert(g_conf_regions[0].is_enabled);
+
+	/* Iterate over all configured regions */
+	for (region_no = 0; region_no <= MAX_REGION_VAL; region_no++) {
+		if (!g_conf_regions[region_no].is_enabled)
+			continue;
+		for (dmc_inst = 0; dmc_inst < g_driver_data->dmc_count;
+								dmc_inst++) {
+			assert(DMC_INST_BASE_ADDR(dmc_inst));
+			for (sys_if = 0; sys_if < MAX_SYS_IF_COUNT;
+							sys_if++) {
+				attr = _tzc_dmc500_read_region_attr_0(
+					DMC_INST_SI_BASE(dmc_inst, sys_if),
+					region_no);
+				VERBOSE("Verifying DMC500 region:%d"
+					" dmc_inst:%d sys_if:%d attr:%x\n",
+					region_no, dmc_inst, sys_if, attr);
+				if (!verify_region_attr(region_no, attr))
+					return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * `tzc_dmc500_configure_region0` is used to program region 0 in both the
+ * system interfaces of all the DMC-500 instances. Region 0 covers the whole
+ * address space that is not mapped to any other region for a system interface,
+ * and is always enabled; this cannot be changed. This function only changes
+ * the access permissions.
+ */
+void tzc_dmc500_configure_region0(tzc_region_attributes_t sec_attr,
+					unsigned int nsaid_permissions)
+{
+	int dmc_inst, sys_if;
+
+	/* Assert if DMC-500 is not initialized */
+	assert(g_driver_data);
+
+	/* Configure region_0 in all DMC instances */
+	for (dmc_inst = 0; dmc_inst < g_driver_data->dmc_count; dmc_inst++) {
+		assert(DMC_INST_BASE_ADDR(dmc_inst));
+		for (sys_if = 0; sys_if < MAX_SYS_IF_COUNT; sys_if++)
+			_tzc_dmc500_configure_region0(
+					DMC_INST_SI_BASE(dmc_inst, sys_if),
+					sec_attr, nsaid_permissions);
+	}
+
+	g_conf_regions[0].sec_attr = sec_attr;
+	g_conf_regions[0].is_enabled = 1;
+}
+
+/*
+ * `tzc_dmc500_configure_region` is used to program a region into all system
+ * interfaces of all the DMC instances.
+ * NOTE:
+ * Region 0 is special; it is preferable to use tzc_dmc500_configure_region0
+ * for this region (see comment for that function).
+ */
+void tzc_dmc500_configure_region(int region_no,
+			unsigned long long region_base,
+			unsigned long long region_top,
+			tzc_region_attributes_t sec_attr,
+			unsigned int nsaid_permissions)
+{
+	int dmc_inst, sys_if;
+
+	assert(g_driver_data);
+	/* Do range checks on regions. */
+	assert(region_no >= 0 && region_no <= MAX_REGION_VAL);
+
+	/*
+	 * Do an address range check based on the DMC-TZ configuration. A 43-bit
+	 * address is the maximum supported and the expected case.
+	 */
+	assert(((region_top <= _tzc_get_max_top_addr(43)) &&
+		(region_base < region_top)));
+
+	/* region_base and (region_top + 1) must be 4KB aligned */
+	assert(((region_base | (region_top + 1)) & (4096 - 1)) == 0);
+
+	for (dmc_inst = 0; dmc_inst < g_driver_data->dmc_count; dmc_inst++) {
+		assert(DMC_INST_BASE_ADDR(dmc_inst));
+		for (sys_if = 0; sys_if < MAX_SYS_IF_COUNT; sys_if++)
+			_tzc_dmc500_configure_region(
+					DMC_INST_SI_BASE(dmc_inst, sys_if),
+					TZC_DMC500_REGION_ATTR_F_EN_MASK,
+					region_no, region_base, region_top,
+					sec_attr, nsaid_permissions);
+	}
+
+	g_conf_regions[region_no].sec_attr = sec_attr;
+	g_conf_regions[region_no].is_enabled = 1;
+}
+
+/* Sets the action value for all the DMC instances */
+void tzc_dmc500_set_action(tzc_action_t action)
+{
+	int dmc_inst;
+
+	assert(g_driver_data);
+
+	for (dmc_inst = 0; dmc_inst < g_driver_data->dmc_count; dmc_inst++) {
+		assert(DMC_INST_BASE_ADDR(dmc_inst));
+		/*
+		 * - Currently no handler is provided to trap an error via
+		 *   interrupt or exception.
+		 * - The interrupt action has not been tested.
+		 */
+		_tzc_dmc500_write_action(DMC_INST_BASE_ADDR(dmc_inst), action);
+	}
+}
+
+/*
+ * A DMC-500 instance must be present at each base address provided by the
+ * platform. The platform is also expected to provide at least one DMC-500
+ * instance.
+ */
+static void validate_plat_driver_data(
+			const tzc_dmc500_driver_data_t *plat_driver_data)
+{
+#if ENABLE_ASSERTIONS
+	int i;
+	unsigned int dmc_id;
+	uintptr_t dmc_base;
+
+	assert(plat_driver_data);
+	assert(plat_driver_data->dmc_count > 0 &&
+		(plat_driver_data->dmc_count <= MAX_DMC_COUNT));
+
+	for (i = 0; i < plat_driver_data->dmc_count; i++) {
+		dmc_base = plat_driver_data->dmc_base[i];
+		assert(dmc_base);
+
+		dmc_id = _tzc_read_peripheral_id(dmc_base);
+		assert(dmc_id == DMC500_PERIPHERAL_ID);
+	}
+#endif /* ENABLE_ASSERTIONS */
+}
+
+
+/*
+ * Initializes the base address and count of DMC instances.
+ *
+ * Note : Only the pointer to plat_driver_data is saved, so it is the caller's
+ * responsibility to keep it valid for as long as the driver is in use.
+ */
+void tzc_dmc500_driver_init(const tzc_dmc500_driver_data_t *plat_driver_data)
+{
+	/* Check valid pointer is passed */
+	assert(plat_driver_data);
+
+	/*
+	 * NOTE: This driver expects the DMC-500 controller to already be in
+	 * the READY state. Hence, it uses the reconfiguration method for
+	 * programming TrustZone regions.
+	 */
+	/* Validates the information passed by platform */
+	validate_plat_driver_data(plat_driver_data);
+	g_driver_data = plat_driver_data;
+}
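A usage sketch (not part of the patch). The DMC base addresses and region bounds are hypothetical platform values; note that tzc_dmc500_config_complete() only starts the flush, so the caller polls tzc_dmc500_verify_complete() as described above:

#include <tzc_dmc500.h>

static const tzc_dmc500_driver_data_t plat_dmc_data = {
	.dmc_base = { 0x2b820000, 0x2b830000 },	/* hypothetical */
	.dmc_count = 2,
};

void plat_dmc_tzc_setup(void)
{
	tzc_dmc500_driver_init(&plat_dmc_data);

	tzc_dmc500_configure_region0(TZC_REGION_S_NONE, 0);
	tzc_dmc500_configure_region(1, 0xff000000ULL, 0xffffffffULL,
				    TZC_REGION_S_RDWR, 0);
	tzc_dmc500_set_action(TZC_ACTION_ERR);

	/* Flush the shadow registers and wait for the update to land. */
	tzc_dmc500_config_complete();
	while (tzc_dmc500_verify_complete() != 0)
		;
}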
diff --git a/drivers/arm/tzc400/tzc400.c b/drivers/arm/tzc400/tzc400.c
index df52c9c..ff2ebc7 100644
--- a/drivers/arm/tzc400/tzc400.c
+++ b/drivers/arm/tzc400/tzc400.c
@@ -1,307 +1,11 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <assert.h>
-#include <debug.h>
-#include <mmio.h>
-#include <stddef.h>
-#include <tzc400.h>
-
-/*
- * Implementation defined values used to validate inputs later.
- * Filters : max of 4 ; 0 to 3
- * Regions : max of 9 ; 0 to 8
- * Address width : Values between 32 to 64
- */
-typedef struct tzc_instance {
-	uint64_t base;
-	uint8_t addr_width;
-	uint8_t num_filters;
-	uint8_t num_regions;
-} tzc_instance_t;
-
-tzc_instance_t tzc;
-
-
-static inline uint32_t tzc_read_build_config(uint64_t base)
-{
-	return mmio_read_32(base + BUILD_CONFIG_OFF);
-}
-
-static inline uint32_t tzc_read_gate_keeper(uint64_t base)
-{
-	return mmio_read_32(base + GATE_KEEPER_OFF);
-}
-
-static inline void tzc_write_gate_keeper(uint64_t base, uint32_t val)
-{
-	mmio_write_32(base + GATE_KEEPER_OFF, val);
-}
-
-static inline void tzc_write_action(uint64_t base, tzc_action_t action)
-{
-	mmio_write_32(base + ACTION_OFF, action);
-}
-
-static inline void tzc_write_region_base_low(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_BASE_LOW_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static inline void tzc_write_region_base_high(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_BASE_HIGH_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static inline void tzc_write_region_top_low(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_TOP_LOW_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static inline void tzc_write_region_top_high(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_TOP_HIGH_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static inline void tzc_write_region_attributes(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_ATTRIBUTES_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static inline void tzc_write_region_id_access(uint64_t base,
-					uint32_t region,
-					uint32_t val)
-{
-	mmio_write_32(base + REGION_ID_ACCESS_OFF +
-		REGION_NUM_OFF(region), val);
-}
-
-static uint32_t tzc_read_component_id(uint64_t base)
-{
-	uint32_t id;
-
-	id = mmio_read_8(base + CID0_OFF);
-	id |= (mmio_read_8(base + CID1_OFF) << 8);
-	id |= (mmio_read_8(base + CID2_OFF) << 16);
-	id |= (mmio_read_8(base + CID3_OFF) << 24);
-
-	return id;
-}
-
-static uint32_t tzc_get_gate_keeper(uint64_t base, uint8_t filter)
-{
-	uint32_t tmp;
-
-	tmp = (tzc_read_gate_keeper(base) >> GATE_KEEPER_OS_SHIFT) &
-		GATE_KEEPER_OS_MASK;
-
-	return (tmp >> filter) & GATE_KEEPER_FILTER_MASK;
-}
-
-/* This function is not MP safe. */
-static void tzc_set_gate_keeper(uint64_t base, uint8_t filter, uint32_t val)
-{
-	uint32_t tmp;
-
-	/* Upper half is current state. Lower half is requested state. */
-	tmp = (tzc_read_gate_keeper(base) >> GATE_KEEPER_OS_SHIFT) &
-		GATE_KEEPER_OS_MASK;
-
-	if (val)
-		tmp |=  (1 << filter);
-	else
-		tmp &= ~(1 << filter);
-
-	tzc_write_gate_keeper(base, (tmp & GATE_KEEPER_OR_MASK) <<
-			      GATE_KEEPER_OR_SHIFT);
-
-	/* Wait here until we see the change reflected in the TZC status. */
-	while (((tzc_read_gate_keeper(base) >> GATE_KEEPER_OS_SHIFT) &
-		GATE_KEEPER_OS_MASK) != tmp)
-	  ;
-}
-
-
-void tzc_init(uint64_t base)
-{
-	uint32_t tzc_id, tzc_build;
-
-	assert(base);
-	tzc.base = base;
-
-	/*
-	 * We expect to see a tzc400. Check component ID. The TZC-400 TRM shows
-	 * component ID is expected to be "0xB105F00D".
-	 */
-	tzc_id = tzc_read_component_id(tzc.base);
-	if (tzc_id != TZC400_COMPONENT_ID) {
-		ERROR("TZC : Wrong device ID (0x%x).\n", tzc_id);
-		panic();
-	}
-
-	/* Save values we will use later. */
-	tzc_build = tzc_read_build_config(tzc.base);
-	tzc.num_filters = ((tzc_build >> BUILD_CONFIG_NF_SHIFT) &
-			   BUILD_CONFIG_NF_MASK) + 1;
-	tzc.addr_width  = ((tzc_build >> BUILD_CONFIG_AW_SHIFT) &
-			   BUILD_CONFIG_AW_MASK) + 1;
-	tzc.num_regions = ((tzc_build >> BUILD_CONFIG_NR_SHIFT) &
-			   BUILD_CONFIG_NR_MASK) + 1;
-}
-
-
-/*
- * `tzc_configure_region` is used to program regions into the TrustZone
- * controller. A region can be associated with more than one filter. The
- * associated filters are passed in as a bitmap (bit0 = filter0).
- * NOTE:
- * The region 0 covers the whole address space and is enabled on all filters,
- * this cannot be changed. It is, however, possible to change some region 0
- * permissions.
- */
-void tzc_configure_region(uint32_t filters,
-			  uint8_t  region,
-			  uint64_t region_base,
-			  uint64_t region_top,
-			  tzc_region_attributes_t sec_attr,
-			  uint32_t ns_device_access)
-{
-	assert(tzc.base);
-
-	/* Do range checks on filters and regions. */
-	assert(((filters >> tzc.num_filters) == 0) &&
-	       (region < tzc.num_regions));
-
-	/*
-	 * Do address range check based on TZC configuration. A 64bit address is
-	 * the max and expected case.
-	 */
-	assert(((region_top <= (UINT64_MAX >> (64 - tzc.addr_width))) &&
-		(region_base < region_top)));
-
-	/* region_base and (region_top + 1) must be 4KB aligned */
-	assert(((region_base | (region_top + 1)) & (4096 - 1)) == 0);
-
-	assert(sec_attr <= TZC_REGION_S_RDWR);
-
-	/*
-	 * Inputs look ok, start programming registers.
-	 * All the address registers are 32 bits wide and have a LOW and HIGH
-	 * component used to construct a up to a 64bit address.
-	 */
-	tzc_write_region_base_low(tzc.base, region,
-				(uint32_t)(region_base));
-	tzc_write_region_base_high(tzc.base, region,
-				(uint32_t)(region_base >> 32));
-
-	tzc_write_region_top_low(tzc.base, region,
-				(uint32_t)(region_top));
-	tzc_write_region_top_high(tzc.base, region,
-				(uint32_t)(region_top >> 32));
-
-	/* Assign the region to a filter and set secure attributes */
-	tzc_write_region_attributes(tzc.base, region,
-		(sec_attr << REG_ATTR_SEC_SHIFT) | filters);
-
-	/*
-	 * Specify which non-secure devices have permission to access this
-	 * region.
-	 */
-	tzc_write_region_id_access(tzc.base, region, ns_device_access);
-}
-
-
-void tzc_set_action(tzc_action_t action)
-{
-	assert(tzc.base);
-
-	/*
-	 * - Currently no handler is provided to trap an error via interrupt
-	 *   or exception.
-	 * - The interrupt action has not been tested.
-	 */
-	tzc_write_action(tzc.base, action);
-}
-
-
-void tzc_enable_filters(void)
-{
-	uint32_t state;
-	uint32_t filter;
-
-	assert(tzc.base);
-
-	for (filter = 0; filter < tzc.num_filters; filter++) {
-		state = tzc_get_gate_keeper(tzc.base, filter);
-		if (state) {
-			/* The TZC filter is already configured. Changing the
-			 * programmer's view in an active system can cause
-			 * unpredictable behavior therefore panic for now rather
-			 * than try to determine whether this is safe in this
-			 * instance. See:
-			 * http://infocenter.arm.com/help/index.jsp?\
-			 * topic=/com.arm.doc.ddi0504c/CJHHECBF.html */
-			ERROR("TZC : Filter %d Gatekeeper already enabled.\n",
-				filter);
-			panic();
-		}
-		tzc_set_gate_keeper(tzc.base, filter, 1);
-	}
-}
-
-
-void tzc_disable_filters(void)
-{
-	uint32_t filter;
-
-	assert(tzc.base);
-
-	/*
-	 * We don't do the same state check as above as the Gatekeepers are
-	 * disabled after reset.
-	 */
-	for (filter = 0; filter < tzc.num_filters; filter++)
-		tzc_set_gate_keeper(tzc.base, filter, 0);
-}
+#if ERROR_DEPRECATED
+#error "Using deprecated TZC-400 source file"
+#else
+#include "../tzc/tzc400.c"
+#endif /* ERROR_DEPRECATED */
diff --git a/drivers/auth/auth_mod.c b/drivers/auth/auth_mod.c
new file mode 100644
index 0000000..1cea60b
--- /dev/null
+++ b/drivers/auth/auth_mod.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <auth_common.h>
+#include <auth_mod.h>
+#include <cot_def.h>
+#include <crypto_mod.h>
+#include <debug.h>
+#include <img_parser_mod.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stdint.h>
+#include <string.h>
+
+/* ASN.1 tags */
+#define ASN1_INTEGER                 0x02
+
+#define return_if_error(rc) \
+	do { \
+		if (rc != 0) { \
+			return rc; \
+		} \
+	} while (0)
+
+#pragma weak plat_set_nv_ctr2
+
+/* Pointer to CoT */
+extern const auth_img_desc_t *const cot_desc_ptr;
+extern unsigned int auth_img_flags[];
+
+static int cmp_auth_param_type_desc(const auth_param_type_desc_t *a,
+		const auth_param_type_desc_t *b)
+{
+	if ((a->type == b->type) && (a->cookie == b->cookie)) {
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * This function obtains the requested authentication parameter data from the
+ * information extracted from the parent image after its authentication.
+ */
+static int auth_get_param(const auth_param_type_desc_t *param_type_desc,
+			  const auth_img_desc_t *img_desc,
+			  void **param, unsigned int *len)
+{
+	int i;
+
+	for (i = 0 ; i < COT_MAX_VERIFIED_PARAMS ; i++) {
+		if (0 == cmp_auth_param_type_desc(param_type_desc,
+				img_desc->authenticated_data[i].type_desc)) {
+			*param = img_desc->authenticated_data[i].data.ptr;
+			*len = img_desc->authenticated_data[i].data.len;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Authenticate an image by matching the data hash
+ *
+ * This function implements 'AUTH_METHOD_HASH'. To authenticate an image using
+ * this method, the image must contain:
+ *
+ *   - The data to calculate the hash from
+ *
+ * The parent image must contain:
+ *
+ *   - The hash to be matched with (including hash algorithm)
+ *
+ * For a successful authentication, both hashes must match. The function calls
+ * the crypto-module to check this matching.
+ *
+ * Parameters:
+ *   param: parameters to perform the hash authentication
+ *   img_desc: pointer to image descriptor so we can know the image type
+ *             and parent image
+ *   img: pointer to image in memory
+ *   img_len: length of image (in bytes)
+ *
+ * Return:
+ *   0 = success, Otherwise = error
+ */
+static int auth_hash(const auth_method_param_hash_t *param,
+		     const auth_img_desc_t *img_desc,
+		     void *img, unsigned int img_len)
+{
+	void *data_ptr, *hash_der_ptr;
+	unsigned int data_len, hash_der_len;
+	int rc = 0;
+
+	/* Get the hash from the parent image. This hash will be DER encoded
+	 * and contain the hash algorithm */
+	rc = auth_get_param(param->hash, img_desc->parent,
+			&hash_der_ptr, &hash_der_len);
+	return_if_error(rc);
+
+	/* Get the data to be hashed from the current image */
+	rc = img_parser_get_auth_param(img_desc->img_type, param->data,
+			img, img_len, &data_ptr, &data_len);
+	return_if_error(rc);
+
+	/* Ask the crypto module to verify this hash */
+	rc = crypto_mod_verify_hash(data_ptr, data_len,
+				    hash_der_ptr, hash_der_len);
+
+	return rc;
+}
+
+/*
+ * Authenticate by digital signature
+ *
+ * This function implements 'AUTH_METHOD_SIG'. To authenticate an image using
+ * this method, the image must contain:
+ *
+ *   - Data to be signed
+ *   - Signature
+ *   - Signature algorithm
+ *
+ * We rely on the image parser module to extract this data from the image.
+ * The parent image must contain:
+ *
+ *   - Public key (or a hash of it)
+ *
+ * If the parent image contains only a hash of the key, we will try to obtain
+ * the public key from the image itself (i.e. self-signed certificates). In that
+ * case, the signature verification is considered just an integrity check and
+ * the authentication is established by calculating the hash of the key and
+ * comparing it with the hash obtained from the parent.
+ *
+ * If the image has no parent (NULL), it means it has to be authenticated using
+ * the ROTPK stored in the platform. Again, this ROTPK could be the key itself
+ * or a hash of it.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+static int auth_signature(const auth_method_param_sig_t *param,
+			  const auth_img_desc_t *img_desc,
+			  void *img, unsigned int img_len)
+{
+	void *data_ptr, *pk_ptr, *pk_hash_ptr, *sig_ptr, *sig_alg_ptr;
+	unsigned int data_len, pk_len, pk_hash_len, sig_len, sig_alg_len;
+	unsigned int flags = 0;
+	int rc = 0;
+
+	/* Get the data to be signed from current image */
+	rc = img_parser_get_auth_param(img_desc->img_type, param->data,
+			img, img_len, &data_ptr, &data_len);
+	return_if_error(rc);
+
+	/* Get the signature from current image */
+	rc = img_parser_get_auth_param(img_desc->img_type, param->sig,
+			img, img_len, &sig_ptr, &sig_len);
+	return_if_error(rc);
+
+	/* Get the signature algorithm from current image */
+	rc = img_parser_get_auth_param(img_desc->img_type, param->alg,
+			img, img_len, &sig_alg_ptr, &sig_alg_len);
+	return_if_error(rc);
+
+	/* Get the public key from the parent. If there is no parent (NULL),
+	 * the certificate has been signed with the ROTPK, so we have to get
+	 * the PK from the platform */
+	if (img_desc->parent) {
+		rc = auth_get_param(param->pk, img_desc->parent,
+				&pk_ptr, &pk_len);
+	} else {
+		rc = plat_get_rotpk_info(param->pk->cookie, &pk_ptr, &pk_len,
+				&flags);
+	}
+	return_if_error(rc);
+
+	if (flags & (ROTPK_IS_HASH | ROTPK_NOT_DEPLOYED)) {
+		/* If the PK is a hash of the key or if the ROTPK is not
+		   deployed on the platform, retrieve the key from the image */
+		pk_hash_ptr = pk_ptr;
+		pk_hash_len = pk_len;
+		rc = img_parser_get_auth_param(img_desc->img_type,
+					param->pk, img, img_len,
+					&pk_ptr, &pk_len);
+		return_if_error(rc);
+
+		/* Ask the crypto module to verify the signature */
+		rc = crypto_mod_verify_signature(data_ptr, data_len,
+						 sig_ptr, sig_len,
+						 sig_alg_ptr, sig_alg_len,
+						 pk_ptr, pk_len);
+		return_if_error(rc);
+
+		if (flags & ROTPK_NOT_DEPLOYED) {
+			NOTICE("ROTPK is not deployed on platform. "
+				"Skipping ROTPK verification.\n");
+		} else {
+			/* Ask the crypto-module to verify the key hash */
+			rc = crypto_mod_verify_hash(pk_ptr, pk_len,
+				    pk_hash_ptr, pk_hash_len);
+		}
+	} else {
+		/* Ask the crypto module to verify the signature */
+		rc = crypto_mod_verify_signature(data_ptr, data_len,
+						 sig_ptr, sig_len,
+						 sig_alg_ptr, sig_alg_len,
+						 pk_ptr, pk_len);
+	}
+
+	return rc;
+}
+
+/*
+ * Authenticate by Non-Volatile counter
+ *
+ * To protect the system against rollback, the platform includes a non-volatile
+ * counter whose value can only be increased. All certificates include a counter
+ * value that should not be lower than the value stored in the platform. If the
+ * value is larger, the counter in the platform must be updated to the new
+ * value.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+static int auth_nvctr(const auth_method_param_nv_ctr_t *param,
+		      const auth_img_desc_t *img_desc,
+		      void *img, unsigned int img_len)
+{
+	char *p;
+	void *data_ptr = NULL;
+	unsigned int data_len, len, i;
+	unsigned int cert_nv_ctr, plat_nv_ctr;
+	int rc = 0;
+
+	/* Get the counter value from current image. The AM expects the IPM
+	 * to return the counter value as a DER encoded integer */
+	rc = img_parser_get_auth_param(img_desc->img_type, param->cert_nv_ctr,
+				       img, img_len, &data_ptr, &data_len);
+	return_if_error(rc);
+
+	/* Parse the DER encoded integer */
+	assert(data_ptr);
+	p = (char *)data_ptr;
+	if (*p != ASN1_INTEGER) {
+		/* Invalid ASN.1 integer */
+		return 1;
+	}
+	p++;
+
+	/* NV-counters are unsigned integers up to 32-bit */
+	len = (unsigned int)(*p & 0x7f);
+	if ((*p & 0x80) || (len > 4)) {
+		return 1;
+	}
+	p++;
+
+	/* Check the number is not negative */
+	if (*p & 0x80) {
+		return 1;
+	}
+
+	/* Convert to unsigned int. This code is for a little-endian CPU */
+	cert_nv_ctr = 0;
+	for (i = 0; i < len; i++) {
+		cert_nv_ctr = (cert_nv_ctr << 8) | *p++;
+	}
+
+	/* Get the counter from the platform */
+	rc = plat_get_nv_ctr(param->plat_nv_ctr->cookie, &plat_nv_ctr);
+	return_if_error(rc);
+
+	if (cert_nv_ctr < plat_nv_ctr) {
+		/* Invalid NV-counter */
+		return 1;
+	} else if (cert_nv_ctr > plat_nv_ctr) {
+		rc = plat_set_nv_ctr2(param->plat_nv_ctr->cookie,
+			img_desc, cert_nv_ctr);
+		return_if_error(rc);
+	}
+
+	return 0;
+}
+
+int plat_set_nv_ctr2(void *cookie, const auth_img_desc_t *img_desc __unused,
+		unsigned int nv_ctr)
+{
+	return plat_set_nv_ctr(cookie, nv_ctr);
+}
+
+/*
+ * Return the parent id in the output parameter '*parent_id'
+ *
+ * Return value:
+ *   0 = Image has parent, 1 = Image has no parent or parent is authenticated
+ */
+int auth_mod_get_parent_id(unsigned int img_id, unsigned int *parent_id)
+{
+	const auth_img_desc_t *img_desc = NULL;
+
+	assert(parent_id != NULL);
+
+	/* Get the image descriptor */
+	img_desc = &cot_desc_ptr[img_id];
+
+	/* Check if the image has no parent (ROT) */
+	if (img_desc->parent == NULL) {
+		*parent_id = 0;
+		return 1;
+	}
+
+	/* Check if the parent has already been authenticated */
+	if (auth_img_flags[img_desc->parent->img_id] & IMG_FLAG_AUTHENTICATED) {
+		*parent_id = 0;
+		return 1;
+	}
+
+	*parent_id = img_desc->parent->img_id;
+	return 0;
+}
+
+/*
+ * Initialize the different modules in the authentication framework
+ */
+void auth_mod_init(void)
+{
+	/* Check we have a valid CoT registered */
+	assert(cot_desc_ptr != NULL);
+
+	/* Crypto module */
+	crypto_mod_init();
+
+	/* Image parser module */
+	img_parser_init();
+}
+
+/*
+ * Authenticate a certificate/image
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int auth_mod_verify_img(unsigned int img_id,
+			void *img_ptr,
+			unsigned int img_len)
+{
+	const auth_img_desc_t *img_desc = NULL;
+	const auth_method_desc_t *auth_method = NULL;
+	void *param_ptr;
+	unsigned int param_len;
+	int rc, i;
+
+	/* Get the image descriptor from the chain of trust */
+	img_desc = &cot_desc_ptr[img_id];
+
+	/* Ask the parser to check the image integrity */
+	rc = img_parser_check_integrity(img_desc->img_type, img_ptr, img_len);
+	return_if_error(rc);
+
+	/* Authenticate the image using the methods indicated in the image
+	 * descriptor. */
+	for (i = 0 ; i < AUTH_METHOD_NUM ; i++) {
+		auth_method = &img_desc->img_auth_methods[i];
+		switch (auth_method->type) {
+		case AUTH_METHOD_NONE:
+			rc = 0;
+			break;
+		case AUTH_METHOD_HASH:
+			rc = auth_hash(&auth_method->param.hash,
+					img_desc, img_ptr, img_len);
+			break;
+		case AUTH_METHOD_SIG:
+			rc = auth_signature(&auth_method->param.sig,
+					img_desc, img_ptr, img_len);
+			break;
+		case AUTH_METHOD_NV_CTR:
+			rc = auth_nvctr(&auth_method->param.nv_ctr,
+					img_desc, img_ptr, img_len);
+			break;
+		default:
+			/* Unknown authentication method */
+			rc = 1;
+			break;
+		}
+		return_if_error(rc);
+	}
+
+	/* Extract the parameters indicated in the image descriptor to
+	 * authenticate the children images. */
+	for (i = 0 ; i < COT_MAX_VERIFIED_PARAMS ; i++) {
+		if (img_desc->authenticated_data[i].type_desc == NULL) {
+			continue;
+		}
+
+		/* Get the parameter from the image parser module */
+		rc = img_parser_get_auth_param(img_desc->img_type,
+				img_desc->authenticated_data[i].type_desc,
+				img_ptr, img_len, &param_ptr, &param_len);
+		return_if_error(rc);
+
+		/* Check parameter size */
+		if (param_len > img_desc->authenticated_data[i].data.len) {
+			return 1;
+		}
+
+		/* Copy the parameter for later use */
+		memcpy((void *)img_desc->authenticated_data[i].data.ptr,
+				(void *)param_ptr, param_len);
+	}
+
+	/* Mark image as authenticated */
+	auth_img_flags[img_desc->img_id] |= IMG_FLAG_AUTHENTICATED;
+
+	return 0;
+}
diff --git a/drivers/auth/crypto_mod.c b/drivers/auth/crypto_mod.c
new file mode 100644
index 0000000..3b3e3ac
--- /dev/null
+++ b/drivers/auth/crypto_mod.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <crypto_mod.h>
+#include <debug.h>
+
+/* Variable exported by the crypto library through REGISTER_CRYPTO_LIB() */
+extern const crypto_lib_desc_t crypto_lib_desc;
+
+/*
+ * The crypto module is responsible for verifying digital signatures and hashes.
+ * It relies on a crypto library to perform the cryptographic operations.
+ *
+ * The crypto module itself does not impose any specific format on signatures,
+ * signature algorithm, keys or hashes, but most cryptographic libraries will
+ * take the parameters as the following DER encoded ASN.1 structures:
+ *
+ *     AlgorithmIdentifier ::= SEQUENCE  {
+ *         algorithm        OBJECT IDENTIFIER,
+ *         parameters       ANY DEFINED BY algorithm OPTIONAL
+ *     }
+ *
+ *     DigestInfo ::= SEQUENCE {
+ *         digestAlgorithm  AlgorithmIdentifier,
+ *         digest           OCTET STRING
+ *     }
+ *
+ *     SubjectPublicKeyInfo ::= SEQUENCE  {
+ *         algorithm        AlgorithmIdentifier,
+ *         subjectPublicKey BIT STRING
+ *     }
+ *
+ *     SignatureAlgorithm ::= AlgorithmIdentifier
+ *
+ *     SignatureValue ::= BIT STRING
+ */
+
+/*
+ * Perform some static checking and call the library initialization function
+ */
+void crypto_mod_init(void)
+{
+	assert(crypto_lib_desc.name != NULL);
+	assert(crypto_lib_desc.init != NULL);
+	assert(crypto_lib_desc.verify_signature != NULL);
+	assert(crypto_lib_desc.verify_hash != NULL);
+
+	/* Initialize the cryptographic library */
+	crypto_lib_desc.init();
+	INFO("Using crypto library '%s'\n", crypto_lib_desc.name);
+}
+
+/*
+ * Function to verify a digital signature
+ *
+ * Parameters:
+ *
+ *   data_ptr, data_len: signed data
+ *   sig_ptr, sig_len: the digital signature
+ *   sig_alg_ptr, sig_alg_len: the digital signature algorithm
+ *   pk_ptr, pk_len: the public key
+ */
+int crypto_mod_verify_signature(void *data_ptr, unsigned int data_len,
+				void *sig_ptr, unsigned int sig_len,
+				void *sig_alg_ptr, unsigned int sig_alg_len,
+				void *pk_ptr, unsigned int pk_len)
+{
+	assert(data_ptr != NULL);
+	assert(data_len != 0);
+	assert(sig_ptr != NULL);
+	assert(sig_len != 0);
+	assert(sig_alg_ptr != NULL);
+	assert(sig_alg_len != 0);
+	assert(pk_ptr != NULL);
+	assert(pk_len != 0);
+
+	return crypto_lib_desc.verify_signature(data_ptr, data_len,
+						sig_ptr, sig_len,
+						sig_alg_ptr, sig_alg_len,
+						pk_ptr, pk_len);
+}
+
+/*
+ * Verify a hash by comparison
+ *
+ * Parameters:
+ *
+ *   data_ptr, data_len: data to be hashed
+ *   digest_info_ptr, digest_info_len: hash to be compared
+ */
+int crypto_mod_verify_hash(void *data_ptr, unsigned int data_len,
+			   void *digest_info_ptr, unsigned int digest_info_len)
+{
+	assert(data_ptr != NULL);
+	assert(data_len != 0);
+	assert(digest_info_ptr != NULL);
+	assert(digest_info_len != 0);
+
+	return crypto_lib_desc.verify_hash(data_ptr, data_len,
+					   digest_info_ptr, digest_info_len);
+}
diff --git a/drivers/auth/cryptocell/cryptocell_crypto.c b/drivers/auth/cryptocell/cryptocell_crypto.c
new file mode 100644
index 0000000..80c1093
--- /dev/null
+++ b/drivers/auth/cryptocell/cryptocell_crypto.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <crypto_driver.h>
+#include <crypto_mod.h>
+#include <debug.h>
+#include <mbedtls_common.h>
+#include <platform_def.h>
+#include <rsa.h>
+#include <sbrom_bsv_api.h>
+#include <secureboot_base_func.h>
+#include <secureboot_gen_defs.h>
+#include <stddef.h>
+#include <string.h>
+#include <util.h>
+#include <utils.h>
+
+#include <mbedtls/oid.h>
+
+#define LIB_NAME		"CryptoCell SBROM"
+#define RSA_SALT_LEN		32
+#define RSA_EXPONENT		65537
+
+/*
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm            OBJECT IDENTIFIER,
+ *     parameters           ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * SubjectPublicKeyInfo  ::=  SEQUENCE  {
+ *     algorithm            AlgorithmIdentifier,
+ *     subjectPublicKey     BIT STRING
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm      AlgorithmIdentifier,
+ *     digest               OCTET STRING
+ * }
+ *
+ *  RSASSA-PSS-params ::= SEQUENCE {
+ *     hashAlgorithm        [0] HashAlgorithm,
+ *     maskGenAlgorithm     [1] MaskGenAlgorithm,
+ *     saltLength           [2] INTEGER,
+ *     trailerField         [3] TrailerField    DEFAULT trailerFieldBC
+ * }
+ */
+
+/*
+ * Initialize the library and export the descriptor
+ */
+static void init(void)
+{
+	CCError_t ret;
+	uint32_t lcs;
+
+	/* Initialize CC SBROM */
+	ret = CC_BsvSbromInit((uintptr_t)PLAT_CRYPTOCELL_BASE);
+	if (ret != CC_OK) {
+		ERROR("CryptoCell CC_BsvSbromInit() error %x\n", ret);
+		panic();
+	}
+
+	/* Initialize lifecycle state */
+	ret = CC_BsvLcsGetAndInit((uintptr_t)PLAT_CRYPTOCELL_BASE, &lcs);
+	if (ret != CC_OK) {
+		ERROR("CryptoCell CC_BsvLcsGetAndInit() error %x\n", ret);
+		panic();
+	}
+
+	/* If the lifecycle state is `SD`, then stop further execution */
+	if (lcs == CC_BSV_SECURITY_DISABLED_LCS) {
+		ERROR("CryptoCell LCS is security-disabled\n");
+		panic();
+	}
+}
+
+/*
+ * Verify a signature.
+ *
+ * Parameters are passed using the DER encoding format following the ASN.1
+ * structures detailed above.
+ */
+static int verify_signature(void *data_ptr, unsigned int data_len,
+			    void *sig_ptr, unsigned int sig_len,
+			    void *sig_alg, unsigned int sig_alg_len,
+			    void *pk_ptr, unsigned int pk_len)
+{
+	CCError_t error;
+	CCSbNParams_t pk;
+	CCSbSignature_t signature;
+	int rc, exp;
+	mbedtls_asn1_buf sig_oid, alg_oid, params;
+	mbedtls_md_type_t md_alg;
+	mbedtls_pk_type_t pk_alg;
+	mbedtls_pk_rsassa_pss_options pss_opts;
+	size_t len;
+	uint8_t *p, *end;
+	/* Temp buf to store the public key modulus (N) in LE format */
+	uint32_t RevN[SB_RSA_MOD_SIZE_IN_WORDS];
+
+	/* Verify the signature algorithm */
+	/* Get pointers to signature OID and parameters */
+	p = sig_alg;
+	end = p + sig_alg_len;
+	rc = mbedtls_asn1_get_alg(&p, end, &sig_oid, &params);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* Get the actual signature algorithm (MD + PK) */
+	rc = mbedtls_oid_get_sig_alg(&sig_oid, &md_alg, &pk_alg);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* The CryptoCell only supports the RSASSA-PSS signature scheme */
+	if (pk_alg != MBEDTLS_PK_RSASSA_PSS || md_alg != MBEDTLS_MD_NONE)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* Verify the RSASSA-PSS params */
+	/* The trailer field is verified to be 0xBC internally by this API */
+	rc = mbedtls_x509_get_rsassa_pss_params(&params, &md_alg,
+			&pss_opts.mgf1_hash_id,
+			&pss_opts.expected_salt_len);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* The CryptoCell only supports SHA256 as the hash algorithm */
+	if (md_alg != MBEDTLS_MD_SHA256 || pss_opts.mgf1_hash_id != MBEDTLS_MD_SHA256)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (pss_opts.expected_salt_len != RSA_SALT_LEN)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* Parse the public key */
+	p = pk_ptr;
+	end = p + pk_len;
+	rc = mbedtls_asn1_get_tag(&p, end, &len,
+			MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	end = p + len;
+	rc = mbedtls_asn1_get_alg_null(&p, end, &alg_oid);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (mbedtls_oid_get_pk_alg(&alg_oid, &pk_alg) != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (pk_alg != MBEDTLS_PK_RSA)
+		return CRYPTO_ERR_SIGNATURE;
+
+	rc = mbedtls_asn1_get_bitstring_null(&p, end, &len);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	rc = mbedtls_asn1_get_tag(&p, end, &len,
+				MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_INTEGER);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (*p == 0) {
+		p++;
+		len--;
+	}
+	if (len != RSA_MOD_SIZE_IN_BYTES || ((p + len) > end))
+		return CRYPTO_ERR_SIGNATURE;
+
+	/*
+	 * The CCSbVerifySignature() API expects N and Np in BE format and
+	 * the signature in LE format. Copy N from certificate.
+	 */
+	memcpy(pk.N, p, RSA_MOD_SIZE_IN_BYTES);
+
+	/* Verify the RSA exponent */
+	p += len;
+	rc = mbedtls_asn1_get_int(&p, end, &exp);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (exp != RSA_EXPONENT)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/*
+	 * Calculate the Np (Barrett n' value). The RSA_CalcNp() API expects
+	 * N in LE format. Hence reverse N into a temporary buffer `RevN`.
+	 */
+	UTIL_ReverseMemCopy((uint8_t *)RevN, (uint8_t *)pk.N, sizeof(RevN));
+
+	RSA_CalcNp((uintptr_t)PLAT_CRYPTOCELL_BASE, RevN, pk.Np);
+
+	/* Np is in LE format. Reverse it to BE */
+	UTIL_ReverseBuff((uint8_t *)pk.Np, sizeof(pk.Np));
+
+	/* Get the signature (bitstring) */
+	p = sig_ptr;
+	end = p + sig_len;
+	rc = mbedtls_asn1_get_bitstring_null(&p, end, &len);
+	if (rc != 0)
+		return CRYPTO_ERR_SIGNATURE;
+
+	if (len != RSA_MOD_SIZE_IN_BYTES || ((p + len) > end))
+		return CRYPTO_ERR_SIGNATURE;
+
+	/*
+	 *  The signature is in BE format. Convert it to LE before calling
+	 *  CCSbVerifySignature().
+	 */
+	UTIL_ReverseMemCopy((uint8_t *)signature.sig, p, RSA_MOD_SIZE_IN_BYTES);
+
+	/*
+	 * CryptoCell utilises DMA internally to transfer data. Flush the data
+	 * from caches.
+	 */
+	flush_dcache_range((uintptr_t)data_ptr, data_len);
+
+	/* Verify the signature */
+	error = CCSbVerifySignature((uintptr_t)PLAT_CRYPTOCELL_BASE,
+			(uint32_t *)data_ptr, &pk, &signature,
+			data_len, RSA_PSS_2048);
+	if (error != CC_OK)
+		return CRYPTO_ERR_SIGNATURE;
+
+	/* Signature verification success */
+	return CRYPTO_SUCCESS;
+}
+
+/*
+ * Match a hash
+ *
+ * Digest info is passed in DER format following the ASN.1 structure detailed
+ * above.
+ */
+static int verify_hash(void *data_ptr, unsigned int data_len,
+		       void *digest_info_ptr, unsigned int digest_info_len)
+{
+	mbedtls_asn1_buf hash_oid, params;
+	mbedtls_md_type_t md_alg;
+	uint8_t *p, *end, *hash;
+	CCHashResult_t pubKeyHash;
+	size_t len;
+	int rc;
+	CCError_t error;
+
+	/* Digest info should be an MBEDTLS_ASN1_SEQUENCE */
+	p = digest_info_ptr;
+	end = p + digest_info_len;
+	rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				  MBEDTLS_ASN1_SEQUENCE);
+	if (rc != 0)
+		return CRYPTO_ERR_HASH;
+
+	/* Get the hash algorithm */
+	rc = mbedtls_asn1_get_alg(&p, end, &hash_oid, &params);
+	if (rc != 0)
+		return CRYPTO_ERR_HASH;
+
+	rc = mbedtls_oid_get_md_alg(&hash_oid, &md_alg);
+	if (rc != 0)
+		return CRYPTO_ERR_HASH;
+	/* Verify that hash algorithm is SHA256 */
+	if (md_alg != MBEDTLS_MD_SHA256)
+		return CRYPTO_ERR_HASH;
+
+	/* Hash should be octet string type */
+	rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING);
+	if (rc != 0)
+		return CRYPTO_ERR_HASH;
+
+	/* Length of hash must match the algorithm's size */
+	if (len != HASH_RESULT_SIZE_IN_BYTES)
+		return CRYPTO_ERR_HASH;
+
+	/*
+	 * CryptoCell utilises DMA internally to transfer data. Flush the data
+	 * from caches.
+	 */
+	flush_dcache_range((uintptr_t)data_ptr, data_len);
+
+	hash = p;
+	error = SBROM_CryptoHash((uintptr_t)PLAT_CRYPTOCELL_BASE,
+			(uintptr_t)data_ptr, data_len, pubKeyHash);
+	if (error != CC_OK)
+		return CRYPTO_ERR_HASH;
+
+	rc = memcmp(pubKeyHash, hash, HASH_RESULT_SIZE_IN_BYTES);
+	if (rc != 0)
+		return CRYPTO_ERR_HASH;
+
+	return CRYPTO_SUCCESS;
+}
+
+/*
+ * Register crypto library descriptor
+ */
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash);
+
+
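UTIL_ReverseMemCopy() and UTIL_ReverseBuff() used above come from the CryptoCell SBROM library; conceptually they simply byte-reverse a buffer to convert the modulus and signature between big- and little-endian representations, as required by RSA_CalcNp() and CCSbVerifySignature(). An illustrative equivalent (sketch only, not the vendor implementation):

    #include <stddef.h>
    #include <stdint.h>

    /* Byte-reverse 'size' bytes from src into dst (dst != src). */
    static void reverse_mem_copy(uint8_t *dst, const uint8_t *src, size_t size)
    {
    	size_t i;

    	for (i = 0U; i < size; i++)
    		dst[i] = src[size - 1U - i];
    }
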
diff --git a/drivers/auth/cryptocell/cryptocell_crypto.mk b/drivers/auth/cryptocell/cryptocell_crypto.mk
new file mode 100644
index 0000000..a88dcfc
--- /dev/null
+++ b/drivers/auth/cryptocell/cryptocell_crypto.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include drivers/auth/mbedtls/mbedtls_common.mk
+
+# The algorithm is RSA when using the Cryptocell crypto driver
+TF_MBEDTLS_KEY_ALG_ID		:=	TF_MBEDTLS_RSA
+
+# Needs to be set to drive mbed TLS configuration correctly
+$(eval $(call add_define,TF_MBEDTLS_KEY_ALG_ID))
+
+# CCSBROM_LIB_PATH must be set to the Cryptocell SBROM library path
+ifeq (${CCSBROM_LIB_PATH},)
+  $(error Error: CCSBROM_LIB_PATH not set)
+endif
+
+TF_LDFLAGS		+= -L$(CCSBROM_LIB_PATH)
+LDLIBS			+= -lcc_712sbromx509
+
+INCLUDES		+=	-Iinclude/drivers/arm/cryptocell
+
+CRYPTOCELL_SOURCES	:=	drivers/auth/cryptocell/cryptocell_crypto.c
+
+BL1_SOURCES		+=	${CRYPTOCELL_SOURCES}
+BL2_SOURCES		+=	${CRYPTOCELL_SOURCES}
\ No newline at end of file
diff --git a/drivers/auth/img_parser_mod.c b/drivers/auth/img_parser_mod.c
new file mode 100644
index 0000000..6a01071
--- /dev/null
+++ b/drivers/auth/img_parser_mod.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <auth_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <img_parser_mod.h>
+#include <limits.h>
+#include <stdint.h>
+#include <string.h>
+
+extern uintptr_t __PARSER_LIB_DESCS_START__;
+extern uintptr_t __PARSER_LIB_DESCS_END__;
+#define PARSER_LIB_DESCS_START	((uintptr_t) (&__PARSER_LIB_DESCS_START__))
+#define PARSER_LIB_DESCS_END	((uintptr_t) (&__PARSER_LIB_DESCS_END__))
+static unsigned int parser_lib_indices[IMG_MAX_TYPES];
+static img_parser_lib_desc_t *parser_lib_descs;
+
+#define INVALID_IDX		UINT_MAX
+
+static void validate_desc(img_parser_lib_desc_t *desc)
+{
+	assert(desc != NULL);
+	assert(desc->init != NULL);
+	assert(desc->name != NULL);
+	assert(desc->check_integrity != NULL);
+	assert(desc->get_auth_param != NULL);
+}
+
+void img_parser_init(void)
+{
+	unsigned int index, mod_num;
+
+	/* Initialise internal variables to invalid state */
+	for (index = 0; index < IMG_MAX_TYPES; index++) {
+		parser_lib_indices[index] = INVALID_IDX;
+	}
+
+	/* Calculate how many image parsers are registered. At least one parser
+	 * must be present */
+	mod_num = PARSER_LIB_DESCS_END - PARSER_LIB_DESCS_START;
+	mod_num /= sizeof(img_parser_lib_desc_t);
+	assert(mod_num > 0);
+
+	parser_lib_descs = (img_parser_lib_desc_t *) PARSER_LIB_DESCS_START;
+	for (index = 0; index < mod_num; index++) {
+
+		/* Check that the image parser library descriptor is valid */
+		validate_desc(&parser_lib_descs[index]);
+
+		/* Initialize image parser */
+		parser_lib_descs[index].init();
+
+		/* Ensure only one parser is registered for each image type */
+		assert(parser_lib_indices[parser_lib_descs[index].img_type] ==
+				INVALID_IDX);
+
+		/* Keep the index of this image parser library */
+		parser_lib_indices[parser_lib_descs[index].img_type] = index;
+	}
+}
+
+int img_parser_check_integrity(img_type_t img_type,
+			       void *img_ptr, unsigned int img_len)
+{
+	unsigned int idx;
+
+	assert(img_ptr != NULL);
+	assert(img_len != 0);
+
+	/* No integrity checks on raw images */
+	if (img_type == IMG_RAW) {
+		return IMG_PARSER_OK;
+	}
+
+	/* Find the index of the required image parser */
+	idx = parser_lib_indices[img_type];
+	assert(idx != INVALID_IDX);
+
+	/* Call the function to check the image integrity */
+	return parser_lib_descs[idx].check_integrity(img_ptr, img_len);
+}
+
+/*
+ * Extract an authentication parameter from an image
+ *
+ * Parameters:
+ *   img_type: image type (certificate, raw image, etc)
+ *   type_desc: provides info to obtain the parameter
+ *   img_ptr: pointer to image data
+ *   img_len: image length
+ *   param_ptr: [out] stores a pointer to the parameter
+ *   param_len: [out] stores the length of the parameter
+ */
+int img_parser_get_auth_param(img_type_t img_type,
+			      const auth_param_type_desc_t *type_desc,
+			      void *img_ptr, unsigned int img_len,
+			      void **param_ptr, unsigned int *param_len)
+{
+	unsigned int idx;
+
+	assert(type_desc != NULL);
+	assert(img_ptr != NULL);
+	assert(img_len != 0);
+	assert(param_ptr != NULL);
+	assert(param_len != NULL);
+
+	/* In a raw image we can only get the data itself */
+	if (img_type == IMG_RAW) {
+		assert(type_desc->type == AUTH_PARAM_RAW_DATA);
+		*param_ptr = img_ptr;
+		*param_len = img_len;
+		return IMG_PARSER_OK;
+	}
+
+	/* Find the index of the required image parser library */
+	idx = parser_lib_indices[img_type];
+	assert(idx != INVALID_IDX);
+
+	/* Call the function to obtain the parameter */
+	return parser_lib_descs[idx].get_auth_param(type_desc, img_ptr, img_len,
+			param_ptr, param_len);
+}
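
A caller-side sketch of this API, assuming an already-loaded certificate image and a hash parameter descriptor such as tb_fw_hash defined in tbbr_cot.c further down (buffer names and error handling are illustrative only):

    /* Sketch only: extract the hash extension from a certificate image. */
    void *hash_ptr;
    unsigned int hash_len;
    int rc;

    rc = img_parser_check_integrity(IMG_CERT, cert_ptr, cert_len);
    if (rc == IMG_PARSER_OK)
    	rc = img_parser_get_auth_param(IMG_CERT, &tb_fw_hash,
    				       cert_ptr, cert_len,
    				       &hash_ptr, &hash_len);
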
diff --git a/drivers/auth/mbedtls/mbedtls_common.c b/drivers/auth/mbedtls/mbedtls_common.c
new file mode 100644
index 0000000..205c243
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_common.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+/* mbed TLS headers */
+#include <mbedtls/memory_buffer_alloc.h>
+#include <mbedtls/platform.h>
+#include <mbedtls_config.h>
+
+/*
+ * mbed TLS heap
+ */
+#if (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_ECDSA) \
+	|| (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)
+#define MBEDTLS_HEAP_SIZE		(13*1024)
+#elif (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA)
+#define MBEDTLS_HEAP_SIZE		(7*1024)
+#endif
+static unsigned char heap[MBEDTLS_HEAP_SIZE];
+
+/*
+ * mbed TLS initialization function
+ */
+void mbedtls_init(void)
+{
+	static int ready;
+
+	if (!ready) {
+		/* Initialize the mbed TLS heap */
+		mbedtls_memory_buffer_alloc_init(heap, MBEDTLS_HEAP_SIZE);
+
+#ifdef MBEDTLS_PLATFORM_SNPRINTF_ALT
+		/* Use reduced version of snprintf to save space. */
+		mbedtls_platform_set_snprintf(tf_snprintf);
+#endif
+
+		ready = 1;
+	}
+}
diff --git a/drivers/auth/mbedtls/mbedtls_common.mk b/drivers/auth/mbedtls/mbedtls_common.mk
new file mode 100644
index 0000000..f2b6f6e
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_common.mk
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${MBEDTLS_COMMON_MK},1)
+MBEDTLS_COMMON_MK	:=	1
+
+# MBEDTLS_DIR must be set to the mbed TLS main directory (it must contain
+# the 'include' and 'library' subdirectories).
+ifeq (${MBEDTLS_DIR},)
+  $(error Error: MBEDTLS_DIR not set)
+endif
+
+INCLUDES		+=	-I${MBEDTLS_DIR}/include		\
+				-Iinclude/drivers/auth/mbedtls
+
+# Specify mbed TLS configuration file
+MBEDTLS_CONFIG_FILE	:=	"<mbedtls_config.h>"
+$(eval $(call add_define,MBEDTLS_CONFIG_FILE))
+
+MBEDTLS_COMMON_SOURCES	:=	drivers/auth/mbedtls/mbedtls_common.c	\
+				$(addprefix ${MBEDTLS_DIR}/library/,	\
+				asn1parse.c 				\
+				asn1write.c 				\
+				memory_buffer_alloc.c			\
+				oid.c 					\
+				platform.c 				\
+				)
+
+BL1_SOURCES		+=	${MBEDTLS_COMMON_SOURCES}
+BL2_SOURCES		+=	${MBEDTLS_COMMON_SOURCES}
+
+endif
diff --git a/drivers/auth/mbedtls/mbedtls_crypto.c b/drivers/auth/mbedtls/mbedtls_crypto.c
new file mode 100644
index 0000000..d8810d6
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_crypto.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <crypto_mod.h>
+#include <debug.h>
+#include <mbedtls_common.h>
+#include <stddef.h>
+#include <string.h>
+
+/* mbed TLS headers */
+#include <mbedtls/md.h>
+#include <mbedtls/memory_buffer_alloc.h>
+#include <mbedtls/oid.h>
+#include <mbedtls/platform.h>
+
+#define LIB_NAME		"mbed TLS"
+
+/*
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm               OBJECT IDENTIFIER,
+ *     parameters              ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * SubjectPublicKeyInfo  ::=  SEQUENCE  {
+ *     algorithm            AlgorithmIdentifier,
+ *     subjectPublicKey     BIT STRING
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm AlgorithmIdentifier,
+ *     digest OCTET STRING
+ * }
+ */
+
+/*
+ * Initialize the library and export the descriptor
+ */
+static void init(void)
+{
+	/* Initialize mbed TLS */
+	mbedtls_init();
+}
+
+/*
+ * Verify a signature.
+ *
+ * Parameters are passed using the DER encoding format following the ASN.1
+ * structures detailed above.
+ */
+static int verify_signature(void *data_ptr, unsigned int data_len,
+			    void *sig_ptr, unsigned int sig_len,
+			    void *sig_alg, unsigned int sig_alg_len,
+			    void *pk_ptr, unsigned int pk_len)
+{
+	mbedtls_asn1_buf sig_oid, sig_params;
+	mbedtls_asn1_buf signature;
+	mbedtls_md_type_t md_alg;
+	mbedtls_pk_type_t pk_alg;
+	mbedtls_pk_context pk = {0};
+	int rc;
+	void *sig_opts = NULL;
+	const mbedtls_md_info_t *md_info;
+	unsigned char *p, *end;
+	unsigned char hash[MBEDTLS_MD_MAX_SIZE];
+
+	/* Get pointers to signature OID and parameters */
+	p = (unsigned char *)sig_alg;
+	end = (unsigned char *)(p + sig_alg_len);
+	rc = mbedtls_asn1_get_alg(&p, end, &sig_oid, &sig_params);
+	if (rc != 0) {
+		return CRYPTO_ERR_SIGNATURE;
+	}
+
+	/* Get the actual signature algorithm (MD + PK) */
+	rc = mbedtls_x509_get_sig_alg(&sig_oid, &sig_params, &md_alg, &pk_alg, &sig_opts);
+	if (rc != 0) {
+		return CRYPTO_ERR_SIGNATURE;
+	}
+
+	/* Parse the public key */
+	mbedtls_pk_init(&pk);
+	p = (unsigned char *)pk_ptr;
+	end = (unsigned char *)(p + pk_len);
+	rc = mbedtls_pk_parse_subpubkey(&p, end, &pk);
+	if (rc != 0) {
+		rc = CRYPTO_ERR_SIGNATURE;
+		goto end2;
+	}
+
+	/* Get the signature (bitstring) */
+	p = (unsigned char *)sig_ptr;
+	end = (unsigned char *)(p + sig_len);
+	signature.tag = *p;
+	rc = mbedtls_asn1_get_bitstring_null(&p, end, &signature.len);
+	if (rc != 0) {
+		rc = CRYPTO_ERR_SIGNATURE;
+		goto end1;
+	}
+	signature.p = p;
+
+	/* Calculate the hash of the data */
+	md_info = mbedtls_md_info_from_type(md_alg);
+	if (md_info == NULL) {
+		rc = CRYPTO_ERR_SIGNATURE;
+		goto end1;
+	}
+	p = (unsigned char *)data_ptr;
+	rc = mbedtls_md(md_info, p, data_len, hash);
+	if (rc != 0) {
+		rc = CRYPTO_ERR_SIGNATURE;
+		goto end1;
+	}
+
+	/* Verify the signature */
+	rc = mbedtls_pk_verify_ext(pk_alg, sig_opts, &pk, md_alg, hash,
+			mbedtls_md_get_size(md_info),
+			signature.p, signature.len);
+	if (rc != 0) {
+		rc = CRYPTO_ERR_SIGNATURE;
+		goto end1;
+	}
+
+	/* Signature verification success */
+	rc = CRYPTO_SUCCESS;
+
+end1:
+	mbedtls_pk_free(&pk);
+end2:
+	mbedtls_free(sig_opts);
+	return rc;
+}
+
+/*
+ * Match a hash
+ *
+ * Digest info is passed in DER format following the ASN.1 structure detailed
+ * above.
+ */
+static int verify_hash(void *data_ptr, unsigned int data_len,
+		       void *digest_info_ptr, unsigned int digest_info_len)
+{
+	mbedtls_asn1_buf hash_oid, params;
+	mbedtls_md_type_t md_alg;
+	const mbedtls_md_info_t *md_info;
+	unsigned char *p, *end, *hash;
+	unsigned char data_hash[MBEDTLS_MD_MAX_SIZE];
+	size_t len;
+	int rc;
+
+	/* Digest info should be an MBEDTLS_ASN1_SEQUENCE */
+	p = (unsigned char *)digest_info_ptr;
+	end = p + digest_info_len;
+	rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				  MBEDTLS_ASN1_SEQUENCE);
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Get the hash algorithm */
+	rc = mbedtls_asn1_get_alg(&p, end, &hash_oid, &params);
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	rc = mbedtls_oid_get_md_alg(&hash_oid, &md_alg);
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	md_info = mbedtls_md_info_from_type(md_alg);
+	if (md_info == NULL) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Hash should be octet string type */
+	rc = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OCTET_STRING);
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Length of hash must match the algorithm's size */
+	if (len != mbedtls_md_get_size(md_info)) {
+		return CRYPTO_ERR_HASH;
+	}
+	hash = p;
+
+	/* Calculate the hash of the data */
+	p = (unsigned char *)data_ptr;
+	rc = mbedtls_md(md_info, p, data_len, data_hash);
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	/* Compare values */
+	rc = memcmp(data_hash, hash, mbedtls_md_get_size(md_info));
+	if (rc != 0) {
+		return CRYPTO_ERR_HASH;
+	}
+
+	return CRYPTO_SUCCESS;
+}
+
+/*
+ * Register crypto library descriptor
+ */
+REGISTER_CRYPTO_LIB(LIB_NAME, init, verify_signature, verify_hash);
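
For reference, the DigestInfo handled by verify_hash() above is a fixed 19-byte DER header followed by the raw digest; with SHA-256 that totals 51 bytes, which matches the HASH_DER_LEN value used by tbbr_cot.c further down. The header bytes are the standard encoding, shown here purely for illustration:

    /* DER prefix of DigestInfo for SHA-256; the 32-byte digest follows,
     * giving 19 + 32 = 51 bytes in total. */
    static const unsigned char sha256_digest_info_hdr[] = {
    	0x30, 0x31,		/* SEQUENCE, 49 bytes of content */
    	0x30, 0x0d,		/*   AlgorithmIdentifier SEQUENCE */
    	0x06, 0x09,		/*     OID, 9 bytes */
    	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
    				/*     2.16.840.1.101.3.4.2.1 (SHA-256) */
    	0x05, 0x00,		/*     NULL parameters */
    	0x04, 0x20		/*   OCTET STRING, 32 bytes */
    };
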
diff --git a/drivers/auth/mbedtls/mbedtls_crypto.mk b/drivers/auth/mbedtls/mbedtls_crypto.mk
new file mode 100644
index 0000000..d6fc7eb
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_crypto.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include drivers/auth/mbedtls/mbedtls_common.mk
+
+# The platform may define the variable 'TF_MBEDTLS_KEY_ALG' to select the key
+# algorithm to use. If the variable is not defined, select it based on the
+# algorithm used for key generation (`KEY_ALG`). If `KEY_ALG` is not defined,
+# or is defined to `rsa`/`rsa_1_5`, then set the variable to `rsa`.
+ifeq (${TF_MBEDTLS_KEY_ALG},)
+    ifeq (${KEY_ALG}, ecdsa)
+        TF_MBEDTLS_KEY_ALG		:=	ecdsa
+    else
+        TF_MBEDTLS_KEY_ALG		:=	rsa
+    endif
+endif
+
+# If MBEDTLS_KEY_ALG build flag is defined use it to set TF_MBEDTLS_KEY_ALG for
+# backward compatibility
+ifdef MBEDTLS_KEY_ALG
+    ifeq (${ERROR_DEPRECATED},1)
+        $(error "MBEDTLS_KEY_ALG is deprecated. Please use the new build flag TF_MBEDTLS_KEY_ALG")
+    endif
+    $(warning "MBEDTLS_KEY_ALG is deprecated. Please use the new build flag TF_MBEDTLS_KEY_ALG")
+    TF_MBEDTLS_KEY_ALG	:= ${MBEDTLS_KEY_ALG}
+endif
+
+MBEDTLS_CRYPTO_SOURCES		:=	drivers/auth/mbedtls/mbedtls_crypto.c	\
+					$(addprefix ${MBEDTLS_DIR}/library/,	\
+					bignum.c				\
+					md.c					\
+					md_wrap.c				\
+					pk.c 					\
+					pk_wrap.c 				\
+					pkparse.c 				\
+					pkwrite.c 				\
+					sha256.c				\
+					)
+
+# Key algorithm specific files
+MBEDTLS_ECDSA_CRYPTO_SOURCES	+=	$(addprefix ${MBEDTLS_DIR}/library/,	\
+					ecdsa.c					\
+					ecp_curves.c				\
+					ecp.c					\
+					)
+
+MBEDTLS_RSA_CRYPTO_SOURCES	+=	$(addprefix ${MBEDTLS_DIR}/library/,	\
+					rsa.c					\
+					)
+
+ifeq (${TF_MBEDTLS_KEY_ALG},ecdsa)
+    MBEDTLS_CRYPTO_SOURCES	+=	$(MBEDTLS_ECDSA_CRYPTO_SOURCES)
+    TF_MBEDTLS_KEY_ALG_ID	:=	TF_MBEDTLS_ECDSA
+else ifeq (${TF_MBEDTLS_KEY_ALG},rsa)
+    MBEDTLS_CRYPTO_SOURCES	+=	$(MBEDTLS_RSA_CRYPTO_SOURCES)
+    TF_MBEDTLS_KEY_ALG_ID	:=	TF_MBEDTLS_RSA
+else ifeq (${TF_MBEDTLS_KEY_ALG},rsa+ecdsa)
+    MBEDTLS_CRYPTO_SOURCES	+=	$(MBEDTLS_ECDSA_CRYPTO_SOURCES)
+    MBEDTLS_CRYPTO_SOURCES	+=	$(MBEDTLS_RSA_CRYPTO_SOURCES)
+    TF_MBEDTLS_KEY_ALG_ID	:=	TF_MBEDTLS_RSA_AND_ECDSA
+else
+    $(error "TF_MBEDTLS_KEY_ALG=${TF_MBEDTLS_KEY_ALG} not supported on mbed TLS")
+endif
+
+# Needs to be set to drive mbed TLS configuration correctly
+$(eval $(call add_define,TF_MBEDTLS_KEY_ALG_ID))
+
+BL1_SOURCES			+=	${MBEDTLS_CRYPTO_SOURCES}
+BL2_SOURCES			+=	${MBEDTLS_CRYPTO_SOURCES}
diff --git a/drivers/auth/mbedtls/mbedtls_x509.mk b/drivers/auth/mbedtls/mbedtls_x509.mk
new file mode 100644
index 0000000..0f28b65
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_x509.mk
@@ -0,0 +1,16 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include drivers/auth/mbedtls/mbedtls_common.mk
+
+MBEDTLS_X509_SOURCES	:=	drivers/auth/mbedtls/mbedtls_x509_parser.c	\
+				$(addprefix ${MBEDTLS_DIR}/library/,		\
+				x509.c 						\
+				x509_crt.c 					\
+				)
+
+BL1_SOURCES		+=	${MBEDTLS_X509_SOURCES}
+BL2_SOURCES		+=	${MBEDTLS_X509_SOURCES}
diff --git a/drivers/auth/mbedtls/mbedtls_x509_parser.c b/drivers/auth/mbedtls/mbedtls_x509_parser.c
new file mode 100644
index 0000000..bda1208
--- /dev/null
+++ b/drivers/auth/mbedtls/mbedtls_x509_parser.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * X509 parser based on mbed TLS
+ *
+ * This module implements functions to check the integrity of a X509v3
+ * certificate ASN.1 structure and extract authentication parameters from the
+ * extensions field, such as an image hash or a public key.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <img_parser_mod.h>
+#include <mbedtls_common.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <utils.h>
+
+/* mbed TLS headers */
+#include <mbedtls/asn1.h>
+#include <mbedtls/oid.h>
+#include <mbedtls/platform.h>
+
+/* Maximum OID string length ("a.b.c.d.e.f ...") */
+#define MAX_OID_STR_LEN			64
+
+#define LIB_NAME	"mbed TLS X509v3"
+
+/* Temporary variables to speed up the authentication parameters search. These
+ * variables are assigned once during the integrity check and used any time an
+ * authentication parameter is requested, so we do not have to parse the image
+ * again */
+static mbedtls_asn1_buf tbs;
+static mbedtls_asn1_buf v3_ext;
+static mbedtls_asn1_buf pk;
+static mbedtls_asn1_buf sig_alg;
+static mbedtls_asn1_buf signature;
+
+/*
+ * Clear all static temporary variables.
+ */
+static void clear_temp_vars(void)
+{
+#define ZERO_AND_CLEAN(x)					\
+	do {							\
+		zeromem(&x, sizeof(x));				\
+		clean_dcache_range((uintptr_t)&x, sizeof(x));	\
+	} while (0);
+
+	ZERO_AND_CLEAN(tbs)
+	ZERO_AND_CLEAN(v3_ext);
+	ZERO_AND_CLEAN(pk);
+	ZERO_AND_CLEAN(sig_alg);
+	ZERO_AND_CLEAN(signature);
+
+#undef ZERO_AND_CLEAN
+}
+
+/*
+ * Get X509v3 extension
+ *
+ * Global variable 'v3_ext' must point to the extensions region
+ * in the certificate. No need to check for errors since the image has passed
+ * the integrity check.
+ */
+static int get_ext(const char *oid, void **ext, unsigned int *ext_len)
+{
+	int oid_len;
+	size_t len;
+	unsigned char *end_ext_data, *end_ext_octet;
+	unsigned char *p;
+	const unsigned char *end;
+	char oid_str[MAX_OID_STR_LEN];
+	mbedtls_asn1_buf extn_oid;
+	int is_critical;
+
+	assert(oid != NULL);
+
+	p = v3_ext.p;
+	end = v3_ext.p + v3_ext.len;
+
+	mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+			     MBEDTLS_ASN1_SEQUENCE);
+
+	while (p < end) {
+		zeromem(&extn_oid, sizeof(extn_oid));
+		is_critical = 0; /* DEFAULT FALSE */
+
+		mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				     MBEDTLS_ASN1_SEQUENCE);
+		end_ext_data = p + len;
+
+		/* Get extension ID */
+		extn_oid.tag = *p;
+		mbedtls_asn1_get_tag(&p, end, &extn_oid.len, MBEDTLS_ASN1_OID);
+		extn_oid.p = p;
+		p += extn_oid.len;
+
+		/* Get optional critical */
+		mbedtls_asn1_get_bool(&p, end_ext_data, &is_critical);
+
+		/* Extension data */
+		mbedtls_asn1_get_tag(&p, end_ext_data, &len,
+				     MBEDTLS_ASN1_OCTET_STRING);
+		end_ext_octet = p + len;
+
+		/* Detect requested extension */
+		oid_len = mbedtls_oid_get_numeric_string(oid_str,
+							 MAX_OID_STR_LEN,
+							 &extn_oid);
+		if (oid_len == MBEDTLS_ERR_OID_BUF_TOO_SMALL) {
+			return IMG_PARSER_ERR;
+		}
+		if ((oid_len == strlen(oid_str)) && !strcmp(oid, oid_str)) {
+			*ext = (void *)p;
+			*ext_len = (unsigned int)len;
+			return IMG_PARSER_OK;
+		}
+
+		/* Next */
+		p = end_ext_octet;
+	}
+
+	return IMG_PARSER_ERR_NOT_FOUND;
+}
+
+
+/*
+ * Check the integrity of the certificate ASN.1 structure.
+ *
+ * Extract the relevant data that will be used later during authentication.
+ *
+ * This function doesn't clear the static variables located at the top of this
+ * file in case of an error. It is only called from check_integrity(), which
+ * performs the cleanup if necessary.
+ */
+static int cert_parse(void *img, unsigned int img_len)
+{
+	int ret, is_critical;
+	size_t len;
+	unsigned char *p, *end, *crt_end;
+	mbedtls_asn1_buf sig_alg1, sig_alg2;
+
+	p = (unsigned char *)img;
+	len = img_len;
+	end = p + len;
+
+	/*
+	 * Certificate  ::=  SEQUENCE  {
+	 *      tbsCertificate       TBSCertificate,
+	 *      signatureAlgorithm   AlgorithmIdentifier,
+	 *      signatureValue       BIT STRING  }
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+
+	if (len > (size_t)(end - p)) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	crt_end = p + len;
+
+	/*
+	 * TBSCertificate  ::=  SEQUENCE  {
+	 */
+	tbs.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	end = p + len;
+	tbs.len = end - tbs.p;
+
+	/*
+	 * Version  ::=  INTEGER  {  v1(0), v2(1), v3(2)  }
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len,
+				   MBEDTLS_ASN1_CONTEXT_SPECIFIC |
+				   MBEDTLS_ASN1_CONSTRUCTED | 0);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	p += len;
+
+	/*
+	 * CertificateSerialNumber  ::=  INTEGER
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_INTEGER);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	p += len;
+
+	/*
+	 * signature            AlgorithmIdentifier
+	 */
+	sig_alg1.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	if ((end - p) < 1) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	sig_alg1.len = (p + len) - sig_alg1.p;
+	p += len;
+
+	/*
+	 * issuer               Name
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	p += len;
+
+	/*
+	 * Validity ::= SEQUENCE {
+	 *      notBefore      Time,
+	 *      notAfter       Time }
+	 *
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	p += len;
+
+	/*
+	 * subject              Name
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	p += len;
+
+	/*
+	 * SubjectPublicKeyInfo
+	 */
+	pk.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	pk.len = (p + len) - pk.p;
+	p += len;
+
+	/*
+	 * issuerUniqueID  [1]  IMPLICIT UniqueIdentifier OPTIONAL,
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len,
+				   MBEDTLS_ASN1_CONTEXT_SPECIFIC |
+				   MBEDTLS_ASN1_CONSTRUCTED | 1);
+	if (ret != 0) {
+		if (ret != MBEDTLS_ERR_ASN1_UNEXPECTED_TAG) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+	} else {
+		p += len;
+	}
+
+	/*
+	 * subjectUniqueID [2]  IMPLICIT UniqueIdentifier OPTIONAL,
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len,
+				   MBEDTLS_ASN1_CONTEXT_SPECIFIC |
+				   MBEDTLS_ASN1_CONSTRUCTED | 2);
+	if (ret != 0) {
+		if (ret != MBEDTLS_ERR_ASN1_UNEXPECTED_TAG) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+	} else {
+		p += len;
+	}
+
+	/*
+	 * extensions      [3]  EXPLICIT Extensions OPTIONAL
+	 */
+	ret = mbedtls_asn1_get_tag(&p, end, &len,
+				   MBEDTLS_ASN1_CONTEXT_SPECIFIC |
+				   MBEDTLS_ASN1_CONSTRUCTED | 3);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+
+	/*
+	 * Extensions  ::=  SEQUENCE SIZE (1..MAX) OF Extension
+	 */
+	v3_ext.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	v3_ext.len = (p + len) - v3_ext.p;
+
+	/*
+	 * Check extensions integrity
+	 */
+	while (p < end) {
+		ret = mbedtls_asn1_get_tag(&p, end, &len,
+					   MBEDTLS_ASN1_CONSTRUCTED |
+					   MBEDTLS_ASN1_SEQUENCE);
+		if (ret != 0) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+
+		/* Get extension ID */
+		ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_OID);
+		if (ret != 0) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+		p += len;
+
+		/* Get optional critical */
+		ret = mbedtls_asn1_get_bool(&p, end, &is_critical);
+		if ((ret != 0) && (ret != MBEDTLS_ERR_ASN1_UNEXPECTED_TAG)) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+
+		/* Data should be octet string type */
+		ret = mbedtls_asn1_get_tag(&p, end, &len,
+					   MBEDTLS_ASN1_OCTET_STRING);
+		if (ret != 0) {
+			return IMG_PARSER_ERR_FORMAT;
+		}
+		p += len;
+	}
+
+	if (p != end) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+
+	end = crt_end;
+
+	/*
+	 *  }
+	 *  -- end of TBSCertificate
+	 *
+	 *  signatureAlgorithm   AlgorithmIdentifier
+	 */
+	sig_alg2.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_CONSTRUCTED |
+				   MBEDTLS_ASN1_SEQUENCE);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	if ((end - p) < 1) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	sig_alg2.len = (p + len) - sig_alg2.p;
+	p += len;
+
+	/* Compare both signature algorithms */
+	if (sig_alg1.len != sig_alg2.len) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	if (0 != memcmp(sig_alg1.p, sig_alg2.p, sig_alg1.len)) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	memcpy(&sig_alg, &sig_alg1, sizeof(sig_alg));
+
+	/*
+	 * signatureValue       BIT STRING
+	 */
+	signature.p = p;
+	ret = mbedtls_asn1_get_tag(&p, end, &len, MBEDTLS_ASN1_BIT_STRING);
+	if (ret != 0) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+	signature.len = (p + len) - signature.p;
+	p += len;
+
+	/* Check certificate length */
+	if (p != end) {
+		return IMG_PARSER_ERR_FORMAT;
+	}
+
+	return IMG_PARSER_OK;
+}
+
+
+/* Exported functions */
+
+static void init(void)
+{
+	mbedtls_init();
+}
+
+/*
+ * Wrapper for cert_parse() that clears the static variables used by it in case
+ * of an error.
+ */
+static int check_integrity(void *img, unsigned int img_len)
+{
+	int rc = cert_parse(img, img_len);
+
+	if (rc != IMG_PARSER_OK)
+		clear_temp_vars();
+
+	return rc;
+}
+
+/*
+ * Extract an authentication parameter from an X509v3 certificate
+ *
+ * This function returns a pointer to the extracted data and its length.
+ * Depending on the type of parameter, a pointer to the data stored in the
+ * certificate may be returned (e.g. an octet string containing a hash). Other
+ * data may need to be copied and formatted (e.g. integers). In the latter case,
+ * a buffer of the correct type needs to be statically allocated, filled and
+ * returned.
+ */
+static int get_auth_param(const auth_param_type_desc_t *type_desc,
+		void *img, unsigned int img_len,
+		void **param, unsigned int *param_len)
+{
+	int rc = IMG_PARSER_OK;
+
+	/* We do not use img because the check_integrity function has already
+	 * extracted the relevant data (v3_ext, pk, sig_alg, etc) */
+
+	switch (type_desc->type) {
+	case AUTH_PARAM_RAW_DATA:
+		/* Data to be signed */
+		*param = (void *)tbs.p;
+		*param_len = (unsigned int)tbs.len;
+		break;
+	case AUTH_PARAM_HASH:
+	case AUTH_PARAM_NV_CTR:
+		/* All these parameters are included as X509v3 extensions */
+		rc = get_ext(type_desc->cookie, param, param_len);
+		break;
+	case AUTH_PARAM_PUB_KEY:
+		if (type_desc->cookie != 0) {
+			/* Get public key from extension */
+			rc = get_ext(type_desc->cookie, param, param_len);
+		} else {
+			/* Get the subject public key */
+			*param = (void *)pk.p;
+			*param_len = (unsigned int)pk.len;
+		}
+		break;
+	case AUTH_PARAM_SIG_ALG:
+		/* Get the certificate signature algorithm */
+		*param = (void *)sig_alg.p;
+		*param_len = (unsigned int)sig_alg.len;
+		break;
+	case AUTH_PARAM_SIG:
+		/* Get the certificate signature */
+		*param = (void *)signature.p;
+		*param_len = (unsigned int)signature.len;
+		break;
+	default:
+		rc = IMG_PARSER_ERR_NOT_FOUND;
+		break;
+	}
+
+	return rc;
+}
+
+REGISTER_IMG_PARSER_LIB(IMG_CERT, LIB_NAME, init, \
+		       check_integrity, get_auth_param);
diff --git a/drivers/auth/tbbr/tbbr_cot.c b/drivers/auth/tbbr/tbbr_cot.c
new file mode 100644
index 0000000..4aaab39
--- /dev/null
+++ b/drivers/auth/tbbr/tbbr_cot.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <auth_mod.h>
+#include <platform_def.h>
+#include <stddef.h>
+
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+
+/*
+ * Maximum key and hash sizes (in DER format)
+ */
+#define PK_DER_LEN			294
+#define HASH_DER_LEN			51
+
+/*
+ * The platform must allocate buffers to store the authentication parameters
+ * extracted from the certificates. In this case, because of the way the CoT is
+ * established, we can reuse some of the buffers on different stages
+ */
+static unsigned char tb_fw_hash_buf[HASH_DER_LEN];
+static unsigned char scp_fw_hash_buf[HASH_DER_LEN];
+static unsigned char soc_fw_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_extra1_hash_buf[HASH_DER_LEN];
+static unsigned char tos_fw_extra2_hash_buf[HASH_DER_LEN];
+static unsigned char nt_world_bl_hash_buf[HASH_DER_LEN];
+static unsigned char trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char non_trusted_world_pk_buf[PK_DER_LEN];
+static unsigned char content_pk_buf[PK_DER_LEN];
+
+/*
+ * Parameter type descriptors
+ */
+static auth_param_type_desc_t trusted_nv_ctr = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_NV_CTR, TRUSTED_FW_NVCOUNTER_OID);
+static auth_param_type_desc_t non_trusted_nv_ctr = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_NV_CTR, NON_TRUSTED_FW_NVCOUNTER_OID);
+
+static auth_param_type_desc_t subject_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, 0);
+static auth_param_type_desc_t sig = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG, 0);
+static auth_param_type_desc_t sig_alg = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_SIG_ALG, 0);
+static auth_param_type_desc_t raw_data = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_RAW_DATA, 0);
+
+static auth_param_type_desc_t trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, TRUSTED_WORLD_PK_OID);
+static auth_param_type_desc_t non_trusted_world_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_WORLD_PK_OID);
+
+static auth_param_type_desc_t scp_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, SCP_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t soc_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, SOC_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t tos_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, TRUSTED_OS_FW_CONTENT_CERT_PK_OID);
+static auth_param_type_desc_t nt_fw_content_pk = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_PUB_KEY, NON_TRUSTED_FW_CONTENT_CERT_PK_OID);
+
+static auth_param_type_desc_t tb_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_BOOT_FW_HASH_OID);
+static auth_param_type_desc_t scp_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, SCP_FW_HASH_OID);
+static auth_param_type_desc_t soc_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, SOC_AP_FW_HASH_OID);
+static auth_param_type_desc_t tos_fw_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_HASH_OID);
+static auth_param_type_desc_t tos_fw_extra1_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_EXTRA1_HASH_OID);
+static auth_param_type_desc_t tos_fw_extra2_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, TRUSTED_OS_FW_EXTRA2_HASH_OID);
+static auth_param_type_desc_t nt_world_bl_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID);
+static auth_param_type_desc_t scp_bl2u_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, SCP_FWU_CFG_HASH_OID);
+static auth_param_type_desc_t bl2u_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, AP_FWU_CFG_HASH_OID);
+static auth_param_type_desc_t ns_bl2u_hash = AUTH_PARAM_TYPE_DESC(
+		AUTH_PARAM_HASH, FWU_HASH_OID);
+
+/*
+ * TBBR Chain of trust definition
+ */
+static const auth_img_desc_t cot_desc[] = {
+	/*
+	 * BL2
+	 */
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		.img_id = TRUSTED_BOOT_FW_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = NULL,
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &subject_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &tb_fw_hash,
+				.data = {
+					.ptr = (void *)tb_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	[BL2_IMAGE_ID] = {
+		.img_id = BL2_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[TRUSTED_BOOT_FW_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &tb_fw_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * Trusted key certificate
+	 */
+	[TRUSTED_KEY_CERT_ID] = {
+		.img_id = TRUSTED_KEY_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = NULL,
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &subject_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &trusted_world_pk,
+				.data = {
+					.ptr = (void *)trusted_world_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			},
+			[1] = {
+				.type_desc = &non_trusted_world_pk,
+				.data = {
+					.ptr = (void *)non_trusted_world_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			}
+		}
+	},
+	/*
+	 * SCP Firmware
+	 */
+	[SCP_FW_KEY_CERT_ID] = {
+		.img_id = SCP_FW_KEY_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[TRUSTED_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &trusted_world_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &scp_fw_content_pk,
+				.data = {
+					.ptr = (void *)content_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			}
+		}
+	},
+	[SCP_FW_CONTENT_CERT_ID] = {
+		.img_id = SCP_FW_CONTENT_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[SCP_FW_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &scp_fw_content_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &scp_fw_hash,
+				.data = {
+					.ptr = (void *)scp_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		.img_id = SCP_BL2_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[SCP_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &scp_fw_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * SoC Firmware
+	 */
+	[SOC_FW_KEY_CERT_ID] = {
+		.img_id = SOC_FW_KEY_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[TRUSTED_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &trusted_world_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &soc_fw_content_pk,
+				.data = {
+					.ptr = (void *)content_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			}
+		}
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		.img_id = SOC_FW_CONTENT_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[SOC_FW_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &soc_fw_content_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &soc_fw_hash,
+				.data = {
+					.ptr = (void *)soc_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	[BL31_IMAGE_ID] = {
+		.img_id = BL31_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[SOC_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &soc_fw_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * Trusted OS Firmware
+	 */
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		.img_id = TRUSTED_OS_FW_KEY_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[TRUSTED_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &trusted_world_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &tos_fw_content_pk,
+				.data = {
+					.ptr = (void *)content_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			}
+		}
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		.img_id = TRUSTED_OS_FW_CONTENT_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[TRUSTED_OS_FW_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &tos_fw_content_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &trusted_nv_ctr,
+					.plat_nv_ctr = &trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &tos_fw_hash,
+				.data = {
+					.ptr = (void *)tos_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			},
+			[1] = {
+				.type_desc = &tos_fw_extra1_hash,
+				.data = {
+					.ptr = (void *)tos_fw_extra1_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			},
+			[2] = {
+				.type_desc = &tos_fw_extra2_hash,
+				.data = {
+					.ptr = (void *)tos_fw_extra2_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	[BL32_IMAGE_ID] = {
+		.img_id = BL32_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[TRUSTED_OS_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &tos_fw_hash,
+				}
+			}
+		}
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		.img_id = BL32_EXTRA1_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[TRUSTED_OS_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &tos_fw_extra1_hash,
+				}
+			}
+		}
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		.img_id = BL32_EXTRA2_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[TRUSTED_OS_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &tos_fw_extra2_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * Non-Trusted Firmware
+	 */
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		.img_id = NON_TRUSTED_FW_KEY_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[TRUSTED_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &non_trusted_world_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &non_trusted_nv_ctr,
+					.plat_nv_ctr = &non_trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &nt_fw_content_pk,
+				.data = {
+					.ptr = (void *)content_pk_buf,
+					.len = (unsigned int)PK_DER_LEN
+				}
+			}
+		}
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		.img_id = NON_TRUSTED_FW_CONTENT_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = &cot_desc[NON_TRUSTED_FW_KEY_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &nt_fw_content_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			},
+			[1] = {
+				.type = AUTH_METHOD_NV_CTR,
+				.param.nv_ctr = {
+					.cert_nv_ctr = &non_trusted_nv_ctr,
+					.plat_nv_ctr = &non_trusted_nv_ctr
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &nt_world_bl_hash,
+				.data = {
+					.ptr = (void *)nt_world_bl_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	[BL33_IMAGE_ID] = {
+		.img_id = BL33_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[NON_TRUSTED_FW_CONTENT_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &nt_world_bl_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * FWU auth descriptor.
+	 */
+	[FWU_CERT_ID] = {
+		.img_id = FWU_CERT_ID,
+		.img_type = IMG_CERT,
+		.parent = NULL,
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_SIG,
+				.param.sig = {
+					.pk = &subject_pk,
+					.sig = &sig,
+					.alg = &sig_alg,
+					.data = &raw_data,
+				}
+			}
+		},
+		.authenticated_data = {
+			[0] = {
+				.type_desc = &scp_bl2u_hash,
+				.data = {
+					.ptr = (void *)scp_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			},
+			[1] = {
+				.type_desc = &bl2u_hash,
+				.data = {
+					.ptr = (void *)tb_fw_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			},
+			[2] = {
+				.type_desc = &ns_bl2u_hash,
+				.data = {
+					.ptr = (void *)nt_world_bl_hash_buf,
+					.len = (unsigned int)HASH_DER_LEN
+				}
+			}
+		}
+	},
+	/*
+	 * SCP_BL2U
+	 */
+	[SCP_BL2U_IMAGE_ID] = {
+		.img_id = SCP_BL2U_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[FWU_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &scp_bl2u_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * BL2U
+	 */
+	[BL2U_IMAGE_ID] = {
+		.img_id = BL2U_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[FWU_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &bl2u_hash,
+				}
+			}
+		}
+	},
+	/*
+	 * NS_BL2U
+	 */
+	[NS_BL2U_IMAGE_ID] = {
+		.img_id = NS_BL2U_IMAGE_ID,
+		.img_type = IMG_RAW,
+		.parent = &cot_desc[FWU_CERT_ID],
+		.img_auth_methods = {
+			[0] = {
+				.type = AUTH_METHOD_HASH,
+				.param.hash = {
+					.data = &raw_data,
+					.hash = &ns_bl2u_hash,
+				}
+			}
+		}
+	}
+};
+
+/* Register the CoT in the authentication module */
+REGISTER_COT(cot_desc);
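
These descriptors are consumed by the generic authentication module: before an image can be verified, every certificate on its parent chain must be authenticated first, ending at a root-of-trust descriptor whose parent is NULL. A much-simplified sketch of that traversal (illustrative only; the real code also loads each certificate and applies all of its img_auth_methods):

    /* Sketch only: authenticate an image's parent chain, roots first. */
    static int authenticate_chain(const auth_img_desc_t *desc)
    {
    	if (desc->parent != NULL) {
    		int rc = authenticate_chain(desc->parent);

    		if (rc != 0)
    			return rc;
    	}

    	return authenticate_image(desc);	/* hypothetical helper */
    }
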
diff --git a/drivers/cadence/uart/aarch64/cdns_console.S b/drivers/cadence/uart/aarch64/cdns_console.S
new file mode 100644
index 0000000..f6a1532
--- /dev/null
+++ b/drivers/cadence/uart/aarch64/cdns_console.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cadence/cdns_uart.h>
+
+	.globl  console_core_init
+	.globl  console_core_putc
+	.globl  console_core_getc
+	.globl	console_core_flush
+
+	/* -----------------------------------------------
+	 * int console_core_init(unsigned long base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * We assume that the bootloader already set up
+	 * the HW (baud, ...) and only enable the trans-
+	 * mitter and receiver here.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+
+	/* RX/TX enabled & reset */
+	mov	w3, #(R_UART_CR_TX_EN | R_UART_CR_RX_EN | R_UART_CR_TXRST | R_UART_CR_RXRST)
+	str	w3, [x0, #R_UART_CR]
+
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, unsigned long base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+1:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #R_UART_SR]
+	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 1b
+	mov	w2, #0xD
+	str	w2, [x1, #R_UART_TX]
+2:
+	/* Check if the transmit FIFO is full */
+	ldr	w2, [x1, #R_UART_SR]
+	tbnz	w2, #UART_SR_INTR_TFUL_BIT, 2b
+	str	w0, [x1, #R_UART_TX]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(unsigned long base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+1:
+	/* Check if the receive FIFO is empty */
+	ldr	w1, [x0, #R_UART_SR]
+	tbnz	w1, #UART_SR_INTR_REMPTY_BIT, 1b
+	ldr	w1, [x0, #R_UART_RX]
+	mov	w0, w1
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	/* Placeholder */
+	mov	w0, #0
+	ret
+endfunc console_core_flush
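
From C code, a platform using this driver would typically route it through the generic console_init()/console_putc() layer; a minimal sketch (the PLAT_* values are placeholders, not defined in this patch):

    #include <console.h>

    /* Sketch only: register the Cadence UART as the boot console. */
    void plat_console_setup(void)
    {
    	(void)console_init(PLAT_CDNS_UART_BASE, PLAT_UART_CLK_IN_HZ,
    			   PLAT_CONSOLE_BAUDRATE);
    }
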
diff --git a/drivers/cadence/uart/cdns_console.S b/drivers/cadence/uart/cdns_console.S
new file mode 100644
index 0000000..6da8f91
--- /dev/null
+++ b/drivers/cadence/uart/cdns_console.S
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !ERROR_DEPRECATED
+#include "./aarch64/cdns_console.S"
+#endif
diff --git a/drivers/console/aarch32/console.S b/drivers/console/aarch32/console.S
new file mode 100644
index 0000000..a3c6546
--- /dev/null
+++ b/drivers/console/aarch32/console.S
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+
+	.globl	console_init
+	.globl	console_uninit
+	.globl	console_putc
+	.globl	console_getc
+	.globl	console_flush
+
+	/*
+	 *  The console base is in the data section and not in .bss
+	 *  even though it is zero-init. In particular, this allows
+	 *  the console functions to start using this variable before
+	 *  the runtime memory is initialized for images which do not
+	 *  need to copy the .data section from ROM to RAM.
+	 */
+.section .data.console_base ; .align 2
+	console_base: .word 0x0
+
+	/* -----------------------------------------------
+	 * int console_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. It saves
+	 * the console base to the data section.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * out: return 1 on success else 0 on error
+	 * Clobber list : r1 - r3
+	 * -----------------------------------------------
+	 */
+func console_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	init_fail
+	ldr	r3, =console_base
+	str	r0, [r3]
+	b	console_core_init
+init_fail:
+	bx	lr
+endfunc console_init
+
+	/* -----------------------------------------------
+	 * void console_uninit(void)
+	 * Function to finish the use of the console driver.
+	 * It sets the console_base as NULL so that any
+	 * further invocation of `console_putc` or
+	 * `console_getc` APIs would return error.
+	 * -----------------------------------------------
+	 */
+func console_uninit
+	mov	r0, #0
+	ldr	r3, =console_base
+	str	r0, [r3]
+	bx	lr
+endfunc console_uninit
+
+	/* ---------------------------------------------
+	 * int console_putc(int c)
+	 * Function to output a character over the
+	 * console. It returns the character printed on
+	 * success or -1 on error.
+	 * In : r0 - character to be printed
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r1, r2
+	 * ---------------------------------------------
+	 */
+func console_putc
+	ldr	r2, =console_base
+	ldr	r1, [r2]
+	b	console_core_putc
+endfunc console_putc
+
+	/* ---------------------------------------------
+	 * int console_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_getc
+	ldr	r1, =console_base
+	ldr	r0, [r1]
+	b	console_core_getc
+endfunc console_getc
+
+	/* ---------------------------------------------
+	 * int console_flush(void)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output. It returns 0
+	 * upon successful completion, otherwise it
+	 * returns -1.
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_flush
+	ldr	r1, =console_base
+	ldr	r0, [r1]
+	b	console_core_flush
+endfunc console_flush
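
The interface documented in the comment blocks above is what platform code calls during early boot. A minimal usage sketch, assuming hypothetical platform constants (the UART base, clock and baud rate below are illustrative, not taken from any real platform port):

#include <stdint.h>

int console_init(uintptr_t base_addr, unsigned int uart_clk,
		 unsigned int baud_rate);
int console_putc(int c);
void console_uninit(void);

#define PLAT_UART_BASE		0x1c090000UL	/* hypothetical */
#define PLAT_UART_CLK_HZ	24000000U	/* hypothetical */
#define PLAT_CONSOLE_BAUD	115200U

void plat_setup_early_console(void)
{
	/* console_init() returns 1 on success, 0 on error */
	if (console_init(PLAT_UART_BASE, PLAT_UART_CLK_HZ, PLAT_CONSOLE_BAUD))
		console_putc('!');	/* first sign of life */
}
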
diff --git a/drivers/console/aarch32/skeleton_console.S b/drivers/console/aarch32/skeleton_console.S
new file mode 100644
index 0000000..da4cecd
--- /dev/null
+++ b/drivers/console/aarch32/skeleton_console.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+
+	/*
+	 * This file contains a skeleton console implementation that can
+	 * be used as basis for a real console implementation by platforms
+	 * that do not contain PL011 hardware.
+	 */
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: r0 - console base address
+	 *     r1 - Uart clock in Hz
+	 *     r2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : r1, r2
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cmp	r0, #0
+	beq	core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cmp	r1, #0
+	beq	core_init_fail
+	cmp	r2, #0
+	beq	core_init_fail
+	/* Insert implementation here */
+	mov	r0, #1
+	bx	lr
+core_init_fail:
+	mov	r0, #0
+	bx	lr
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : r0 - character to be printed
+	 *      r1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : r2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cmp	r1, #0
+	beq	putc_error
+	/* Insert implementation here */
+	bx	lr
+putc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : r0 - console base address
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cmp	r0, #0
+	beq	getc_error
+	/* Insert implementation here */
+	bx	lr
+getc_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : r0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : r0, r1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	cmp	r0, #0
+	beq	flush_error
+	/* Insert implementation here */
+	mov	r0, #0
+	bx	lr
+flush_error:
+	mov	r0, #-1
+	bx	lr
+endfunc console_core_flush
diff --git a/drivers/console/aarch64/console.S b/drivers/console/aarch64/console.S
new file mode 100644
index 0000000..7cc04dd
--- /dev/null
+++ b/drivers/console/aarch64/console.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+
+	.globl	console_init
+	.globl	console_uninit
+	.globl	console_putc
+	.globl	console_getc
+	.globl	console_flush
+
+	/*
+	 *  The console base is in the data section and not in .bss
+	 *  even though it is zero-init. In particular, this allows
+	 *  the console functions to start using this variable before
+	 *  the runtime memory is initialized for images which do not
+	 *  need to copy the .data section from ROM to RAM.
+	 */
+.section .data.console_base ; .align 3
+	console_base: .quad 0x0
+
+	/* -----------------------------------------------
+	 * int console_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. It saves
+	 * the console base to the data section.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * out: return 1 on success else 0 on error
+	 * Clobber list : x1 - x4
+	 * -----------------------------------------------
+	 */
+func console_init
+	/* Check the input base address */
+	cbz	x0, init_fail
+	adrp	x3, console_base
+	str	x0, [x3, :lo12:console_base]
+	b	console_core_init
+init_fail:
+	ret
+endfunc console_init
+
+	/* -----------------------------------------------
+	 * void console_uninit(void)
+	 * Function to finish the use of console driver.
+	 * It sets the console_base as NULL so that any
+	 * further invocation of `console_putc` or
+	 * `console_getc` APIs would return an error.
+	 * -----------------------------------------------
+	 */
+func console_uninit
+	mov	x0, #0
+	adrp	x3, console_base
+	str	x0, [x3, :lo12:console_base]
+	ret
+endfunc console_uninit
+
+	/* ---------------------------------------------
+	 * int console_putc(int c)
+	 * Function to output a character over the
+	 * console. It returns the character printed on
+	 * success or -1 on error.
+	 * In : x0 - character to be printed
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func console_putc
+	adrp	x2, console_base
+	ldr	x1, [x2, :lo12:console_base]
+	b	console_core_putc
+endfunc console_putc
+
+	/* ---------------------------------------------
+	 * int console_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_getc
+	adrp	x1, console_base
+	ldr	x0, [x1, :lo12:console_base]
+	b	console_core_getc
+endfunc console_getc
+
+	/* ---------------------------------------------
+	 * int console_flush(void)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output. It returns 0
+	 * upon successful completion, otherwise it
+	 * returns -1.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_flush
+	adrp	x1, console_base
+	ldr	x0, [x1, :lo12:console_base]
+	b	console_core_flush
+endfunc console_flush
diff --git a/drivers/console/aarch64/skeleton_console.S b/drivers/console/aarch64/skeleton_console.S
new file mode 100644
index 0000000..9db6157
--- /dev/null
+++ b/drivers/console/aarch64/skeleton_console.S
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+
+	/*
+	 * This file contains a skeleton console implementation that can
+	 * be used as basis for a real console implementation by platforms
+	 * that do not contain PL011 hardware.
+	 */
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+	/* -----------------------------------------------
+	 * int console_core_init(uintptr_t base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+	/* Insert implementation here */
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Insert implementation here */
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+	/* Insert implementation here */
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	cbz	x0, flush_error
+	/* Insert implementation here */
+	mov	w0, #0
+	ret
+flush_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_flush
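
The "Insert implementation here" markers are where a platform adds its own UART accesses; the file stays in assembly because, per the comments above, these functions are also reached from crash reporting before a full C environment is guaranteed. Purely to illustrate the expected behaviour, here is a C rendering of a filled-in console_core_putc for a hypothetical memory-mapped UART (register names and offsets below are invented):

#include <mmio.h>
#include <stdint.h>

#define MY_UART_STAT		0x00U		/* hypothetical registers */
#define MY_UART_STAT_TXFULL	(1U << 0)
#define MY_UART_DATA		0x04U

int console_core_putc_sketch(int c, uintptr_t base_addr)
{
	if (base_addr == 0)
		return -1;
	/* Busy-wait until the TX FIFO can accept another byte */
	while (mmio_read_32(base_addr + MY_UART_STAT) & MY_UART_STAT_TXFULL)
		;
	mmio_write_32(base_addr + MY_UART_DATA, (uint32_t)c);
	return c;
}
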
diff --git a/drivers/console/console.S b/drivers/console/console.S
new file mode 100644
index 0000000..c48530c
--- /dev/null
+++ b/drivers/console/console.S
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !ERROR_DEPRECATED
+#include "./aarch64/console.S"
+#endif
diff --git a/drivers/console/skeleton_console.S b/drivers/console/skeleton_console.S
new file mode 100644
index 0000000..905370d
--- /dev/null
+++ b/drivers/console/skeleton_console.S
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !ERROR_DEPRECATED
+#include "./aarch64/skeleton_console.S"
+#endif
diff --git a/drivers/delay_timer/delay_timer.c b/drivers/delay_timer/delay_timer.c
new file mode 100644
index 0000000..43f5af7
--- /dev/null
+++ b/drivers/delay_timer/delay_timer.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <delay_timer.h>
+#include <platform_def.h>
+
+/***********************************************************
+ * The delay timer implementation
+ ***********************************************************/
+static const timer_ops_t *ops;
+
+/***********************************************************
+ * Delay for the given number of microseconds. The driver must
+ * be initialized before calling this function.
+ ***********************************************************/
+void udelay(uint32_t usec)
+{
+	assert(ops != NULL &&
+		(ops->clk_mult != 0) &&
+		(ops->clk_div != 0) &&
+		(ops->get_timer_value != NULL));
+
+	uint32_t start, delta, total_delta;
+
+	assert(usec < UINT32_MAX / ops->clk_div);
+
+	start = ops->get_timer_value();
+
+	total_delta = (usec * ops->clk_div) / ops->clk_mult;
+
+	do {
+		/*
+		 * If the timer value wraps around, the subtraction will
+		 * overflow and it will still give the correct result.
+		 */
+		delta = start - ops->get_timer_value(); /* Decreasing counter */
+
+	} while (delta < total_delta);
+}
+
+/***********************************************************
+ * Delay for the given number of milliseconds. The driver must
+ * be initialized before calling this function.
+ ***********************************************************/
+void mdelay(uint32_t msec)
+{
+	udelay(msec*1000);
+}
+
+/***********************************************************
+ * Initialize the timer. The fields in the provided timer
+ * ops pointer must be valid.
+ ***********************************************************/
+void timer_init(const timer_ops_t *ops_ptr)
+{
+	assert(ops_ptr != NULL  &&
+		(ops_ptr->clk_mult != 0) &&
+		(ops_ptr->clk_div != 0) &&
+		(ops_ptr->get_timer_value != NULL));
+
+	ops = ops_ptr;
+}
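
Putting the two functions above together, a platform registers a timer_ops_t whose get_timer_value() returns a decreasing 32-bit count, plus a mult/div pair that converts microseconds into ticks. A sketch assuming a hypothetical 24 MHz down-counting timer register (the register address and its existence are invented for illustration):

#include <delay_timer.h>
#include <mmio.h>
#include <stdint.h>

#define PLAT_TIMER_CNT		0x1c110004UL	/* hypothetical down-counter */

static uint32_t plat_timer_get_value(void)
{
	/* udelay() expects a value that decreases as time passes */
	return mmio_read_32(PLAT_TIMER_CNT);
}

/* ticks = usec * clk_div / clk_mult, so a 24 MHz timer gives mult=1, div=24 */
static const timer_ops_t plat_timer_ops = {
	.get_timer_value	= plat_timer_get_value,
	.clk_mult		= 1,
	.clk_div		= 24,
};

void plat_delay_timer_setup(void)
{
	timer_init(&plat_timer_ops);
	mdelay(10);	/* e.g. let a device come out of reset */
}
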
diff --git a/drivers/delay_timer/generic_delay_timer.c b/drivers/delay_timer/generic_delay_timer.c
new file mode 100644
index 0000000..8a36c8a
--- /dev/null
+++ b/drivers/delay_timer/generic_delay_timer.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <generic_delay_timer.h>
+#include <platform.h>
+
+/* Number of ticks per second for a 1 MHz signal */
+#define MHZ_TICKS_PER_SEC 1000000
+
+static timer_ops_t ops;
+
+static uint32_t get_timer_value(void)
+{
+	/*
+	 * Generic delay timer implementation expects the timer to be a down
+	 * counter. We apply bitwise NOT operator to the tick values returned
+	 * by read_cntpct_el0() to simulate the down counter. The value is
+	 * clipped from 64 to 32 bits.
+	 */
+	return (uint32_t)(~read_cntpct_el0());
+}
+
+void generic_delay_timer_init_args(uint32_t mult, uint32_t div)
+{
+	ops.get_timer_value	= get_timer_value;
+	ops.clk_mult		= mult;
+	ops.clk_div		= div;
+
+	timer_init(&ops);
+
+	VERBOSE("Generic delay timer configured with mult=%u and div=%u\n",
+		mult, div);
+}
+
+void generic_delay_timer_init(void)
+{
+	/* Ticks per second of a 1 MHz reference clock */
+	unsigned int mult = MHZ_TICKS_PER_SEC;
+
+	/* Value in ticks per second (Hz) */
+	unsigned int div  = plat_get_syscnt_freq2();
+
+	/* Reduce multiplier and divider by dividing them repeatedly by 10 */
+	while ((mult % 10 == 0) && (div % 10 == 0)) {
+		mult /= 10;
+		div /= 10;
+	}
+
+	generic_delay_timer_init_args(mult, div);
+}
+
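
As a worked example of the reduction loop above: with a 100 MHz system counter, plat_get_syscnt_freq2() returns 100000000, so mult starts at 1000000 and div at 100000000; dividing both by 10 until one of them is no longer a multiple of 10 leaves mult = 1 and div = 100, and udelay(5) then spins for 5 * 100 / 1 = 500 counter ticks (5 us at 100 MHz). The same configuration could be requested explicitly:

#include <generic_delay_timer.h>

/* Equivalent to generic_delay_timer_init() on a 100 MHz system counter */
void plat_timer_setup_explicit(void)
{
	generic_delay_timer_init_args(1, 100);
}
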
diff --git a/drivers/emmc/emmc.c b/drivers/emmc/emmc.c
new file mode 100644
index 0000000..bcdc82c
--- /dev/null
+++ b/drivers/emmc/emmc.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Defines a simple and generic interface to access eMMC device.
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <emmc.h>
+#include <errno.h>
+#include <string.h>
+#include <utils.h>
+
+static const emmc_ops_t *ops;
+static unsigned int emmc_ocr_value;
+static emmc_csd_t emmc_csd;
+static unsigned int emmc_flags;
+
+static int is_cmd23_enabled(void)
+{
+	return (!!(emmc_flags & EMMC_FLAG_CMD23));
+}
+
+static int emmc_device_state(void)
+{
+	emmc_cmd_t cmd;
+	int ret;
+
+	do {
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		cmd.cmd_idx = EMMC_CMD13;
+		cmd.cmd_arg = EMMC_FIX_RCA << RCA_SHIFT_OFFSET;
+		cmd.resp_type = EMMC_RESPONSE_R1;
+		ret = ops->send_cmd(&cmd);
+		assert(ret == 0);
+		assert((cmd.resp_data[0] & STATUS_SWITCH_ERROR) == 0);
+		/* Ignore improbable errors in release builds */
+		(void)ret;
+	} while ((cmd.resp_data[0] & STATUS_READY_FOR_DATA) == 0);
+	return EMMC_GET_STATE(cmd.resp_data[0]);
+}
+
+static void emmc_set_ext_csd(unsigned int ext_cmd, unsigned int value)
+{
+	emmc_cmd_t cmd;
+	int ret, state;
+
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD6;
+	cmd.cmd_arg = EXTCSD_WRITE_BYTES | EXTCSD_CMD(ext_cmd) |
+		      EXTCSD_VALUE(value) | 1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	/* wait to exit PRG state */
+	do {
+		state = emmc_device_state();
+	} while (state == EMMC_STATE_PRG);
+	/* Ignore improbable errors in release builds */
+	(void)ret;
+}
+
+static void emmc_set_ios(int clk, int bus_width)
+{
+	int ret;
+
+	/* set IO speed & IO bus width */
+	if (emmc_csd.spec_vers == 4)
+		emmc_set_ext_csd(CMD_EXTCSD_BUS_WIDTH, bus_width);
+	ret = ops->set_ios(clk, bus_width);
+	assert(ret == 0);
+	/* Ignore improbable errors in release builds */
+	(void)ret;
+}
+
+static int emmc_enumerate(int clk, int bus_width)
+{
+	emmc_cmd_t cmd;
+	int ret, state;
+
+	ops->init();
+
+	/* CMD0: reset to IDLE */
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD0;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	while (1) {
+		/* CMD1: get OCR register */
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		cmd.cmd_idx = EMMC_CMD1;
+		cmd.cmd_arg = OCR_SECTOR_MODE | OCR_VDD_MIN_2V7 |
+			      OCR_VDD_MIN_1V7;
+		cmd.resp_type = EMMC_RESPONSE_R3;
+		ret = ops->send_cmd(&cmd);
+		assert(ret == 0);
+		emmc_ocr_value = cmd.resp_data[0];
+		if (emmc_ocr_value & OCR_POWERUP)
+			break;
+	}
+
+	/* CMD2: Card Identification */
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD2;
+	cmd.resp_type = EMMC_RESPONSE_R2;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	/* CMD3: Set Relative Address */
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD3;
+	cmd.cmd_arg = EMMC_FIX_RCA << RCA_SHIFT_OFFSET;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	/* CMD9: CSD Register */
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD9;
+	cmd.cmd_arg = EMMC_FIX_RCA << RCA_SHIFT_OFFSET;
+	cmd.resp_type = EMMC_RESPONSE_R2;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+	memcpy(&emmc_csd, &cmd.resp_data, sizeof(cmd.resp_data));
+
+	/* CMD7: Select Card */
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD7;
+	cmd.cmd_arg = EMMC_FIX_RCA << RCA_SHIFT_OFFSET;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+	/* wait until the device returns to TRAN state */
+	do {
+		state = emmc_device_state();
+	} while (state != EMMC_STATE_TRAN);
+
+	emmc_set_ios(clk, bus_width);
+	return ret;
+}
+
+size_t emmc_read_blocks(int lba, uintptr_t buf, size_t size)
+{
+	emmc_cmd_t cmd;
+	int ret;
+
+	assert((ops != 0) &&
+	       (ops->read != 0) &&
+	       ((buf & EMMC_BLOCK_MASK) == 0) &&
+	       ((size & EMMC_BLOCK_MASK) == 0));
+
+	inv_dcache_range(buf, size);
+	ret = ops->prepare(lba, buf, size);
+	assert(ret == 0);
+
+	if (is_cmd23_enabled()) {
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		/* set block count */
+		cmd.cmd_idx = EMMC_CMD23;
+		cmd.cmd_arg = size / EMMC_BLOCK_SIZE;
+		cmd.resp_type = EMMC_RESPONSE_R1;
+		ret = ops->send_cmd(&cmd);
+		assert(ret == 0);
+
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		cmd.cmd_idx = EMMC_CMD18;
+	} else {
+		if (size > EMMC_BLOCK_SIZE)
+			cmd.cmd_idx = EMMC_CMD18;
+		else
+			cmd.cmd_idx = EMMC_CMD17;
+	}
+	if ((emmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE)
+		cmd.cmd_arg = lba * EMMC_BLOCK_SIZE;
+	else
+		cmd.cmd_arg = lba;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	ret = ops->read(lba, buf, size);
+	assert(ret == 0);
+
+	/* wait until the buffer is empty */
+	emmc_device_state();
+
+	if (is_cmd23_enabled() == 0) {
+		if (size > EMMC_BLOCK_SIZE) {
+			zeromem(&cmd, sizeof(emmc_cmd_t));
+			cmd.cmd_idx = EMMC_CMD12;
+			ret = ops->send_cmd(&cmd);
+			assert(ret == 0);
+		}
+	}
+	/* Ignore improbable errors in release builds */
+	(void)ret;
+	return size;
+}
+
+size_t emmc_write_blocks(int lba, const uintptr_t buf, size_t size)
+{
+	emmc_cmd_t cmd;
+	int ret;
+
+	assert((ops != 0) &&
+	       (ops->write != 0) &&
+	       ((buf & EMMC_BLOCK_MASK) == 0) &&
+	       ((size & EMMC_BLOCK_MASK) == 0));
+
+	clean_dcache_range(buf, size);
+	ret = ops->prepare(lba, buf, size);
+	assert(ret == 0);
+
+	if (is_cmd23_enabled()) {
+		/* set block count */
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		cmd.cmd_idx = EMMC_CMD23;
+		cmd.cmd_arg = size / EMMC_BLOCK_SIZE;
+		cmd.resp_type = EMMC_RESPONSE_R1;
+		ret = ops->send_cmd(&cmd);
+		assert(ret == 0);
+
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		cmd.cmd_idx = EMMC_CMD25;
+	} else {
+		zeromem(&cmd, sizeof(emmc_cmd_t));
+		if (size > EMMC_BLOCK_SIZE)
+			cmd.cmd_idx = EMMC_CMD25;
+		else
+			cmd.cmd_idx = EMMC_CMD24;
+	}
+	if ((emmc_ocr_value & OCR_ACCESS_MODE_MASK) == OCR_BYTE_MODE)
+		cmd.cmd_arg = lba * EMMC_BLOCK_SIZE;
+	else
+		cmd.cmd_arg = lba;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	ret = ops->write(lba, buf, size);
+	assert(ret == 0);
+
+	/* wait until the buffer is empty */
+	emmc_device_state();
+
+	if (is_cmd23_enabled() == 0) {
+		if (size > EMMC_BLOCK_SIZE) {
+			zeromem(&cmd, sizeof(emmc_cmd_t));
+			cmd.cmd_idx = EMMC_CMD12;
+			ret = ops->send_cmd(&cmd);
+			assert(ret == 0);
+		}
+	}
+	/* Ignore improbable errors in release builds */
+	(void)ret;
+	return size;
+}
+
+size_t emmc_erase_blocks(int lba, size_t size)
+{
+	emmc_cmd_t cmd;
+	int ret, state;
+
+	assert(ops != 0);
+	assert((size != 0) && ((size % EMMC_BLOCK_SIZE) == 0));
+
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD35;
+	cmd.cmd_arg = lba;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD36;
+	cmd.cmd_arg = lba + (size / EMMC_BLOCK_SIZE) - 1;
+	cmd.resp_type = EMMC_RESPONSE_R1;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	zeromem(&cmd, sizeof(emmc_cmd_t));
+	cmd.cmd_idx = EMMC_CMD38;
+	cmd.resp_type = EMMC_RESPONSE_R1B;
+	ret = ops->send_cmd(&cmd);
+	assert(ret == 0);
+
+	/* wait until the device returns to TRAN state */
+	do {
+		state = emmc_device_state();
+	} while (state != EMMC_STATE_TRAN);
+	/* Ignore improbable errors in release builds */
+	(void)ret;
+	return size;
+}
+
+static inline void emmc_rpmb_enable(void)
+{
+	emmc_set_ext_csd(CMD_EXTCSD_PARTITION_CONFIG,
+			PART_CFG_BOOT_PARTITION1_ENABLE |
+			PART_CFG_PARTITION1_ACCESS);
+}
+
+static inline void emmc_rpmb_disable(void)
+{
+	emmc_set_ext_csd(CMD_EXTCSD_PARTITION_CONFIG,
+			PART_CFG_BOOT_PARTITION1_ENABLE);
+}
+
+size_t emmc_rpmb_read_blocks(int lba, uintptr_t buf, size_t size)
+{
+	size_t size_read;
+
+	emmc_rpmb_enable();
+	size_read = emmc_read_blocks(lba, buf, size);
+	emmc_rpmb_disable();
+	return size_read;
+}
+
+size_t emmc_rpmb_write_blocks(int lba, const uintptr_t buf, size_t size)
+{
+	size_t size_written;
+
+	emmc_rpmb_enable();
+	size_written = emmc_write_blocks(lba, buf, size);
+	emmc_rpmb_disable();
+	return size_written;
+}
+
+size_t emmc_rpmb_erase_blocks(int lba, size_t size)
+{
+	size_t size_erased;
+
+	emmc_rpmb_enable();
+	size_erased = emmc_erase_blocks(lba, size);
+	emmc_rpmb_disable();
+	return size_erased;
+}
+
+void emmc_init(const emmc_ops_t *ops_ptr, int clk, int width,
+	       unsigned int flags)
+{
+	assert((ops_ptr != 0) &&
+	       (ops_ptr->init != 0) &&
+	       (ops_ptr->send_cmd != 0) &&
+	       (ops_ptr->set_ios != 0) &&
+	       (ops_ptr->prepare != 0) &&
+	       (ops_ptr->read != 0) &&
+	       (ops_ptr->write != 0) &&
+	       (clk != 0) &&
+	       ((width == EMMC_BUS_WIDTH_1) ||
+		(width == EMMC_BUS_WIDTH_4) ||
+		(width == EMMC_BUS_WIDTH_8)));
+	ops = ops_ptr;
+	emmc_flags = flags;
+
+	emmc_enumerate(clk, width);
+}
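
A usage sketch of the interface above, assuming a host-controller driver (dw_mmc, for instance) exposes an emmc_ops_t; the clock value and the plat_emmc_get_ops() helper are illustrative only. Note that emmc_read_blocks() requires the buffer and size to be EMMC_BLOCK_SIZE aligned, as enforced by the asserts above.

#include <emmc.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical: provided by the platform's host-controller driver */
const emmc_ops_t *plat_emmc_get_ops(void);

void plat_emmc_setup(void)
{
	/* 50 MHz bus clock, 8-bit bus, device supports CMD23 (example values) */
	emmc_init(plat_emmc_get_ops(), 50 * 1000 * 1000, EMMC_BUS_WIDTH_8,
		  EMMC_FLAG_CMD23);
}

size_t plat_read_first_blocks(uintptr_t buf)
{
	/* buf must be EMMC_BLOCK_SIZE aligned (see the asserts above) */
	return emmc_read_blocks(0, buf, 2 * EMMC_BLOCK_SIZE);
}
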
diff --git a/drivers/gpio/gpio.c b/drivers/gpio/gpio.c
new file mode 100644
index 0000000..62f3dc2
--- /dev/null
+++ b/drivers/gpio/gpio.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * GPIO -- General Purpose Input/Output
+ *
+ * Defines a simple and generic interface to access GPIO device.
+ *
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <gpio.h>
+
+/*
+ * The gpio implementation
+ */
+static const gpio_ops_t *ops;
+
+int gpio_get_direction(int gpio)
+{
+	assert(ops);
+	assert(ops->get_direction != 0);
+	assert(gpio >= 0);
+
+	return ops->get_direction(gpio);
+}
+
+void gpio_set_direction(int gpio, int direction)
+{
+	assert(ops);
+	assert(ops->set_direction != 0);
+	assert((direction == GPIO_DIR_OUT) || (direction == GPIO_DIR_IN));
+	assert(gpio >= 0);
+
+	ops->set_direction(gpio, direction);
+}
+
+int gpio_get_value(int gpio)
+{
+	assert(ops);
+	assert(ops->get_value != 0);
+	assert(gpio >= 0);
+
+	return ops->get_value(gpio);
+}
+
+void gpio_set_value(int gpio, int value)
+{
+	assert(ops);
+	assert(ops->set_value != 0);
+	assert((value == GPIO_LEVEL_LOW) || (value == GPIO_LEVEL_HIGH));
+	assert(gpio >= 0);
+
+	ops->set_value(gpio, value);
+}
+
+void gpio_set_pull(int gpio, int pull)
+{
+	assert(ops);
+	assert(ops->set_pull != 0);
+	assert((pull == GPIO_PULL_NONE) || (pull == GPIO_PULL_UP) ||
+	       (pull == GPIO_PULL_DOWN));
+	assert(gpio >= 0);
+
+	ops->set_pull(gpio, pull);
+}
+
+int gpio_get_pull(int gpio)
+{
+	assert(ops);
+	assert(ops->get_pull != 0);
+	assert(gpio >= 0);
+
+	return ops->get_pull(gpio);
+}
+
+/*
+ * Initialize the gpio. The fields in the provided gpio
+ * ops pointer must be valid.
+ */
+void gpio_init(const gpio_ops_t *ops_ptr)
+{
+	assert(ops_ptr != 0  &&
+	       (ops_ptr->get_direction != 0) &&
+	       (ops_ptr->set_direction != 0) &&
+	       (ops_ptr->get_value != 0) &&
+	       (ops_ptr->set_value != 0));
+
+	ops = ops_ptr;
+}
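
A registration sketch for the layer above. The gpio_ops_t field names come from the asserts in gpio_init(); the soc_gpio_* accessors, their exact signatures and the pin number are assumptions for illustration, and set_pull/get_pull may stay NULL since gpio_init() does not require them.

#include <gpio.h>

/* Hypothetical SoC-specific accessors (signatures inferred from gpio.c) */
int soc_gpio_get_direction(int gpio);
void soc_gpio_set_direction(int gpio, int direction);
int soc_gpio_get_value(int gpio);
void soc_gpio_set_value(int gpio, int value);

static const gpio_ops_t soc_gpio_ops = {
	.get_direction	= soc_gpio_get_direction,
	.set_direction	= soc_gpio_set_direction,
	.get_value	= soc_gpio_get_value,
	.set_value	= soc_gpio_set_value,
};

void plat_gpio_setup(void)
{
	gpio_init(&soc_gpio_ops);
	gpio_set_direction(42, GPIO_DIR_OUT);	/* pin 42: example only */
	gpio_set_value(42, GPIO_LEVEL_HIGH);
}
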
diff --git a/drivers/io/io_block.c b/drivers/io/io_block.c
index 4047227..128246f 100644
--- a/drivers/io/io_block.c
+++ b/drivers/io/io_block.c
@@ -1,293 +1,425 @@
 /*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
-#include <bl_common.h>
 #include <debug.h>
 #include <errno.h>
 #include <io_block.h>
 #include <io_driver.h>
 #include <io_storage.h>
-#include <mmio.h>
-#include <stdint.h>
+#include <platform_def.h>
 #include <string.h>
+#include <utils.h>
 
-/* As we need to be able to keep state for seek, only one file can be open
- * at a time. Make this a structure and point to the entity->info. When we
- * can malloc memory we can change this to support more open files.
- */
 typedef struct {
-	/* Use the 'in_use' flag as any value for base and file_pos could be
-	 * valid.
-	 */
-	int		in_use;
-	uintptr_t	base;
-	size_t		file_pos;
-	uint32_t	flags;
-} file_state_t;
+	io_block_dev_spec_t	*dev_spec;
+	uintptr_t		base;
+	size_t			file_pos;
+	size_t			size;
+} block_dev_state_t;
 
-struct block_info {
-	struct block_ops	ops;
-	int			init;
-	uint32_t		flags;
-};
+#define is_power_of_2(x)	((x != 0) && ((x & (x - 1)) == 0))
 
-static file_state_t current_file = {0};
-
-static struct block_info block_info;
+io_type_t device_type_block(void);
 
 static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
 		      io_entity_t *entity);
 static int block_seek(io_entity_t *entity, int mode, ssize_t offset);
-static int block_read(io_entity_t *entity, uintptr_t buffer,
-		      size_t length, size_t *length_read);
-static int block_write(io_entity_t *entity, uintptr_t buffer,
+static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
+		      size_t *length_read);
+static int block_write(io_entity_t *entity, const uintptr_t buffer,
 		       size_t length, size_t *length_written);
 static int block_close(io_entity_t *entity);
+static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int block_dev_close(io_dev_info_t *dev_info);
 
-static int blk_dev_init(io_dev_info_t *dev_info,
-			const uintptr_t init_params);
-static int blk_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
-static int blk_dev_close(io_dev_info_t *dev_info);
+static const io_dev_connector_t block_dev_connector = {
+	.dev_open	= block_dev_open
+};
 
-/* Identify the device type as block */
+static const io_dev_funcs_t block_dev_funcs = {
+	.type		= device_type_block,
+	.open		= block_open,
+	.seek		= block_seek,
+	.size		= NULL,
+	.read		= block_read,
+	.write		= block_write,
+	.close		= block_close,
+	.dev_init	= NULL,
+	.dev_close	= block_dev_close,
+};
+
+static block_dev_state_t state_pool[MAX_IO_BLOCK_DEVICES];
+static io_dev_info_t dev_info_pool[MAX_IO_BLOCK_DEVICES];
+
+/* Track number of allocated block state */
+static unsigned int block_dev_count;
+
 io_type_t device_type_block(void)
 {
 	return IO_TYPE_BLOCK;
 }
 
-static const io_dev_connector_t blk_dev_connector = {
-	.dev_open = blk_dev_open
-};
-
-static const io_dev_funcs_t blk_dev_funcs = {
-	.type = device_type_block,
-	.open = block_open,
-	.seek = block_seek,
-	.size = NULL,
-	.read = block_read,
-	.write = block_write,
-	.close = block_close,
-	.dev_init = blk_dev_init,
-	.dev_close = blk_dev_close,
-};
-
-
-/* No state associated with this device so structure can be const */
-static const io_dev_info_t blk_dev_info = {
-	.funcs = &blk_dev_funcs,
-	.info = (uintptr_t)&block_info,
-};
-
-/* Open a connection to the block device */
-static int blk_dev_open(const uintptr_t dev_spec __attribute__((unused)),
-			   io_dev_info_t **dev_info)
+/* Locate a block state in the pool, specified by address */
+static int find_first_block_state(const io_block_dev_spec_t *dev_spec,
+				  unsigned int *index_out)
 {
-	struct block_ops	*funcs, *block_spec;
-
-	assert(dev_info != NULL);
-	*dev_info = (io_dev_info_t *)&blk_dev_info; /* cast away const */
-
-	if (dev_spec) {
-		funcs = &block_info.ops;
-		block_spec = (struct block_ops *)dev_spec;
-		funcs->init = block_spec->init;
-		funcs->read = block_spec->read;
-		funcs->write = block_spec->write;
+	int result = -ENOENT;
+	for (int index = 0; index < MAX_IO_BLOCK_DEVICES; ++index) {
+		/* dev_spec is used as identifier since it's unique */
+		if (state_pool[index].dev_spec == dev_spec) {
+			result = 0;
+			*index_out = index;
+			break;
+		}
 	}
-
-	return IO_SUCCESS;
+	return result;
 }
 
-/* Close a connection to the block device */
-static int blk_dev_close(io_dev_info_t *dev_info)
+/* Allocate a device info from the pool and return a pointer to it */
+static int allocate_dev_info(io_dev_info_t **dev_info)
 {
-	/* NOP */
-	/* TODO: Consider tracking open files and cleaning them up here */
-	return IO_SUCCESS;
-}
+	int result = -ENOMEM;
+	assert(dev_info != NULL);
 
-
-/* Open a file on the block device */
-/* TODO: Can we do any sensible limit checks on requested memory */
-static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
-			     io_entity_t *entity)
-{
-	int result = IO_FAIL;
-	const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
-	struct block_info *info = (struct block_info *)(dev_info->info);
-
-	/* Since we need to track open state for seek() we only allow one open
-	 * spec at a time. When we have dynamic memory we can malloc and set
-	 * entity->info.
-	 */
-	if (current_file.in_use == 0) {
-		assert(block_spec != NULL);
-		assert(entity != NULL);
-
-		current_file.in_use = 1;
-		current_file.base = block_spec->offset;
-		/* File cursor offset for seek and incremental reads etc. */
-		current_file.file_pos = 0;
-		current_file.flags = info->flags;
-		entity->info = (uintptr_t)&current_file;
-		result = IO_SUCCESS;
-	} else {
-		WARN("A block device is already active. Close first.\n");
-		result = IO_RESOURCES_EXHAUSTED;
+	if (block_dev_count < MAX_IO_BLOCK_DEVICES) {
+		unsigned int index = 0;
+		result = find_first_block_state(NULL, &index);
+		assert(result == 0);
+		/* initialize dev_info */
+		dev_info_pool[index].funcs = &block_dev_funcs;
+		dev_info_pool[index].info = (uintptr_t)&state_pool[index];
+		*dev_info = &dev_info_pool[index];
+		++block_dev_count;
 	}
 
 	return result;
 }
 
-/* Seek to a particular file offset on the block device */
+
+/* Release a device info to the pool */
+static int free_dev_info(io_dev_info_t *dev_info)
+{
+	int result;
+	unsigned int index = 0;
+	block_dev_state_t *state;
+	assert(dev_info != NULL);
+
+	state = (block_dev_state_t *)dev_info->info;
+	result = find_first_block_state(state->dev_spec, &index);
+	if (result == 0) {
+		/* free if device info is valid */
+		zeromem(state, sizeof(block_dev_state_t));
+		zeromem(dev_info, sizeof(io_dev_info_t));
+		--block_dev_count;
+	}
+
+	return result;
+}
+
+static int block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+		      io_entity_t *entity)
+{
+	block_dev_state_t *cur;
+	io_block_spec_t *region;
+
+	assert((dev_info->info != (uintptr_t)NULL) &&
+	       (spec != (uintptr_t)NULL) &&
+	       (entity->info == (uintptr_t)NULL));
+
+	region = (io_block_spec_t *)spec;
+	cur = (block_dev_state_t *)dev_info->info;
+	assert(((region->offset % cur->dev_spec->block_size) == 0) &&
+	       ((region->length % cur->dev_spec->block_size) == 0));
+
+	cur->base = region->offset;
+	cur->size = region->length;
+	cur->file_pos = 0;
+
+	entity->info = (uintptr_t)cur;
+	return 0;
+}
+
+/* The offset parameter is an address relative to the start of the region */
 static int block_seek(io_entity_t *entity, int mode, ssize_t offset)
 {
-	int result = IO_FAIL;
+	block_dev_state_t *cur;
 
-	/* We only support IO_SEEK_SET for the moment. */
-	if (mode == IO_SEEK_SET) {
-		assert(entity != NULL);
+	assert(entity->info != (uintptr_t)NULL);
 
-		/* TODO: can we do some basic limit checks on seek? */
-		((file_state_t *)entity->info)->file_pos = offset;
-		result = IO_SUCCESS;
-	} else {
-		result = IO_FAIL;
+	cur = (block_dev_state_t *)entity->info;
+	assert((offset >= 0) && (offset < cur->size));
+
+	switch (mode) {
+	case IO_SEEK_SET:
+		cur->file_pos = offset;
+		break;
+	case IO_SEEK_CUR:
+		cur->file_pos += offset;
+		break;
+	default:
+		return -EINVAL;
 	}
-
-	return result;
+	assert(cur->file_pos < cur->size);
+	return 0;
 }
 
-
-/* Read data from a file on the block device */
-static int block_read(io_entity_t *entity, uintptr_t buffer,
-		      size_t length, size_t *length_read)
+static int block_read(io_entity_t *entity, uintptr_t buffer, size_t length,
+		      size_t *length_read)
 {
-	file_state_t *fp;
-	int result;
+	block_dev_state_t *cur;
+	io_block_spec_t *buf;
+	io_block_ops_t *ops;
+	size_t aligned_length, skip, count, left, padding, block_size;
+	int lba;
+	int buffer_not_aligned;
 
-	assert(entity != NULL);
-	assert(buffer != (uintptr_t)NULL);
-	assert(length_read != NULL);
+	assert(entity->info != (uintptr_t)NULL);
+	cur = (block_dev_state_t *)entity->info;
+	ops = &(cur->dev_spec->ops);
+	buf = &(cur->dev_spec->buffer);
+	block_size = cur->dev_spec->block_size;
+	assert((length <= cur->size) &&
+	       (length > 0) &&
+	       (ops->read != 0));
 
-	fp = (file_state_t *)entity->info;
-
-	if (!block_info.ops.read) {
-		ERROR("There's no read function on the block device.\n");
-		return IO_NOT_SUPPORTED;
-	}
-	result = block_info.ops.read(fp->base + fp->file_pos, length,
-				     buffer, fp->flags);
-	if (result) {
-		WARN("Failed to read block offset 0x%x\n",
-		     fp->base + fp->file_pos);
-		return result;
+	if ((buffer & (block_size - 1)) != 0) {
+		/*
+		 * The buffer isn't aligned to the block size. Block devices
+		 * always rely on DMA, so it's better for callers to pass a
+		 * block-size-aligned buffer.
+		 */
+		buffer_not_aligned = 1;
+	} else {
+		buffer_not_aligned = 0;
 	}
 
+	skip = cur->file_pos % block_size;
+	aligned_length = ((skip + length) + (block_size - 1)) &
+			 ~(block_size - 1);
+	padding = aligned_length - (skip + length);
+	left = aligned_length;
+	do {
+		lba = (cur->file_pos + cur->base) / block_size;
+		if (left >= buf->length) {
+			/*
+			 * Since left is at least a full buffer, no padding
+			 * is needed here.
+			 * If the caller's buffer isn't aligned, the aligned
+			 * block buffer must be used instead.
+			 */
+			if (skip || buffer_not_aligned) {
+				/*
+				 * The beginning address (file_pos) or the
+				 * caller's buffer isn't block-aligned, so the
+				 * block buffer must be used, since the block
+				 * device always relies on DMA.
+				 */
+				count = ops->read(lba, buf->offset,
+						  buf->length);
+			} else {
+				count = ops->read(lba, buffer, buf->length);
+			}
+			assert(count == buf->length);
+			cur->file_pos += count - skip;
+			if (skip || buffer_not_aligned) {
+				/*
+				 * The data was staged in the block buffer
+				 * (skip or unaligned caller buffer), so copy
+				 * the useful part into the caller's buffer.
+				 */
+				memcpy((void *)buffer,
+				       (void *)(buf->offset + skip),
+				       count - skip);
+			}
+			left = left - (count - skip);
+		} else {
+			if (skip || padding || buffer_not_aligned) {
+				/*
+				 * If the beginning address (file_pos) isn't
+				 * block-aligned, full blocks must be read
+				 * through the block buffer instead.
+				 * If the size isn't block-aligned, the block
+				 * buffer avoids overflowing the caller's
+				 * buffer.
+				 * If the caller's buffer isn't aligned, the
+				 * block buffer avoids DMA errors.
+				 */
+				count = ops->read(lba, buf->offset, left);
+			} else
+				count = ops->read(lba, buffer, left);
+			assert(count == left);
+			left = left - (skip + padding);
+			cur->file_pos += left;
+			if (skip || padding || buffer_not_aligned) {
+				/*
+				 * The data was staged in the block buffer
+				 * (unaligned size or buffer), so copy it out.
+				 */
+				memcpy((void *)buffer,
+				       (void *)(buf->offset + skip),
+				       left);
+			}
+			/* It's already the last block operation */
+			left = 0;
+		}
+		skip = cur->file_pos % block_size;
+	} while (left > 0);
 	*length_read = length;
-	/* advance the file 'cursor' for incremental reads */
-	fp->file_pos += length;
 
-	return IO_SUCCESS;
+	return 0;
 }
 
-static int block_write(io_entity_t *entity, uintptr_t buffer,
+static int block_write(io_entity_t *entity, const uintptr_t buffer,
 		       size_t length, size_t *length_written)
 {
-	file_state_t *fp;
-	int result;
+	block_dev_state_t *cur;
+	io_block_spec_t *buf;
+	io_block_ops_t *ops;
+	size_t aligned_length, skip, count, left, padding, block_size;
+	int lba;
+	int buffer_not_aligned;
 
-	assert(entity != NULL);
-	assert(buffer != (uintptr_t)NULL);
-	assert(length_written != NULL);
+	assert(entity->info != (uintptr_t)NULL);
+	cur = (block_dev_state_t *)entity->info;
+	ops = &(cur->dev_spec->ops);
+	buf = &(cur->dev_spec->buffer);
+	block_size = cur->dev_spec->block_size;
+	assert((length <= cur->size) &&
+	       (length > 0) &&
+	       (ops->read != 0) &&
+	       (ops->write != 0));
 
-	fp = (file_state_t *)entity->info;
-
-	if (!block_info.ops.write) {
-		ERROR("There's no write function on the block device.\n");
-		return IO_NOT_SUPPORTED;
-	}
-	result = block_info.ops.write(fp->base + fp->file_pos, length,
-				      buffer, fp->flags);
-	if (result) {
-		WARN("Failed to write block offset 0x%x\n",
-		     fp->base + fp->file_pos);
-		return result;
+	if ((buffer & (block_size - 1)) != 0) {
+		/*
+		 * The buffer isn't aligned to the block size. Block devices
+		 * always rely on DMA, so it's better for callers to pass a
+		 * block-size-aligned buffer.
+		 */
+		buffer_not_aligned = 1;
+	} else {
+		buffer_not_aligned = 0;
 	}
 
+	skip = cur->file_pos % block_size;
+	aligned_length = ((skip + length) + (block_size - 1)) &
+			 ~(block_size - 1);
+	padding = aligned_length - (skip + length);
+	left = aligned_length;
+	do {
+		lba = (cur->file_pos + cur->base) / block_size;
+		if (left >= buf->length) {
+			/* Since left is at least a full buffer, no padding is needed. */
+			if (skip || buffer_not_aligned) {
+				/*
+				 * The beginning address (file_pos) or the
+				 * caller's buffer isn't block-aligned, so the
+				 * block buffer must be used to write the
+				 * block (read-modify-write).
+				 */
+				count = ops->read(lba, buf->offset,
+						  buf->length);
+				assert(count == buf->length);
+				memcpy((void *)(buf->offset + skip),
+				       (void *)buffer,
+				       count - skip);
+				count = ops->write(lba, buf->offset,
+						   buf->length);
+			} else
+				count = ops->write(lba, buffer, buf->length);
+			assert(count == buf->length);
+			cur->file_pos += count - skip;
+			left = left - (count - skip);
+		} else {
+			if (skip || padding || buffer_not_aligned) {
+				/*
+				 * If the beginning address (file_pos) isn't
+				 * block-aligned, the existing data at the
+				 * start of that block must not be polluted;
+				 * reading the block first and skipping the
+				 * beginning is the only way.
+				 * If the size isn't block-aligned, the block
+				 * buffer avoids overflowing past the end.
+				 * If the caller's buffer isn't aligned, the
+				 * block buffer avoids DMA errors.
+				 */
+				count = ops->read(lba, buf->offset, left);
+				assert(count == left);
+				memcpy((void *)(buf->offset + skip),
+				       (void *)buffer,
+				       left - skip - padding);
+				count = ops->write(lba, buf->offset, left);
+			} else
+				count = ops->write(lba, buffer, left);
+			assert(count == left);
+			cur->file_pos += left - (skip + padding);
+			/* It's already the last block operation */
+			left = 0;
+		}
+		skip = cur->file_pos % block_size;
+	} while (left > 0);
 	*length_written = length;
-	/* advance the file 'cursor' for incremental reads */
-	fp->file_pos += length;
-
-	return IO_SUCCESS;
+	return 0;
 }
 
-/* Close a file on the BLOCK device */
 static int block_close(io_entity_t *entity)
 {
-	assert(entity != NULL);
-
-	entity->info = 0;
-
-	/* This would be a mem free() if we had malloc.*/
-	memset((void *)&current_file, 0, sizeof(current_file));
-
-	return IO_SUCCESS;
+	entity->info = (uintptr_t)NULL;
+	return 0;
 }
 
-static int blk_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params)
+static int block_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info)
 {
-	struct block_info *info = (struct block_info *)(dev_info->info);
+	block_dev_state_t *cur;
+	io_block_spec_t *buffer;
+	io_dev_info_t *info;
+	size_t block_size;
+	int result;
 
-	if (!info->init) {
-		if (block_info.ops.init)
-			block_info.ops.init();
-		info->init = 1;
-	}
-	info->flags = init_params;
-	return IO_SUCCESS;
+	assert(dev_info != NULL);
+	result = allocate_dev_info(&info);
+	if (result)
+		return -ENOENT;
+
+	cur = (block_dev_state_t *)info->info;
+	/* dev_spec is of type io_block_dev_spec_t. */
+	cur->dev_spec = (io_block_dev_spec_t *)dev_spec;
+	buffer = &(cur->dev_spec->buffer);
+	block_size = cur->dev_spec->block_size;
+	assert((block_size > 0) &&
+	       (is_power_of_2(block_size) != 0) &&
+	       ((buffer->offset % block_size) == 0) &&
+	       ((buffer->length % block_size) == 0));
+
+	*dev_info = info;	/* cast away const */
+	(void)block_size;
+	(void)buffer;
+	return 0;
+}
+
+static int block_dev_close(io_dev_info_t *dev_info)
+{
+	return free_dev_info(dev_info);
 }
 
 /* Exported functions */
 
-/* Register the block driver with the IO abstraction */
+/* Register the Block driver with the IO abstraction */
 int register_io_dev_block(const io_dev_connector_t **dev_con)
 {
-	int result = IO_FAIL;
+	int result;
+
 	assert(dev_con != NULL);
 
-	result = io_register_device(&blk_dev_info);
-	if (result == IO_SUCCESS)
-		*dev_con = &blk_dev_connector;
-
+	/*
+	 * Since dev_info isn't really used in io_register_device(), always
+	 * pass the same device info here instead.
+	 */
+	result = io_register_device(&dev_info_pool[0]);
+	if (result == 0)
+		*dev_con = &block_dev_connector;
 	return result;
 }
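
To tie the reworked driver to a backend, a platform fills an io_block_dev_spec_t with a block-aligned bounce buffer, the backend's read/write hooks and the block size, then goes through the usual io_dev_open()/io_open()/io_read() sequence. A sketch using the eMMC functions from earlier in this patch as the backend; the buffer address, FIP offset and sizes are invented, and the spec/ops field names follow io_block.h as used by the code above.

#include <emmc.h>
#include <io_block.h>
#include <io_driver.h>
#include <io_storage.h>

#define PLAT_BLOCK_BUF_BASE	0x04000000UL		/* hypothetical */
#define PLAT_BLOCK_BUF_SIZE	(4U * EMMC_BLOCK_SIZE)
#define PLAT_FIP_OFFSET		(512U * 1024U)		/* hypothetical */
#define PLAT_FIP_MAX_SIZE	(4U * 1024U * 1024U)

static io_block_dev_spec_t emmc_dev_spec = {
	/* Bounce buffer used for unaligned or partial-block accesses */
	.buffer		= { .offset = PLAT_BLOCK_BUF_BASE,
			    .length = PLAT_BLOCK_BUF_SIZE },
	.ops		= { .read  = emmc_read_blocks,
			    .write = emmc_write_blocks },
	.block_size	= EMMC_BLOCK_SIZE,
};

static const io_block_spec_t fip_region = {
	.offset = PLAT_FIP_OFFSET,	/* must be block-size aligned */
	.length = PLAT_FIP_MAX_SIZE,	/* must be block-size aligned */
};

int plat_block_read(uintptr_t buf, size_t len, size_t *read_out)
{
	const io_dev_connector_t *con;
	uintptr_t dev_handle, image_handle;

	if (register_io_dev_block(&con) != 0 ||
	    io_dev_open(con, (uintptr_t)&emmc_dev_spec, &dev_handle) != 0 ||
	    io_open(dev_handle, (uintptr_t)&fip_region, &image_handle) != 0)
		return -1;
	return io_read(image_handle, buf, len, read_out);
}
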
diff --git a/drivers/io/io_dummy.c b/drivers/io/io_dummy.c
new file mode 100644
index 0000000..d4020e3
--- /dev/null
+++ b/drivers/io/io_dummy.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <io_driver.h>
+#include <io_dummy.h>
+#include <io_storage.h>
+#include <string.h>
+
+struct file_state {
+	int in_use;
+	size_t size;
+};
+
+static struct file_state current_file = {0};
+
+/* Identify the device type as dummy */
+static io_type_t device_type_dummy(void)
+{
+	return IO_TYPE_DUMMY;
+}
+
+/* Dummy device functions */
+static int dummy_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
+static int dummy_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+			     io_entity_t *entity);
+static int dummy_block_len(io_entity_t *entity, size_t *length);
+static int dummy_block_read(io_entity_t *entity, uintptr_t buffer,
+			     size_t length, size_t *length_read);
+static int dummy_block_close(io_entity_t *entity);
+static int dummy_dev_close(io_dev_info_t *dev_info);
+
+
+static const io_dev_connector_t dummy_dev_connector = {
+	.dev_open = dummy_dev_open
+};
+
+
+static const io_dev_funcs_t dummy_dev_funcs = {
+	.type = device_type_dummy,
+	.open = dummy_block_open,
+	.seek = NULL,
+	.size = dummy_block_len,
+	.read = dummy_block_read,
+	.write = NULL,
+	.close = dummy_block_close,
+	.dev_init = NULL,
+	.dev_close = dummy_dev_close,
+};
+
+
+static const io_dev_info_t dummy_dev_info = {
+	.funcs = &dummy_dev_funcs,
+	.info = (uintptr_t)NULL
+};
+
+
+/* Open a connection to the dummy device */
+static int dummy_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+			   io_dev_info_t **dev_info)
+{
+	assert(dev_info != NULL);
+	*dev_info = (io_dev_info_t *)&dummy_dev_info;
+
+	return 0;
+}
+
+
+/* Close a connection to the dummy device */
+static int dummy_dev_close(io_dev_info_t *dev_info)
+{
+	return 0;
+}
+
+
+/* Open a file on the dummy device */
+static int dummy_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
+			     io_entity_t *entity)
+{
+	int result;
+	const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
+
+	if (current_file.in_use == 0) {
+		assert(block_spec != NULL);
+		assert(entity != NULL);
+
+		current_file.in_use = 1;
+		current_file.size = block_spec->length;
+		entity->info = (uintptr_t)&current_file;
+		result = 0;
+	} else {
+		WARN("A Dummy device is already active. Close first.\n");
+		result = -ENOMEM;
+	}
+
+	return result;
+}
+
+
+/* Return the size of a file on the dummy device */
+static int dummy_block_len(io_entity_t *entity, size_t *length)
+{
+	assert(entity != NULL);
+	assert(length != NULL);
+
+	*length =  ((struct file_state *)entity->info)->size;
+
+	return 0;
+}
+
+
+/* Read data from a file on the dummy device */
+static int dummy_block_read(io_entity_t *entity, uintptr_t buffer,
+			     size_t length, size_t *length_read)
+{
+	assert(length_read != NULL);
+
+	*length_read = length;
+
+	return 0;
+}
+
+
+/* Close a file on the dummy device */
+static int dummy_block_close(io_entity_t *entity)
+{
+	assert(entity != NULL);
+
+	entity->info = 0;
+	current_file.in_use = 0;
+
+	return 0;
+}
+
+
+/* Exported functions */
+
+/* Register the dummy driver with the IO abstraction */
+int register_io_dev_dummy(const io_dev_connector_t **dev_con)
+{
+	int result;
+
+	assert(dev_con != NULL);
+
+	result = io_register_device(&dummy_dev_info);
+	if (result == 0)
+		*dev_con = &dummy_dev_connector;
+
+	return result;
+}
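
A minimal usage sketch: since dummy_block_open() only records the length from the spec, io_size() reports that length and io_read() succeeds without transferring any data. The load address and size below are hypothetical.

#include <io_driver.h>
#include <io_dummy.h>
#include <io_storage.h>

static const io_block_spec_t preloaded_spec = {
	.offset = 0x80000000UL,		/* hypothetical address */
	.length = 0x200000U,		/* hypothetical size */
};

int plat_dummy_size(size_t *len_out)
{
	const io_dev_connector_t *con;
	uintptr_t dev_handle, entity_handle;

	if (register_io_dev_dummy(&con) != 0 ||
	    io_dev_open(con, (uintptr_t)NULL, &dev_handle) != 0 ||
	    io_open(dev_handle, (uintptr_t)&preloaded_spec,
		    &entity_handle) != 0)
		return -1;
	return io_size(entity_handle, len_out);
}
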
diff --git a/drivers/io/io_fip.c b/drivers/io/io_fip.c
index 0cec804..abb3511 100644
--- a/drivers/io/io_fip.c
+++ b/drivers/io/io_fip.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -40,6 +16,7 @@
 #include <platform_def.h>
 #include <stdint.h>
 #include <string.h>
+#include <utils.h>
 #include <uuid.h>
 
 /* Useful for printing UUIDs when debugging.*/
@@ -51,11 +28,6 @@
 		x.node[4], x.node[5]
 
 typedef struct {
-	const char	*name;
-	const uuid_t	 uuid;
-} plat_fip_name_uuid_t;
-
-typedef struct {
 	/* Put file_pos above the struct to allow {0} on static init.
 	 * It is a workaround for a known bug in GCC
 	 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
@@ -64,37 +36,6 @@
 	fip_toc_entry_t entry;
 } file_state_t;
 
-static const plat_fip_name_uuid_t name_uuid[] = {
-	{BL2_IMAGE_NAME, UUID_TRUSTED_BOOT_FIRMWARE_BL2},
-#ifdef BL30_IMAGE_NAME
-	/* BL3-0 is optional in the platform */
-	{BL30_IMAGE_NAME, UUID_SCP_FIRMWARE_BL30},
-#endif /* BL30_IMAGE_NAME */
-	{BL31_IMAGE_NAME, UUID_EL3_RUNTIME_FIRMWARE_BL31},
-#ifdef BL32_IMAGE_NAME
-	/* BL3-2 is optional in the platform */
-	{BL32_IMAGE_NAME, UUID_SECURE_PAYLOAD_BL32},
-#endif /* BL32_IMAGE_NAME */
-	{BL33_IMAGE_NAME, UUID_NON_TRUSTED_FIRMWARE_BL33},
-#if TRUSTED_BOARD_BOOT
-	/* Certificates */
-	{BL2_CERT_NAME, UUID_TRUSTED_BOOT_FIRMWARE_BL2_CERT},
-	{TRUSTED_KEY_CERT_NAME, UUID_TRUSTED_KEY_CERT},
-#ifdef BL30_KEY_CERT_NAME
-	{BL30_KEY_CERT_NAME, UUID_SCP_FIRMWARE_BL30_KEY_CERT},
-#endif
-	{BL31_KEY_CERT_NAME, UUID_EL3_RUNTIME_FIRMWARE_BL31_KEY_CERT},
-	{BL32_KEY_CERT_NAME, UUID_SECURE_PAYLOAD_BL32_KEY_CERT},
-	{BL33_KEY_CERT_NAME, UUID_NON_TRUSTED_FIRMWARE_BL33_KEY_CERT},
-#ifdef BL30_CERT_NAME
-	{BL30_CERT_NAME, UUID_SCP_FIRMWARE_BL30_CERT},
-#endif
-	{BL31_CERT_NAME, UUID_EL3_RUNTIME_FIRMWARE_BL31_CERT},
-	{BL32_CERT_NAME, UUID_SECURE_PAYLOAD_BL32_CERT},
-	{BL33_CERT_NAME, UUID_NON_TRUSTED_FIRMWARE_BL33_CERT},
-#endif /* TRUSTED_BOARD_BOOT */
-};
-
 static const uuid_t uuid_null = {0};
 static file_state_t current_file = {0};
 static uintptr_t backend_dev_handle;
@@ -113,13 +54,6 @@
 static int fip_dev_close(io_dev_info_t *dev_info);
 
 
-static inline int copy_uuid(uuid_t *dst, const uuid_t *src)
-{
-	memcpy(dst, src, sizeof(uuid_t));
-	return 0;
-}
-
-
 /* Return 0 for equal uuids. */
 static inline int compare_uuids(const uuid_t *uuid1, const uuid_t *uuid2)
 {
@@ -138,22 +72,6 @@
 }
 
 
-static int file_to_uuid(const char *filename, uuid_t *uuid)
-{
-	int i;
-	int status = -EINVAL;
-
-	for (i = 0; i < (sizeof(name_uuid) / sizeof(name_uuid[0])); i++) {
-		if (strcmp(filename, name_uuid[i].name) == 0) {
-			copy_uuid(uuid, &name_uuid[i].uuid);
-			status = 0;
-			break;
-		}
-	}
-	return status;
-}
-
-
 /* Identify the device type as a virtual driver */
 io_type_t device_type_fip(void)
 {
@@ -187,50 +105,50 @@
 
 
 /* Open a connection to the FIP device */
-static int fip_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+static int fip_dev_open(const uintptr_t dev_spec __unused,
 			 io_dev_info_t **dev_info)
 {
 	assert(dev_info != NULL);
 	*dev_info = (io_dev_info_t *)&fip_dev_info; /* cast away const */
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
 /* Do some basic package checks. */
 static int fip_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params)
 {
-	int result = IO_FAIL;
-	char *image_name = (char *)init_params;
+	int result;
+	unsigned int image_id = (unsigned int)init_params;
 	uintptr_t backend_handle;
 	fip_toc_header_t header;
 	size_t bytes_read;
 
 	/* Obtain a reference to the image by querying the platform layer */
-	result = plat_get_image_source(image_name, &backend_dev_handle,
+	result = plat_get_image_source(image_id, &backend_dev_handle,
 				       &backend_image_spec);
-	if (result != IO_SUCCESS) {
-		WARN("Failed to obtain reference to image '%s' (%i)\n",
-			image_name, result);
-		result = IO_FAIL;
+	if (result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, result);
+		result = -ENOENT;
 		goto fip_dev_init_exit;
 	}
 
 	/* Attempt to access the FIP image */
 	result = io_open(backend_dev_handle, backend_image_spec,
 			 &backend_handle);
-	if (result != IO_SUCCESS) {
-		WARN("Failed to access image '%s' (%i)\n", image_name, result);
-		result = IO_FAIL;
+	if (result != 0) {
+		WARN("Failed to access image id=%u (%i)\n", image_id, result);
+		result = -ENOENT;
 		goto fip_dev_init_exit;
 	}
 
 	result = io_read(backend_handle, (uintptr_t)&header, sizeof(header),
 			&bytes_read);
-	if (result == IO_SUCCESS) {
+	if (result == 0) {
 		if (!is_valid_header(&header)) {
 			WARN("Firmware Image Package header check failed.\n");
-			result = IO_FAIL;
+			result = -ENOENT;
 		} else {
 			VERBOSE("FIP header looks OK.\n");
 		}
@@ -251,7 +169,7 @@
 	backend_dev_handle = (uintptr_t)NULL;
 	backend_image_spec = (uintptr_t)NULL;
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
@@ -259,14 +177,13 @@
 static int fip_file_open(io_dev_info_t *dev_info, const uintptr_t spec,
 			 io_entity_t *entity)
 {
-	int result = IO_FAIL;
+	int result;
 	uintptr_t backend_handle;
-	uuid_t file_uuid;
-	const io_file_spec_t *file_spec = (io_file_spec_t *)spec;
+	const io_uuid_spec_t *uuid_spec = (io_uuid_spec_t *)spec;
 	size_t bytes_read;
 	int found_file = 0;
 
-	assert(file_spec != NULL);
+	assert(uuid_spec != NULL);
 	assert(entity != NULL);
 
 	/* Can only have one file open at a time for the moment. We need to
@@ -277,37 +194,35 @@
 	 */
 	if (current_file.entry.offset_address != 0) {
 		WARN("fip_file_open : Only one open file at a time.\n");
-		return IO_RESOURCES_EXHAUSTED;
+		return -ENOMEM;
 	}
 
 	/* Attempt to access the FIP image */
 	result = io_open(backend_dev_handle, backend_image_spec,
 			 &backend_handle);
-	if (result != IO_SUCCESS) {
+	if (result != 0) {
 		WARN("Failed to open Firmware Image Package (%i)\n", result);
-		result = IO_FAIL;
+		result = -ENOENT;
 		goto fip_file_open_exit;
 	}
 
 	/* Seek past the FIP header into the Table of Contents */
 	result = io_seek(backend_handle, IO_SEEK_SET, sizeof(fip_toc_header_t));
-	if (result != IO_SUCCESS) {
+	if (result != 0) {
 		WARN("fip_file_open: failed to seek\n");
-		result = IO_FAIL;
+		result = -ENOENT;
 		goto fip_file_open_close;
 	}
 
-	file_to_uuid(file_spec->path, &file_uuid);
-
 	found_file = 0;
 	do {
 		result = io_read(backend_handle,
 				 (uintptr_t)&current_file.entry,
 				 sizeof(current_file.entry),
 				 &bytes_read);
-		if (result == IO_SUCCESS) {
+		if (result == 0) {
 			if (compare_uuids(&current_file.entry.uuid,
-					  &file_uuid) == 0) {
+					  &uuid_spec->uuid) == 0) {
 				found_file = 1;
 				break;
 			}
@@ -327,7 +242,7 @@
 	} else {
 		/* Did not find the file in the FIP. */
 		current_file.entry.offset_address = 0;
-		result = IO_FAIL;
+		result = -ENOENT;
 	}
 
  fip_file_open_close:
@@ -346,7 +261,7 @@
 
 	*length =  ((file_state_t *)entity->info)->entry.size;
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
@@ -354,7 +269,7 @@
 static int fip_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
 			  size_t *length_read)
 {
-	int result = IO_FAIL;
+	int result;
 	file_state_t *fp;
 	size_t file_offset;
 	size_t bytes_read;
@@ -368,9 +283,9 @@
 	/* Open the backend, attempt to access the blob image */
 	result = io_open(backend_dev_handle, backend_image_spec,
 			 &backend_handle);
-	if (result != IO_SUCCESS) {
+	if (result != 0) {
 		WARN("Failed to open FIP (%i)\n", result);
-		result = IO_FAIL;
+		result = -ENOENT;
 		goto fip_file_read_exit;
 	}
 
@@ -379,17 +294,17 @@
 	/* Seek to the position in the FIP where the payload lives */
 	file_offset = fp->entry.offset_address + fp->file_pos;
 	result = io_seek(backend_handle, IO_SEEK_SET, file_offset);
-	if (result != IO_SUCCESS) {
+	if (result != 0) {
 		WARN("fip_file_read: failed to seek\n");
-		result = IO_FAIL;
+		result = -ENOENT;
 		goto fip_file_read_close;
 	}
 
 	result = io_read(backend_handle, buffer, length, &bytes_read);
-	if (result != IO_SUCCESS) {
+	if (result != 0) {
 		/* We cannot read our data. Fail. */
 		WARN("Failed to read payload (%i)\n", result);
-		result = IO_FAIL;
+		result = -ENOENT;
 		goto fip_file_read_close;
 	} else {
 		/* Set caller length and new file position. */
@@ -413,13 +328,13 @@
 	 * If we had malloc() we would free() here.
 	 */
 	if (current_file.entry.offset_address != 0) {
-		memset(&current_file, 0, sizeof(current_file));
+		zeromem(&current_file, sizeof(current_file));
 	}
 
 	/* Clear the Entity info. */
 	entity->info = 0;
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 /* Exported functions */
@@ -427,11 +342,11 @@
 /* Register the Firmware Image Package driver with the IO abstraction */
 int register_io_dev_fip(const io_dev_connector_t **dev_con)
 {
-	int result = IO_FAIL;
+	int result;
 	assert(dev_con != NULL);
 
 	result = io_register_device(&fip_dev_info);
-	if (result == IO_SUCCESS)
+	if (result == 0)
 		*dev_con = &fip_dev_connector;
 
 	return result;
diff --git a/drivers/io/io_memmap.c b/drivers/io/io_memmap.c
index fc06fbb..b7e26bd 100644
--- a/drivers/io/io_memmap.c
+++ b/drivers/io/io_memmap.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -33,6 +9,7 @@
 #include <io_driver.h>
 #include <io_storage.h>
 #include <string.h>
+#include <utils.h>
 
 /* As we need to be able to keep state for seek, only one file can be open
  * at a time. Make this a structure and point to the entity->info. When we
@@ -45,6 +22,7 @@
 	int		in_use;
 	uintptr_t	base;
 	size_t		file_pos;
+	size_t		size;
 } file_state_t;
 
 static file_state_t current_file = {0};
@@ -61,6 +39,7 @@
 			     io_entity_t *entity);
 static int memmap_block_seek(io_entity_t *entity, int mode,
 			     ssize_t offset);
+static int memmap_block_len(io_entity_t *entity, size_t *length);
 static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
 			     size_t length, size_t *length_read);
 static int memmap_block_write(io_entity_t *entity, const uintptr_t buffer,
@@ -78,7 +57,7 @@
 	.type = device_type_memmap,
 	.open = memmap_block_open,
 	.seek = memmap_block_seek,
-	.size = NULL,
+	.size = memmap_block_len,
 	.read = memmap_block_read,
 	.write = memmap_block_write,
 	.close = memmap_block_close,
@@ -95,13 +74,13 @@
 
 
 /* Open a connection to the memmap device */
-static int memmap_dev_open(const uintptr_t dev_spec __attribute__((unused)),
+static int memmap_dev_open(const uintptr_t dev_spec __unused,
 			   io_dev_info_t **dev_info)
 {
 	assert(dev_info != NULL);
 	*dev_info = (io_dev_info_t *)&memmap_dev_info; /* cast away const */
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
@@ -111,16 +90,15 @@
 {
 	/* NOP */
 	/* TODO: Consider tracking open files and cleaning them up here */
-	return IO_SUCCESS;
+	return 0;
 }
 
 
 /* Open a file on the memmap device */
-/* TODO: Can we do any sensible limit checks on requested memory */
 static int memmap_block_open(io_dev_info_t *dev_info, const uintptr_t spec,
 			     io_entity_t *entity)
 {
-	int result = IO_FAIL;
+	int result = -ENOMEM;
 	const io_block_spec_t *block_spec = (io_block_spec_t *)spec;
 
 	/* Since we need to track open state for seek() we only allow one open
@@ -135,11 +113,11 @@
 		current_file.base = block_spec->offset;
 		/* File cursor offset for seek and incremental reads etc. */
 		current_file.file_pos = 0;
+		current_file.size = block_spec->length;
 		entity->info = (uintptr_t)&current_file;
-		result = IO_SUCCESS;
+		result = 0;
 	} else {
 		WARN("A Memmap device is already active. Close first.\n");
-		result = IO_RESOURCES_EXHAUSTED;
 	}
 
 	return result;
@@ -149,42 +127,64 @@
 /* Seek to a particular file offset on the memmap device */
 static int memmap_block_seek(io_entity_t *entity, int mode, ssize_t offset)
 {
-	int result = IO_FAIL;
+	int result = -ENOENT;
+	file_state_t *fp;
 
 	/* We only support IO_SEEK_SET for the moment. */
 	if (mode == IO_SEEK_SET) {
 		assert(entity != NULL);
 
-		/* TODO: can we do some basic limit checks on seek? */
-		((file_state_t *)entity->info)->file_pos = offset;
-		result = IO_SUCCESS;
-	} else {
-		result = IO_FAIL;
+		fp = (file_state_t *) entity->info;
+
+		/* Assert that new file position is valid */
+		assert((offset >= 0) && (offset < fp->size));
+
+		/* Reset file position */
+		fp->file_pos = offset;
+		result = 0;
 	}
 
 	return result;
 }
 
 
+/* Return the size of a file on the memmap device */
+static int memmap_block_len(io_entity_t *entity, size_t *length)
+{
+	assert(entity != NULL);
+	assert(length != NULL);
+
+	*length = ((file_state_t *)entity->info)->size;
+
+	return 0;
+}
+
+
 /* Read data from a file on the memmap device */
 static int memmap_block_read(io_entity_t *entity, uintptr_t buffer,
 			     size_t length, size_t *length_read)
 {
 	file_state_t *fp;
+	size_t pos_after;
 
 	assert(entity != NULL);
 	assert(buffer != (uintptr_t)NULL);
 	assert(length_read != NULL);
 
-	fp = (file_state_t *)entity->info;
+	fp = (file_state_t *) entity->info;
+
+	/* Assert that file position is valid for this read operation */
+	pos_after = fp->file_pos + length;
+	assert((pos_after >= fp->file_pos) && (pos_after <= fp->size));
 
 	memcpy((void *)buffer, (void *)(fp->base + fp->file_pos), length);
 
 	*length_read = length;
-	/* advance the file 'cursor' for incremental reads */
-	fp->file_pos += length;
 
-	return IO_SUCCESS;
+	/* Set file position after read */
+	fp->file_pos = pos_after;
+
+	return 0;
 }
 
 
@@ -193,21 +193,26 @@
 			      size_t length, size_t *length_written)
 {
 	file_state_t *fp;
+	size_t pos_after;
 
 	assert(entity != NULL);
 	assert(buffer != (uintptr_t)NULL);
 	assert(length_written != NULL);
 
-	fp = (file_state_t *)entity->info;
+	fp = (file_state_t *) entity->info;
+
+	/* Assert that file position is valid for this write operation */
+	pos_after = fp->file_pos + length;
+	assert((pos_after >= fp->file_pos) && (pos_after <= fp->size));
 
 	memcpy((void *)(fp->base + fp->file_pos), (void *)buffer, length);
 
 	*length_written = length;
 
-	/* advance the file 'cursor' for incremental writes */
-	fp->file_pos += length;
+	/* Set file position after write */
+	fp->file_pos = pos_after;
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
@@ -219,9 +224,9 @@
 	entity->info = 0;
 
 	/* This would be a mem free() if we had malloc.*/
-	memset((void *)&current_file, 0, sizeof(current_file));
+	zeromem((void *)&current_file, sizeof(current_file));
 
-	return IO_SUCCESS;
+	return 0;
 }
 
 
@@ -230,11 +235,11 @@
 /* Register the memmap driver with the IO abstraction */
 int register_io_dev_memmap(const io_dev_connector_t **dev_con)
 {
-	int result = IO_FAIL;
+	int result;
 	assert(dev_con != NULL);
 
 	result = io_register_device(&memmap_dev_info);
-	if (result == IO_SUCCESS)
+	if (result == 0)
 		*dev_con = &memmap_dev_connector;
 
 	return result;
diff --git a/drivers/io/io_semihosting.c b/drivers/io/io_semihosting.c
index 3c92c6d..f1dfa20 100644
--- a/drivers/io/io_semihosting.c
+++ b/drivers/io/io_semihosting.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -84,19 +60,18 @@
 static int sh_dev_open(const uintptr_t dev_spec __unused,
 		io_dev_info_t **dev_info)
 {
-	int result = IO_SUCCESS;
 	assert(dev_info != NULL);
 	*dev_info = (io_dev_info_t *)&sh_dev_info; /* cast away const */
-	return result;
+	return 0;
 }
 
 
 /* Open a file on the semi-hosting device */
-static int sh_file_open(io_dev_info_t *dev_info __attribute__((unused)),
+static int sh_file_open(io_dev_info_t *dev_info __unused,
 		const uintptr_t spec, io_entity_t *entity)
 {
-	int result = IO_FAIL;
-	long sh_result = -1;
+	int result = -ENOENT;
+	long sh_result;
 	const io_file_spec_t *file_spec = (const io_file_spec_t *)spec;
 
 	assert(file_spec != NULL);
@@ -106,9 +81,7 @@
 
 	if (sh_result > 0) {
 		entity->info = (uintptr_t)sh_result;
-		result = IO_SUCCESS;
-	} else {
-		result = IO_FAIL;
+		result = 0;
 	}
 	return result;
 }
@@ -117,7 +90,6 @@
 /* Seek to a particular file offset on the semi-hosting device */
 static int sh_file_seek(io_entity_t *entity, int mode, ssize_t offset)
 {
-	int result = IO_FAIL;
 	long file_handle, sh_result;
 
 	assert(entity != NULL);
@@ -126,16 +98,14 @@
 
 	sh_result = semihosting_file_seek(file_handle, offset);
 
-	result = (sh_result == 0) ? IO_SUCCESS : IO_FAIL;
-
-	return result;
+	return (sh_result == 0) ? 0 : -ENOENT;
 }
 
 
 /* Return the size of a file on the semi-hosting device */
 static int sh_file_len(io_entity_t *entity, size_t *length)
 {
-	int result = IO_FAIL;
+	int result = -ENOENT;
 
 	assert(entity != NULL);
 	assert(length != NULL);
@@ -144,7 +114,7 @@
 	long sh_result = semihosting_file_length(sh_handle);
 
 	if (sh_result >= 0) {
-		result = IO_SUCCESS;
+		result = 0;
 		*length = (size_t)sh_result;
 	}
 
@@ -156,8 +126,8 @@
 static int sh_file_read(io_entity_t *entity, uintptr_t buffer, size_t length,
 		size_t *length_read)
 {
-	int result = IO_FAIL;
-	long sh_result = -1;
+	int result = -ENOENT;
+	long sh_result;
 	size_t bytes = length;
 	long file_handle;
 
@@ -171,9 +141,8 @@
 
 	if (sh_result >= 0) {
 		*length_read = (bytes != length) ? bytes : length;
-		result = IO_SUCCESS;
-	} else
-		result = IO_FAIL;
+		result = 0;
+	}
 
 	return result;
 }
@@ -183,8 +152,7 @@
 static int sh_file_write(io_entity_t *entity, const uintptr_t buffer,
 		size_t length, size_t *length_written)
 {
-	int result = IO_FAIL;
-	long sh_result = -1;
+	long sh_result;
 	long file_handle;
 	size_t bytes = length;
 
@@ -196,21 +164,16 @@
 
 	sh_result = semihosting_file_write(file_handle, &bytes, buffer);
 
-	if (sh_result >= 0) {
-		*length_written = sh_result;
-		result = IO_SUCCESS;
-	} else
-		result = IO_FAIL;
+	*length_written = length - bytes;
 
-	return result;
+	return (sh_result == 0) ? 0 : -ENOENT;
 }
 
 
 /* Close a file on the semi-hosting device */
 static int sh_file_close(io_entity_t *entity)
 {
-	int result = IO_FAIL;
-	long sh_result = -1;
+	long sh_result;
 	long file_handle;
 
 	assert(entity != NULL);
@@ -219,9 +182,7 @@
 
 	sh_result = semihosting_file_close(file_handle);
 
-	result = (sh_result >= 0) ? IO_SUCCESS : IO_FAIL;
-
-	return result;
+	return (sh_result >= 0) ? 0 : -ENOENT;
 }
 
 
@@ -230,11 +191,11 @@
 /* Register the semi-hosting driver with the IO abstraction */
 int register_io_dev_sh(const io_dev_connector_t **dev_con)
 {
-	int result = IO_FAIL;
+	int result;
 	assert(dev_con != NULL);
 
 	result = io_register_device(&sh_dev_info);
-	if (result == IO_SUCCESS)
+	if (result == 0)
 		*dev_con = &sh_dev_connector;
 
 	return result;
diff --git a/drivers/io/io_storage.c b/drivers/io/io_storage.c
index a3a8186..0918de0 100644
--- a/drivers/io/io_storage.c
+++ b/drivers/io/io_storage.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -51,8 +27,8 @@
 /* Number of currently registered devices */
 static unsigned int dev_count;
 
-
-#if DEBUG	/* Extra validation functions only used in debug builds */
+/* Extra validation functions only used when asserts are enabled */
+#if ENABLE_ASSERTIONS
 
 /* Return a boolean value indicating whether a device connector is valid */
 static int is_valid_dev_connector(const io_dev_connector_t *dev_con)
@@ -89,14 +65,15 @@
 	return ((mode != IO_SEEK_INVALID) && (mode < IO_SEEK_MAX));
 }
 
-#endif	/* End of debug-only validation functions */
+#endif /* ENABLE_ASSERTIONS */
+/* End of extra validation functions only used when asserts are enabled */
 
 
 /* Open a connection to a specific device */
 static int dev_open(const io_dev_connector_t *dev_con, const uintptr_t dev_spec,
 		io_dev_info_t **dev_info)
 {
-	int result = IO_FAIL;
+	int result;
 	assert(dev_info != NULL);
 	assert(is_valid_dev_connector(dev_con));
 
@@ -116,10 +93,10 @@
 /* Locate an entity in the pool, specified by address */
 static int find_first_entity(const io_entity_t *entity, unsigned int *index_out)
 {
-	int result = IO_FAIL;
-	for (int index = 0; index < MAX_IO_HANDLES; ++index) {
+	int result = -ENOENT;
+	for (unsigned int index = 0; index < MAX_IO_HANDLES; ++index) {
 		if (entity_map[index] == entity) {
-			result = IO_SUCCESS;
+			result = 0;
 			*index_out = index;
 			break;
 		}
@@ -131,17 +108,16 @@
 /* Allocate an entity from the pool and return a pointer to it */
 static int allocate_entity(io_entity_t **entity)
 {
-	int result = IO_FAIL;
+	int result = -ENOMEM;
 	assert(entity != NULL);
 
 	if (entity_count < MAX_IO_HANDLES) {
 		unsigned int index = 0;
 		result = find_first_entity(NULL, &index);
-		assert(result == IO_SUCCESS);
+		assert(result == 0);
 		*entity = entity_map[index] = &entity_pool[index];
 		++entity_count;
-	} else
-		result = IO_RESOURCES_EXHAUSTED;
+	}
 
 	return result;
 }
@@ -150,12 +126,12 @@
 /* Release an entity back to the pool */
 static int free_entity(const io_entity_t *entity)
 {
-	int result = IO_FAIL;
+	int result;
 	unsigned int index = 0;
 	assert(entity != NULL);
 
 	result = find_first_entity(entity, &index);
-	if (result ==  IO_SUCCESS) {
+	if (result ==  0) {
 		entity_map[index] = NULL;
 		--entity_count;
 	}
@@ -169,15 +145,13 @@
 /* Register a device driver */
 int io_register_device(const io_dev_info_t *dev_info)
 {
-	int result = IO_FAIL;
+	int result = -ENOMEM;
 	assert(dev_info != NULL);
 
 	if (dev_count < MAX_IO_DEVICES) {
 		devices[dev_count] = dev_info;
 		dev_count++;
-		result = IO_SUCCESS;
-	} else {
-		result = IO_RESOURCES_EXHAUSTED;
+		result = 0;
 	}
 
 	return result;
@@ -188,7 +162,7 @@
 int io_dev_open(const io_dev_connector_t *dev_con, const uintptr_t dev_spec,
 		uintptr_t *handle)
 {
-	int result = IO_FAIL;
+	int result;
 	assert(handle != NULL);
 
 	result = dev_open(dev_con, dev_spec, (io_dev_info_t **)handle);
@@ -200,18 +174,17 @@
  * re-initialisation */
 int io_dev_init(uintptr_t dev_handle, const uintptr_t init_params)
 {
-	int result = IO_FAIL;
+	int result = 0;
 	assert(dev_handle != (uintptr_t)NULL);
 	assert(is_valid_dev(dev_handle));
 
 	io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
 
+	/* Absence of registered function implies NOP here */
 	if (dev->funcs->dev_init != NULL) {
 		result = dev->funcs->dev_init(dev, init_params);
-	} else {
-		/* Absence of registered function implies NOP here */
-		result = IO_SUCCESS;
 	}
+
 	return result;
 }
 
@@ -221,17 +194,15 @@
 /* Close a connection to a device */
 int io_dev_close(uintptr_t dev_handle)
 {
-	int result = IO_FAIL;
+	int result = 0;
 	assert(dev_handle != (uintptr_t)NULL);
 	assert(is_valid_dev(dev_handle));
 
 	io_dev_info_t *dev = (io_dev_info_t *)dev_handle;
 
+	/* Absence of registered function implies NOP here */
 	if (dev->funcs->dev_close != NULL) {
 		result = dev->funcs->dev_close(dev);
-	} else {
-		/* Absence of registered function implies NOP here */
-		result = IO_SUCCESS;
 	}
 
 	return result;
@@ -244,7 +215,7 @@
 /* Open an IO entity */
 int io_open(uintptr_t dev_handle, const uintptr_t spec, uintptr_t *handle)
 {
-	int result = IO_FAIL;
+	int result;
 	assert((spec != (uintptr_t)NULL) && (handle != NULL));
 	assert(is_valid_dev(dev_handle));
 
@@ -253,11 +224,11 @@
 
 	result = allocate_entity(&entity);
 
-	if (result == IO_SUCCESS) {
+	if (result == 0) {
 		assert(dev->funcs->open != NULL);
 		result = dev->funcs->open(dev, spec, entity);
 
-		if (result == IO_SUCCESS) {
+		if (result == 0) {
 			entity->dev_handle = dev;
 			set_handle(handle, entity);
 		} else
@@ -270,7 +241,7 @@
 /* Seek to a specific position in an IO entity */
 int io_seek(uintptr_t handle, io_seek_mode_t mode, ssize_t offset)
 {
-	int result = IO_FAIL;
+	int result = -ENODEV;
 	assert(is_valid_entity(handle) && is_valid_seek_mode(mode));
 
 	io_entity_t *entity = (io_entity_t *)handle;
@@ -279,8 +250,6 @@
 
 	if (dev->funcs->seek != NULL)
 		result = dev->funcs->seek(entity, mode, offset);
-	else
-		result = IO_NOT_SUPPORTED;
 
 	return result;
 }
@@ -289,7 +258,7 @@
 /* Determine the length of an IO entity */
 int io_size(uintptr_t handle, size_t *length)
 {
-	int result = IO_FAIL;
+	int result = -ENODEV;
 	assert(is_valid_entity(handle) && (length != NULL));
 
 	io_entity_t *entity = (io_entity_t *)handle;
@@ -298,8 +267,6 @@
 
 	if (dev->funcs->size != NULL)
 		result = dev->funcs->size(entity, length);
-	else
-		result = IO_NOT_SUPPORTED;
 
 	return result;
 }
@@ -311,7 +278,7 @@
 		size_t length,
 		size_t *length_read)
 {
-	int result = IO_FAIL;
+	int result = -ENODEV;
 	assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
 
 	io_entity_t *entity = (io_entity_t *)handle;
@@ -320,8 +287,6 @@
 
 	if (dev->funcs->read != NULL)
 		result = dev->funcs->read(entity, buffer, length, length_read);
-	else
-		result = IO_NOT_SUPPORTED;
 
 	return result;
 }
@@ -333,7 +298,7 @@
 		size_t length,
 		size_t *length_written)
 {
-	int result = IO_FAIL;
+	int result = -ENODEV;
 	assert(is_valid_entity(handle) && (buffer != (uintptr_t)NULL));
 
 	io_entity_t *entity = (io_entity_t *)handle;
@@ -343,8 +308,7 @@
 	if (dev->funcs->write != NULL) {
 		result = dev->funcs->write(entity, buffer, length,
 				length_written);
-	} else
-		result = IO_NOT_SUPPORTED;
+	}
 
 	return result;
 }
@@ -353,19 +317,17 @@
 /* Close an IO entity */
 int io_close(uintptr_t handle)
 {
-	int result = IO_FAIL;
+	int result = 0;
 	assert(is_valid_entity(handle));
 
 	io_entity_t *entity = (io_entity_t *)handle;
 
 	io_dev_info_t *dev = entity->dev_handle;
 
+	/* Absence of registered function implies NOP here */
 	if (dev->funcs->close != NULL)
 		result = dev->funcs->close(entity);
-	else {
-		/* Absence of registered function implies NOP here */
-		result = IO_SUCCESS;
-	}
+
 	/* Ignore improbable free_entity failure */
 	(void)free_entity(entity);
 
diff --git a/drivers/partition/gpt.c b/drivers/partition/gpt.c
new file mode 100644
index 0000000..9cc917d
--- /dev/null
+++ b/drivers/partition/gpt.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <gpt.h>
+#include <string.h>
+#include <utils.h>
+
+static int unicode_to_ascii(unsigned short *str_in, unsigned char *str_out)
+{
+	uint8_t *name = (uint8_t *)str_in;
+	int i;
+
+	assert((str_in != NULL) && (str_out != NULL) && (name[0] != '\0'));
+
+	/* check that every UTF-16LE code unit of the name fits in a single byte */
+	for (i = 1; i < (EFI_NAMELEN << 1); i += 2) {
+		if (name[i] != '\0')
+			return -EINVAL;
+	}
+	/* convert the unicode string to an ASCII string */
+	for (i = 0; i < (EFI_NAMELEN << 1); i += 2) {
+		str_out[i >> 1] = name[i];
+		if (name[i] == '\0')
+			break;
+	}
+	return 0;
+}
+
+int parse_gpt_entry(gpt_entry_t *gpt_entry, partition_entry_t *entry)
+{
+	int result;
+
+	assert((gpt_entry != 0) && (entry != 0));
+
+	if ((gpt_entry->first_lba == 0) && (gpt_entry->last_lba == 0)) {
+		return -EINVAL;
+	}
+
+	zeromem(entry, sizeof(partition_entry_t));
+	result = unicode_to_ascii(gpt_entry->name, (uint8_t *)entry->name);
+	if (result != 0) {
+		return result;
+	}
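+	/* Convert the LBA range into a byte offset and length. */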
+	entry->start = (uint64_t)gpt_entry->first_lba * PARTITION_BLOCK_SIZE;
+	entry->length = (uint64_t)(gpt_entry->last_lba -
+				   gpt_entry->first_lba + 1) *
+			PARTITION_BLOCK_SIZE;
+	return 0;
+}
diff --git a/drivers/partition/partition.c b/drivers/partition/partition.c
new file mode 100644
index 0000000..e2b4683
--- /dev/null
+++ b/drivers/partition/partition.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <gpt.h>
+#include <io_storage.h>
+#include <mbr.h>
+#include <partition.h>
+#include <platform.h>
+#include <string.h>
+
+static uint8_t mbr_sector[PARTITION_BLOCK_SIZE];
+partition_entry_list_t list;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+static void dump_entries(int num)
+{
+	char name[EFI_NAMELEN];
+	int i, j, len;
+
+	VERBOSE("Partition table with %d entries:\n", num);
+	for (i = 0; i < num; i++) {
+		len = snprintf(name, EFI_NAMELEN, "%s", list.list[i].name);
+		for (j = 0; j < EFI_NAMELEN - len - 1; j++) {
+			name[len + j] = ' ';
+		}
+		name[EFI_NAMELEN - 1] = '\0';
+		VERBOSE("%d: %s %lx-%lx\n", i + 1, name, list.list[i].start,
+			list.list[i].start + list.list[i].length - 4);
+	}
+}
+#else
+#define dump_entries(num)	((void)num)
+#endif
+
+/*
+ * Load the first sector, which carries the MBR header.
+ * The MBR boot signature should always be valid, whether the layout is MBR or GPT.
+ */
+static int load_mbr_header(uintptr_t image_handle, mbr_entry_t *mbr_entry)
+{
+	size_t bytes_read;
+	uintptr_t offset;
+	int result;
+
+	assert(mbr_entry != NULL);
+	/* MBR partition table is in LBA0. */
+	result = io_seek(image_handle, IO_SEEK_SET, MBR_OFFSET);
+	if (result != 0) {
+		WARN("Failed to seek (%i)\n", result);
+		return result;
+	}
+	result = io_read(image_handle, (uintptr_t)&mbr_sector,
+			 PARTITION_BLOCK_SIZE, &bytes_read);
+	if (result != 0) {
+		WARN("Failed to read data (%i)\n", result);
+		return result;
+	}
+
+	/* Check MBR boot signature. */
+	if ((mbr_sector[PARTITION_BLOCK_SIZE - 2] != MBR_SIGNATURE_FIRST) ||
+	    (mbr_sector[PARTITION_BLOCK_SIZE - 1] != MBR_SIGNATURE_SECOND)) {
+		return -ENOENT;
+	}
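+	/* Copy the first primary partition entry out of the MBR sector. */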
+	offset = (uintptr_t)&mbr_sector + MBR_PRIMARY_ENTRY_OFFSET;
+	memcpy(mbr_entry, (void *)offset, sizeof(mbr_entry_t));
+	return 0;
+}
+
+/*
+ * Load the GPT header and check the GPT signature.
+ * If the partition count can be read, check it and clamp it if necessary.
+ */
+static int load_gpt_header(uintptr_t image_handle)
+{
+	gpt_header_t header;
+	size_t bytes_read;
+	int result;
+
+	result = io_seek(image_handle, IO_SEEK_SET, GPT_HEADER_OFFSET);
+	if (result != 0) {
+		return result;
+	}
+	result = io_read(image_handle, (uintptr_t)&header,
+			 sizeof(gpt_header_t), &bytes_read);
+	if ((result != 0) || (sizeof(gpt_header_t) != bytes_read)) {
+		return result;
+	}
+	if (memcmp(header.signature, GPT_SIGNATURE,
+		   sizeof(header.signature)) != 0) {
+		return -EINVAL;
+	}
+
+	/* partition numbers can't exceed PLAT_PARTITION_MAX_ENTRIES */
+	list.entry_count = header.list_num;
+	if (list.entry_count > PLAT_PARTITION_MAX_ENTRIES) {
+		list.entry_count = PLAT_PARTITION_MAX_ENTRIES;
+	}
+	return 0;
+}
+
+static int load_gpt_entry(uintptr_t image_handle, gpt_entry_t *entry)
+{
+	size_t bytes_read;
+	int result;
+
+	assert(entry != NULL);
+	result = io_read(image_handle, (uintptr_t)entry, sizeof(gpt_entry_t),
+			 &bytes_read);
+	if (sizeof(gpt_entry_t) != bytes_read)
+		return -EINVAL;
+	return result;
+}
+
+static int verify_partition_gpt(uintptr_t image_handle)
+{
+	gpt_entry_t entry;
+	int result, i;
+
+	for (i = 0; i < list.entry_count; i++) {
+		result = load_gpt_entry(image_handle, &entry);
+		assert(result == 0);
+		result = parse_gpt_entry(&entry, &list.list[i]);
+		if (result != 0) {
+			break;
+		}
+	}
+	if (i == 0) {
+		return -EINVAL;
+	}
+	/*
+	 * Only record the number of valid partition entries that were
+	 * loaded from the partition table.
+	 */
+	list.entry_count = i;
+	dump_entries(list.entry_count);
+
+	return 0;
+}
+
+int load_partition_table(unsigned int image_id)
+{
+	uintptr_t dev_handle, image_handle, image_spec = 0;
+	mbr_entry_t mbr_entry;
+	int result;
+
+	result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, result);
+		return result;
+	}
+
+	result = io_open(dev_handle, image_spec, &image_handle);
+	if (result != 0) {
+		WARN("Failed to access image id=%u (%i)\n", image_id, result);
+		return result;
+	}
+
+	result = load_mbr_header(image_handle, &mbr_entry);
+	if (result != 0) {
+		WARN("Failed to access image id=%u (%i)\n", image_id, result);
+		return result;
+	}
+	if (mbr_entry.type == PARTITION_TYPE_GPT) {
+		result = load_gpt_header(image_handle);
+		assert(result == 0);
+		result = io_seek(image_handle, IO_SEEK_SET, GPT_ENTRY_OFFSET);
+		assert(result == 0);
+		result = verify_partition_gpt(image_handle);
+	} else {
+		/* MBR type isn't supported yet. */
+		result = -EINVAL;
+		goto exit;
+	}
+exit:
+	io_close(image_handle);
+	return result;
+}
+
+const partition_entry_t *get_partition_entry(const char *name)
+{
+	int i;
+
+	for (i = 0; i < list.entry_count; i++) {
+		if (strcmp(name, list.list[i].name) == 0) {
+			return &list.list[i];
+		}
+	}
+	return NULL;
+}
+
+const partition_entry_list_t *get_partition_entry_list(void)
+{
+	return &list;
+}
+
+void partition_init(unsigned int image_id)
+{
+	load_partition_table(image_id);
+}
diff --git a/drivers/synopsys/emmc/dw_mmc.c b/drivers/synopsys/emmc/dw_mmc.c
new file mode 100644
index 0000000..e6904d1
--- /dev/null
+++ b/drivers/synopsys/emmc/dw_mmc.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <dw_mmc.h>
+#include <emmc.h>
+#include <errno.h>
+#include <mmio.h>
+#include <string.h>
+
+#define DWMMC_CTRL			(0x00)
+#define CTRL_IDMAC_EN			(1 << 25)
+#define CTRL_DMA_EN			(1 << 5)
+#define CTRL_INT_EN			(1 << 4)
+#define CTRL_DMA_RESET			(1 << 2)
+#define CTRL_FIFO_RESET			(1 << 1)
+#define CTRL_RESET			(1 << 0)
+#define CTRL_RESET_ALL			(CTRL_DMA_RESET | CTRL_FIFO_RESET | \
+					 CTRL_RESET)
+
+#define DWMMC_PWREN			(0x04)
+#define DWMMC_CLKDIV			(0x08)
+#define DWMMC_CLKSRC			(0x0c)
+#define DWMMC_CLKENA			(0x10)
+#define DWMMC_TMOUT			(0x14)
+#define DWMMC_CTYPE			(0x18)
+#define CTYPE_8BIT			(1 << 16)
+#define CTYPE_4BIT			(1)
+#define CTYPE_1BIT			(0)
+
+#define DWMMC_BLKSIZ			(0x1c)
+#define DWMMC_BYTCNT			(0x20)
+#define DWMMC_INTMASK			(0x24)
+#define INT_EBE				(1 << 15)
+#define INT_SBE				(1 << 13)
+#define INT_HLE				(1 << 12)
+#define INT_FRUN			(1 << 11)
+#define INT_DRT				(1 << 9)
+#define INT_RTO				(1 << 8)
+#define INT_DCRC			(1 << 7)
+#define INT_RCRC			(1 << 6)
+#define INT_RXDR			(1 << 5)
+#define INT_TXDR			(1 << 4)
+#define INT_DTO				(1 << 3)
+#define INT_CMD_DONE			(1 << 2)
+#define INT_RE				(1 << 1)
+
+#define DWMMC_CMDARG			(0x28)
+#define DWMMC_CMD			(0x2c)
+#define CMD_START			(1 << 31)
+#define CMD_USE_HOLD_REG		(1 << 29)	/* 0 if SDR50/100 */
+#define CMD_UPDATE_CLK_ONLY		(1 << 21)
+#define CMD_SEND_INIT			(1 << 15)
+#define CMD_STOP_ABORT_CMD		(1 << 14)
+#define CMD_WAIT_PRVDATA_COMPLETE	(1 << 13)
+#define CMD_WRITE			(1 << 10)
+#define CMD_DATA_TRANS_EXPECT		(1 << 9)
+#define CMD_CHECK_RESP_CRC		(1 << 8)
+#define CMD_RESP_LEN			(1 << 7)
+#define CMD_RESP_EXPECT			(1 << 6)
+#define CMD(x)				(x & 0x3f)
+
+#define DWMMC_RESP0			(0x30)
+#define DWMMC_RESP1			(0x34)
+#define DWMMC_RESP2			(0x38)
+#define DWMMC_RESP3			(0x3c)
+#define DWMMC_RINTSTS			(0x44)
+#define DWMMC_STATUS			(0x48)
+#define STATUS_DATA_BUSY		(1 << 9)
+
+#define DWMMC_FIFOTH			(0x4c)
+#define FIFOTH_TWMARK(x)		(x & 0xfff)
+#define FIFOTH_RWMARK(x)		((x & 0x1ff) << 16)
+#define FIFOTH_DMA_BURST_SIZE(x)	((x & 0x7) << 28)
+
+#define DWMMC_DEBNCE			(0x64)
+#define DWMMC_BMOD			(0x80)
+#define BMOD_ENABLE			(1 << 7)
+#define BMOD_FB				(1 << 1)
+#define BMOD_SWRESET			(1 << 0)
+
+#define DWMMC_DBADDR			(0x88)
+#define DWMMC_IDSTS			(0x8c)
+#define DWMMC_IDINTEN			(0x90)
+#define DWMMC_CARDTHRCTL		(0x100)
+#define CARDTHRCTL_RD_THR(x)		((x & 0xfff) << 16)
+#define CARDTHRCTL_RD_THR_EN		(1 << 0)
+
+#define IDMAC_DES0_DIC			(1 << 1)
+#define IDMAC_DES0_LD			(1 << 2)
+#define IDMAC_DES0_FS			(1 << 3)
+#define IDMAC_DES0_CH			(1 << 4)
+#define IDMAC_DES0_ER			(1 << 5)
+#define IDMAC_DES0_CES			(1 << 30)
+#define IDMAC_DES0_OWN			(1 << 31)
+#define IDMAC_DES1_BS1(x)		((x) & 0x1fff)
+#define IDMAC_DES2_BS2(x)		(((x) & 0x1fff) << 13)
+
+#define DWMMC_DMA_MAX_BUFFER_SIZE	(512 * 8)
+
+#define DWMMC_8BIT_MODE			(1 << 6)
+
+#define TIMEOUT				100000
+
+struct dw_idmac_desc {
+	unsigned int	des0;
+	unsigned int	des1;
+	unsigned int	des2;
+	unsigned int	des3;
+};
+
+static void dw_init(void);
+static int dw_send_cmd(emmc_cmd_t *cmd);
+static int dw_set_ios(int clk, int width);
+static int dw_prepare(int lba, uintptr_t buf, size_t size);
+static int dw_read(int lba, uintptr_t buf, size_t size);
+static int dw_write(int lba, uintptr_t buf, size_t size);
+
+static const emmc_ops_t dw_mmc_ops = {
+	.init		= dw_init,
+	.send_cmd	= dw_send_cmd,
+	.set_ios	= dw_set_ios,
+	.prepare	= dw_prepare,
+	.read		= dw_read,
+	.write		= dw_write,
+};
+
+static dw_mmc_params_t dw_params;
+
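+/*
+ * Send an "update clock registers only" command and wait until the
+ * controller has accepted it (the CMD_START bit clears).
+ */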
+static void dw_update_clk(void)
+{
+	unsigned int data;
+
+	mmio_write_32(dw_params.reg_base + DWMMC_CMD,
+		      CMD_WAIT_PRVDATA_COMPLETE | CMD_UPDATE_CLK_ONLY |
+		      CMD_START);
+	while (1) {
+		data = mmio_read_32(dw_params.reg_base + DWMMC_CMD);
+		if ((data & CMD_START) == 0)
+			break;
+		data = mmio_read_32(dw_params.reg_base + DWMMC_RINTSTS);
+		assert(data & INT_HLE);
+	}
+}
+
+static void dw_set_clk(int clk)
+{
+	unsigned int data;
+	int div;
+
+	assert(clk > 0);
+
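+	/* Find the smallest divider so that clk_rate / (2 * div) <= clk. */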
+	for (div = 1; div < 256; div++) {
+		if ((dw_params.clk_rate / (2 * div)) <= clk) {
+			break;
+		}
+	}
+	assert(div < 256);
+
+	/* wait until controller is idle */
+	do {
+		data = mmio_read_32(dw_params.reg_base + DWMMC_STATUS);
+	} while (data & STATUS_DATA_BUSY);
+
+	/* disable the clock before changing the clock rate */
+	mmio_write_32(dw_params.reg_base + DWMMC_CLKENA, 0);
+	dw_update_clk();
+
+	mmio_write_32(dw_params.reg_base + DWMMC_CLKDIV, div);
+	dw_update_clk();
+
+	/* enable clock */
+	mmio_write_32(dw_params.reg_base + DWMMC_CLKENA, 1);
+	mmio_write_32(dw_params.reg_base + DWMMC_CLKSRC, 0);
+	dw_update_clk();
+}
+
+static void dw_init(void)
+{
+	unsigned int data;
+	uintptr_t base;
+
+	assert((dw_params.reg_base & EMMC_BLOCK_MASK) == 0);
+
+	base = dw_params.reg_base;
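+	/* Power up the card interface and reset the controller, FIFO and DMA. */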
+	mmio_write_32(base + DWMMC_PWREN, 1);
+	mmio_write_32(base + DWMMC_CTRL, CTRL_RESET_ALL);
+	do {
+		data = mmio_read_32(base + DWMMC_CTRL);
+	} while (data);
+
+	/* enable DMA in CTRL */
+	data = CTRL_INT_EN | CTRL_DMA_EN | CTRL_IDMAC_EN;
+	mmio_write_32(base + DWMMC_CTRL, data);
+	mmio_write_32(base + DWMMC_RINTSTS, ~0);
+	mmio_write_32(base + DWMMC_INTMASK, 0);
+	mmio_write_32(base + DWMMC_TMOUT, ~0);
+	mmio_write_32(base + DWMMC_IDINTEN, ~0);
+	mmio_write_32(base + DWMMC_BLKSIZ, EMMC_BLOCK_SIZE);
+	mmio_write_32(base + DWMMC_BYTCNT, 256 * 1024);
+	mmio_write_32(base + DWMMC_DEBNCE, 0x00ffffff);
+	mmio_write_32(base + DWMMC_BMOD, BMOD_SWRESET);
+	do {
+		data = mmio_read_32(base + DWMMC_BMOD);
+	} while (data & BMOD_SWRESET);
+	/* enable DMA in BMOD */
+	data |= BMOD_ENABLE | BMOD_FB;
+	mmio_write_32(base + DWMMC_BMOD, data);
+
+	udelay(100);
+	dw_set_clk(EMMC_BOOT_CLK_RATE);
+	udelay(100);
+}
+
+static int dw_send_cmd(emmc_cmd_t *cmd)
+{
+	unsigned int op, data, err_mask;
+	uintptr_t base;
+	int timeout;
+
+	assert(cmd);
+
+	base = dw_params.reg_base;
+
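+	/* Translate the eMMC command index into controller CMD register flags. */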
+	switch (cmd->cmd_idx) {
+	case EMMC_CMD0:
+		op = CMD_SEND_INIT;
+		break;
+	case EMMC_CMD12:
+		op = CMD_STOP_ABORT_CMD;
+		break;
+	case EMMC_CMD13:
+		op = CMD_WAIT_PRVDATA_COMPLETE;
+		break;
+	case EMMC_CMD8:
+	case EMMC_CMD17:
+	case EMMC_CMD18:
+		op = CMD_DATA_TRANS_EXPECT | CMD_WAIT_PRVDATA_COMPLETE;
+		break;
+	case EMMC_CMD24:
+	case EMMC_CMD25:
+		op = CMD_WRITE | CMD_DATA_TRANS_EXPECT |
+		     CMD_WAIT_PRVDATA_COMPLETE;
+		break;
+	default:
+		op = 0;
+		break;
+	}
+	op |= CMD_USE_HOLD_REG | CMD_START;
+	switch (cmd->resp_type) {
+	case 0:
+		break;
+	case EMMC_RESPONSE_R2:
+		op |= CMD_RESP_EXPECT | CMD_CHECK_RESP_CRC |
+		      CMD_RESP_LEN;
+		break;
+	case EMMC_RESPONSE_R3:
+		op |= CMD_RESP_EXPECT;
+		break;
+	default:
+		op |= CMD_RESP_EXPECT | CMD_CHECK_RESP_CRC;
+		break;
+	}
+	timeout = TIMEOUT;
+	do {
+		data = mmio_read_32(base + DWMMC_STATUS);
+		if (--timeout <= 0)
+			panic();
+	} while (data & STATUS_DATA_BUSY);
+
+	mmio_write_32(base + DWMMC_RINTSTS, ~0);
+	mmio_write_32(base + DWMMC_CMDARG, cmd->cmd_arg);
+	mmio_write_32(base + DWMMC_CMD, op | cmd->cmd_idx);
+
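+	/* Wait for command completion; any error interrupt aborts the command. */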
+	err_mask = INT_EBE | INT_HLE | INT_RTO | INT_RCRC | INT_RE |
+		   INT_DCRC | INT_DRT | INT_SBE;
+	timeout = TIMEOUT;
+	do {
+		udelay(500);
+		data = mmio_read_32(base + DWMMC_RINTSTS);
+
+		if (data & err_mask)
+			return -EIO;
+		if (data & INT_DTO)
+			break;
+		if (--timeout == 0) {
+			ERROR("%s, RINTSTS:0x%x\n", __func__, data);
+			panic();
+		}
+	} while (!(data & INT_CMD_DONE));
+
+	if (op & CMD_RESP_EXPECT) {
+		cmd->resp_data[0] = mmio_read_32(base + DWMMC_RESP0);
+		if (op & CMD_RESP_LEN) {
+			cmd->resp_data[1] = mmio_read_32(base + DWMMC_RESP1);
+			cmd->resp_data[2] = mmio_read_32(base + DWMMC_RESP2);
+			cmd->resp_data[3] = mmio_read_32(base + DWMMC_RESP3);
+		}
+	}
+	return 0;
+}
+
+static int dw_set_ios(int clk, int width)
+{
+	switch (width) {
+	case EMMC_BUS_WIDTH_1:
+		mmio_write_32(dw_params.reg_base + DWMMC_CTYPE, CTYPE_1BIT);
+		break;
+	case EMMC_BUS_WIDTH_4:
+		mmio_write_32(dw_params.reg_base + DWMMC_CTYPE, CTYPE_4BIT);
+		break;
+	case EMMC_BUS_WIDTH_8:
+		mmio_write_32(dw_params.reg_base + DWMMC_CTYPE, CTYPE_8BIT);
+		break;
+	default:
+		assert(0);
+	}
+	dw_set_clk(clk);
+	return 0;
+}
+
+static int dw_prepare(int lba, uintptr_t buf, size_t size)
+{
+	struct dw_idmac_desc *desc;
+	int desc_cnt, i, last;
+	uintptr_t base;
+
+	assert(((buf & EMMC_BLOCK_MASK) == 0) &&
+	       ((size % EMMC_BLOCK_SIZE) == 0) &&
+	       (dw_params.desc_size > 0) &&
+	       ((dw_params.reg_base & EMMC_BLOCK_MASK) == 0) &&
+	       ((dw_params.desc_base & EMMC_BLOCK_MASK) == 0) &&
+	       ((dw_params.desc_size & EMMC_BLOCK_MASK) == 0));
+
+	desc_cnt = (size + DWMMC_DMA_MAX_BUFFER_SIZE - 1) /
+		   DWMMC_DMA_MAX_BUFFER_SIZE;
+	assert(desc_cnt * sizeof(struct dw_idmac_desc) < dw_params.desc_size);
+
+	base = dw_params.reg_base;
+	desc = (struct dw_idmac_desc *)dw_params.desc_base;
+	mmio_write_32(base + DWMMC_BYTCNT, size);
+	mmio_write_32(base + DWMMC_RINTSTS, ~0);
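+	/*
+	 * Build the chained IDMAC descriptor list; each descriptor covers up
+	 * to DWMMC_DMA_MAX_BUFFER_SIZE bytes of the buffer.
+	 */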
+	for (i = 0; i < desc_cnt; i++) {
+		desc[i].des0 = IDMAC_DES0_OWN | IDMAC_DES0_CH | IDMAC_DES0_DIC;
+		desc[i].des1 = IDMAC_DES1_BS1(DWMMC_DMA_MAX_BUFFER_SIZE);
+		desc[i].des2 = buf + DWMMC_DMA_MAX_BUFFER_SIZE * i;
+		desc[i].des3 = dw_params.desc_base +
+			       (sizeof(struct dw_idmac_desc)) * (i + 1);
+	}
+	/* first descriptor */
+	desc->des0 |= IDMAC_DES0_FS;
+	/* last descriptor */
+	last = desc_cnt - 1;
+	(desc + last)->des0 |= IDMAC_DES0_LD;
+	(desc + last)->des0 &= ~(IDMAC_DES0_DIC | IDMAC_DES0_CH);
+	(desc + last)->des1 = IDMAC_DES1_BS1(size - (last *
+				  DWMMC_DMA_MAX_BUFFER_SIZE));
+	/* set the next descriptor address to 0 to terminate the chain */
+	(desc + last)->des3 = 0;
+
+	mmio_write_32(base + DWMMC_DBADDR, dw_params.desc_base);
+	clean_dcache_range(dw_params.desc_base,
+			   desc_cnt * DWMMC_DMA_MAX_BUFFER_SIZE);
+
+	return 0;
+}
+
+static int dw_read(int lba, uintptr_t buf, size_t size)
+{
+	return 0;
+}
+
+static int dw_write(int lba, uintptr_t buf, size_t size)
+{
+	return 0;
+}
+
+void dw_mmc_init(dw_mmc_params_t *params)
+{
+	assert((params != 0) &&
+	       ((params->reg_base & EMMC_BLOCK_MASK) == 0) &&
+	       ((params->desc_base & EMMC_BLOCK_MASK) == 0) &&
+	       ((params->desc_size & EMMC_BLOCK_MASK) == 0) &&
+	       (params->desc_size > 0) &&
+	       (params->clk_rate > 0) &&
+	       ((params->bus_width == EMMC_BUS_WIDTH_1) ||
+		(params->bus_width == EMMC_BUS_WIDTH_4) ||
+		(params->bus_width == EMMC_BUS_WIDTH_8)));
+
+	memcpy(&dw_params, params, sizeof(dw_mmc_params_t));
+	emmc_init(&dw_mmc_ops, params->clk_rate, params->bus_width,
+		  params->flags);
+}
diff --git a/drivers/synopsys/ufs/dw_ufs.c b/drivers/synopsys/ufs/dw_ufs.c
new file mode 100644
index 0000000..d8ed5b6
--- /dev/null
+++ b/drivers/synopsys/ufs/dw_ufs.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <dw_ufs.h>
+#include <mmio.h>
+#include <stdint.h>
+#include <string.h>
+#include <ufs.h>
+
+static int dwufs_phy_init(ufs_params_t *params)
+{
+	uintptr_t base;
+	unsigned int fsm0, fsm1;
+	unsigned int data;
+	int result;
+
+	assert((params != NULL) && (params->reg_base != 0));
+
+	base = params->reg_base;
+
+	/* Unipro VS_MPHY disable */
+	ufshc_dme_set(VS_MPHY_DISABLE_OFFSET, 0, VS_MPHY_DISABLE_MPHYDIS);
+	ufshc_dme_set(PA_HS_SERIES_OFFSET, 0, 2);
+	/* MPHY CBRATESEL */
+	ufshc_dme_set(0x8114, 0, 1);
+	/* MPHY CBOVRCTRL2 */
+	ufshc_dme_set(0x8121, 0, 0x2d);
+	/* MPHY CBOVRCTRL3 */
+	ufshc_dme_set(0x8122, 0, 0x1);
+	ufshc_dme_set(VS_MPHY_CFG_UPDT_OFFSET, 0, 1);
+
+	/* MPHY RXOVRCTRL4 rx0 */
+	ufshc_dme_set(0x800d, 4, 0x58);
+	/* MPHY RXOVRCTRL4 rx1 */
+	ufshc_dme_set(0x800d, 5, 0x58);
+	/* MPHY RXOVRCTRL5 rx0 */
+	ufshc_dme_set(0x800e, 4, 0xb);
+	/* MPHY RXOVRCTRL5 rx1 */
+	ufshc_dme_set(0x800e, 5, 0xb);
+	/* MPHY RXSQCONTROL rx0 */
+	ufshc_dme_set(0x8009, 4, 0x1);
+	/* MPHY RXSQCONTROL rx1 */
+	ufshc_dme_set(0x8009, 5, 0x1);
+	ufshc_dme_set(VS_MPHY_CFG_UPDT_OFFSET, 0, 1);
+
+	ufshc_dme_set(0x8113, 0, 0x1);
+	ufshc_dme_set(VS_MPHY_CFG_UPDT_OFFSET, 0, 1);
+
+	ufshc_dme_set(RX_HS_G3_SYNC_LENGTH_CAP_OFFSET, 4, 0x4a);
+	ufshc_dme_set(RX_HS_G3_SYNC_LENGTH_CAP_OFFSET, 5, 0x4a);
+	ufshc_dme_set(RX_HS_G2_SYNC_LENGTH_CAP_OFFSET, 4, 0x4a);
+	ufshc_dme_set(RX_HS_G2_SYNC_LENGTH_CAP_OFFSET, 5, 0x4a);
+	ufshc_dme_set(RX_MIN_ACTIVATETIME_CAP_OFFSET, 4, 0x7);
+	ufshc_dme_set(RX_MIN_ACTIVATETIME_CAP_OFFSET, 5, 0x7);
+	ufshc_dme_set(TX_HIBERN8TIME_CAP_OFFSET, 0, 0x5);
+	ufshc_dme_set(TX_HIBERN8TIME_CAP_OFFSET, 1, 0x5);
+	ufshc_dme_set(VS_MPHY_CFG_UPDT_OFFSET, 0, 1);
+
+	result = ufshc_dme_get(VS_MPHY_DISABLE_OFFSET, 0, &data);
+	assert((result == 0) && (data == VS_MPHY_DISABLE_MPHYDIS));
+	/* enable Unipro VS MPHY */
+	ufshc_dme_set(VS_MPHY_DISABLE_OFFSET, 0, 0);
+
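+	/* Wait until both TX lanes have entered the HIBERN8 state. */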
+	while (1) {
+		result = ufshc_dme_get(TX_FSM_STATE_OFFSET, 0, &fsm0);
+		assert(result == 0);
+		result = ufshc_dme_get(TX_FSM_STATE_OFFSET, 1, &fsm1);
+		assert(result == 0);
+		if ((fsm0 == TX_FSM_STATE_HIBERN8) &&
+		    (fsm1 == TX_FSM_STATE_HIBERN8))
+			break;
+	}
+
+	mmio_write_32(base + HCLKDIV, 0xE4);
+	mmio_clrbits_32(base + AHIT, 0x3FF);
+
+	ufshc_dme_set(PA_LOCAL_TX_LCC_ENABLE_OFFSET, 0, 0);
+	ufshc_dme_set(VS_MK2_EXTN_SUPPORT_OFFSET, 0, 0);
+
+	result = ufshc_dme_get(VS_MK2_EXTN_SUPPORT_OFFSET, 0, &data);
+	assert((result == 0) && (data == 0));
+
+	ufshc_dme_set(DL_AFC0_CREDIT_THRESHOLD_OFFSET, 0, 0);
+	ufshc_dme_set(DL_TC0_OUT_ACK_THRESHOLD_OFFSET, 0, 0);
+	ufshc_dme_set(DL_TC0_TX_FC_THRESHOLD_OFFSET, 0, 9);
+	(void)result;
+	return 0;
+}
+
+static int dwufs_phy_set_pwr_mode(ufs_params_t *params)
+{
+	int result;
+	unsigned int data, tx_lanes, rx_lanes;
+	uintptr_t base;
+
+	assert((params != NULL) && (params->reg_base != 0));
+
+	base = params->reg_base;
+
+	result = ufshc_dme_get(PA_TACTIVATE_OFFSET, 0, &data);
+	assert(result == 0);
+	if (data < 7) {
+		result = ufshc_dme_set(PA_TACTIVATE_OFFSET, 0, 7);
+		assert(result == 0);
+	}
+	result = ufshc_dme_get(PA_CONNECTED_TX_DATA_LANES_OFFSET, 0, &tx_lanes);
+	assert(result == 0);
+	result = ufshc_dme_get(PA_CONNECTED_RX_DATA_LANES_OFFSET, 0, &rx_lanes);
+	assert(result == 0);
+
+	result = ufshc_dme_set(PA_TX_SKIP_OFFSET, 0, 0);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_TX_GEAR_OFFSET, 0, 3);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_RX_GEAR_OFFSET, 0, 3);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_HS_SERIES_OFFSET, 0, 2);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_TX_TERMINATION_OFFSET, 0, 1);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_RX_TERMINATION_OFFSET, 0, 1);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_SCRAMBLING_OFFSET, 0, 0);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_ACTIVE_TX_DATA_LANES_OFFSET, 0, tx_lanes);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_ACTIVE_RX_DATA_LANES_OFFSET, 0, rx_lanes);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA0_OFFSET, 0, 8191);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA1_OFFSET, 0, 65535);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA2_OFFSET, 0, 32767);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_FC0_PROTECTION_TIMEOUT_OFFSET, 0, 8191);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_TC0_REPLAY_TIMEOUT_OFFSET, 0, 65535);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_AFC0_REQ_TIMEOUT_OFFSET, 0, 32767);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA3_OFFSET, 0, 8191);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA4_OFFSET, 0, 65535);
+	assert(result == 0);
+	result = ufshc_dme_set(PA_PWR_MODE_USER_DATA5_OFFSET, 0, 32767);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_FC1_PROTECTION_TIMEOUT_OFFSET, 0, 8191);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_TC1_REPLAY_TIMEOUT_OFFSET, 0, 65535);
+	assert(result == 0);
+	result = ufshc_dme_set(DME_AFC1_REQ_TIMEOUT_OFFSET, 0, 32767);
+	assert(result == 0);
+
+	result = ufshc_dme_set(PA_PWR_MODE_OFFSET, 0, 0x11);
+	assert(result == 0);
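+	/* Wait for the power mode change to complete, then check HCS.UPMCRS. */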
+	do {
+		data = mmio_read_32(base + IS);
+	} while ((data & UFS_INT_UPMS) == 0);
+	mmio_write_32(base + IS, UFS_INT_UPMS);
+	data = mmio_read_32(base + HCS);
+	if ((data & HCS_UPMCRS_MASK) == HCS_PWR_LOCAL)
+		INFO("ufs: change power mode success\n");
+	else
+		WARN("ufs: HCS.UPMCRS error, HCS:0x%x\n", data);
+	(void)result;
+	return 0;
+}
+
+const ufs_ops_t dw_ufs_ops = {
+	.phy_init		= dwufs_phy_init,
+	.phy_set_pwr_mode	= dwufs_phy_set_pwr_mode,
+};
+
+int dw_ufs_init(dw_ufs_params_t *params)
+{
+	ufs_params_t ufs_params;
+
+	memset(&ufs_params, 0, sizeof(ufs_params));
+	ufs_params.reg_base = params->reg_base;
+	ufs_params.desc_base = params->desc_base;
+	ufs_params.desc_size = params->desc_size;
+	ufs_params.flags = params->flags;
+	ufs_init(&dw_ufs_ops, &ufs_params);
+	return 0;
+}
diff --git a/drivers/ti/uart/16550_console.S b/drivers/ti/uart/16550_console.S
new file mode 100644
index 0000000..03ca526
--- /dev/null
+++ b/drivers/ti/uart/16550_console.S
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !ERROR_DEPRECATED
+#include "./aarch64/16550_console.S"
+#endif
diff --git a/drivers/ti/uart/aarch64/16550_console.S b/drivers/ti/uart/aarch64/16550_console.S
new file mode 100644
index 0000000..f9ccd57
--- /dev/null
+++ b/drivers/ti/uart/aarch64/16550_console.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <uart_16550.h>
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+	/* -----------------------------------------------
+	 * int console_core_init(unsigned long base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, init_fail
+	cbz	w2, init_fail
+
+	/* Program the baudrate */
+	/* Divisor =  Uart clock / (16 * baudrate) */
+	lsl	w2, w2, #4
+	udiv	w2, w1, w2
+	and	w1, w2, #0xff		/* w1 = DLL */
+	lsr	w2, w2, #8
+	and	w2, w2, #0xff		/* w2 = DLLM */
+	ldr	w3, [x0, #UARTLCR]
+	orr	w3, w3, #UARTLCR_DLAB
+	str	w3, [x0, #UARTLCR]	/* enable DLL, DLLM programming */
+	str	w1, [x0, #UARTDLL]	/* program DLL */
+	str	w2, [x0, #UARTDLLM]	/* program DLLM */
+	mov	w2, #~UARTLCR_DLAB
+	and	w3, w3, w2
+	str	w3, [x0, #UARTLCR]	/* disable DLL, DLLM programming */
+
+	/* 8n1 */
+	mov	w3, #3
+	str	w3, [x0, #UARTLCR]
+	/* no interrupt */
+	mov	w3, #0
+	str	w3, [x0, #UARTIER]
+	/* enable fifo, DMA */
+	mov	w3, #(UARTFCR_FIFOEN | UARTFCR_DMAEN)
+	str	w3, [x0, #UARTFCR]
+	/* DTR + RTS */
+	mov	w3, #3
+	str	w3, [x0, #UARTMCR]
+	mov	w0, #1
+init_fail:
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, unsigned int base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+	/* Check if the transmit FIFO is full */
+1:	ldr	w2, [x1, #UARTLSR]
+	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	b.ne	1b
+	mov	w2, #0xD		/* '\r' */
+	str	w2, [x1, #UARTTX]
+
+	/* Check if the transmit FIFO is full */
+2:	ldr	w2, [x1, #UARTLSR]
+	and	w2, w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	cmp	w2, #(UARTLSR_TEMT | UARTLSR_THRE)
+	b.ne	2b
+	str	w0, [x1, #UARTTX]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : w0 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	/* Check if the receive FIFO is empty */
+1:	ldr	w1, [x0, #UARTLSR]
+	tbz	w1, #UARTLSR_RDR_BIT, 1b
+	ldr	w0, [x0, #UARTRX]
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	/* Placeholder */
+	mov	w0, #0
+	ret
+endfunc console_core_flush
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
new file mode 100644
index 0000000..d513d0a
--- /dev/null
+++ b/drivers/ufs/ufs.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <endian.h>
+#include <errno.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <stdint.h>
+#include <string.h>
+#include <ufs.h>
+
+#define CDB_ADDR_MASK			127
+#define ALIGN_CDB(x)			(((x) + CDB_ADDR_MASK) & ~CDB_ADDR_MASK)
+#define ALIGN_8(x)			(((x) + 7) & ~7)
+
+#define UFS_DESC_SIZE			0x400
+#define MAX_UFS_DESC_SIZE		0x8000		/* 32 descriptors */
+
+#define MAX_PRDT_SIZE			0x40000		/* 256KB */
+
+static ufs_params_t ufs_params;
+static int nutrs;	/* Number of UTP Transfer Request Slots */
+
+int ufshc_send_uic_cmd(uintptr_t base, uic_cmd_t *cmd)
+{
+	unsigned int data;
+
+	data = mmio_read_32(base + HCS);
+	if ((data & HCS_UCRDY) == 0)
+		return -EBUSY;
+	mmio_write_32(base + IS, ~0);
+	mmio_write_32(base + UCMDARG1, cmd->arg1);
+	mmio_write_32(base + UCMDARG2, cmd->arg2);
+	mmio_write_32(base + UCMDARG3, cmd->arg3);
+	mmio_write_32(base + UICCMD, cmd->op);
+
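+	/* Wait for UIC command completion and return the result code. */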
+	do {
+		data = mmio_read_32(base + IS);
+	} while ((data & UFS_INT_UCCS) == 0);
+	mmio_write_32(base + IS, UFS_INT_UCCS);
+	return mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
+}
+
+int ufshc_dme_get(unsigned int attr, unsigned int idx, unsigned int *val)
+{
+	uintptr_t base;
+	unsigned int data;
+	int retries;
+
+	assert((ufs_params.reg_base != 0) && (val != NULL));
+
+	base = ufs_params.reg_base;
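+	/* Wait up to ~100ms for the host controller to become ready for a UIC command. */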
+	for (retries = 0; retries < 100; retries++) {
+		data = mmio_read_32(base + HCS);
+		if ((data & HCS_UCRDY) != 0)
+			break;
+		mdelay(1);
+	}
+	if (retries >= 100)
+		return -EBUSY;
+
+	mmio_write_32(base + IS, ~0);
+	mmio_write_32(base + UCMDARG1, (attr << 16) | GEN_SELECTOR_IDX(idx));
+	mmio_write_32(base + UCMDARG2, 0);
+	mmio_write_32(base + UCMDARG3, 0);
+	mmio_write_32(base + UICCMD, DME_GET);
+	do {
+		data = mmio_read_32(base + IS);
+		if (data & UFS_INT_UE)
+			return -EINVAL;
+	} while ((data & UFS_INT_UCCS) == 0);
+	mmio_write_32(base + IS, UFS_INT_UCCS);
+	data = mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
+	assert(data == 0);
+
+	*val = mmio_read_32(base + UCMDARG3);
+	return 0;
+}
+
+int ufshc_dme_set(unsigned int attr, unsigned int idx, unsigned int val)
+{
+	uintptr_t base;
+	unsigned int data;
+
+	assert((ufs_params.reg_base != 0));
+
+	base = ufs_params.reg_base;
+	data = mmio_read_32(base + HCS);
+	if ((data & HCS_UCRDY) == 0)
+		return -EBUSY;
+	mmio_write_32(base + IS, ~0);
+	mmio_write_32(base + UCMDARG1, (attr << 16) | GEN_SELECTOR_IDX(idx));
+	mmio_write_32(base + UCMDARG2, 0);
+	mmio_write_32(base + UCMDARG3, val);
+	mmio_write_32(base + UICCMD, DME_SET);
+	do {
+		data = mmio_read_32(base + IS);
+		if (data & UFS_INT_UE)
+			return -EINVAL;
+	} while ((data & UFS_INT_UCCS) == 0);
+	mmio_write_32(base + IS, UFS_INT_UCCS);
+	data = mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
+	assert(data == 0);
+	return 0;
+}
+
+static void ufshc_reset(uintptr_t base)
+{
+	unsigned int data;
+
+	/* Enable Host Controller */
+	mmio_write_32(base + HCE, HCE_ENABLE);
+	/* Wait until basic initialization sequence completed */
+	do {
+		data = mmio_read_32(base + HCE);
+	} while ((data & HCE_ENABLE) == 0);
+
+	/* Enable Interrupts */
+	data = UFS_INT_UCCS | UFS_INT_ULSS | UFS_INT_UE | UFS_INT_UTPES |
+	       UFS_INT_DFES | UFS_INT_HCFES | UFS_INT_SBFES;
+	mmio_write_32(base + IE, data);
+}
+
+static int ufshc_link_startup(uintptr_t base)
+{
+	uic_cmd_t cmd;
+	int data, result;
+	int retries;
+
+	for (retries = 10; retries > 0; retries--) {
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op = DME_LINKSTARTUP;
+		result = ufshc_send_uic_cmd(base, &cmd);
+		if (result != 0)
+			continue;
+		while ((mmio_read_32(base + HCS) & HCS_DP) == 0)
+			;
+		data = mmio_read_32(base + IS);
+		if (data & UFS_INT_ULSS)
+			mmio_write_32(base + IS, UFS_INT_ULSS);
+		return 0;
+	}
+	return -EIO;
+}
+
+/* Check Door Bell register to get an empty slot */
+static int get_empty_slot(int *slot)
+{
+	unsigned int data;
+	int i;
+
+	data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
+	for (i = 0; i < nutrs; i++) {
+		if ((data & 1) == 0)
+			break;
+		data = data >> 1;
+	}
+	if (i >= nutrs)
+		return -EBUSY;
+	*slot = i;
+	return 0;
+}
+
+static void get_utrd(utp_utrd_t *utrd)
+{
+	uintptr_t base;
+	int slot = 0, result;
+	utrd_header_t *hd;
+
+	assert(utrd != NULL);
+	result = get_empty_slot(&slot);
+	assert(result == 0);
+
+	/* clear utrd */
+	memset((void *)utrd, 0, sizeof(utp_utrd_t));
+	base = ufs_params.desc_base + (slot * UFS_DESC_SIZE);
+	/* clear the descriptor */
+	memset((void *)base, 0, UFS_DESC_SIZE);
+
+	utrd->header = base;
+	utrd->task_tag = slot + 1;
+	/* CDB address should be aligned with 128 bytes */
+	utrd->upiu = ALIGN_CDB(utrd->header + sizeof(utrd_header_t));
+	utrd->resp_upiu = ALIGN_8(utrd->upiu + sizeof(cmd_upiu_t));
+	utrd->size_upiu = utrd->resp_upiu - utrd->upiu;
+	utrd->size_resp_upiu = ALIGN_8(sizeof(resp_upiu_t));
+	utrd->prdt = utrd->resp_upiu + utrd->size_resp_upiu;
+
+	hd = (utrd_header_t *)utrd->header;
+	hd->ucdba = utrd->upiu & UINT32_MAX;
+	hd->ucdbau = (utrd->upiu >> 32) & UINT32_MAX;
+	/* Both RUL and RUO are expressed in DWORDs */
+	hd->rul = utrd->size_resp_upiu >> 2;
+	hd->ruo = utrd->size_upiu >> 2;
+	(void)result;
+}
+
+/*
+ * Prepare UTRD, Command UPIU, Response UPIU.
+ */
+static int ufs_prepare_cmd(utp_utrd_t *utrd, uint8_t op, uint8_t lun,
+			   int lba, uintptr_t buf, size_t length)
+{
+	utrd_header_t *hd;
+	cmd_upiu_t *upiu;
+	prdt_t *prdt;
+	unsigned int ulba;
+	unsigned int lba_cnt;
+	int prdt_size;
+
+
+	mmio_write_32(ufs_params.reg_base + UTRLBA,
+		      utrd->header & UINT32_MAX);
+	mmio_write_32(ufs_params.reg_base + UTRLBAU,
+		      (utrd->header >> 32) & UINT32_MAX);
+
+	hd = (utrd_header_t *)utrd->header;
+	upiu = (cmd_upiu_t *)utrd->upiu;
+
+	hd->i = 1;
+	hd->ct = CT_UFS_STORAGE;
+	hd->ocs = OCS_MASK;
+
+	upiu->trans_type = CMD_UPIU;
+	upiu->task_tag = utrd->task_tag;
+	upiu->cdb[0] = op;
+	ulba = (unsigned int)lba;
+	lba_cnt = (unsigned int)(length >> UFS_BLOCK_SHIFT);
+	switch (op) {
+	case CDBCMD_TEST_UNIT_READY:
+		break;
+	case CDBCMD_READ_CAPACITY_10:
+		hd->dd = DD_OUT;
+		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
+		upiu->lun = lun;
+		break;
+	case CDBCMD_READ_10:
+		hd->dd = DD_OUT;
+		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
+		upiu->lun = lun;
+		upiu->cdb[1] = RW_WITHOUT_CACHE;
+		/* set logical block address */
+		upiu->cdb[2] = (ulba >> 24) & 0xff;
+		upiu->cdb[3] = (ulba >> 16) & 0xff;
+		upiu->cdb[4] = (ulba >> 8) & 0xff;
+		upiu->cdb[5] = ulba & 0xff;
+		/* set transfer length */
+		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
+		upiu->cdb[8] = lba_cnt & 0xff;
+		break;
+	case CDBCMD_WRITE_10:
+		hd->dd = DD_IN;
+		upiu->flags = UPIU_FLAGS_W | UPIU_FLAGS_ATTR_S;
+		upiu->lun = lun;
+		upiu->cdb[1] = RW_WITHOUT_CACHE;
+		/* set logical block address */
+		upiu->cdb[2] = (ulba >> 24) & 0xff;
+		upiu->cdb[3] = (ulba >> 16) & 0xff;
+		upiu->cdb[4] = (ulba >> 8) & 0xff;
+		upiu->cdb[5] = ulba & 0xff;
+		/* set transfer length */
+		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
+		upiu->cdb[8] = lba_cnt & 0xff;
+		break;
+	default:
+		assert(0);
+	}
+	if (hd->dd == DD_IN)
+		flush_dcache_range(buf, length);
+	else if (hd->dd == DD_OUT)
+		inv_dcache_range(buf, length);
+	if (length) {
+		upiu->exp_data_trans_len = htobe32(length);
+		assert(lba_cnt <= UINT16_MAX);
+		prdt = (prdt_t *)utrd->prdt;
+
+		prdt_size = 0;
+		while (length > 0) {
+			prdt->dba = (unsigned int)(buf & UINT32_MAX);
+			prdt->dbau = (unsigned int)((buf >> 32) & UINT32_MAX);
+			/* prdt->dbc counts from 0 */
+			if (length > MAX_PRDT_SIZE) {
+				prdt->dbc = MAX_PRDT_SIZE - 1;
+				length = length - MAX_PRDT_SIZE;
+			} else {
+				prdt->dbc = length - 1;
+				length = 0;
+			}
+			buf += MAX_PRDT_SIZE;
+			prdt++;
+			prdt_size += sizeof(prdt_t);
+		}
+		utrd->size_prdt = ALIGN_8(prdt_size);
+		hd->prdtl = utrd->size_prdt >> 2;
+		hd->prdto = (utrd->size_upiu + utrd->size_resp_upiu) >> 2;
+	}
+
+	flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
+	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
+	return 0;
+}
+
+static int ufs_prepare_query(utp_utrd_t *utrd, uint8_t op, uint8_t idn,
+			     uint8_t index, uint8_t sel,
+			     uintptr_t buf, size_t length)
+{
+	utrd_header_t *hd;
+	query_upiu_t *query_upiu;
+
+
+	hd = (utrd_header_t *)utrd->header;
+	query_upiu = (query_upiu_t *)utrd->upiu;
+
+	mmio_write_32(ufs_params.reg_base + UTRLBA,
+		      utrd->header & UINT32_MAX);
+	mmio_write_32(ufs_params.reg_base + UTRLBAU,
+		      (utrd->header >> 32) & UINT32_MAX);
+
+
+	hd->i = 1;
+	hd->ct = CT_UFS_STORAGE;
+	hd->ocs = OCS_MASK;
+
+	query_upiu->trans_type = QUERY_REQUEST_UPIU;
+	query_upiu->task_tag = utrd->task_tag;
+	query_upiu->ts.desc.opcode = op;
+	query_upiu->ts.desc.idn = idn;
+	query_upiu->ts.desc.index = index;
+	query_upiu->ts.desc.selector = sel;
+	switch (op) {
+	case QUERY_READ_DESC:
+		query_upiu->query_func = QUERY_FUNC_STD_READ;
+		query_upiu->ts.desc.length = htobe16(length);
+		break;
+	case QUERY_WRITE_DESC:
+		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
+		query_upiu->ts.desc.length = htobe16(length);
+		memcpy((void *)(utrd->upiu + sizeof(query_upiu_t)),
+		       (void *)buf, length);
+		break;
+	case QUERY_READ_ATTR:
+	case QUERY_READ_FLAG:
+		query_upiu->query_func = QUERY_FUNC_STD_READ;
+		break;
+	case QUERY_CLEAR_FLAG:
+	case QUERY_SET_FLAG:
+		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
+		break;
+	case QUERY_WRITE_ATTR:
+		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
+		memcpy((void *)&query_upiu->ts.attr.value, (void *)buf, length);
+		break;
+	default:
+		assert(0);
+	}
+	flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
+	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
+	return 0;
+}
+
+static void ufs_prepare_nop_out(utp_utrd_t *utrd)
+{
+	utrd_header_t *hd;
+	nop_out_upiu_t *nop_out;
+
+	mmio_write_32(ufs_params.reg_base + UTRLBA,
+		      utrd->header & UINT32_MAX);
+	mmio_write_32(ufs_params.reg_base + UTRLBAU,
+		      (utrd->header >> 32) & UINT32_MAX);
+
+	hd = (utrd_header_t *)utrd->header;
+	nop_out = (nop_out_upiu_t *)utrd->upiu;
+
+	hd->i = 1;
+	hd->ct = CT_UFS_STORAGE;
+	hd->ocs = OCS_MASK;
+
+	nop_out->trans_type = 0;
+	nop_out->task_tag = utrd->task_tag;
+	flush_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
+	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
+}
+
+static void ufs_send_request(int task_tag)
+{
+	unsigned int data;
+	int slot;
+
+	slot = task_tag - 1;
+	/* clear all interrupts */
+	mmio_write_32(ufs_params.reg_base + IS, ~0);
+
+	mmio_write_32(ufs_params.reg_base + UTRLRSR, 1);
+	do {
+		data = mmio_read_32(ufs_params.reg_base + UTRLRSR);
+	} while (data == 0);
+
+	data = UTRIACR_IAEN | UTRIACR_CTR | UTRIACR_IACTH(0x1F) |
+	       UTRIACR_IATOVAL(0xFF);
+	mmio_write_32(ufs_params.reg_base + UTRIACR, data);
+	/* send request */
+	mmio_setbits_32(ufs_params.reg_base + UTRLDBR, 1 << slot);
+}
+
+static int ufs_check_resp(utp_utrd_t *utrd, int trans_type)
+{
+	utrd_header_t *hd;
+	resp_upiu_t *resp;
+	unsigned int data;
+	int slot;
+
+	hd = (utrd_header_t *)utrd->header;
+	resp = (resp_upiu_t *)utrd->resp_upiu;
+	inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
+	inv_dcache_range((uintptr_t)utrd, sizeof(utp_utrd_t));
+	do {
+		data = mmio_read_32(ufs_params.reg_base + IS);
+		if ((data & ~(UFS_INT_UCCS | UFS_INT_UTRCS)) != 0)
+			return -EIO;
+	} while ((data & UFS_INT_UTRCS) == 0);
+	slot = utrd->task_tag - 1;
+
+	data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
+	assert((data & (1 << slot)) == 0);
+	assert(hd->ocs == OCS_SUCCESS);
+	assert((resp->trans_type & TRANS_TYPE_CODE_MASK) == trans_type);
+	(void)resp;
+	(void)slot;
+	return 0;
+}
+
+#ifdef UFS_RESP_DEBUG
+static void dump_upiu(utp_utrd_t *utrd)
+{
+	utrd_header_t *hd;
+	int i;
+
+	hd = (utrd_header_t *)utrd->header;
+	INFO("utrd:0x%x, ruo:0x%x, rul:0x%x, ocs:0x%x, UTRLDBR:0x%x\n",
+		(unsigned int)(uintptr_t)utrd, hd->ruo, hd->rul, hd->ocs,
+		mmio_read_32(ufs_params.reg_base + UTRLDBR));
+	for (i = 0; i < sizeof(utrd_header_t); i += 4) {
+		INFO("[%lx]:0x%x\n",
+			(uintptr_t)utrd->header + i,
+			*(unsigned int *)((uintptr_t)utrd->header + i));
+	}
+
+	for (i = 0; i < sizeof(cmd_upiu_t); i += 4) {
+		INFO("cmd[%lx]:0x%x\n",
+			utrd->upiu + i,
+			*(unsigned int *)(utrd->upiu + i));
+	}
+	for (i = 0; i < sizeof(resp_upiu_t); i += 4) {
+		INFO("resp[%lx]:0x%x\n",
+			utrd->resp_upiu + i,
+			*(unsigned int *)(utrd->resp_upiu + i));
+	}
+	for (i = 0; i < sizeof(prdt_t); i += 4) {
+		INFO("prdt[%lx]:0x%x\n",
+			utrd->prdt + i,
+			*(unsigned int *)(utrd->prdt + i));
+	}
+}
+#endif
+
+static void ufs_verify_init(void)
+{
+	utp_utrd_t utrd;
+	int result;
+
+	get_utrd(&utrd);
+	ufs_prepare_nop_out(&utrd);
+	ufs_send_request(utrd.task_tag);
+	result = ufs_check_resp(&utrd, NOP_IN_UPIU);
+	assert(result == 0);
+	(void)result;
+}
+
+static void ufs_verify_ready(void)
+{
+	utp_utrd_t utrd;
+	int result;
+
+	get_utrd(&utrd);
+	ufs_prepare_cmd(&utrd, CDBCMD_TEST_UNIT_READY, 0, 0, 0, 0);
+	ufs_send_request(utrd.task_tag);
+	result = ufs_check_resp(&utrd, RESPONSE_UPIU);
+	assert(result == 0);
+	(void)result;
+}
+
+static void ufs_query(uint8_t op, uint8_t idn, uint8_t index, uint8_t sel,
+		      uintptr_t buf, size_t size)
+{
+	utp_utrd_t utrd;
+	query_resp_upiu_t *resp;
+	int result;
+
+	switch (op) {
+	case QUERY_READ_FLAG:
+	case QUERY_READ_ATTR:
+	case QUERY_READ_DESC:
+	case QUERY_WRITE_DESC:
+	case QUERY_WRITE_ATTR:
+		assert(((buf & 3) == 0) && (size != 0));
+		break;
+	}
+	get_utrd(&utrd);
+	ufs_prepare_query(&utrd, op, idn, index, sel, buf, size);
+	ufs_send_request(utrd.task_tag);
+	result = ufs_check_resp(&utrd, QUERY_RESPONSE_UPIU);
+	assert(result == 0);
+	resp = (query_resp_upiu_t *)utrd.resp_upiu;
+#ifdef UFS_RESP_DEBUG
+	dump_upiu(&utrd);
+#endif
+	assert(resp->query_resp == QUERY_RESP_SUCCESS);
+
+	switch (op) {
+	case QUERY_READ_FLAG:
+		*(uint32_t *)buf = (uint32_t)resp->ts.flag.value;
+		break;
+	case QUERY_READ_ATTR:
+	case QUERY_READ_DESC:
+		memcpy((void *)buf,
+		       (void *)(utrd.resp_upiu + sizeof(query_resp_upiu_t)),
+		       size);
+		break;
+	}
+	(void)result;
+}
+
+unsigned int ufs_read_attr(int idn)
+{
+	unsigned int value;
+
+	ufs_query(QUERY_READ_ATTR, idn, 0, 0,
+		  (uintptr_t)&value, sizeof(value));
+	return value;
+}
+
+void ufs_write_attr(int idn, unsigned int value)
+{
+	ufs_query(QUERY_WRITE_ATTR, idn, 0, 0,
+		  (uintptr_t)&value, sizeof(value));
+}
+
+unsigned int ufs_read_flag(int idn)
+{
+	unsigned int value;
+
+	ufs_query(QUERY_READ_FLAG, idn, 0, 0,
+		  (uintptr_t)&value, sizeof(value));
+	return value;
+}
+
+void ufs_set_flag(int idn)
+{
+	ufs_query(QUERY_SET_FLAG, idn, 0, 0, 0, 0);
+}
+
+void ufs_clear_flag(int idn)
+{
+	ufs_query(QUERY_CLEAR_FLAG, idn, 0, 0, 0, 0);
+}
+
+void ufs_read_desc(int idn, int index, uintptr_t buf, size_t size)
+{
+	ufs_query(QUERY_READ_DESC, idn, index, 0, buf, size);
+}
+
+void ufs_write_desc(int idn, int index, uintptr_t buf, size_t size)
+{
+	ufs_query(QUERY_WRITE_DESC, idn, index, 0, buf, size);
+}
+
+void ufs_read_capacity(int lun, unsigned int *num, unsigned int *size)
+{
+	utp_utrd_t utrd;
+	resp_upiu_t *resp;
+	sense_data_t *sense;
+	unsigned char data[CACHE_WRITEBACK_GRANULE << 1];
+	uintptr_t buf;
+	int result;
+	int retry;
+
+	assert((ufs_params.reg_base != 0) &&
+	       (ufs_params.desc_base != 0) &&
+	       (ufs_params.desc_size >= UFS_DESC_SIZE) &&
+	       (num != NULL) && (size != NULL));
+
+	/* align buf address */
+	buf = (uintptr_t)data;
+	buf = (buf + CACHE_WRITEBACK_GRANULE - 1) &
+	      ~(CACHE_WRITEBACK_GRANULE - 1);
+	memset((void *)buf, 0, CACHE_WRITEBACK_GRANULE);
+	flush_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
+	do {
+		get_utrd(&utrd);
+		ufs_prepare_cmd(&utrd, CDBCMD_READ_CAPACITY_10, lun, 0,
+				buf, READ_CAPACITY_LENGTH);
+		ufs_send_request(utrd.task_tag);
+		result = ufs_check_resp(&utrd, RESPONSE_UPIU);
+		assert(result == 0);
+#ifdef UFS_RESP_DEBUG
+		dump_upiu(&utrd);
+#endif
+		resp = (resp_upiu_t *)utrd.resp_upiu;
+		retry = 0;
+		sense = &resp->sd.sense;
+		if (sense->resp_code == SENSE_DATA_VALID) {
+			if ((sense->sense_key == SENSE_KEY_UNIT_ATTENTION) &&
+			    (sense->asc == 0x29) && (sense->ascq == 0)) {
+				retry = 1;
+			}
+		}
+		inv_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
+		/* last logical block address */
+		*num = be32toh(*(unsigned int *)buf);
+		if (*num)
+			*num += 1;
+		/* logical block length in bytes */
+		*size = be32toh(*(unsigned int *)(buf + 4));
+	} while (retry);
+	(void)result;
+}
+
+size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size)
+{
+	utp_utrd_t utrd;
+	resp_upiu_t *resp;
+	int result;
+
+	assert((ufs_params.reg_base != 0) &&
+	       (ufs_params.desc_base != 0) &&
+	       (ufs_params.desc_size >= UFS_DESC_SIZE));
+
+	memset((void *)buf, 0, size);
+	get_utrd(&utrd);
+	ufs_prepare_cmd(&utrd, CDBCMD_READ_10, lun, lba, buf, size);
+	ufs_send_request(utrd.task_tag);
+	result = ufs_check_resp(&utrd, RESPONSE_UPIU);
+	assert(result == 0);
+#ifdef UFS_RESP_DEBUG
+	dump_upiu(&utrd);
+#endif
+	resp = (resp_upiu_t *)utrd.resp_upiu;
+	(void)result;
+	return size - resp->res_trans_cnt;
+}
+
+size_t ufs_write_blocks(int lun, int lba, const uintptr_t buf, size_t size)
+{
+	utp_utrd_t utrd;
+	resp_upiu_t *resp;
+	int result;
+
+	assert((ufs_params.reg_base != 0) &&
+	       (ufs_params.desc_base != 0) &&
+	       (ufs_params.desc_size >= UFS_DESC_SIZE));
+
+	get_utrd(&utrd);
+	ufs_prepare_cmd(&utrd, CDBCMD_WRITE_10, lun, lba, buf, size);
+	ufs_send_request(utrd.task_tag);
+	result = ufs_check_resp(&utrd, RESPONSE_UPIU);
+	assert(result == 0);
+#ifdef UFS_RESP_DEBUG
+	dump_upiu(&utrd);
+#endif
+	resp = (resp_upiu_t *)utrd.resp_upiu;
+	(void)result;
+	return size - resp->res_trans_cnt;
+}
+
+static void ufs_enum(void)
+{
+	unsigned int blk_num, blk_size;
+	int i;
+
+	/* 0 means 1 slot */
+	nutrs = (mmio_read_32(ufs_params.reg_base + CAP) & CAP_NUTRS_MASK) + 1;
+	if (nutrs > (ufs_params.desc_size / UFS_DESC_SIZE))
+		nutrs = ufs_params.desc_size / UFS_DESC_SIZE;
+
+	ufs_verify_init();
+	ufs_verify_ready();
+
+	ufs_set_flag(FLAG_DEVICE_INIT);
+	mdelay(100);
+	/* dump available LUNs */
+	for (i = 0; i < UFS_MAX_LUNS; i++) {
+		ufs_read_capacity(i, &blk_num, &blk_size);
+		if (blk_num && blk_size) {
+			INFO("UFS LUN%d contains %d blocks with %d-byte size\n",
+			     i, blk_num, blk_size);
+		}
+	}
+}
+
+int ufs_init(const ufs_ops_t *ops, ufs_params_t *params)
+{
+	int result;
+	unsigned int data;
+	uic_cmd_t cmd;
+
+	assert((params != NULL) &&
+	       (params->reg_base != 0) &&
+	       (params->desc_base != 0) &&
+	       (params->desc_size >= UFS_DESC_SIZE));
+
+	memcpy(&ufs_params, params, sizeof(ufs_params_t));
+
+	if (ufs_params.flags & UFS_FLAGS_SKIPINIT) {
+		result = ufshc_dme_get(0x1571, 0, &data);
+		assert(result == 0);
+		result = ufshc_dme_get(0x41, 0, &data);
+		assert(result == 0);
+		if (data == 1) {
+			/* prepare to exit hibernate mode */
+			memset(&cmd, 0, sizeof(uic_cmd_t));
+			cmd.op = DME_HIBERNATE_EXIT;
+			result = ufshc_send_uic_cmd(ufs_params.reg_base,
+						    &cmd);
+			assert(result == 0);
+			data = mmio_read_32(ufs_params.reg_base + UCMDARG2);
+			assert(data == 0);
+			do {
+				data = mmio_read_32(ufs_params.reg_base + IS);
+			} while ((data & UFS_INT_UHXS) == 0);
+			mmio_write_32(ufs_params.reg_base + IS, UFS_INT_UHXS);
+			data = mmio_read_32(ufs_params.reg_base + HCS);
+			assert((data & HCS_UPMCRS_MASK) == HCS_PWR_LOCAL);
+		}
+		result = ufshc_dme_get(0x1568, 0, &data);
+		assert(result == 0);
+		assert((data > 0) && (data <= 3));
+	} else {
+		assert((ops != NULL) && (ops->phy_init != NULL) &&
+		       (ops->phy_set_pwr_mode != NULL));
+
+		ufshc_reset(ufs_params.reg_base);
+		ops->phy_init(&ufs_params);
+		result = ufshc_link_startup(ufs_params.reg_base);
+		assert(result == 0);
+		ops->phy_set_pwr_mode(&ufs_params);
+	}
+
+	ufs_enum();
+	(void)result;
+	return 0;
+}
diff --git a/fdts/fvp-base-gicv2-psci-aarch32.dtb b/fdts/fvp-base-gicv2-psci-aarch32.dtb
new file mode 100644
index 0000000..b044a76
--- /dev/null
+++ b/fdts/fvp-base-gicv2-psci-aarch32.dtb
Binary files differ
diff --git a/fdts/fvp-base-gicv2-psci-aarch32.dts b/fdts/fvp-base-gicv2-psci-aarch32.dts
new file mode 100644
index 0000000..1560a00
--- /dev/null
+++ b/fdts/fvp-base-gicv2-psci-aarch32.dts
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x80000000 0x00010000;
+
+/ {
+};
+
+/ {
+	model = "FVP Base";
+	compatible = "arm,vfp-base", "arm,vexpress";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	aliases {
+		serial0 = &v2m_serial0;
+		serial1 = &v2m_serial1;
+		serial2 = &v2m_serial2;
+		serial3 = &v2m_serial3;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
+		method = "smc";
+		cpu_suspend = <0x84000001>;
+		cpu_off = <0x84000002>;
+		cpu_on = <0x84000003>;
+		sys_poweroff = <0x84000008>;
+		sys_reset = <0x84000009>;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+				core1 {
+					cpu = <&CPU5>;
+				};
+				core2 {
+					cpu = <&CPU6>;
+				};
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		idle-states {
+			entry-method = "arm,psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
+				entry-latency-us = <40>;
+				exit-latency-us = <100>;
+				min-residency-us = <150>;
+			};
+
+			CLUSTER_SLEEP_0: cluster-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
+				entry-latency-us = <500>;
+				exit-latency-us = <1000>;
+				min-residency-us = <2500>;
+			};
+		};
+
+		CPU0:cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU1:cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x1>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU2:cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x2>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU3:cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x3>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU4:cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x100>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU5:cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x101>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU6:cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x102>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU7:cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x103>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x80000000 0 0x7F000000>,
+		      <0x00000008 0x80000000 0 0x80000000>;
+	};
+
+	gic: interrupt-controller@2f000000 {
+		compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+		#interrupt-cells = <3>;
+		#address-cells = <0>;
+		interrupt-controller;
+		reg = <0x0 0x2f000000 0 0x10000>,
+		      <0x0 0x2c000000 0 0x2000>,
+		      <0x0 0x2c010000 0 0x2000>,
+		      <0x0 0x2c02F000 0 0x2000>;
+		interrupts = <1 9 0xf04>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 13 0xff01>,
+			     <1 14 0xff01>,
+			     <1 11 0xff01>,
+			     <1 10 0xff01>;
+		clock-frequency = <100000000>;
+	};
+
+	timer@2a810000 {
+			compatible = "arm,armv7-timer-mem";
+			reg = <0x0 0x2a810000 0x0 0x10000>;
+			clock-frequency = <100000000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			frame@2a830000 {
+				frame-number = <1>;
+				interrupts = <0 26 4>;
+				reg = <0x0 0x2a830000 0x0 0x10000>;
+			};
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <0 60 4>,
+			     <0 61 4>,
+			     <0 62 4>,
+			     <0 63 4>;
+	};
+
+	smb {
+		compatible = "simple-bus";
+
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0x08000000 0x04000000>,
+			 <1 0 0 0x14000000 0x04000000>,
+			 <2 0 0 0x18000000 0x04000000>,
+			 <3 0 0 0x1c000000 0x04000000>,
+			 <4 0 0 0x0c000000 0x04000000>,
+			 <5 0 0 0x10000000 0x04000000>;
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 63>;
+		interrupt-map = <0 0  0 &gic 0  0 4>,
+				<0 0  1 &gic 0  1 4>,
+				<0 0  2 &gic 0  2 4>,
+				<0 0  3 &gic 0  3 4>,
+				<0 0  4 &gic 0  4 4>,
+				<0 0  5 &gic 0  5 4>,
+				<0 0  6 &gic 0  6 4>,
+				<0 0  7 &gic 0  7 4>,
+				<0 0  8 &gic 0  8 4>,
+				<0 0  9 &gic 0  9 4>,
+				<0 0 10 &gic 0 10 4>,
+				<0 0 11 &gic 0 11 4>,
+				<0 0 12 &gic 0 12 4>,
+				<0 0 13 &gic 0 13 4>,
+				<0 0 14 &gic 0 14 4>,
+				<0 0 15 &gic 0 15 4>,
+				<0 0 16 &gic 0 16 4>,
+				<0 0 17 &gic 0 17 4>,
+				<0 0 18 &gic 0 18 4>,
+				<0 0 19 &gic 0 19 4>,
+				<0 0 20 &gic 0 20 4>,
+				<0 0 21 &gic 0 21 4>,
+				<0 0 22 &gic 0 22 4>,
+				<0 0 23 &gic 0 23 4>,
+				<0 0 24 &gic 0 24 4>,
+				<0 0 25 &gic 0 25 4>,
+				<0 0 26 &gic 0 26 4>,
+				<0 0 27 &gic 0 27 4>,
+				<0 0 28 &gic 0 28 4>,
+				<0 0 29 &gic 0 29 4>,
+				<0 0 30 &gic 0 30 4>,
+				<0 0 31 &gic 0 31 4>,
+				<0 0 32 &gic 0 32 4>,
+				<0 0 33 &gic 0 33 4>,
+				<0 0 34 &gic 0 34 4>,
+				<0 0 35 &gic 0 35 4>,
+				<0 0 36 &gic 0 36 4>,
+				<0 0 37 &gic 0 37 4>,
+				<0 0 38 &gic 0 38 4>,
+				<0 0 39 &gic 0 39 4>,
+				<0 0 40 &gic 0 40 4>,
+				<0 0 41 &gic 0 41 4>,
+				<0 0 42 &gic 0 42 4>;
+
+		/include/ "rtsm_ve-motherboard.dtsi"
+	};
+
+	panels {
+		panel@0 {
+			compatible	= "panel";
+			mode		= "XVGA";
+			refresh		= <60>;
+			xres		= <1024>;
+			yres		= <768>;
+			pixclock	= <15748>;
+			left_margin	= <152>;
+			right_margin	= <48>;
+			upper_margin	= <23>;
+			lower_margin	= <3>;
+			hsync_len	= <104>;
+			vsync_len	= <4>;
+			sync		= <0>;
+			vmode		= "FB_VMODE_NONINTERLACED";
+			tim2		= "TIM2_BCD", "TIM2_IPC";
+			cntl		= "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+			caps		= "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+			bpp		= <16>;
+		};
+	};
+};
diff --git a/fdts/fvp-base-gicv2-psci.dtb b/fdts/fvp-base-gicv2-psci.dtb
index b8a31ce..d86cb78 100644
--- a/fdts/fvp-base-gicv2-psci.dtb
+++ b/fdts/fvp-base-gicv2-psci.dtb
Binary files differ
diff --git a/fdts/fvp-base-gicv2-psci.dts b/fdts/fvp-base-gicv2-psci.dts
index c1c9efb..941040d 100644
--- a/fdts/fvp-base-gicv2-psci.dts
+++ b/fdts/fvp-base-gicv2-psci.dts
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /dts-v1/;
@@ -102,7 +78,8 @@
 
 			CPU_SLEEP_0: cpu-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
 				entry-latency-us = <40>;
 				exit-latency-us = <100>;
 				min-residency-us = <150>;
@@ -110,7 +87,8 @@
 
 			CLUSTER_SLEEP_0: cluster-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
 				entry-latency-us = <500>;
 				exit-latency-us = <1000>;
 				min-residency-us = <2500>;
@@ -123,6 +101,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU1:cpu@1 {
@@ -131,6 +110,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU2:cpu@2 {
@@ -139,6 +119,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU3:cpu@3 {
@@ -147,6 +128,7 @@
 			reg = <0x0 0x3>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU4:cpu@100 {
@@ -155,6 +137,7 @@
 			reg = <0x0 0x100>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU5:cpu@101 {
@@ -163,6 +146,7 @@
 			reg = <0x0 0x101>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU6:cpu@102 {
@@ -171,6 +155,7 @@
 			reg = <0x0 0x102>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU7:cpu@103 {
@@ -179,6 +164,11 @@
 			reg = <0x0 0x103>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
 
@@ -243,52 +233,6 @@
 			 <4 0 0 0x0c000000 0x04000000>,
 			 <5 0 0 0x10000000 0x04000000>;
 
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0  0 4>,
-				<0 0  1 &gic 0  1 4>,
-				<0 0  2 &gic 0  2 4>,
-				<0 0  3 &gic 0  3 4>,
-				<0 0  4 &gic 0  4 4>,
-				<0 0  5 &gic 0  5 4>,
-				<0 0  6 &gic 0  6 4>,
-				<0 0  7 &gic 0  7 4>,
-				<0 0  8 &gic 0  8 4>,
-				<0 0  9 &gic 0  9 4>,
-				<0 0 10 &gic 0 10 4>,
-				<0 0 11 &gic 0 11 4>,
-				<0 0 12 &gic 0 12 4>,
-				<0 0 13 &gic 0 13 4>,
-				<0 0 14 &gic 0 14 4>,
-				<0 0 15 &gic 0 15 4>,
-				<0 0 16 &gic 0 16 4>,
-				<0 0 17 &gic 0 17 4>,
-				<0 0 18 &gic 0 18 4>,
-				<0 0 19 &gic 0 19 4>,
-				<0 0 20 &gic 0 20 4>,
-				<0 0 21 &gic 0 21 4>,
-				<0 0 22 &gic 0 22 4>,
-				<0 0 23 &gic 0 23 4>,
-				<0 0 24 &gic 0 24 4>,
-				<0 0 25 &gic 0 25 4>,
-				<0 0 26 &gic 0 26 4>,
-				<0 0 27 &gic 0 27 4>,
-				<0 0 28 &gic 0 28 4>,
-				<0 0 29 &gic 0 29 4>,
-				<0 0 30 &gic 0 30 4>,
-				<0 0 31 &gic 0 31 4>,
-				<0 0 32 &gic 0 32 4>,
-				<0 0 33 &gic 0 33 4>,
-				<0 0 34 &gic 0 34 4>,
-				<0 0 35 &gic 0 35 4>,
-				<0 0 36 &gic 0 36 4>,
-				<0 0 37 &gic 0 37 4>,
-				<0 0 38 &gic 0 38 4>,
-				<0 0 39 &gic 0 39 4>,
-				<0 0 40 &gic 0 40 4>,
-				<0 0 41 &gic 0 41 4>,
-				<0 0 42 &gic 0 42 4>;
-
 		/include/ "rtsm_ve-motherboard.dtsi"
 	};
 
diff --git a/fdts/fvp-base-gicv2legacy-psci.dtb b/fdts/fvp-base-gicv2legacy-psci.dtb
deleted file mode 100644
index 4270623..0000000
--- a/fdts/fvp-base-gicv2legacy-psci.dtb
+++ /dev/null
Binary files differ
diff --git a/fdts/fvp-base-gicv2legacy-psci.dts b/fdts/fvp-base-gicv2legacy-psci.dts
deleted file mode 100644
index 7bd5ea2..0000000
--- a/fdts/fvp-base-gicv2legacy-psci.dts
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/memreserve/ 0x80000000 0x00010000;
-
-/ {
-};
-
-/ {
-	model = "FVP Base";
-	compatible = "arm,vfp-base", "arm,vexpress";
-	interrupt-parent = <&gic>;
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	chosen { };
-
-	aliases {
-		serial0 = &v2m_serial0;
-		serial1 = &v2m_serial1;
-		serial2 = &v2m_serial2;
-		serial3 = &v2m_serial3;
-	};
-
-	psci {
-		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
-		method = "smc";
-		cpu_suspend = <0xc4000001>;
-		cpu_off = <0x84000002>;
-		cpu_on = <0xc4000003>;
-		sys_poweroff = <0x84000008>;
-		sys_reset = <0x84000009>;
-	};
-
-	cpus {
-		#address-cells = <2>;
-		#size-cells = <0>;
-
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <&CPU0>;
-				};
-				core1 {
-					cpu = <&CPU1>;
-				};
-				core2 {
-					cpu = <&CPU2>;
-				};
-				core3 {
-					cpu = <&CPU3>;
-				};
-			};
-
-			cluster1 {
-				core0 {
-					cpu = <&CPU4>;
-				};
-				core1 {
-					cpu = <&CPU5>;
-				};
-				core2 {
-					cpu = <&CPU6>;
-				};
-				core3 {
-					cpu = <&CPU7>;
-				};
-			};
-		};
-
-		idle-states {
-			entry-method = "arm,psci";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
-				entry-latency-us = <40>;
-				exit-latency-us = <100>;
-				min-residency-us = <150>;
-			};
-
-			CLUSTER_SLEEP_0: cluster-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
-				entry-latency-us = <500>;
-				exit-latency-us = <1000>;
-				min-residency-us = <2500>;
-			};
-		};
-
-		CPU0:cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x0>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU1:cpu@1 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x1>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU2:cpu@2 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x2>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU3:cpu@3 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x3>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU4:cpu@100 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x100>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU5:cpu@101 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x101>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU6:cpu@102 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x102>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU7:cpu@103 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x103>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-	};
-
-	memory@80000000 {
-		device_type = "memory";
-		reg = <0x00000000 0x80000000 0 0x7F000000>,
-		      <0x00000008 0x80000000 0 0x80000000>;
-	};
-
-	gic: interrupt-controller@2c001000 {
-		compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
-		#interrupt-cells = <3>;
-		#address-cells = <0>;
-		interrupt-controller;
-		reg = <0x0 0x2c001000 0 0x1000>,
-		      <0x0 0x2c002000 0 0x1000>,
-		      <0x0 0x2c004000 0 0x2000>,
-		      <0x0 0x2c006000 0 0x2000>;
-		interrupts = <1 9 0xf04>;
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
-		clock-frequency = <100000000>;
-	};
-
-	timer@2a810000 {
-			compatible = "arm,armv7-timer-mem";
-			reg = <0x0 0x2a810000 0x0 0x10000>;
-			clock-frequency = <100000000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges;
-			frame@2a830000 {
-				frame-number = <1>;
-				interrupts = <0 26 4>;
-				reg = <0x0 0x2a830000 0x0 0x10000>;
-			};
-	};
-
-	pmu {
-		compatible = "arm,armv8-pmuv3";
-		interrupts = <0 60 4>,
-			     <0 61 4>,
-			     <0 62 4>,
-			     <0 63 4>;
-	};
-
-	smb {
-		compatible = "simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-		ranges = <0 0 0 0x08000000 0x04000000>,
-			 <1 0 0 0x14000000 0x04000000>,
-			 <2 0 0 0x18000000 0x04000000>,
-			 <3 0 0 0x1c000000 0x04000000>,
-			 <4 0 0 0x0c000000 0x04000000>,
-			 <5 0 0 0x10000000 0x04000000>;
-
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0  0 4>,
-				<0 0  1 &gic 0  1 4>,
-				<0 0  2 &gic 0  2 4>,
-				<0 0  3 &gic 0  3 4>,
-				<0 0  4 &gic 0  4 4>,
-				<0 0  5 &gic 0  5 4>,
-				<0 0  6 &gic 0  6 4>,
-				<0 0  7 &gic 0  7 4>,
-				<0 0  8 &gic 0  8 4>,
-				<0 0  9 &gic 0  9 4>,
-				<0 0 10 &gic 0 10 4>,
-				<0 0 11 &gic 0 11 4>,
-				<0 0 12 &gic 0 12 4>,
-				<0 0 13 &gic 0 13 4>,
-				<0 0 14 &gic 0 14 4>,
-				<0 0 15 &gic 0 15 4>,
-				<0 0 16 &gic 0 16 4>,
-				<0 0 17 &gic 0 17 4>,
-				<0 0 18 &gic 0 18 4>,
-				<0 0 19 &gic 0 19 4>,
-				<0 0 20 &gic 0 20 4>,
-				<0 0 21 &gic 0 21 4>,
-				<0 0 22 &gic 0 22 4>,
-				<0 0 23 &gic 0 23 4>,
-				<0 0 24 &gic 0 24 4>,
-				<0 0 25 &gic 0 25 4>,
-				<0 0 26 &gic 0 26 4>,
-				<0 0 27 &gic 0 27 4>,
-				<0 0 28 &gic 0 28 4>,
-				<0 0 29 &gic 0 29 4>,
-				<0 0 30 &gic 0 30 4>,
-				<0 0 31 &gic 0 31 4>,
-				<0 0 32 &gic 0 32 4>,
-				<0 0 33 &gic 0 33 4>,
-				<0 0 34 &gic 0 34 4>,
-				<0 0 35 &gic 0 35 4>,
-				<0 0 36 &gic 0 36 4>,
-				<0 0 37 &gic 0 37 4>,
-				<0 0 38 &gic 0 38 4>,
-				<0 0 39 &gic 0 39 4>,
-				<0 0 40 &gic 0 40 4>,
-				<0 0 41 &gic 0 41 4>,
-				<0 0 42 &gic 0 42 4>;
-
-		/include/ "rtsm_ve-motherboard.dtsi"
-	};
-
-	panels {
-		panel@0 {
-			compatible	= "panel";
-			mode		= "XVGA";
-			refresh		= <60>;
-			xres		= <1024>;
-			yres		= <768>;
-			pixclock	= <15748>;
-			left_margin	= <152>;
-			right_margin	= <48>;
-			upper_margin	= <23>;
-			lower_margin	= <3>;
-			hsync_len	= <104>;
-			vsync_len	= <4>;
-			sync		= <0>;
-			vmode		= "FB_VMODE_NONINTERLACED";
-			tim2		= "TIM2_BCD", "TIM2_IPC";
-			cntl		= "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
-			caps		= "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
-			bpp		= <16>;
-		};
-	};
-};
diff --git a/fdts/fvp-base-gicv3-psci-1t.dtb b/fdts/fvp-base-gicv3-psci-1t.dtb
new file mode 100644
index 0000000..23d360f
--- /dev/null
+++ b/fdts/fvp-base-gicv3-psci-1t.dtb
Binary files differ
diff --git a/fdts/fvp-base-gicv3-psci-1t.dts b/fdts/fvp-base-gicv3-psci-1t.dts
new file mode 100644
index 0000000..36fbd44
--- /dev/null
+++ b/fdts/fvp-base-gicv3-psci-1t.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+
+/include/ "fvp-base-gicv3-psci-common.dtsi"
+
+&CPU0 {
+	reg = <0x0 0x0>;
+};
+
+&CPU1 {
+	reg = <0x0 0x100>;
+};
+
+&CPU2 {
+	reg = <0x0 0x200>;
+};
+
+&CPU3 {
+	reg = <0x0 0x300>;
+};
+
+&CPU4 {
+	reg = <0x0 0x10000>;
+};
+
+&CPU5 {
+	reg = <0x0 0x10100>;
+};
+
+&CPU6 {
+	reg = <0x0 0x10200>;
+};
+
+&CPU7 {
+	reg = <0x0 0x10300>;
+};
diff --git a/fdts/fvp-base-gicv3-psci-aarch32.dtb b/fdts/fvp-base-gicv3-psci-aarch32.dtb
new file mode 100644
index 0000000..474b188
--- /dev/null
+++ b/fdts/fvp-base-gicv3-psci-aarch32.dtb
Binary files differ
diff --git a/fdts/fvp-base-gicv3-psci-aarch32.dts b/fdts/fvp-base-gicv3-psci-aarch32.dts
new file mode 100644
index 0000000..dd884f5
--- /dev/null
+++ b/fdts/fvp-base-gicv3-psci-aarch32.dts
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x80000000 0x00010000;
+
+/ {
+};
+
+/ {
+	model = "FVP Base";
+	compatible = "arm,vfp-base", "arm,vexpress";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	aliases {
+		serial0 = &v2m_serial0;
+		serial1 = &v2m_serial1;
+		serial2 = &v2m_serial2;
+		serial3 = &v2m_serial3;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
+		method = "smc";
+		cpu_suspend = <0x84000001>;
+		cpu_off = <0x84000002>;
+		cpu_on = <0x84000003>;
+		sys_poweroff = <0x84000008>;
+		sys_reset = <0x84000009>;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+				core1 {
+					cpu = <&CPU5>;
+				};
+				core2 {
+					cpu = <&CPU6>;
+				};
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		idle-states {
+			entry-method = "arm,psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
+				entry-latency-us = <40>;
+				exit-latency-us = <100>;
+				min-residency-us = <150>;
+			};
+
+			CLUSTER_SLEEP_0: cluster-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
+				entry-latency-us = <500>;
+				exit-latency-us = <1000>;
+				min-residency-us = <2500>;
+			};
+		};
+
+		CPU0:cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU1:cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x1>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU2:cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x2>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU3:cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x3>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU4:cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x100>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU5:cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x101>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU6:cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x102>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU7:cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x103>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x80000000 0 0x7F000000>,
+		      <0x00000008 0x80000000 0 0x80000000>;
+	};
+
+	gic: interrupt-controller@2f000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		interrupt-controller;
+		reg = <0x0 0x2f000000 0 0x10000>,	// GICD
+		      <0x0 0x2f100000 0 0x200000>,	// GICR
+		      <0x0 0x2c000000 0 0x2000>,	// GICC
+		      <0x0 0x2c010000 0 0x2000>,	// GICH
+		      <0x0 0x2c02f000 0 0x2000>;	// GICV
+		interrupts = <1 9 4>;
+
+		its: its@2f020000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			reg = <0x0 0x2f020000 0x0 0x20000>; // GITS
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 13 0xff01>,
+			     <1 14 0xff01>,
+			     <1 11 0xff01>,
+			     <1 10 0xff01>;
+		clock-frequency = <100000000>;
+	};
+
+	timer@2a810000 {
+			compatible = "arm,armv7-timer-mem";
+			reg = <0x0 0x2a810000 0x0 0x10000>;
+			clock-frequency = <100000000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			frame@2a830000 {
+				frame-number = <1>;
+				interrupts = <0 26 4>;
+				reg = <0x0 0x2a830000 0x0 0x10000>;
+			};
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <0 60 4>,
+			     <0 61 4>,
+			     <0 62 4>,
+			     <0 63 4>;
+	};
+
+	smb {
+		compatible = "simple-bus";
+
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0x08000000 0x04000000>,
+			 <1 0 0 0x14000000 0x04000000>,
+			 <2 0 0 0x18000000 0x04000000>,
+			 <3 0 0 0x1c000000 0x04000000>,
+			 <4 0 0 0x0c000000 0x04000000>,
+			 <5 0 0 0x10000000 0x04000000>;
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 63>;
+		interrupt-map = <0 0  0 &gic 0 0 0  0 4>,
+				<0 0  1 &gic 0 0 0  1 4>,
+				<0 0  2 &gic 0 0 0  2 4>,
+				<0 0  3 &gic 0 0 0  3 4>,
+				<0 0  4 &gic 0 0 0  4 4>,
+				<0 0  5 &gic 0 0 0  5 4>,
+				<0 0  6 &gic 0 0 0  6 4>,
+				<0 0  7 &gic 0 0 0  7 4>,
+				<0 0  8 &gic 0 0 0  8 4>,
+				<0 0  9 &gic 0 0 0  9 4>,
+				<0 0 10 &gic 0 0 0 10 4>,
+				<0 0 11 &gic 0 0 0 11 4>,
+				<0 0 12 &gic 0 0 0 12 4>,
+				<0 0 13 &gic 0 0 0 13 4>,
+				<0 0 14 &gic 0 0 0 14 4>,
+				<0 0 15 &gic 0 0 0 15 4>,
+				<0 0 16 &gic 0 0 0 16 4>,
+				<0 0 17 &gic 0 0 0 17 4>,
+				<0 0 18 &gic 0 0 0 18 4>,
+				<0 0 19 &gic 0 0 0 19 4>,
+				<0 0 20 &gic 0 0 0 20 4>,
+				<0 0 21 &gic 0 0 0 21 4>,
+				<0 0 22 &gic 0 0 0 22 4>,
+				<0 0 23 &gic 0 0 0 23 4>,
+				<0 0 24 &gic 0 0 0 24 4>,
+				<0 0 25 &gic 0 0 0 25 4>,
+				<0 0 26 &gic 0 0 0 26 4>,
+				<0 0 27 &gic 0 0 0 27 4>,
+				<0 0 28 &gic 0 0 0 28 4>,
+				<0 0 29 &gic 0 0 0 29 4>,
+				<0 0 30 &gic 0 0 0 30 4>,
+				<0 0 31 &gic 0 0 0 31 4>,
+				<0 0 32 &gic 0 0 0 32 4>,
+				<0 0 33 &gic 0 0 0 33 4>,
+				<0 0 34 &gic 0 0 0 34 4>,
+				<0 0 35 &gic 0 0 0 35 4>,
+				<0 0 36 &gic 0 0 0 36 4>,
+				<0 0 37 &gic 0 0 0 37 4>,
+				<0 0 38 &gic 0 0 0 38 4>,
+				<0 0 39 &gic 0 0 0 39 4>,
+				<0 0 40 &gic 0 0 0 40 4>,
+				<0 0 41 &gic 0 0 0 41 4>,
+				<0 0 42 &gic 0 0 0 42 4>;
+
+		/include/ "rtsm_ve-motherboard.dtsi"
+	};
+
+	panels {
+		panel@0 {
+			compatible	= "panel";
+			mode		= "XVGA";
+			refresh		= <60>;
+			xres		= <1024>;
+			yres		= <768>;
+			pixclock	= <15748>;
+			left_margin	= <152>;
+			right_margin	= <48>;
+			upper_margin	= <23>;
+			lower_margin	= <3>;
+			hsync_len	= <104>;
+			vsync_len	= <4>;
+			sync		= <0>;
+			vmode		= "FB_VMODE_NONINTERLACED";
+			tim2		= "TIM2_BCD", "TIM2_IPC";
+			cntl		= "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+			caps		= "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+			bpp		= <16>;
+		};
+	};
+};
diff --git a/fdts/fvp-base-gicv3-psci-common.dtsi b/fdts/fvp-base-gicv3-psci-common.dtsi
new file mode 100644
index 0000000..2ef2df8
--- /dev/null
+++ b/fdts/fvp-base-gicv3-psci-common.dtsi
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/memreserve/ 0x80000000 0x00010000;
+
+/ {
+};
+
+/ {
+	model = "FVP Base";
+	compatible = "arm,vfp-base", "arm,vexpress";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	aliases {
+		serial0 = &v2m_serial0;
+		serial1 = &v2m_serial1;
+		serial2 = &v2m_serial2;
+		serial3 = &v2m_serial3;
+	};
+
+	psci {
+		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
+		method = "smc";
+		cpu_suspend = <0xc4000001>;
+		cpu_off = <0x84000002>;
+		cpu_on = <0xc4000003>;
+		sys_poweroff = <0x84000008>;
+		sys_reset = <0x84000009>;
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+				core1 {
+					cpu = <&CPU5>;
+				};
+				core2 {
+					cpu = <&CPU6>;
+				};
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		idle-states {
+			entry-method = "arm,psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
+				entry-latency-us = <40>;
+				exit-latency-us = <100>;
+				min-residency-us = <150>;
+			};
+
+			CLUSTER_SLEEP_0: cluster-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
+				entry-latency-us = <500>;
+				exit-latency-us = <1000>;
+				min-residency-us = <2500>;
+			};
+		};
+
+		CPU0:cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU1:cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU2:cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU3:cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU4:cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU5:cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x101>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU6:cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x102>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		CPU7:cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x103>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x80000000 0 0x7F000000>,
+		      <0x00000008 0x80000000 0 0x80000000>;
+	};
+
+	gic: interrupt-controller@2f000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		interrupt-controller;
+		reg = <0x0 0x2f000000 0 0x10000>,	// GICD
+		      <0x0 0x2f100000 0 0x200000>,	// GICR
+		      <0x0 0x2c000000 0 0x2000>,	// GICC
+		      <0x0 0x2c010000 0 0x2000>,	// GICH
+		      <0x0 0x2c02f000 0 0x2000>;	// GICV
+		interrupts = <1 9 4>;
+
+		its: its@2f020000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			reg = <0x0 0x2f020000 0x0 0x20000>; // GITS
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 13 0xff01>,
+			     <1 14 0xff01>,
+			     <1 11 0xff01>,
+			     <1 10 0xff01>;
+		clock-frequency = <100000000>;
+	};
+
+	timer@2a810000 {
+			compatible = "arm,armv7-timer-mem";
+			reg = <0x0 0x2a810000 0x0 0x10000>;
+			clock-frequency = <100000000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			frame@2a830000 {
+				frame-number = <1>;
+				interrupts = <0 26 4>;
+				reg = <0x0 0x2a830000 0x0 0x10000>;
+			};
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <0 60 4>,
+			     <0 61 4>,
+			     <0 62 4>,
+			     <0 63 4>;
+	};
+
+	smb {
+		compatible = "simple-bus";
+
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0x08000000 0x04000000>,
+			 <1 0 0 0x14000000 0x04000000>,
+			 <2 0 0 0x18000000 0x04000000>,
+			 <3 0 0 0x1c000000 0x04000000>,
+			 <4 0 0 0x0c000000 0x04000000>,
+			 <5 0 0 0x10000000 0x04000000>;
+
+		/include/ "rtsm_ve-motherboard.dtsi"
+	};
+
+	panels {
+		panel@0 {
+			compatible	= "panel";
+			mode		= "XVGA";
+			refresh		= <60>;
+			xres		= <1024>;
+			yres		= <768>;
+			pixclock	= <15748>;
+			left_margin	= <152>;
+			right_margin	= <48>;
+			upper_margin	= <23>;
+			lower_margin	= <3>;
+			hsync_len	= <104>;
+			vsync_len	= <4>;
+			sync		= <0>;
+			vmode		= "FB_VMODE_NONINTERLACED";
+			tim2		= "TIM2_BCD", "TIM2_IPC";
+			cntl		= "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+			caps		= "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+			bpp		= <16>;
+		};
+	};
+};
diff --git a/fdts/fvp-base-gicv3-psci.dtb b/fdts/fvp-base-gicv3-psci.dtb
index 27c3f93..a105ae6 100644
--- a/fdts/fvp-base-gicv3-psci.dtb
+++ b/fdts/fvp-base-gicv3-psci.dtb
Binary files differ
diff --git a/fdts/fvp-base-gicv3-psci.dts b/fdts/fvp-base-gicv3-psci.dts
index 32e577a..3ea429c 100644
--- a/fdts/fvp-base-gicv3-psci.dts
+++ b/fdts/fvp-base-gicv3-psci.dts
@@ -1,324 +1,9 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /dts-v1/;
 
-/memreserve/ 0x80000000 0x00010000;
-
-/ {
-};
-
-/ {
-	model = "FVP Base";
-	compatible = "arm,vfp-base", "arm,vexpress";
-	interrupt-parent = <&gic>;
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	chosen { };
-
-	aliases {
-		serial0 = &v2m_serial0;
-		serial1 = &v2m_serial1;
-		serial2 = &v2m_serial2;
-		serial3 = &v2m_serial3;
-	};
-
-	psci {
-		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
-		method = "smc";
-		cpu_suspend = <0xc4000001>;
-		cpu_off = <0x84000002>;
-		cpu_on = <0xc4000003>;
-	};
-
-	cpus {
-		#address-cells = <2>;
-		#size-cells = <0>;
-
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <&CPU0>;
-				};
-				core1 {
-					cpu = <&CPU1>;
-				};
-				core2 {
-					cpu = <&CPU2>;
-				};
-				core3 {
-					cpu = <&CPU3>;
-				};
-			};
-
-			cluster1 {
-				core0 {
-					cpu = <&CPU4>;
-				};
-				core1 {
-					cpu = <&CPU5>;
-				};
-				core2 {
-					cpu = <&CPU6>;
-				};
-				core3 {
-					cpu = <&CPU7>;
-				};
-			};
-		};
-
-		idle-states {
-			entry-method = "arm,psci";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
-				entry-latency-us = <40>;
-				exit-latency-us = <100>;
-				min-residency-us = <150>;
-			};
-
-			CLUSTER_SLEEP_0: cluster-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
-				entry-latency-us = <500>;
-				exit-latency-us = <1000>;
-				min-residency-us = <2500>;
-			};
-		};
-
-		CPU0:cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x0>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU1:cpu@1 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x1>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU2:cpu@2 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x2>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU3:cpu@3 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x3>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU4:cpu@100 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x100>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU5:cpu@101 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x101>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU6:cpu@102 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x102>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU7:cpu@103 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x103>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-	};
-
-	memory@80000000 {
-		device_type = "memory";
-		reg = <0x00000000 0x80000000 0 0x7F000000>,
-		      <0x00000008 0x80000000 0 0x80000000>;
-	};
-
-	gic: interrupt-controller@2f000000 {
-		compatible = "arm,gic-v3";
-		#interrupt-cells = <3>;
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges;
-		interrupt-controller;
-		reg = <0x0 0x2f000000 0 0x10000>,	// GICD
-		      <0x0 0x2f100000 0 0x200000>,	// GICR
-		      <0x0 0x2c000000 0 0x2000>,	// GICC
-		      <0x0 0x2c010000 0 0x2000>,	// GICH
-		      <0x0 0x2c02f000 0 0x2000>;	// GICV
-		interrupts = <1 9 4>;
-
-		its: its@2f020000 {
-			compatible = "arm,gic-v3-its";
-			msi-controller;
-			reg = <0x0 0x2f020000 0x0 0x20000>; // GITS
-		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
-		clock-frequency = <100000000>;
-	};
-
-	timer@2a810000 {
-			compatible = "arm,armv7-timer-mem";
-			reg = <0x0 0x2a810000 0x0 0x10000>;
-			clock-frequency = <100000000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges;
-			frame@2a830000 {
-				frame-number = <1>;
-				interrupts = <0 26 4>;
-				reg = <0x0 0x2a830000 0x0 0x10000>;
-			};
-	};
-
-	pmu {
-		compatible = "arm,armv8-pmuv3";
-		interrupts = <0 60 4>,
-			     <0 61 4>,
-			     <0 62 4>,
-			     <0 63 4>;
-	};
-
-	smb {
-		compatible = "simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-		ranges = <0 0 0 0x08000000 0x04000000>,
-			 <1 0 0 0x14000000 0x04000000>,
-			 <2 0 0 0x18000000 0x04000000>,
-			 <3 0 0 0x1c000000 0x04000000>,
-			 <4 0 0 0x0c000000 0x04000000>,
-			 <5 0 0 0x10000000 0x04000000>;
-
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0 0 0  0 4>,
-				<0 0  1 &gic 0 0 0  1 4>,
-				<0 0  2 &gic 0 0 0  2 4>,
-				<0 0  3 &gic 0 0 0  3 4>,
-				<0 0  4 &gic 0 0 0  4 4>,
-				<0 0  5 &gic 0 0 0  5 4>,
-				<0 0  6 &gic 0 0 0  6 4>,
-				<0 0  7 &gic 0 0 0  7 4>,
-				<0 0  8 &gic 0 0 0  8 4>,
-				<0 0  9 &gic 0 0 0  9 4>,
-				<0 0 10 &gic 0 0 0 10 4>,
-				<0 0 11 &gic 0 0 0 11 4>,
-				<0 0 12 &gic 0 0 0 12 4>,
-				<0 0 13 &gic 0 0 0 13 4>,
-				<0 0 14 &gic 0 0 0 14 4>,
-				<0 0 15 &gic 0 0 0 15 4>,
-				<0 0 16 &gic 0 0 0 16 4>,
-				<0 0 17 &gic 0 0 0 17 4>,
-				<0 0 18 &gic 0 0 0 18 4>,
-				<0 0 19 &gic 0 0 0 19 4>,
-				<0 0 20 &gic 0 0 0 20 4>,
-				<0 0 21 &gic 0 0 0 21 4>,
-				<0 0 22 &gic 0 0 0 22 4>,
-				<0 0 23 &gic 0 0 0 23 4>,
-				<0 0 24 &gic 0 0 0 24 4>,
-				<0 0 25 &gic 0 0 0 25 4>,
-				<0 0 26 &gic 0 0 0 26 4>,
-				<0 0 27 &gic 0 0 0 27 4>,
-				<0 0 28 &gic 0 0 0 28 4>,
-				<0 0 29 &gic 0 0 0 29 4>,
-				<0 0 30 &gic 0 0 0 30 4>,
-				<0 0 31 &gic 0 0 0 31 4>,
-				<0 0 32 &gic 0 0 0 32 4>,
-				<0 0 33 &gic 0 0 0 33 4>,
-				<0 0 34 &gic 0 0 0 34 4>,
-				<0 0 35 &gic 0 0 0 35 4>,
-				<0 0 36 &gic 0 0 0 36 4>,
-				<0 0 37 &gic 0 0 0 37 4>,
-				<0 0 38 &gic 0 0 0 38 4>,
-				<0 0 39 &gic 0 0 0 39 4>,
-				<0 0 40 &gic 0 0 0 40 4>,
-				<0 0 41 &gic 0 0 0 41 4>,
-				<0 0 42 &gic 0 0 0 42 4>;
-
-		/include/ "rtsm_ve-motherboard-no_psci.dtsi"
-	};
-
-	panels {
-		panel@0 {
-			compatible	= "panel";
-			mode		= "XVGA";
-			refresh		= <60>;
-			xres		= <1024>;
-			yres		= <768>;
-			pixclock	= <15748>;
-			left_margin	= <152>;
-			right_margin	= <48>;
-			upper_margin	= <23>;
-			lower_margin	= <3>;
-			hsync_len	= <104>;
-			vsync_len	= <4>;
-			sync		= <0>;
-			vmode		= "FB_VMODE_NONINTERLACED";
-			tim2		= "TIM2_BCD", "TIM2_IPC";
-			cntl		= "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
-			caps		= "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
-			bpp		= <16>;
-		};
-	};
-};
+/include/ "fvp-base-gicv3-psci-common.dtsi"
diff --git a/fdts/fvp-foundation-gicv2-psci.dtb b/fdts/fvp-foundation-gicv2-psci.dtb
index 5b92e5e..9d0cb92 100644
--- a/fdts/fvp-foundation-gicv2-psci.dtb
+++ b/fdts/fvp-foundation-gicv2-psci.dtb
Binary files differ
diff --git a/fdts/fvp-foundation-gicv2-psci.dts b/fdts/fvp-foundation-gicv2-psci.dts
index c04d535..03b61dd 100644
--- a/fdts/fvp-foundation-gicv2-psci.dts
+++ b/fdts/fvp-foundation-gicv2-psci.dts
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /dts-v1/;
@@ -87,7 +63,8 @@
 
 			CPU_SLEEP_0: cpu-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
 				entry-latency-us = <40>;
 				exit-latency-us = <100>;
 				min-residency-us = <150>;
@@ -95,7 +72,8 @@
 
 			CLUSTER_SLEEP_0: cluster-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
 				entry-latency-us = <500>;
 				exit-latency-us = <1000>;
 				min-residency-us = <2500>;
@@ -108,6 +86,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU1:cpu@1 {
@@ -116,6 +95,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU2:cpu@2 {
@@ -124,6 +104,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU3:cpu@3 {
@@ -132,6 +113,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
 
@@ -196,52 +182,6 @@
 			 <4 0 0 0x0c000000 0x04000000>,
 			 <5 0 0 0x10000000 0x04000000>;
 
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0  0 4>,
-				<0 0  1 &gic 0  1 4>,
-				<0 0  2 &gic 0  2 4>,
-				<0 0  3 &gic 0  3 4>,
-				<0 0  4 &gic 0  4 4>,
-				<0 0  5 &gic 0  5 4>,
-				<0 0  6 &gic 0  6 4>,
-				<0 0  7 &gic 0  7 4>,
-				<0 0  8 &gic 0  8 4>,
-				<0 0  9 &gic 0  9 4>,
-				<0 0 10 &gic 0 10 4>,
-				<0 0 11 &gic 0 11 4>,
-				<0 0 12 &gic 0 12 4>,
-				<0 0 13 &gic 0 13 4>,
-				<0 0 14 &gic 0 14 4>,
-				<0 0 15 &gic 0 15 4>,
-				<0 0 16 &gic 0 16 4>,
-				<0 0 17 &gic 0 17 4>,
-				<0 0 18 &gic 0 18 4>,
-				<0 0 19 &gic 0 19 4>,
-				<0 0 20 &gic 0 20 4>,
-				<0 0 21 &gic 0 21 4>,
-				<0 0 22 &gic 0 22 4>,
-				<0 0 23 &gic 0 23 4>,
-				<0 0 24 &gic 0 24 4>,
-				<0 0 25 &gic 0 25 4>,
-				<0 0 26 &gic 0 26 4>,
-				<0 0 27 &gic 0 27 4>,
-				<0 0 28 &gic 0 28 4>,
-				<0 0 29 &gic 0 29 4>,
-				<0 0 30 &gic 0 30 4>,
-				<0 0 31 &gic 0 31 4>,
-				<0 0 32 &gic 0 32 4>,
-				<0 0 33 &gic 0 33 4>,
-				<0 0 34 &gic 0 34 4>,
-				<0 0 35 &gic 0 35 4>,
-				<0 0 36 &gic 0 36 4>,
-				<0 0 37 &gic 0 37 4>,
-				<0 0 38 &gic 0 38 4>,
-				<0 0 39 &gic 0 39 4>,
-				<0 0 40 &gic 0 40 4>,
-				<0 0 41 &gic 0 41 4>,
-				<0 0 42 &gic 0 42 4>;
-
 		/include/ "fvp-foundation-motherboard.dtsi"
 	};
 };
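A note on the idle-state hunks above: the old entry-method-param property is replaced by local-timer-stop (the per-CPU timer stops in that state) plus arm,psci-suspend-param. The parameter values 0x0010000 and 0x1010000 follow the PSCI 0.2 power_state layout. As a hedged illustration only (the macro names below are mine, not part of this patch), a decoder for such a parameter might look like:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * PSCI 0.2 power_state layout assumed here:
     *   bits [15:0]  state ID
     *   bit  [16]    state type (0 = standby, 1 = powerdown)
     *   bits [25:24] power level (0 = core, 1 = cluster, ...)
     */
    #define PSCI_STATE_ID(p)    ((p) & 0xffffu)
    #define PSCI_STATE_TYPE(p)  (((p) >> 16) & 0x1u)
    #define PSCI_POWER_LVL(p)   (((p) >> 24) & 0x3u)

    static void decode_suspend_param(uint32_t p)
    {
        printf("0x%08x -> level %u, %s, state id 0x%x\n",
               (unsigned)p, (unsigned)PSCI_POWER_LVL(p),
               PSCI_STATE_TYPE(p) ? "powerdown" : "standby",
               (unsigned)PSCI_STATE_ID(p));
    }

    int main(void)
    {
        decode_suspend_param(0x0010000);  /* CPU_SLEEP_0: core powerdown */
        decode_suspend_param(0x1010000);  /* CLUSTER_SLEEP_0: cluster powerdown */
        return 0;
    }

So 0x0010000 reads as a core-level powerdown state and 0x1010000 as a cluster-level powerdown state, matching the CPU_SLEEP_0 and CLUSTER_SLEEP_0 nodes.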
diff --git a/fdts/fvp-foundation-gicv2legacy-psci.dtb b/fdts/fvp-foundation-gicv2legacy-psci.dtb
deleted file mode 100644
index 71f6ae2..0000000
--- a/fdts/fvp-foundation-gicv2legacy-psci.dtb
+++ /dev/null
Binary files differ
diff --git a/fdts/fvp-foundation-gicv2legacy-psci.dts b/fdts/fvp-foundation-gicv2legacy-psci.dts
deleted file mode 100644
index 8dba04c..0000000
--- a/fdts/fvp-foundation-gicv2legacy-psci.dts
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/memreserve/ 0x80000000 0x00010000;
-
-/ {
-};
-
-/ {
-	model = "FVP Foundation";
-	compatible = "arm,fvp-base", "arm,vexpress";
-	interrupt-parent = <&gic>;
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	chosen { };
-
-	aliases {
-		serial0 = &v2m_serial0;
-		serial1 = &v2m_serial1;
-		serial2 = &v2m_serial2;
-		serial3 = &v2m_serial3;
-	};
-
-	psci {
-		compatible = "arm,psci";
-		method = "smc";
-		cpu_suspend = <0xc4000001>;
-		cpu_off = <0x84000002>;
-		cpu_on = <0xc4000003>;
-		sys_poweroff = <0x84000008>;
-		sys_reset = <0x84000009>;
-	};
-
-	cpus {
-		#address-cells = <2>;
-		#size-cells = <0>;
-
-		cpu-map {
-			cluster0 {
-				core0 {
-					cpu = <&CPU0>;
-				};
-				core1 {
-					cpu = <&CPU1>;
-				};
-				core2 {
-					cpu = <&CPU2>;
-				};
-				core3 {
-					cpu = <&CPU3>;
-				};
-			};
-		};
-
-		idle-states {
-			entry-method = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
-				entry-latency-us = <40>;
-				exit-latency-us = <100>;
-				min-residency-us = <150>;
-			};
-
-			CLUSTER_SLEEP_0: cluster-sleep-0 {
-				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
-				entry-latency-us = <500>;
-				exit-latency-us = <1000>;
-				min-residency-us = <2500>;
-			};
-		};
-
-		CPU0:cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x0>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU1:cpu@1 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x1>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU2:cpu@2 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x2>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-
-		CPU3:cpu@3 {
-			device_type = "cpu";
-			compatible = "arm,armv8";
-			reg = <0x0 0x3>;
-			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
-		};
-	};
-
-	memory@80000000 {
-		device_type = "memory";
-		reg = <0x00000000 0x80000000 0 0x7F000000>,
-		      <0x00000008 0x80000000 0 0x80000000>;
-	};
-
-	gic: interrupt-controller@2c001000 {
-		compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
-		#interrupt-cells = <3>;
-		#address-cells = <0>;
-		interrupt-controller;
-		reg = <0x0 0x2c001000 0 0x1000>,
-		      <0x0 0x2c002000 0 0x1000>,
-		      <0x0 0x2c004000 0 0x2000>,
-		      <0x0 0x2c006000 0 0x2000>;
-		interrupts = <1 9 0xf04>;
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
-		clock-frequency = <100000000>;
-	};
-
-	timer@2a810000 {
-			compatible = "arm,armv7-timer-mem";
-			reg = <0x0 0x2a810000 0x0 0x10000>;
-			clock-frequency = <100000000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges;
-			frame@2a830000 {
-				frame-number = <1>;
-				interrupts = <0 26 4>;
-				reg = <0x0 0x2a830000 0x0 0x10000>;
-			};
-	};
-
-	pmu {
-		compatible = "arm,armv8-pmuv3";
-		interrupts = <0 60 4>,
-			     <0 61 4>,
-			     <0 62 4>,
-			     <0 63 4>;
-	};
-
-	smb {
-		compatible = "simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-		ranges = <0 0 0 0x08000000 0x04000000>,
-			 <1 0 0 0x14000000 0x04000000>,
-			 <2 0 0 0x18000000 0x04000000>,
-			 <3 0 0 0x1c000000 0x04000000>,
-			 <4 0 0 0x0c000000 0x04000000>,
-			 <5 0 0 0x10000000 0x04000000>;
-
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0  0 4>,
-				<0 0  1 &gic 0  1 4>,
-				<0 0  2 &gic 0  2 4>,
-				<0 0  3 &gic 0  3 4>,
-				<0 0  4 &gic 0  4 4>,
-				<0 0  5 &gic 0  5 4>,
-				<0 0  6 &gic 0  6 4>,
-				<0 0  7 &gic 0  7 4>,
-				<0 0  8 &gic 0  8 4>,
-				<0 0  9 &gic 0  9 4>,
-				<0 0 10 &gic 0 10 4>,
-				<0 0 11 &gic 0 11 4>,
-				<0 0 12 &gic 0 12 4>,
-				<0 0 13 &gic 0 13 4>,
-				<0 0 14 &gic 0 14 4>,
-				<0 0 15 &gic 0 15 4>,
-				<0 0 16 &gic 0 16 4>,
-				<0 0 17 &gic 0 17 4>,
-				<0 0 18 &gic 0 18 4>,
-				<0 0 19 &gic 0 19 4>,
-				<0 0 20 &gic 0 20 4>,
-				<0 0 21 &gic 0 21 4>,
-				<0 0 22 &gic 0 22 4>,
-				<0 0 23 &gic 0 23 4>,
-				<0 0 24 &gic 0 24 4>,
-				<0 0 25 &gic 0 25 4>,
-				<0 0 26 &gic 0 26 4>,
-				<0 0 27 &gic 0 27 4>,
-				<0 0 28 &gic 0 28 4>,
-				<0 0 29 &gic 0 29 4>,
-				<0 0 30 &gic 0 30 4>,
-				<0 0 31 &gic 0 31 4>,
-				<0 0 32 &gic 0 32 4>,
-				<0 0 33 &gic 0 33 4>,
-				<0 0 34 &gic 0 34 4>,
-				<0 0 35 &gic 0 35 4>,
-				<0 0 36 &gic 0 36 4>,
-				<0 0 37 &gic 0 37 4>,
-				<0 0 38 &gic 0 38 4>,
-				<0 0 39 &gic 0 39 4>,
-				<0 0 40 &gic 0 40 4>,
-				<0 0 41 &gic 0 41 4>,
-				<0 0 42 &gic 0 42 4>;
-
-		/include/ "fvp-foundation-motherboard.dtsi"
-	};
-};
diff --git a/fdts/fvp-foundation-gicv3-psci.dtb b/fdts/fvp-foundation-gicv3-psci.dtb
index d7d9e14..a3164ab 100644
--- a/fdts/fvp-foundation-gicv3-psci.dtb
+++ b/fdts/fvp-foundation-gicv3-psci.dtb
Binary files differ
diff --git a/fdts/fvp-foundation-gicv3-psci.dts b/fdts/fvp-foundation-gicv3-psci.dts
index 48a1afc..1488ed7 100644
--- a/fdts/fvp-foundation-gicv3-psci.dts
+++ b/fdts/fvp-foundation-gicv3-psci.dts
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /dts-v1/;
@@ -57,6 +33,8 @@
 		cpu_suspend = <0xc4000001>;
 		cpu_off = <0x84000002>;
 		cpu_on = <0xc4000003>;
+		sys_poweroff = <0x84000008>;
+		sys_reset = <0x84000009>;
 	};
 
 	cpus {
@@ -85,7 +63,8 @@
 
 			CPU_SLEEP_0: cpu-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x0010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
 				entry-latency-us = <40>;
 				exit-latency-us = <100>;
 				min-residency-us = <150>;
@@ -93,7 +72,8 @@
 
 			CLUSTER_SLEEP_0: cluster-sleep-0 {
 				compatible = "arm,idle-state";
-				entry-method-param = <0x1010000>;
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
 				entry-latency-us = <500>;
 				exit-latency-us = <1000>;
 				min-residency-us = <2500>;
@@ -106,6 +86,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU1:cpu@1 {
@@ -114,6 +95,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU2:cpu@2 {
@@ -122,6 +104,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
 		};
 
 		CPU3:cpu@3 {
@@ -130,6 +113,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "psci";
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
 
@@ -203,52 +191,6 @@
 			 <4 0 0 0x0c000000 0x04000000>,
 			 <5 0 0 0x10000000 0x04000000>;
 
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 63>;
-		interrupt-map = <0 0  0 &gic 0 0 0  0 4>,
-				<0 0  1 &gic 0 0 0  1 4>,
-				<0 0  2 &gic 0 0 0  2 4>,
-				<0 0  3 &gic 0 0 0  3 4>,
-				<0 0  4 &gic 0 0 0  4 4>,
-				<0 0  5 &gic 0 0 0  5 4>,
-				<0 0  6 &gic 0 0 0  6 4>,
-				<0 0  7 &gic 0 0 0  7 4>,
-				<0 0  8 &gic 0 0 0  8 4>,
-				<0 0  9 &gic 0 0 0  9 4>,
-				<0 0 10 &gic 0 0 0 10 4>,
-				<0 0 11 &gic 0 0 0 11 4>,
-				<0 0 12 &gic 0 0 0 12 4>,
-				<0 0 13 &gic 0 0 0 13 4>,
-				<0 0 14 &gic 0 0 0 14 4>,
-				<0 0 15 &gic 0 0 0 15 4>,
-				<0 0 16 &gic 0 0 0 16 4>,
-				<0 0 17 &gic 0 0 0 17 4>,
-				<0 0 18 &gic 0 0 0 18 4>,
-				<0 0 19 &gic 0 0 0 19 4>,
-				<0 0 20 &gic 0 0 0 20 4>,
-				<0 0 21 &gic 0 0 0 21 4>,
-				<0 0 22 &gic 0 0 0 22 4>,
-				<0 0 23 &gic 0 0 0 23 4>,
-				<0 0 24 &gic 0 0 0 24 4>,
-				<0 0 25 &gic 0 0 0 25 4>,
-				<0 0 26 &gic 0 0 0 26 4>,
-				<0 0 27 &gic 0 0 0 27 4>,
-				<0 0 28 &gic 0 0 0 28 4>,
-				<0 0 29 &gic 0 0 0 29 4>,
-				<0 0 30 &gic 0 0 0 30 4>,
-				<0 0 31 &gic 0 0 0 31 4>,
-				<0 0 32 &gic 0 0 0 32 4>,
-				<0 0 33 &gic 0 0 0 33 4>,
-				<0 0 34 &gic 0 0 0 34 4>,
-				<0 0 35 &gic 0 0 0 35 4>,
-				<0 0 36 &gic 0 0 0 36 4>,
-				<0 0 37 &gic 0 0 0 37 4>,
-				<0 0 38 &gic 0 0 0 38 4>,
-				<0 0 39 &gic 0 0 0 39 4>,
-				<0 0 40 &gic 0 0 0 40 4>,
-				<0 0 41 &gic 0 0 0 41 4>,
-				<0 0 42 &gic 0 0 0 42 4>;
-
-		/include/ "fvp-foundation-motherboard-no_psci.dtsi"
+		/include/ "fvp-foundation-motherboard.dtsi"
 	};
 };
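A note on the psci node change above: the node now also advertises sys_poweroff and sys_reset. All of the function IDs used in these psci nodes are the standard PSCI 0.2 ones; the sketch below lists them and shows a simplified AArch64 SMC wrapper corresponding to method = "smc". The wrapper is illustrative only and omits the full SMCCC register conventions.

    #include <stdint.h>

    /* Standard PSCI 0.2 function IDs matching the psci node properties above
     * (0x84xxxxxx = SMC32 calls, 0xC4xxxxxx = SMC64 variants). */
    #define PSCI_CPU_SUSPEND_AARCH64  0xc4000001u  /* cpu_suspend  */
    #define PSCI_CPU_OFF              0x84000002u  /* cpu_off      */
    #define PSCI_CPU_ON_AARCH64       0xc4000003u  /* cpu_on       */
    #define PSCI_SYSTEM_OFF           0x84000008u  /* sys_poweroff */
    #define PSCI_SYSTEM_RESET         0x84000009u  /* sys_reset    */

    /* Simplified SMC call for method = "smc" (AArch64, GCC/Clang inline asm);
     * a real implementation must honour the full SMCCC clobber rules. */
    static inline uint64_t psci_smc(uint64_t fid, uint64_t a1,
                                    uint64_t a2, uint64_t a3)
    {
        register uint64_t x0 __asm__("x0") = fid;
        register uint64_t x1 __asm__("x1") = a1;
        register uint64_t x2 __asm__("x2") = a2;
        register uint64_t x3 __asm__("x3") = a3;

        __asm__ volatile("smc #0"
                         : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
                         :
                         : "memory");
        return x0;   /* PSCI return code */
    }

    /* e.g. psci_smc(PSCI_CPU_ON_AARCH64, target_mpidr, entrypoint, context_id); */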
diff --git a/fdts/fvp-foundation-motherboard-no_psci.dtsi b/fdts/fvp-foundation-motherboard-no_psci.dtsi
deleted file mode 100644
index fd41c8a..0000000
--- a/fdts/fvp-foundation-motherboard-no_psci.dtsi
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-	motherboard {
-		arm,v2m-memory-map = "rs1";
-		compatible = "arm,vexpress,v2m-p1", "simple-bus";
-		#address-cells = <2>; /* SMB chipselect number and offset */
-		#size-cells = <1>;
-		#interrupt-cells = <1>;
-		ranges;
-
-		ethernet@2,02000000 {
-			compatible = "smsc,lan91c111";
-			reg = <2 0x02000000 0x10000>;
-			interrupts = <15>;
-		};
-
-		v2m_clk24mhz: clk24mhz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <24000000>;
-			clock-output-names = "v2m:clk24mhz";
-		};
-
-		v2m_refclk1mhz: refclk1mhz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <1000000>;
-			clock-output-names = "v2m:refclk1mhz";
-		};
-
-		v2m_refclk32khz: refclk32khz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <32768>;
-			clock-output-names = "v2m:refclk32khz";
-		};
-
-		iofpga@3,00000000 {
-			compatible = "arm,amba-bus", "simple-bus";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 3 0 0x200000>;
-
-			v2m_sysreg: sysreg@010000 {
-				compatible = "arm,vexpress-sysreg";
-				reg = <0x010000 0x1000>;
-				gpio-controller;
-				#gpio-cells = <2>;
-			};
-
-			v2m_sysctl: sysctl@020000 {
-				compatible = "arm,sp810", "arm,primecell";
-				reg = <0x020000 0x1000>;
-				clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&v2m_clk24mhz>;
-				clock-names = "refclk", "timclk", "apb_pclk";
-				#clock-cells = <1>;
-				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
-			};
-
-			v2m_serial0: uart@090000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x090000 0x1000>;
-				interrupts = <5>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial1: uart@0a0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0a0000 0x1000>;
-				interrupts = <6>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial2: uart@0b0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0b0000 0x1000>;
-				interrupts = <7>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial3: uart@0c0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0c0000 0x1000>;
-				interrupts = <8>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			wdt@0f0000 {
-				compatible = "arm,sp805", "arm,primecell";
-				reg = <0x0f0000 0x1000>;
-				interrupts = <0>;
-				clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
-				clock-names = "wdogclk", "apb_pclk";
-			};
-
-			v2m_timer01: timer@110000 {
-				compatible = "arm,sp804", "arm,primecell";
-				reg = <0x110000 0x1000>;
-				interrupts = <2>;
-				clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
-				clock-names = "timclken1", "timclken2", "apb_pclk";
-			};
-
-			v2m_timer23: timer@120000 {
-				compatible = "arm,sp804", "arm,primecell";
-				reg = <0x120000 0x1000>;
-				interrupts = <3>;
-				clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
-				clock-names = "timclken1", "timclken2", "apb_pclk";
-			};
-
-			rtc@170000 {
-				compatible = "arm,pl031", "arm,primecell";
-				reg = <0x170000 0x1000>;
-				interrupts = <4>;
-				clocks = <&v2m_clk24mhz>;
-				clock-names = "apb_pclk";
-			};
-
-			virtio_block@0130000 {
-				compatible = "virtio,mmio";
-				reg = <0x130000 0x1000>;
-				interrupts = <0x2a>;
-			};
-		};
-
-		v2m_fixed_3v3: fixedregulator@0 {
-			compatible = "regulator-fixed";
-			regulator-name = "3V3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-		};
-
-
-		mcc {
-			compatible = "arm,vexpress,config-bus", "simple-bus";
-			arm,vexpress,config-bridge = <&v2m_sysreg>;
-
-			reset@0 {
-				compatible = "arm,vexpress-reset";
-				arm,vexpress-sysreg,func = <5 0>;
-			};
-
-			muxfpga@0 {
-				compatible = "arm,vexpress-muxfpga";
-				arm,vexpress-sysreg,func = <7 0>;
-			};
-
-			shutdown@0 {
-				compatible = "arm,vexpress-shutdown";
-				arm,vexpress-sysreg,func = <8 0>;
-			};
-
-			reboot@0 {
-				compatible = "arm,vexpress-reboot";
-				arm,vexpress-sysreg,func = <9 0>;
-			};
-
-			dvimode@0 {
-				compatible = "arm,vexpress-dvimode";
-				arm,vexpress-sysreg,func = <11 0>;
-			};
-		};
-	};
diff --git a/fdts/fvp-foundation-motherboard.dtsi b/fdts/fvp-foundation-motherboard.dtsi
index 9d29e48..ae7237b 100644
--- a/fdts/fvp-foundation-motherboard.dtsi
+++ b/fdts/fvp-foundation-motherboard.dtsi
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 	motherboard {
@@ -33,13 +9,12 @@
 		compatible = "arm,vexpress,v2m-p1", "simple-bus";
 		#address-cells = <2>; /* SMB chipselect number and offset */
 		#size-cells = <1>;
-		#interrupt-cells = <1>;
 		ranges;
 
 		ethernet@2,02000000 {
 			compatible = "smsc,lan91c111";
 			reg = <2 0x02000000 0x10000>;
-			interrupts = <15>;
+			interrupts = <0 15 4>;
 		};
 
 		v2m_clk24mhz: clk24mhz {
@@ -88,7 +63,7 @@
 			v2m_serial0: uart@090000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x090000 0x1000>;
-				interrupts = <5>;
+				interrupts = <0 5 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -96,7 +71,7 @@
 			v2m_serial1: uart@0a0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0a0000 0x1000>;
-				interrupts = <6>;
+				interrupts = <0 6 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -104,7 +79,7 @@
 			v2m_serial2: uart@0b0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0b0000 0x1000>;
-				interrupts = <7>;
+				interrupts = <0 7 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -112,7 +87,7 @@
 			v2m_serial3: uart@0c0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0c0000 0x1000>;
-				interrupts = <8>;
+				interrupts = <0 8 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -120,7 +95,7 @@
 			wdt@0f0000 {
 				compatible = "arm,sp805", "arm,primecell";
 				reg = <0x0f0000 0x1000>;
-				interrupts = <0>;
+				interrupts = <0 0 4>;
 				clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
 				clock-names = "wdogclk", "apb_pclk";
 			};
@@ -128,7 +103,7 @@
 			v2m_timer01: timer@110000 {
 				compatible = "arm,sp804", "arm,primecell";
 				reg = <0x110000 0x1000>;
-				interrupts = <2>;
+				interrupts = <0 2 4>;
 				clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
 				clock-names = "timclken1", "timclken2", "apb_pclk";
 			};
@@ -136,7 +111,7 @@
 			v2m_timer23: timer@120000 {
 				compatible = "arm,sp804", "arm,primecell";
 				reg = <0x120000 0x1000>;
-				interrupts = <3>;
+				interrupts = <0 3 4>;
 				clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
 				clock-names = "timclken1", "timclken2", "apb_pclk";
 			};
@@ -144,7 +119,7 @@
 			rtc@170000 {
 				compatible = "arm,pl031", "arm,primecell";
 				reg = <0x170000 0x1000>;
-				interrupts = <4>;
+				interrupts = <0 4 4>;
 				clocks = <&v2m_clk24mhz>;
 				clock-names = "apb_pclk";
 			};
@@ -152,7 +127,7 @@
 			virtio_block@0130000 {
 				compatible = "virtio,mmio";
 				reg = <0x130000 0x1000>;
-				interrupts = <0x2a>;
+				interrupts = <0 0x2a 4>;
 			};
 		};
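A note on the interrupt changes in this file: the peripheral interrupts switch from a single cell (e.g. interrupts = <15>) to the three-cell GIC form <0 15 4>, because the bus-level interrupt-map and #interrupt-cells = <1> are removed and the nodes now resolve their specifiers directly against the GIC. For reference, the three cells are interrupt type, interrupt number, and trigger flags; the constants below mirror the usual dt-binding values and are listed here purely for illustration.

    /* Three-cell GIC interrupt specifier: <type number flags>. */
    #define GIC_SPI               0   /* shared peripheral interrupt  */
    #define GIC_PPI               1   /* private peripheral interrupt */

    #define IRQ_TYPE_EDGE_RISING  1
    #define IRQ_TYPE_EDGE_FALLING 2
    #define IRQ_TYPE_LEVEL_HIGH   4
    #define IRQ_TYPE_LEVEL_LOW    8

    /* So "interrupts = <0 15 4>" reads as: SPI 15, level-triggered, active-high. */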
 
diff --git a/fdts/rtsm_ve-motherboard-no_psci.dtsi b/fdts/rtsm_ve-motherboard-no_psci.dtsi
deleted file mode 100644
index 7ba575e..0000000
--- a/fdts/rtsm_ve-motherboard-no_psci.dtsi
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-	motherboard {
-		arm,v2m-memory-map = "rs1";
-		compatible = "arm,vexpress,v2m-p1", "simple-bus";
-		#address-cells = <2>; /* SMB chipselect number and offset */
-		#size-cells = <1>;
-		#interrupt-cells = <1>;
-		ranges;
-
-		flash@0,00000000 {
-			compatible = "arm,vexpress-flash", "cfi-flash";
-			reg = <0 0x00000000 0x04000000>,
-			      <4 0x00000000 0x04000000>;
-			bank-width = <4>;
-		};
-
-		vram@2,00000000 {
-			compatible = "arm,vexpress-vram";
-			reg = <2 0x00000000 0x00800000>;
-		};
-
-		ethernet@2,02000000 {
-			compatible = "smsc,lan91c111";
-			reg = <2 0x02000000 0x10000>;
-			interrupts = <15>;
-		};
-
-		v2m_clk24mhz: clk24mhz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <24000000>;
-			clock-output-names = "v2m:clk24mhz";
-		};
-
-		v2m_refclk1mhz: refclk1mhz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <1000000>;
-			clock-output-names = "v2m:refclk1mhz";
-		};
-
-		v2m_refclk32khz: refclk32khz {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <32768>;
-			clock-output-names = "v2m:refclk32khz";
-		};
-
-		iofpga@3,00000000 {
-			compatible = "arm,amba-bus", "simple-bus";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 3 0 0x200000>;
-
-			v2m_sysreg: sysreg@010000 {
-				compatible = "arm,vexpress-sysreg";
-				reg = <0x010000 0x1000>;
-				gpio-controller;
-				#gpio-cells = <2>;
-			};
-
-			v2m_sysctl: sysctl@020000 {
-				compatible = "arm,sp810", "arm,primecell";
-				reg = <0x020000 0x1000>;
-				clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&v2m_clk24mhz>;
-				clock-names = "refclk", "timclk", "apb_pclk";
-				#clock-cells = <1>;
-				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
-			};
-
-			aaci@040000 {
-				compatible = "arm,pl041", "arm,primecell";
-				reg = <0x040000 0x1000>;
-				interrupts = <11>;
-				clocks = <&v2m_clk24mhz>;
-				clock-names = "apb_pclk";
-			};
-
-			mmci@050000 {
-				compatible = "arm,pl180", "arm,primecell";
-				reg = <0x050000 0x1000>;
-				interrupts = <9 10>;
-				cd-gpios = <&v2m_sysreg 0 0>;
-				wp-gpios = <&v2m_sysreg 1 0>;
-				max-frequency = <12000000>;
-				vmmc-supply = <&v2m_fixed_3v3>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "mclk", "apb_pclk";
-			};
-
-			kmi@060000 {
-				compatible = "arm,pl050", "arm,primecell";
-				reg = <0x060000 0x1000>;
-				interrupts = <12>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "KMIREFCLK", "apb_pclk";
-			};
-
-			kmi@070000 {
-				compatible = "arm,pl050", "arm,primecell";
-				reg = <0x070000 0x1000>;
-				interrupts = <13>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "KMIREFCLK", "apb_pclk";
-			};
-
-			v2m_serial0: uart@090000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x090000 0x1000>;
-				interrupts = <5>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial1: uart@0a0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0a0000 0x1000>;
-				interrupts = <6>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial2: uart@0b0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0b0000 0x1000>;
-				interrupts = <7>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			v2m_serial3: uart@0c0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0c0000 0x1000>;
-				interrupts = <8>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
-			wdt@0f0000 {
-				compatible = "arm,sp805", "arm,primecell";
-				reg = <0x0f0000 0x1000>;
-				interrupts = <0>;
-				clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
-				clock-names = "wdogclk", "apb_pclk";
-			};
-
-			v2m_timer01: timer@110000 {
-				compatible = "arm,sp804", "arm,primecell";
-				reg = <0x110000 0x1000>;
-				interrupts = <2>;
-				clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
-				clock-names = "timclken1", "timclken2", "apb_pclk";
-			};
-
-			v2m_timer23: timer@120000 {
-				compatible = "arm,sp804", "arm,primecell";
-				reg = <0x120000 0x1000>;
-				interrupts = <3>;
-				clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
-				clock-names = "timclken1", "timclken2", "apb_pclk";
-			};
-
-			rtc@170000 {
-				compatible = "arm,pl031", "arm,primecell";
-				reg = <0x170000 0x1000>;
-				interrupts = <4>;
-				clocks = <&v2m_clk24mhz>;
-				clock-names = "apb_pclk";
-			};
-
-			clcd@1f0000 {
-				compatible = "arm,pl111", "arm,primecell";
-				reg = <0x1f0000 0x1000>;
-				interrupts = <14>;
-				clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
-				clock-names = "clcdclk", "apb_pclk";
-				mode = "XVGA";
-				use_dma = <0>;
-				framebuffer = <0x18000000 0x00180000>;
-			};
-
-			virtio_block@0130000 {
-				compatible = "virtio,mmio";
-				reg = <0x130000 0x1000>;
-				interrupts = <0x2a>;
-			};
-		};
-
-		v2m_fixed_3v3: fixedregulator@0 {
-			compatible = "regulator-fixed";
-			regulator-name = "3V3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-		};
-
-		mcc {
-			compatible = "arm,vexpress,config-bus", "simple-bus";
-			arm,vexpress,config-bridge = <&v2m_sysreg>;
-
-			v2m_oscclk1: osc@1 {
-				/* CLCD clock */
-				compatible = "arm,vexpress-osc";
-				arm,vexpress-sysreg,func = <1 1>;
-				freq-range = <23750000 63500000>;
-				#clock-cells = <0>;
-				clock-output-names = "v2m:oscclk1";
-			};
-
-			reset@0 {
-				compatible = "arm,vexpress-reset";
-				arm,vexpress-sysreg,func = <5 0>;
-			};
-
-			muxfpga@0 {
-				compatible = "arm,vexpress-muxfpga";
-				arm,vexpress-sysreg,func = <7 0>;
-			};
-
-			shutdown@0 {
-				compatible = "arm,vexpress-shutdown";
-				arm,vexpress-sysreg,func = <8 0>;
-			};
-
-			reboot@0 {
-				compatible = "arm,vexpress-reboot";
-				arm,vexpress-sysreg,func = <9 0>;
-			};
-
-			dvimode@0 {
-				compatible = "arm,vexpress-dvimode";
-				arm,vexpress-sysreg,func = <11 0>;
-			};
-		};
-	};
diff --git a/fdts/rtsm_ve-motherboard.dtsi b/fdts/rtsm_ve-motherboard.dtsi
index 6aa40ff..8baa829 100644
--- a/fdts/rtsm_ve-motherboard.dtsi
+++ b/fdts/rtsm_ve-motherboard.dtsi
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 	motherboard {
@@ -33,7 +9,6 @@
 		compatible = "arm,vexpress,v2m-p1", "simple-bus";
 		#address-cells = <2>; /* SMB chipselect number and offset */
 		#size-cells = <1>;
-		#interrupt-cells = <1>;
 		ranges;
 
 		flash@0,00000000 {
@@ -51,7 +26,7 @@
 		ethernet@2,02000000 {
 			compatible = "smsc,lan91c111";
 			reg = <2 0x02000000 0x10000>;
-			interrupts = <15>;
+			interrupts = <0 15 4>;
 		};
 
 		v2m_clk24mhz: clk24mhz {
@@ -100,7 +75,7 @@
 			aaci@040000 {
 				compatible = "arm,pl041", "arm,primecell";
 				reg = <0x040000 0x1000>;
-				interrupts = <11>;
+				interrupts = <0 11 4>;
 				clocks = <&v2m_clk24mhz>;
 				clock-names = "apb_pclk";
 			};
@@ -108,7 +83,7 @@
 			mmci@050000 {
 				compatible = "arm,pl180", "arm,primecell";
 				reg = <0x050000 0x1000>;
-				interrupts = <9 10>;
+				interrupts = <0 9 4 0 10 4>;
 				cd-gpios = <&v2m_sysreg 0 0>;
 				wp-gpios = <&v2m_sysreg 1 0>;
 				max-frequency = <12000000>;
@@ -120,7 +95,7 @@
 			kmi@060000 {
 				compatible = "arm,pl050", "arm,primecell";
 				reg = <0x060000 0x1000>;
-				interrupts = <12>;
+				interrupts = <0 12 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "KMIREFCLK", "apb_pclk";
 			};
@@ -128,7 +103,7 @@
 			kmi@070000 {
 				compatible = "arm,pl050", "arm,primecell";
 				reg = <0x070000 0x1000>;
-				interrupts = <13>;
+				interrupts = <0 13 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "KMIREFCLK", "apb_pclk";
 			};
@@ -136,7 +111,7 @@
 			v2m_serial0: uart@090000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x090000 0x1000>;
-				interrupts = <5>;
+				interrupts = <0 5 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -144,7 +119,7 @@
 			v2m_serial1: uart@0a0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0a0000 0x1000>;
-				interrupts = <6>;
+				interrupts = <0 6 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -152,7 +127,7 @@
 			v2m_serial2: uart@0b0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0b0000 0x1000>;
-				interrupts = <7>;
+				interrupts = <0 7 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -160,7 +135,7 @@
 			v2m_serial3: uart@0c0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0c0000 0x1000>;
-				interrupts = <8>;
+				interrupts = <0 8 4>;
 				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
 				clock-names = "uartclk", "apb_pclk";
 			};
@@ -168,7 +143,7 @@
 			wdt@0f0000 {
 				compatible = "arm,sp805", "arm,primecell";
 				reg = <0x0f0000 0x1000>;
-				interrupts = <0>;
+				interrupts = <0 0 4>;
 				clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
 				clock-names = "wdogclk", "apb_pclk";
 			};
@@ -176,7 +151,7 @@
 			v2m_timer01: timer@110000 {
 				compatible = "arm,sp804", "arm,primecell";
 				reg = <0x110000 0x1000>;
-				interrupts = <2>;
+				interrupts = <0 2 4>;
 				clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
 				clock-names = "timclken1", "timclken2", "apb_pclk";
 			};
@@ -184,7 +159,7 @@
 			v2m_timer23: timer@120000 {
 				compatible = "arm,sp804", "arm,primecell";
 				reg = <0x120000 0x1000>;
-				interrupts = <3>;
+				interrupts = <0 3 4>;
 				clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
 				clock-names = "timclken1", "timclken2", "apb_pclk";
 			};
@@ -192,7 +167,7 @@
 			rtc@170000 {
 				compatible = "arm,pl031", "arm,primecell";
 				reg = <0x170000 0x1000>;
-				interrupts = <4>;
+				interrupts = <0 4 4>;
 				clocks = <&v2m_clk24mhz>;
 				clock-names = "apb_pclk";
 			};
@@ -200,7 +175,7 @@
 			clcd@1f0000 {
 				compatible = "arm,pl111", "arm,primecell";
 				reg = <0x1f0000 0x1000>;
-				interrupts = <14>;
+				interrupts = <0 14 4>;
 				clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
 				clock-names = "clcdclk", "apb_pclk";
 				mode = "XVGA";
@@ -211,7 +186,7 @@
 			virtio_block@0130000 {
 				compatible = "virtio,mmio";
 				reg = <0x130000 0x1000>;
-				interrupts = <0x2a>;
+				interrupts = <0 0x2a 4>;
 			};
 		};
 
diff --git a/include/bl1/bl1.h b/include/bl1/bl1.h
new file mode 100644
index 0000000..1544523
--- /dev/null
+++ b/include/bl1/bl1.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __BL1_FWU_H__
+#define __BL1_FWU_H__
+
+#include <bl_common.h>
+
+/*
+ * Defines for BL1 SMC function ids.
+ */
+#define BL1_SMC_CALL_COUNT		0x0
+#define BL1_SMC_UID			0x1
+/* SMC #0x2 reserved */
+#define BL1_SMC_VERSION			0x3
+
+/*
+ * Corresponds to the function ID of the SMC that the BL1 exception
+ * handler services in order to execute BL31.
+ */
+#define BL1_SMC_RUN_IMAGE		0x4
+
+/*
+ * BL1 SMC version
+ */
+#define BL1_SMC_MAJOR_VER		0x0
+#define BL1_SMC_MINOR_VER		0x1
+
+/*
+ * Defines for FWU SMC function ids.
+ */
+
+#define FWU_SMC_IMAGE_COPY		0x10
+#define FWU_SMC_IMAGE_AUTH		0x11
+#define FWU_SMC_IMAGE_EXECUTE		0x12
+#define FWU_SMC_IMAGE_RESUME		0x13
+#define FWU_SMC_SEC_IMAGE_DONE		0x14
+#define FWU_SMC_UPDATE_DONE		0x15
+#define FWU_SMC_IMAGE_RESET		0x16
+
+/*
+ * Number of FWU calls (above) implemented
+ */
+#define FWU_NUM_SMC_CALLS		7
+
+#if TRUSTED_BOARD_BOOT
+# define BL1_NUM_SMC_CALLS		(FWU_NUM_SMC_CALLS + 4)
+#else
+# define BL1_NUM_SMC_CALLS		4
+#endif
+
+/*
+ * The macros below are used to identify FWU
+ * calls from the SMC function ID
+ */
+#define FWU_SMC_FID_START		FWU_SMC_IMAGE_COPY
+#define FWU_SMC_FID_END			FWU_SMC_IMAGE_RESET
+#define is_fwu_fid(_fid) \
+    ((_fid >= FWU_SMC_FID_START) && (_fid <= FWU_SMC_FID_END))
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+
+/*
+ * Check that the total number of FWU SMC calls is as expected.
+ */
+CASSERT(FWU_NUM_SMC_CALLS == 	\
+		(FWU_SMC_FID_END - FWU_SMC_FID_START + 1),\
+		assert_FWU_NUM_SMC_CALLS_mismatch);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __BL1_FWU_H__ */
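A note on the new bl1.h above: the FWU SMC function IDs form a contiguous range so a handler can route them with the is_fwu_fid() macro, and the CASSERT keeps FWU_NUM_SMC_CALLS in sync with that range. A minimal, self-contained sketch of how a dispatcher might use it follows; the handler names, the SMC_UNK value, and the dispatch-on-raw-ID simplification are mine, not taken from this patch.

    #include <stdio.h>

    /* Function-ID range constants copied from bl1.h above. */
    #define BL1_SMC_RUN_IMAGE    0x4
    #define FWU_SMC_IMAGE_COPY   0x10
    #define FWU_SMC_IMAGE_RESET  0x16
    #define FWU_SMC_FID_START    FWU_SMC_IMAGE_COPY
    #define FWU_SMC_FID_END      FWU_SMC_IMAGE_RESET
    #define is_fwu_fid(_fid) \
        ((_fid >= FWU_SMC_FID_START) && (_fid <= FWU_SMC_FID_END))

    #define SMC_UNK  (-1)  /* illustrative "unknown SMC" return value */

    /* Illustrative handlers; the real BL1 service routines are not shown here. */
    static int fwu_smc_handler(unsigned int fid) { printf("FWU fid 0x%x\n", fid); return 0; }
    static int bl1_smc_handler(unsigned int fid) { printf("BL1 fid 0x%x\n", fid); return 0; }

    static int bl1_smc_dispatch(unsigned int fid)
    {
        if (is_fwu_fid(fid))
            return fwu_smc_handler(fid);   /* 0x10 .. 0x16 */
        if (fid <= BL1_SMC_RUN_IMAGE)
            return bl1_smc_handler(fid);   /* BL1_SMC_* range */
        return SMC_UNK;
    }

    int main(void)
    {
        bl1_smc_dispatch(0x12);  /* FWU_SMC_IMAGE_EXECUTE -> FWU handler */
        bl1_smc_dispatch(0x3);   /* BL1_SMC_VERSION       -> BL1 handler */
        return 0;
    }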
diff --git a/include/bl1/tbbr/tbbr_img_desc.h b/include/bl1/tbbr/tbbr_img_desc.h
new file mode 100644
index 0000000..4063f8a
--- /dev/null
+++ b/include/bl1/tbbr/tbbr_img_desc.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TBBR_IMG_DESC_H__
+#define __TBBR_IMG_DESC_H__
+
+#include <bl_common.h>
+
+extern image_desc_t bl1_tbbr_image_descs[];
+
+#endif /* __TBBR_IMG_DESC_H__ */
diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h
index 96867b0..b3567e2 100644
--- a/include/bl31/bl31.h
+++ b/include/bl31/bl31.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __BL31_H__
@@ -36,11 +12,11 @@
 /*******************************************************************************
  * Function prototypes
  ******************************************************************************/
-void bl31_arch_setup(void);
 void bl31_next_el_arch_setup(uint32_t security_state);
 void bl31_set_next_image_type(uint32_t type);
 uint32_t bl31_get_next_image_type(void);
 void bl31_prepare_next_image_entry(void);
 void bl31_register_bl32_init(int32_t (*)(void));
+void bl31_warm_entrypoint(void);
 
 #endif /* __BL31_H__ */
diff --git a/include/bl31/context.h b/include/bl31/context.h
deleted file mode 100644
index 0dfebe0..0000000
--- a/include/bl31/context.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CONTEXT_H__
-#define __CONTEXT_H__
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of and the 'gp_regs'
- * structure at their correct offsets.
- ******************************************************************************/
-#define CTX_GPREGS_OFFSET	0x0
-#define CTX_GPREG_X0		0x0
-#define CTX_GPREG_X1		0x8
-#define CTX_GPREG_X2		0x10
-#define CTX_GPREG_X3		0x18
-#define CTX_GPREG_X4		0x20
-#define CTX_GPREG_X5		0x28
-#define CTX_GPREG_X6		0x30
-#define CTX_GPREG_X7		0x38
-#define CTX_GPREG_X8		0x40
-#define CTX_GPREG_X9		0x48
-#define CTX_GPREG_X10		0x50
-#define CTX_GPREG_X11		0x58
-#define CTX_GPREG_X12		0x60
-#define CTX_GPREG_X13		0x68
-#define CTX_GPREG_X14		0x70
-#define CTX_GPREG_X15		0x78
-#define CTX_GPREG_X16		0x80
-#define CTX_GPREG_X17		0x88
-#define CTX_GPREG_X18		0x90
-#define CTX_GPREG_X19		0x98
-#define CTX_GPREG_X20		0xa0
-#define CTX_GPREG_X21		0xa8
-#define CTX_GPREG_X22		0xb0
-#define CTX_GPREG_X23		0xb8
-#define CTX_GPREG_X24		0xc0
-#define CTX_GPREG_X25		0xc8
-#define CTX_GPREG_X26		0xd0
-#define CTX_GPREG_X27		0xd8
-#define CTX_GPREG_X28		0xe0
-#define CTX_GPREG_X29		0xe8
-#define CTX_GPREG_LR		0xf0
-#define CTX_GPREG_SP_EL0	0xf8
-#define CTX_GPREGS_END		0x100
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of and the 'el3_state'
- * structure at their correct offsets. Note that some of the registers are only
- * 32-bits wide but are stored as 64-bit values for convenience
- ******************************************************************************/
-#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
-#define CTX_SCR_EL3		0x0
-#define CTX_RUNTIME_SP		0x8
-#define CTX_SPSR_EL3		0x10
-#define CTX_ELR_EL3		0x18
-#define CTX_EL3STATE_END	0x20
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of and the
- * 'el1_sys_regs' structure at their correct offsets. Note that some of the
- * registers are only 32-bits wide but are stored as 64-bit values for
- * convenience
- ******************************************************************************/
-#define CTX_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
-#define CTX_SPSR_EL1		0x0
-#define CTX_ELR_EL1		0x8
-#define CTX_SPSR_ABT		0x10
-#define CTX_SPSR_UND		0x18
-#define CTX_SPSR_IRQ		0x20
-#define CTX_SPSR_FIQ		0x28
-#define CTX_SCTLR_EL1		0x30
-#define CTX_ACTLR_EL1		0x38
-#define CTX_CPACR_EL1		0x40
-#define CTX_CSSELR_EL1		0x48
-#define CTX_SP_EL1		0x50
-#define CTX_ESR_EL1		0x58
-#define CTX_TTBR0_EL1		0x60
-#define CTX_TTBR1_EL1		0x68
-#define CTX_MAIR_EL1		0x70
-#define CTX_AMAIR_EL1		0x78
-#define CTX_TCR_EL1		0x80
-#define CTX_TPIDR_EL1		0x88
-#define CTX_TPIDR_EL0		0x90
-#define CTX_TPIDRRO_EL0		0x98
-#define CTX_DACR32_EL2		0xa0
-#define CTX_IFSR32_EL2		0xa8
-#define CTX_PAR_EL1		0xb0
-#define CTX_FAR_EL1		0xb8
-#define CTX_AFSR0_EL1		0xc0
-#define CTX_AFSR1_EL1		0xc8
-#define CTX_CONTEXTIDR_EL1	0xd0
-#define CTX_VBAR_EL1		0xd8
-/*
- * If the timer registers aren't saved and restored, we don't have to reserve
- * space for them in the context
- */
-#if NS_TIMER_SWITCH
-#define CTX_CNTP_CTL_EL0	0xe0
-#define CTX_CNTP_CVAL_EL0	0xe8
-#define CTX_CNTV_CTL_EL0	0xf0
-#define CTX_CNTV_CVAL_EL0	0xf8
-#define CTX_CNTKCTL_EL1		0x100
-#define CTX_FP_FPEXC32_EL2	0x108
-#define CTX_SYSREGS_END		0x110
-#else
-#define CTX_FP_FPEXC32_EL2	0xe0
-#define CTX_SYSREGS_END		0xf0
-#endif
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of and the 'fp_regs'
- * structure at their correct offsets.
- ******************************************************************************/
-#if CTX_INCLUDE_FPREGS
-#define CTX_FPREGS_OFFSET	(CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
-#define CTX_FP_Q0		0x0
-#define CTX_FP_Q1		0x10
-#define CTX_FP_Q2		0x20
-#define CTX_FP_Q3		0x30
-#define CTX_FP_Q4		0x40
-#define CTX_FP_Q5		0x50
-#define CTX_FP_Q6		0x60
-#define CTX_FP_Q7		0x70
-#define CTX_FP_Q8		0x80
-#define CTX_FP_Q9		0x90
-#define CTX_FP_Q10		0xa0
-#define CTX_FP_Q11		0xb0
-#define CTX_FP_Q12		0xc0
-#define CTX_FP_Q13		0xd0
-#define CTX_FP_Q14		0xe0
-#define CTX_FP_Q15		0xf0
-#define CTX_FP_Q16		0x100
-#define CTX_FP_Q17		0x110
-#define CTX_FP_Q18		0x120
-#define CTX_FP_Q19		0x130
-#define CTX_FP_Q20		0x140
-#define CTX_FP_Q21		0x150
-#define CTX_FP_Q22		0x160
-#define CTX_FP_Q23		0x170
-#define CTX_FP_Q24		0x180
-#define CTX_FP_Q25		0x190
-#define CTX_FP_Q26		0x1a0
-#define CTX_FP_Q27		0x1b0
-#define CTX_FP_Q28		0x1c0
-#define CTX_FP_Q29		0x1d0
-#define CTX_FP_Q30		0x1e0
-#define CTX_FP_Q31		0x1f0
-#define CTX_FP_FPSR		0x200
-#define CTX_FP_FPCR		0x208
-#define CTX_FPREGS_END		0x210
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <cassert.h>
-#include <platform_def.h>	/* for CACHE_WRITEBACK_GRANULE */
-#include <stdint.h>
-
-/*
- * Common constants to help define the 'cpu_context' structure and its
- * members below.
- */
-#define DWORD_SHIFT		3
-#define DEFINE_REG_STRUCT(name, num_regs)	\
-	typedef struct name {			\
-		uint64_t _regs[num_regs];	\
-	}  __aligned(16) name##_t
-
-/* Constants to determine the size of individual context structures */
-#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
-#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
-#if CTX_INCLUDE_FPREGS
-#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
-#endif
-#define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
-
-/*
- * AArch64 general purpose register context structure. Usually only x0-x18
- * and lr are saved: the compiler is expected to preserve any callee-saved
- * registers it uses in the C runtime, and the assembler does not touch the
- * rest. However, on a world switch during exception handling the
- * callee-saved registers must be saved as well.
- */
-DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
-
-/*
- * AArch64 EL1 system register context structure for preserving the
- * architectural state during switches from one security state to
- * another in EL1.
- */
-DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
-
-/*
- * AArch64 floating point register context structure for preserving
- * the floating point state during switches from one security state to
- * another.
- */
-#if CTX_INCLUDE_FPREGS
-DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
-#endif
-
-/*
- * Miscellaneous registers used by EL3 firmware to maintain its state
- * across exception entries and exits
- */
-DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
-
-/*
- * Macros to access members of any of the above structures using their
- * offsets
- */
-#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> DWORD_SHIFT])
-#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> DWORD_SHIFT]) \
-					 = val)
-
-/*
- * Top-level context structure which is used by EL3 firmware to
- * preserve the state of a core at EL1 in one of the two security
- * states and save enough EL3 meta data to be able to return to that
- * EL and security state. The context management library will be used
- * to ensure that SP_EL3 always points to an instance of this
- * structure at exception entry and exit. Each instance will
- * correspond to either the secure or the non-secure state.
- */
-typedef struct cpu_context {
-	gp_regs_t gpregs_ctx;
-	el3_state_t el3state_ctx;
-	el1_sys_regs_t sysregs_ctx;
-#if CTX_INCLUDE_FPREGS
-	fp_regs_t fpregs_ctx;
-#endif
-} cpu_context_t;
-
-/* Macros to access members of the 'cpu_context_t' structure */
-#define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
-#if CTX_INCLUDE_FPREGS
-#define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
-#endif
-#define get_sysregs_ctx(h)	(&((cpu_context_t *) h)->sysregs_ctx)
-#define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
-
-/*
- * Compile time assertions related to the 'cpu_context' structure to
- * ensure that the assembler and the compiler view of the offsets of
- * the structure members is the same.
- */
-CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
-	assert_core_context_gp_offset_mismatch);
-CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
-	assert_core_context_sys_offset_mismatch);
-#if CTX_INCLUDE_FPREGS
-CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
-	assert_core_context_fp_offset_mismatch);
-#endif
-CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
-	assert_core_context_el3state_offset_mismatch);
-
-/*
- * Helper macro to set the general purpose registers that correspond to
- * parameters in an aapcs_64 call i.e. x0-x7
- */
-#define set_aapcs_args0(ctx, x0)				do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);	\
-	} while (0);
-#define set_aapcs_args1(ctx, x0, x1)				do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);	\
-		set_aapcs_args0(ctx, x0);				\
-	} while (0);
-#define set_aapcs_args2(ctx, x0, x1, x2)			do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);	\
-		set_aapcs_args1(ctx, x0, x1);				\
-	} while (0);
-#define set_aapcs_args3(ctx, x0, x1, x2, x3)			do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);	\
-		set_aapcs_args2(ctx, x0, x1, x2);			\
-	} while (0);
-#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)		do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);	\
-		set_aapcs_args3(ctx, x0, x1, x2, x3);			\
-	} while (0);
-#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)		do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);	\
-		set_aapcs_args4(ctx, x0, x1, x2, x3, x4);		\
-	} while (0);
-#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)	do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);	\
-		set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);		\
-	} while (0);
-#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)	do {	\
-		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);	\
-		set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);	\
-	} while (0);
-
-/*******************************************************************************
- * Function prototypes
- ******************************************************************************/
-void el1_sysregs_context_save(el1_sys_regs_t *regs);
-void el1_sysregs_context_restore(el1_sys_regs_t *regs);
-#if CTX_INCLUDE_FPREGS
-void fpregs_context_save(fp_regs_t *regs);
-void fpregs_context_restore(fp_regs_t *regs);
-#endif
-
-
-#undef CTX_SYSREG_ALL
-#if CTX_INCLUDE_FPREGS
-#undef CTX_FPREG_ALL
-#endif
-#undef CTX_GPREG_ALL
-#undef CTX_EL3STATE_ALL
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __CONTEXT_H__ */
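
For reference, a minimal standalone sketch (illustration only, not part of this
patch) of how the CTX_GPREG_* byte offsets above are consumed: a byte offset is
turned into an index into the uint64_t register array by shifting right by
DWORD_SHIFT, exactly as read_ctx_reg()/write_ctx_reg() do. Only a subset of the
offsets is reproduced, and the assert/printf scaffolding is host-build glue.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DWORD_SHIFT	3
#define CTX_GPREG_X0	0x0
#define CTX_GPREG_X1	0x8
#define CTX_GPREG_LR	0xf0
#define CTX_GPREGS_END	0x100

/* Mirrors DEFINE_REG_STRUCT(gp_regs, CTX_GPREGS_END >> DWORD_SHIFT) */
typedef struct gp_regs {
	uint64_t _regs[CTX_GPREGS_END >> DWORD_SHIFT];
} __attribute__((aligned(16))) gp_regs_t;

/* Same shape as the read_ctx_reg()/write_ctx_reg() macros above */
#define read_ctx_reg(ctx, offset)	((ctx)->_regs[(offset) >> DWORD_SHIFT])
#define write_ctx_reg(ctx, offset, val)	((ctx)->_regs[(offset) >> DWORD_SHIFT] = (val))

/* Compile-time check in the spirit of the CASSERT()s above */
_Static_assert(sizeof(gp_regs_t) == CTX_GPREGS_END, "gp_regs size mismatch");

int main(void)
{
	gp_regs_t ctx = { { 0 } };

	write_ctx_reg(&ctx, CTX_GPREG_X1, 0x1234);
	write_ctx_reg(&ctx, CTX_GPREG_LR, 0xffff000000001000ULL);

	/* X1 lives at byte offset 0x8, i.e. array index 1 */
	printf("x1 = 0x%llx (index %d)\n",
	       (unsigned long long)read_ctx_reg(&ctx, CTX_GPREG_X1),
	       CTX_GPREG_X1 >> DWORD_SHIFT);
	assert(ctx._regs[1] == 0x1234);
	return 0;
}
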
diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h
deleted file mode 100644
index 6e82fb7..0000000
--- a/include/bl31/context_mgmt.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CM_H__
-#define __CM_H__
-
-#include <cpu_data.h>
-#include <stdint.h>
-
-/*******************************************************************************
- * Forward declarations
- ******************************************************************************/
-struct entry_point_info;
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-void cm_init(void);
-void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state);
-static inline void *cm_get_context(uint32_t security_state);
-void cm_set_context_by_mpidr(uint64_t mpidr,
-			     void *context,
-			     uint32_t security_state);
-static inline void cm_set_context(void *context, uint32_t security_state);
-void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep);
-void cm_prepare_el3_exit(uint32_t security_state);
-void cm_el1_sysregs_context_save(uint32_t security_state);
-void cm_el1_sysregs_context_restore(uint32_t security_state);
-void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint);
-void cm_set_elr_spsr_el3(uint32_t security_state,
-			 uint64_t entrypoint, uint32_t spsr);
-void cm_write_scr_el3_bit(uint32_t security_state,
-			  uint32_t bit_pos,
-			  uint32_t value);
-void cm_set_next_eret_context(uint32_t security_state);
-uint32_t cm_get_scr_el3(uint32_t security_state);
-
-/* Inline definitions */
-
-/*******************************************************************************
- * This function returns a pointer to the most recent 'cpu_context' structure
- * for the calling CPU that was set as the context for the specified security
- * state. NULL is returned if no such structure has been specified.
- ******************************************************************************/
-void *cm_get_context(uint32_t security_state)
-{
-	assert(security_state <= NON_SECURE);
-
-	return get_cpu_data(cpu_context[security_state]);
-}
-
-/*******************************************************************************
- * This function sets the pointer to the current 'cpu_context' structure for the
- * specified security state for the calling CPU
- ******************************************************************************/
-void cm_set_context(void *context, uint32_t security_state)
-{
-	assert(security_state <= NON_SECURE);
-
-	set_cpu_data(cpu_context[security_state], context);
-}
-
-
-#endif /* __CM_H__ */
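
The pair of inline functions above is the core of the removed interface: one
context pointer per security state, per CPU. A simplified sketch follows
(illustration only; the real code keeps the array in per-cpu data reached
through TPIDR_EL3, and a static array stands in for a single CPU here).

#include <assert.h>
#include <stdio.h>

#define SECURE		0U
#define NON_SECURE	1U

static void *cpu_context[2];	/* one slot per security state */

static void cm_set_context(void *context, unsigned int security_state)
{
	assert(security_state <= NON_SECURE);
	cpu_context[security_state] = context;
}

static void *cm_get_context(unsigned int security_state)
{
	assert(security_state <= NON_SECURE);
	return cpu_context[security_state];
}

int main(void)
{
	int secure_ctx, ns_ctx;		/* stand-ins for cpu_context_t instances */

	cm_set_context(&secure_ctx, SECURE);
	cm_set_context(&ns_ctx, NON_SECURE);
	printf("secure ok=%d ns ok=%d\n",
	       cm_get_context(SECURE) == &secure_ctx,
	       cm_get_context(NON_SECURE) == &ns_ctx);
	return 0;
}
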
diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h
deleted file mode 100644
index 1926e29..0000000
--- a/include/bl31/cpu_data.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CPU_DATA_H__
-#define __CPU_DATA_H__
-
-/* Offsets for the cpu_data structure */
-#define CPU_DATA_CRASH_BUF_OFFSET	0x18
-#if CRASH_REPORTING
-#define CPU_DATA_LOG2SIZE		7
-#else
-#define CPU_DATA_LOG2SIZE		6
-#endif
-/* need enough space in crash buffer to save 8 registers */
-#define CPU_DATA_CRASH_BUF_SIZE		64
-#define CPU_DATA_CPU_OPS_PTR		0x10
-
-#ifndef __ASSEMBLY__
-
-#include <arch_helpers.h>
-#include <cassert.h>
-#include <platform_def.h>
-#include <psci.h>
-#include <stdint.h>
-
-/* Offsets for the cpu_data structure */
-#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
-		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
-
-#if PLAT_PCPU_DATA_SIZE
-#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
-		(cpu_data_t, platform_cpu_data)
-#endif
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-
-/*******************************************************************************
- * Cache of frequently used per-cpu data:
- *   Pointers to non-secure and secure security state contexts
- *   Address of the crash stack
- * It is aligned to the cache line boundary to allow efficient concurrent
- * manipulation of these pointers on different cpus
- *
- * TODO: Add other commonly used variables to this (tf_issues#90)
- *
- * The data structure and the _cpu_data accessors should not be used directly
- * by components that have per-cpu members. The member access macros should be
- * used for this.
- ******************************************************************************/
-typedef struct cpu_data {
-	void *cpu_context[2];
-	uint64_t cpu_ops_ptr;
-#if CRASH_REPORTING
-	uint64_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
-#endif
-	struct psci_cpu_data psci_svc_cpu_data;
-#if PLAT_PCPU_DATA_SIZE
-	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
-#endif
-} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
-
-#if CRASH_REPORTING
-/* verify assembler offsets match data structures */
-CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
-	(cpu_data_t, crash_buf),
-	assert_cpu_data_crash_stack_offset_mismatch);
-#endif
-
-CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
-	assert_cpu_data_log2size_mismatch);
-
-CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
-		(cpu_data_t, cpu_ops_ptr),
-		assert_cpu_data_cpu_ops_ptr_offset_mismatch);
-
-struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
-struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
-
-/* Return the cpu_data structure for the current CPU. */
-static inline struct cpu_data *_cpu_data(void)
-{
-	return (cpu_data_t *)read_tpidr_el3();
-}
-
-
-/**************************************************************************
- * APIs for initialising and accessing per-cpu data
- *************************************************************************/
-
-void init_cpu_data_ptr(void);
-
-#define get_cpu_data(_m)		   _cpu_data()->_m
-#define set_cpu_data(_m, _v)		   _cpu_data()->_m = _v
-#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
-#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
-#define get_cpu_data_by_mpidr(_id, _m)	   _cpu_data_by_mpidr(_id)->_m
-#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
-
-#define flush_cpu_data(_m)	   flush_dcache_range((uint64_t) 	  \
-						      &(_cpu_data()->_m), \
-						      sizeof(_cpu_data()->_m))
-#define flush_cpu_data_by_index(_ix, _m)	\
-				   flush_dcache_range((uint64_t)	  \
-					 &(_cpu_data_by_index(_ix)->_m),  \
-					 sizeof(_cpu_data_by_index(_ix)->_m))
-
-
-#endif /* __ASSEMBLY__ */
-#endif /* __CPU_DATA_H__ */
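
A host-side sketch (not TF code) of the get_cpu_data()/set_cpu_data() accessor
pattern defined above. On the target, _cpu_data() reads TPIDR_EL3 to locate the
calling CPU's slot; here slot 0 of a static array plays that role, and the
PLATFORM_CORE_COUNT/CACHE_WRITEBACK_GRANULE values are arbitrary assumptions.

#include <stdint.h>
#include <stdio.h>

#define PLATFORM_CORE_COUNT	4
#define CACHE_WRITEBACK_GRANULE	64

typedef struct cpu_data {
	void *cpu_context[2];
	uint64_t cpu_ops_ptr;
} __attribute__((aligned(CACHE_WRITEBACK_GRANULE))) cpu_data_t;

static cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

/* Stand-in for reading TPIDR_EL3: pretend we are always CPU 0 */
static inline cpu_data_t *_cpu_data(void) { return &percpu_data[0]; }
static inline cpu_data_t *_cpu_data_by_index(uint32_t ix) { return &percpu_data[ix]; }

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   (_cpu_data()->_m = (_v))
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) (_cpu_data_by_index(_ix)->_m = (_v))

int main(void)
{
	set_cpu_data(cpu_ops_ptr, 0x1000);
	set_cpu_data_by_index(2, cpu_ops_ptr, 0x2000);
	printf("cpu0 ops=0x%llx cpu2 ops=0x%llx sizeof(cpu_data_t)=%zu\n",
	       (unsigned long long)get_cpu_data(cpu_ops_ptr),
	       (unsigned long long)get_cpu_data_by_index(2, cpu_ops_ptr),
	       sizeof(cpu_data_t));
	return 0;
}
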
diff --git a/include/bl31/interrupt_mgmt.h b/include/bl31/interrupt_mgmt.h
index e07ddf8..cccad3a 100644
--- a/include/bl31/interrupt_mgmt.h
+++ b/include/bl31/interrupt_mgmt.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __INTERRUPT_MGMT_H__
@@ -36,16 +12,21 @@
 /*******************************************************************************
  * Constants for the types of interrupts recognised by the IM framework
  ******************************************************************************/
-#define INTR_TYPE_S_EL1			0
-#define INTR_TYPE_EL3			1
-#define INTR_TYPE_NS			2
-#define MAX_INTR_TYPES			3
+#define INTR_TYPE_S_EL1			U(0)
+#define INTR_TYPE_EL3			U(1)
+#define INTR_TYPE_NS			U(2)
+#define MAX_INTR_TYPES			U(3)
 #define INTR_TYPE_INVAL			MAX_INTR_TYPES
+
+/* Interrupt routing modes */
+#define INTR_ROUTING_MODE_PE		0
+#define INTR_ROUTING_MODE_ANY		1
+
 /*
  * Constant passed to the interrupt handler in the 'id' field when the
  * framework does not read the gic registers to determine the interrupt id.
  */
-#define INTR_ID_UNAVAILABLE		0xFFFFFFFF
+#define INTR_ID_UNAVAILABLE		U(0xFFFFFFFF)
 
 
 /*******************************************************************************
@@ -53,33 +34,37 @@
  * constants to define the valid routing models for each supported interrupt
  * type
  ******************************************************************************/
-#define INTR_RM_FLAGS_SHIFT		0x0
-#define INTR_RM_FLAGS_MASK		0x3
+#define INTR_RM_FLAGS_SHIFT		U(0x0)
+#define INTR_RM_FLAGS_MASK		U(0x3)
 /* Routed to EL3 from NS. Taken to S-EL1 from Secure */
-#define INTR_SEL1_VALID_RM0		0x2
+#define INTR_SEL1_VALID_RM0		U(0x2)
 /* Routed to EL3 from NS and Secure */
-#define INTR_SEL1_VALID_RM1		0x3
+#define INTR_SEL1_VALID_RM1		U(0x3)
 /* Routed to EL1/EL2 from NS and to S-EL1 from Secure */
-#define INTR_NS_VALID_RM0		0x0
+#define INTR_NS_VALID_RM0		U(0x0)
 /* Routed to EL1/EL2 from NS and to EL3 from Secure */
-#define INTR_NS_VALID_RM1		0x1
+#define INTR_NS_VALID_RM1		U(0x1)
+/* Routed to EL3 from NS. Taken to S-EL1 from Secure and handed over to EL3 */
+#define INTR_EL3_VALID_RM0		U(0x2)
+/* Routed to EL3 from NS and Secure */
+#define INTR_EL3_VALID_RM1		U(0x3)
 /* This is the default routing model */
-#define INTR_DEFAULT_RM		0x0
+#define INTR_DEFAULT_RM			U(0x0)
 
 /*******************************************************************************
  * Constants for the _individual_ routing model bits in the 'flags' field for
  * each interrupt type and mask to validate the 'flags' parameter while
  * registering an interrupt handler
  ******************************************************************************/
-#define INTR_TYPE_FLAGS_MASK		0xFFFFFFFC
+#define INTR_TYPE_FLAGS_MASK		U(0xFFFFFFFC)
 
 #define INTR_RM_FROM_SEC_SHIFT		SECURE		/* BIT[0] */
 #define INTR_RM_FROM_NS_SHIFT		NON_SECURE	/* BIT[1] */
-#define INTR_RM_FROM_FLAG_MASK		1
+#define INTR_RM_FROM_FLAG_MASK		U(1)
 #define get_interrupt_rm_flag(flag, ss)	(((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \
 					 & INTR_RM_FROM_FLAG_MASK)
-#define set_interrupt_rm_flag(flag, ss)	(flag |= 1 << ss)
-#define clr_interrupt_rm_flag(flag, ss)	(flag &= ~(1 << ss))
+#define set_interrupt_rm_flag(flag, ss)	(flag |= U(1) << ss)
+#define clr_interrupt_rm_flag(flag, ss)	(flag &= ~(U(1) << ss))
 
 
 /*******************************************************************************
@@ -87,12 +72,16 @@
  * of interrupt. If the model does not match one of the valid masks
  * -EINVAL is returned.
  ******************************************************************************/
-#define validate_sel1_interrupt_rm(x)	(x == INTR_SEL1_VALID_RM0 ? 0 : \
-					 (x == INTR_SEL1_VALID_RM1 ? 0 :\
+#define validate_sel1_interrupt_rm(x)	((x) == INTR_SEL1_VALID_RM0 ? 0 : \
+					 ((x) == INTR_SEL1_VALID_RM1 ? 0 :\
 					  -EINVAL))
 
-#define validate_ns_interrupt_rm(x)	(x == INTR_NS_VALID_RM0 ? 0 : \
-					 (x == INTR_NS_VALID_RM1 ? 0 :\
+#define validate_ns_interrupt_rm(x)	((x) == INTR_NS_VALID_RM0 ? 0 : \
+					 ((x) == INTR_NS_VALID_RM1 ? 0 :\
+					  -EINVAL))
+
+#define validate_el3_interrupt_rm(x)	((x) == INTR_EL3_VALID_RM0 ? 0 : \
+					 ((x) == INTR_EL3_VALID_RM1 ? 0 :\
 					  -EINVAL))
 
 /*******************************************************************************
@@ -100,15 +89,17 @@
  * the flag to indicate the security state when the exception was generated is
  * supported.
  ******************************************************************************/
-#define INTR_SRC_SS_FLAG_SHIFT		0		/* BIT[0] */
-#define INTR_SRC_SS_FLAG_MASK		1
+#define INTR_SRC_SS_FLAG_SHIFT		U(0)		/* BIT[0] */
+#define INTR_SRC_SS_FLAG_MASK		U(1)
 #define set_interrupt_src_ss(flag, val)	(flag |= val << INTR_SRC_SS_FLAG_SHIFT)
-#define clr_interrupt_src_ss(flag)	(flag &= ~(1 << INTR_SRC_SS_FLAG_SHIFT))
+#define clr_interrupt_src_ss(flag)	(flag &= ~(U(1) << INTR_SRC_SS_FLAG_SHIFT))
 #define get_interrupt_src_ss(flag)	((flag >> INTR_SRC_SS_FLAG_SHIFT) & \
 					 INTR_SRC_SS_FLAG_MASK)
 
 #ifndef __ASSEMBLY__
 
+#include <stdint.h>
+
 /* Prototype for defining a handler for an interrupt type */
 typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
 					     uint32_t flags,
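
A quick sketch (illustrative, not part of the patch) of how the routing model
helpers above compose. SECURE/NON_SECURE are the bit positions named in the
comments (BIT[0]/BIT[1]); setting only the non-secure bit yields 0x2, which is
the INTR_SEL1_VALID_RM0 model. The U() definition below is an assumption
matching the usual utils_def.h one.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define U(_x)			(_x##U)
#define SECURE			U(0)
#define NON_SECURE		U(1)

#define INTR_RM_FLAGS_SHIFT	U(0x0)
#define INTR_SEL1_VALID_RM0	U(0x2)
#define INTR_SEL1_VALID_RM1	U(0x3)
#define INTR_RM_FROM_FLAG_MASK	U(1)

#define get_interrupt_rm_flag(flag, ss)	((((flag) >> INTR_RM_FLAGS_SHIFT) >> (ss)) \
					 & INTR_RM_FROM_FLAG_MASK)
#define set_interrupt_rm_flag(flag, ss)	((flag) |= U(1) << (ss))
#define validate_sel1_interrupt_rm(x)	((x) == INTR_SEL1_VALID_RM0 ? 0 : \
					 ((x) == INTR_SEL1_VALID_RM1 ? 0 : -EINVAL))

int main(void)
{
	uint32_t flags = 0;

	/* Route S-EL1 interrupts to EL3 only when they fire in the NS state */
	set_interrupt_rm_flag(flags, NON_SECURE);
	printf("flags=0x%x from_sec=%u from_ns=%u valid=%d\n",
	       flags,
	       get_interrupt_rm_flag(flags, SECURE),
	       get_interrupt_rm_flag(flags, NON_SECURE),
	       validate_sel1_interrupt_rm(flags) == 0);
	return 0;
}
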
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
deleted file mode 100644
index de40041..0000000
--- a/include/bl31/runtime_svc.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __RUNTIME_SVC_H__
-#define __RUNTIME_SVC_H__
-
-#include <utils.h>
-
-/*******************************************************************************
- * Bit definitions inside the function id as per the SMC calling convention
- ******************************************************************************/
-#define FUNCID_TYPE_SHIFT		31
-#define FUNCID_CC_SHIFT			30
-#define FUNCID_OEN_SHIFT		24
-#define FUNCID_NUM_SHIFT		0
-
-#define FUNCID_TYPE_MASK		0x1
-#define FUNCID_CC_MASK			0x1
-#define FUNCID_OEN_MASK			0x3f
-#define FUNCID_NUM_MASK			0xffff
-
-#define FUNCID_TYPE_WIDTH		1
-#define FUNCID_CC_WIDTH			1
-#define FUNCID_OEN_WIDTH		6
-#define FUNCID_NUM_WIDTH		16
-
-#define GET_SMC_CC(id)			((id >> FUNCID_CC_SHIFT) & \
-					 FUNCID_CC_MASK)
-#define GET_SMC_TYPE(id)		((id >> FUNCID_TYPE_SHIFT) & \
-					 FUNCID_TYPE_MASK)
-
-#define SMC_64				1
-#define SMC_32				0
-#define SMC_UNK				0xffffffff
-#define SMC_TYPE_FAST			ULL(1)
-#define SMC_TYPE_STD			0
-#define SMC_PREEMPTED		0xfffffffe
-/*******************************************************************************
- * Owning entity number definitions inside the function id as per the SMC
- * calling convention
- ******************************************************************************/
-#define OEN_ARM_START			0
-#define OEN_ARM_END			0
-#define OEN_CPU_START			1
-#define OEN_CPU_END			1
-#define OEN_SIP_START			2
-#define OEN_SIP_END			2
-#define OEN_OEM_START			3
-#define OEN_OEM_END			3
-#define OEN_STD_START			4	/* Standard Calls */
-#define OEN_STD_END			4
-#define OEN_TAP_START			48	/* Trusted Applications */
-#define OEN_TAP_END			49
-#define OEN_TOS_START			50	/* Trusted OS */
-#define OEN_TOS_END			63
-#define OEN_LIMIT			64
-
-/*******************************************************************************
- * Constants to indicate type of exception to the common exception handler.
- ******************************************************************************/
-#define SYNC_EXCEPTION_SP_EL0		0x0
-#define IRQ_SP_EL0			0x1
-#define FIQ_SP_EL0			0x2
-#define SERROR_SP_EL0			0x3
-#define SYNC_EXCEPTION_SP_ELX		0x4
-#define IRQ_SP_ELX			0x5
-#define FIQ_SP_ELX			0x6
-#define SERROR_SP_ELX			0x7
-#define SYNC_EXCEPTION_AARCH64		0x8
-#define IRQ_AARCH64			0x9
-#define FIQ_AARCH64			0xa
-#define SERROR_AARCH64			0xb
-#define SYNC_EXCEPTION_AARCH32		0xc
-#define IRQ_AARCH32			0xd
-#define FIQ_AARCH32			0xe
-#define SERROR_AARCH32			0xf
-
-/*******************************************************************************
- * Structure definition, typedefs & constants for the runtime service framework
- ******************************************************************************/
-
-/*
- * Constants to allow the assembler to access a runtime service
- * descriptor
- */
-#define RT_SVC_SIZE_LOG2	5
-#define SIZEOF_RT_SVC_DESC	(1 << RT_SVC_SIZE_LOG2)
-#define RT_SVC_DESC_INIT	16
-#define RT_SVC_DESC_HANDLE	24
-
-/*
- * The function identifier has 6 bits for the owning entity number and a
- * single bit for the type of smc call. When taken together these
- * values limit the maximum number of runtime services to 128.
- */
-#define MAX_RT_SVCS		128
-
-#ifndef __ASSEMBLY__
-
-#include <cassert.h>
-#include <context.h>
-#include <stdint.h>
-
-/* Various flags passed to SMC handlers */
-#define SMC_FROM_SECURE		(0 << 0)
-#define SMC_FROM_NON_SECURE	(1 << 0)
-
-#define is_caller_non_secure(_f)	(!!(_f & SMC_FROM_NON_SECURE))
-#define is_caller_secure(_f)		(!(is_caller_non_secure(_f)))
-
-/* Prototype for runtime service initializing function */
-typedef int32_t (*rt_svc_init_t)(void);
-
-/* Convenience macros to return from SMC handler */
-#define SMC_RET0(_h)	{ \
-	return (uint64_t) (_h);		\
-}
-#define SMC_RET1(_h, _x0)	{ \
-	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
-	SMC_RET0(_h);						\
-}
-#define SMC_RET2(_h, _x0, _x1)	{ \
-	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
-	SMC_RET1(_h, (_x0)); \
-}
-#define SMC_RET3(_h, _x0, _x1, _x2)	{ \
-	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
-	SMC_RET2(_h, (_x0), (_x1)); \
-}
-#define SMC_RET4(_h, _x0, _x1, _x2, _x3)	{ \
-	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
-	SMC_RET3(_h, (_x0), (_x1), (_x2)); \
-}
-
-
-/*
- * Convenience macros to access general purpose registers using the handle
- * provided to the SMC handler. These take the offset values defined in
- * context.h
- */
-#define SMC_GET_GP(_h, _g) \
-	read_ctx_reg(get_gpregs_ctx(_h), (_g));
-#define SMC_SET_GP(_h, _g, _v) \
-	write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v));
-
-/*
- * Convenience macros to access EL3 context registers using the handle provided
- * to the SMC handler. These take the offset values defined in context.h
- */
-#define SMC_GET_EL3(_h, _e) \
-	read_ctx_reg(get_el3state_ctx(_h), (_e));
-#define SMC_SET_EL3(_h, _e, _v) \
-	write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
-
-/* The macro below is used to identify a Standard Service SMC call */
-#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
-					   FUNCID_OEN_MASK) == OEN_STD_START)
-
-/* The macro below is used to identify a valid Fast SMC call */
-#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & 0xff)) && \
-					   (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
-
-/*
- * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
- * x4 are as passed by the caller. The rest of the SMC arguments and the context
- * can be accessed using the handle pointer. The cookie parameter is reserved
- * for future use.
- */
-typedef uint64_t (*rt_svc_handle_t)(uint32_t smc_fid,
-				  uint64_t x1,
-				  uint64_t x2,
-				  uint64_t x3,
-				  uint64_t x4,
-				  void *cookie,
-				  void *handle,
-				  uint64_t flags);
-typedef struct rt_svc_desc {
-	uint8_t start_oen;
-	uint8_t end_oen;
-	uint8_t call_type;
-	const char *name;
-	rt_svc_init_t init;
-	rt_svc_handle_t handle;
-} rt_svc_desc_t;
-
-/*
- * Convenience macro to declare a service descriptor
- */
-#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \
-	static const rt_svc_desc_t __svc_desc_ ## _name \
-		__attribute__ ((section("rt_svc_descs"), used)) = { \
-			_start, \
-			_end, \
-			_type, \
-			#_name, \
-			_setup, \
-			_smch }
-
-/*
- * Compile time assertions related to the 'rt_svc_desc' structure to:
- * 1. ensure that the assembler and the compiler view of the size
- *    of the structure are the same.
- * 2. ensure that the assembler and the compiler see the initialisation
- *    routine at the same offset.
- * 3. ensure that the assembler and the compiler see the handler
- *    routine at the same offset.
- */
-CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \
-	assert_sizeof_rt_svc_desc_mismatch);
-CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \
-	assert_rt_svc_desc_init_offset_mismatch);
-CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
-	assert_rt_svc_desc_handle_offset_mismatch);
-
-
-/*
- * This macro combines the call type and the owning entity number corresponding
- * to a runtime service to generate a unique owning entity number. This unique
- * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry
- * contains the index of the service descriptor in the 'rt_svc_descs' array.
- */
-#define get_unique_oen(oen, call_type)	((oen & FUNCID_OEN_MASK) |	\
-					((call_type & FUNCID_TYPE_MASK) \
-					 << FUNCID_OEN_WIDTH))
-
-
-/*
- * Macro to define UUID for services. Apart from defining and initializing a
- * uuid_t structure, this macro verifies that the first word of the defined UUID
- * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
- * returned UUID in x0 for an invalid SMC error return
- */
-#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
-		_n0, _n1, _n2, _n3, _n4, _n5) \
-	CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
-	static const uuid_t _name = { \
-		_tl, _tm, _th, _cl, _ch, \
-		{ _n0, _n1, _n2, _n3, _n4, _n5 } \
-	}
-
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid) \
-	SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
-			 ((const uint32_t *) &(_uuid))[1], \
-			 ((const uint32_t *) &(_uuid))[2], \
-			 ((const uint32_t *) &(_uuid))[3])
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-void runtime_svc_init(void);
-extern uint64_t __RT_SVC_DESCS_START__;
-extern uint64_t __RT_SVC_DESCS_END__;
-void init_crash_reporting(void);
-
-#endif /*__ASSEMBLY__*/
-#endif /* __RUNTIME_SVC_H__ */
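
For reference, a standalone sketch (not part of the patch) decoding an SMC
function ID with the FUNCID_* layout from the deleted header: bit 31 is the call
type, bit 30 the calling convention, bits [29:24] the owning entity number and
bits [15:0] the function number. GET_SMC_OEN()/GET_SMC_NUM() are written here
just for the sketch; only GET_SMC_TYPE()/GET_SMC_CC() appear in the header
itself. 0xc4000003 is PSCI_CPU_ON_AARCH64, so the decode shows a fast SMC64 call
with OEN 4 (Standard Calls).

#include <stdint.h>
#include <stdio.h>

#define FUNCID_TYPE_SHIFT	31
#define FUNCID_CC_SHIFT		30
#define FUNCID_OEN_SHIFT	24
#define FUNCID_NUM_SHIFT	0

#define FUNCID_TYPE_MASK	0x1
#define FUNCID_CC_MASK		0x1
#define FUNCID_OEN_MASK		0x3f
#define FUNCID_NUM_MASK		0xffff

#define GET_SMC_TYPE(id)	(((id) >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)
#define GET_SMC_CC(id)		(((id) >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK)
#define GET_SMC_OEN(id)		(((id) >> FUNCID_OEN_SHIFT) & FUNCID_OEN_MASK)
#define GET_SMC_NUM(id)		(((id) >> FUNCID_NUM_SHIFT) & FUNCID_NUM_MASK)

int main(void)
{
	uint32_t fid = 0xc4000003;	/* PSCI_CPU_ON_AARCH64 */

	printf("fid=0x%08x type=%s cc=%s oen=%u num=0x%x\n", fid,
	       GET_SMC_TYPE(fid) ? "fast" : "std",
	       GET_SMC_CC(fid) ? "SMC64" : "SMC32",
	       (unsigned)GET_SMC_OEN(fid), (unsigned)GET_SMC_NUM(fid));
	return 0;
}
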
diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h
deleted file mode 100644
index dd1891c..0000000
--- a/include/bl31/services/psci.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_H__
-#define __PSCI_H__
-
-#include <bakery_lock.h>
-#include <platform_def.h>	/* for PLATFORM_NUM_AFFS */
-
-/*******************************************************************************
- * Number of affinity instances whose state this PSCI implementation can track
- ******************************************************************************/
-#ifdef PLATFORM_NUM_AFFS
-#define PSCI_NUM_AFFS		PLATFORM_NUM_AFFS
-#else
-#define PSCI_NUM_AFFS		(2 * PLATFORM_CORE_COUNT)
-#endif
-
-/*******************************************************************************
- * Defines for runtime services func ids
- ******************************************************************************/
-#define PSCI_VERSION			0x84000000
-#define PSCI_CPU_SUSPEND_AARCH32	0x84000001
-#define PSCI_CPU_SUSPEND_AARCH64	0xc4000001
-#define PSCI_CPU_OFF			0x84000002
-#define PSCI_CPU_ON_AARCH32		0x84000003
-#define PSCI_CPU_ON_AARCH64		0xc4000003
-#define PSCI_AFFINITY_INFO_AARCH32	0x84000004
-#define PSCI_AFFINITY_INFO_AARCH64	0xc4000004
-#define PSCI_MIG_AARCH32		0x84000005
-#define PSCI_MIG_AARCH64		0xc4000005
-#define PSCI_MIG_INFO_TYPE		0x84000006
-#define PSCI_MIG_INFO_UP_CPU_AARCH32	0x84000007
-#define PSCI_MIG_INFO_UP_CPU_AARCH64	0xc4000007
-#define PSCI_SYSTEM_OFF			0x84000008
-#define PSCI_SYSTEM_RESET		0x84000009
-#define PSCI_FEATURES			0x8400000A
-#define PSCI_SYSTEM_SUSPEND_AARCH32	0x8400000E
-#define PSCI_SYSTEM_SUSPEND_AARCH64	0xc400000E
-
-/* Macro to help build the psci capabilities bitfield */
-#define define_psci_cap(x)		(1 << (x & 0x1f))
-
-/*
- * Number of PSCI calls (above) implemented
- */
-#define PSCI_NUM_CALLS			18
-
-/*******************************************************************************
- * PSCI Migrate and friends
- ******************************************************************************/
-#define PSCI_TOS_UP_MIG_CAP	0
-#define PSCI_TOS_NOT_UP_MIG_CAP	1
-#define PSCI_TOS_NOT_PRESENT_MP	2
-
-/*******************************************************************************
- * PSCI CPU_SUSPEND 'power_state' parameter specific defines
- ******************************************************************************/
-#define PSTATE_ID_SHIFT		0
-#define PSTATE_TYPE_SHIFT	16
-#define PSTATE_AFF_LVL_SHIFT	24
-
-#define PSTATE_ID_MASK		0xffff
-#define PSTATE_TYPE_MASK	0x1
-#define PSTATE_AFF_LVL_MASK	0x3
-#define PSTATE_VALID_MASK     0xFCFE0000
-
-#define PSTATE_TYPE_STANDBY	0x0
-#define PSTATE_TYPE_POWERDOWN	0x1
-
-#define psci_get_pstate_id(pstate)	(((pstate) >> PSTATE_ID_SHIFT) & \
-					PSTATE_ID_MASK)
-#define psci_get_pstate_type(pstate)	(((pstate) >> PSTATE_TYPE_SHIFT) & \
-					PSTATE_TYPE_MASK)
-#define psci_get_pstate_afflvl(pstate)	(((pstate) >> PSTATE_AFF_LVL_SHIFT) & \
-					PSTATE_AFF_LVL_MASK)
-#define psci_make_powerstate(state_id, type, afflvl) \
-			(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
-			(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
-			(((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT)
-
-/*******************************************************************************
- * PSCI CPU_FEATURES feature flag specific defines
- ******************************************************************************/
-/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */
-#define FF_PSTATE_SHIFT		1
-#define FF_PSTATE_ORIG		0
-#define FF_PSTATE_EXTENDED	1
-
-/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
-#define FF_MODE_SUPPORT_SHIFT		0
-#define FF_SUPPORTS_OS_INIT_MODE	1
-
-/*******************************************************************************
- * PSCI version
- ******************************************************************************/
-#define PSCI_MAJOR_VER		(1 << 16)
-#define PSCI_MINOR_VER		0x0
-
-/*******************************************************************************
- * PSCI error codes
- ******************************************************************************/
-#define PSCI_E_SUCCESS		0
-#define PSCI_E_NOT_SUPPORTED	-1
-#define PSCI_E_INVALID_PARAMS	-2
-#define PSCI_E_DENIED		-3
-#define PSCI_E_ALREADY_ON	-4
-#define PSCI_E_ON_PENDING	-5
-#define PSCI_E_INTERN_FAIL	-6
-#define PSCI_E_NOT_PRESENT	-7
-#define PSCI_E_DISABLED		-8
-
-/*******************************************************************************
- * PSCI affinity state related constants. An affinity instance could be present
- * or absent physically to cater for asymmetric topologies. If present, it can
- * be in one of the 4 states defined below.
- ******************************************************************************/
-#define PSCI_STATE_SHIFT	1
-#define PSCI_STATE_MASK		0xff
-
-#define PSCI_AFF_ABSENT		0x0
-#define PSCI_AFF_PRESENT	0x1
-#define PSCI_STATE_ON		0x0
-#define PSCI_STATE_OFF		0x1
-#define PSCI_STATE_ON_PENDING	0x2
-#define PSCI_STATE_SUSPEND	0x3
-
-#define PSCI_INVALID_DATA -1
-
-#define get_phys_state(x)	(x != PSCI_STATE_ON ? \
-				 PSCI_STATE_OFF : PSCI_STATE_ON)
-
-#define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK)
-
-
-#ifndef __ASSEMBLY__
-
-#include <stdint.h>
-
-/*******************************************************************************
- * Structure used to store per-cpu information relevant to the PSCI service.
- * It is populated in the per-cpu data array. In return we get a guarantee that
- * this information will not reside on a cache line shared with another cpu.
- ******************************************************************************/
-typedef struct psci_cpu_data {
-	uint32_t power_state;
-	uint32_t max_phys_off_afflvl;	/* Highest affinity level in physically
-					   powered off state */
-#if !USE_COHERENT_MEM
-	bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS];
-#endif
-} psci_cpu_data_t;
-
-/*******************************************************************************
- * Structure populated by platform specific code to export routines which
- * perform common low level pm functions
- ******************************************************************************/
-typedef struct plat_pm_ops {
-	void (*affinst_standby)(unsigned int power_state);
-	int (*affinst_on)(unsigned long mpidr,
-			  unsigned long sec_entrypoint,
-			  unsigned int afflvl,
-			  unsigned int state);
-	void (*affinst_off)(unsigned int afflvl, unsigned int state);
-	void (*affinst_suspend)(unsigned long sec_entrypoint,
-			       unsigned int afflvl,
-			       unsigned int state);
-	void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
-	void (*affinst_suspend_finish)(unsigned int afflvl,
-				      unsigned int state);
-	void (*system_off)(void) __dead2;
-	void (*system_reset)(void) __dead2;
-	int (*validate_power_state)(unsigned int power_state);
-	int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
-	unsigned int (*get_sys_suspend_power_state)(void);
-} plat_pm_ops_t;
-
-/*******************************************************************************
- * Optional structure populated by the Secure Payload Dispatcher to be given a
- * chance to perform any bookkeeping before PSCI executes a power management
- * operation. It also allows PSCI to determine certain properties of the SP,
- * e.g. migrate capability.
- ******************************************************************************/
-typedef struct spd_pm_ops {
-	void (*svc_on)(uint64_t target_cpu);
-	int32_t (*svc_off)(uint64_t __unused);
-	void (*svc_suspend)(uint64_t __unused);
-	void (*svc_on_finish)(uint64_t __unused);
-	void (*svc_suspend_finish)(uint64_t suspend_level);
-	int32_t (*svc_migrate)(uint64_t from_cpu, uint64_t to_cpu);
-	int32_t (*svc_migrate_info)(uint64_t *resident_cpu);
-	void (*svc_system_off)(void);
-	void (*svc_system_reset)(void);
-} spd_pm_ops_t;
-
-/*******************************************************************************
- * Function & Data prototypes
- ******************************************************************************/
-unsigned int psci_version(void);
-int psci_affinity_info(unsigned long, unsigned int);
-int psci_migrate(unsigned long);
-int psci_migrate_info_type(void);
-long psci_migrate_info_up_cpu(void);
-int psci_cpu_on(unsigned long,
-		unsigned long,
-		unsigned long);
-void __dead2 psci_power_down_wfi(void);
-void psci_aff_on_finish_entry(void);
-void psci_aff_suspend_finish_entry(void);
-void psci_register_spd_pm_hook(const spd_pm_ops_t *);
-int psci_get_suspend_stateid_by_mpidr(unsigned long);
-int psci_get_suspend_stateid(void);
-int psci_get_suspend_afflvl(void);
-uint32_t psci_get_max_phys_off_afflvl(void);
-
-uint64_t psci_smc_handler(uint32_t smc_fid,
-			  uint64_t x1,
-			  uint64_t x2,
-			  uint64_t x3,
-			  uint64_t x4,
-			  void *cookie,
-			  void *handle,
-			  uint64_t flags);
-
-/* PSCI setup function */
-int32_t psci_setup(void);
-
-
-#endif /*__ASSEMBLY__*/
-
-
-#endif /* __PSCI_H__ */
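
A small sketch (illustration only) of the CPU_SUSPEND 'power_state' packing
helpers deleted above: the state id sits in bits [15:0], the type in bit 16 and
the target affinity level in bits [25:24]; psci_validate_power_state() flags any
bits set outside those fields. The macros are restated with extra parentheses so
the sketch is self-contained; the example state id 0x22 is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define PSTATE_ID_SHIFT		0
#define PSTATE_TYPE_SHIFT	16
#define PSTATE_AFF_LVL_SHIFT	24

#define PSTATE_ID_MASK		0xffff
#define PSTATE_TYPE_MASK	0x1
#define PSTATE_AFF_LVL_MASK	0x3
#define PSTATE_VALID_MASK	0xFCFE0000

#define PSTATE_TYPE_POWERDOWN	0x1

#define psci_get_pstate_id(ps)		(((ps) >> PSTATE_ID_SHIFT) & PSTATE_ID_MASK)
#define psci_get_pstate_type(ps)	(((ps) >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK)
#define psci_get_pstate_afflvl(ps)	(((ps) >> PSTATE_AFF_LVL_SHIFT) & PSTATE_AFF_LVL_MASK)
#define psci_make_powerstate(id, type, afflvl) \
	((((id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) | \
	 (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) | \
	 (((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT))
#define psci_validate_power_state(ps)	((ps) & PSTATE_VALID_MASK)

int main(void)
{
	/* Powerdown request targeting affinity level 1, platform state id 0x22 */
	uint32_t ps = psci_make_powerstate(0x22, PSTATE_TYPE_POWERDOWN, 1);

	printf("power_state=0x%08x id=0x%x type=%u afflvl=%u invalid_bits=0x%x\n",
	       ps, psci_get_pstate_id(ps), psci_get_pstate_type(ps),
	       psci_get_pstate_afflvl(ps), psci_validate_power_state(ps));
	return 0;
}
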
diff --git a/include/bl31/services/std_svc.h b/include/bl31/services/std_svc.h
deleted file mode 100644
index cbd5b62..0000000
--- a/include/bl31/services/std_svc.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __STD_SVC_H__
-#define __STD_SVC_H__
-
-/* SMC function IDs for Standard Service queries */
-
-#define ARM_STD_SVC_CALL_COUNT		0x8400ff00
-#define ARM_STD_SVC_UID			0x8400ff01
-/*					0x8400ff02 is reserved */
-#define ARM_STD_SVC_VERSION		0x8400ff03
-
-/* ARM Standard Service Calls version numbers */
-#define STD_SVC_VERSION_MAJOR		0x0
-#define STD_SVC_VERSION_MINOR		0x1
-
-/* The macros below are used to identify PSCI calls from the SMC function ID */
-#define PSCI_FID_MASK			0xffe0u
-#define PSCI_FID_VALUE			0u
-#define is_psci_fid(_fid) \
-	(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
-
-#endif /* __STD_SVC_H__ */
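
A sketch (not part of the patch) of the is_psci_fid() filter above: a Standard
Service call is treated as PSCI when bits [15:5] of the function ID are zero,
which holds for the 0x84000000-0x8400001f range (and its SMC64 counterpart) but
not for query IDs such as ARM_STD_SVC_UID.

#include <stdint.h>
#include <stdio.h>

#define PSCI_FID_MASK		0xffe0u
#define PSCI_FID_VALUE		0u
#define is_psci_fid(_fid)	(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)

int main(void)
{
	uint32_t fids[] = { 0x84000003u,	/* PSCI_CPU_ON_AARCH32 */
			    0xc4000001u,	/* PSCI_CPU_SUSPEND_AARCH64 */
			    0x8400ff01u };	/* ARM_STD_SVC_UID */

	for (unsigned int i = 0; i < sizeof(fids) / sizeof(fids[0]); i++)
		printf("0x%08x -> %s\n", fids[i],
		       is_psci_fid(fids[i]) ? "PSCI" : "other std svc");
	return 0;
}
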
diff --git a/include/bl32/payloads/tlk.h b/include/bl32/payloads/tlk.h
new file mode 100644
index 0000000..4e06bcd
--- /dev/null
+++ b/include/bl32/payloads/tlk.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TLK_H__
+#define __TLK_H__
+
+#include <utils_def.h>
+
+/*
+ * Generate function IDs for the Trusted OS/Apps
+ */
+#define TLK_TOS_YIELD_FID(fid)	((fid) | 0x72000000 | (0 << 31))
+#define TLK_TA_YIELD_FID(fid)	((fid) | 0x70000000 | (0 << 31))
+
+/*
+ * Trusted OS specific function IDs
+ */
+#define TLK_REGISTER_LOGBUF	TLK_TOS_YIELD_FID(0x1)
+#define TLK_REGISTER_REQBUF	TLK_TOS_YIELD_FID(0x2)
+#define TLK_RESUME_FID		TLK_TOS_YIELD_FID(0x100)
+#define TLK_SYSTEM_SUSPEND	TLK_TOS_YIELD_FID(0xE001)
+#define TLK_SYSTEM_RESUME	TLK_TOS_YIELD_FID(0xE002)
+#define TLK_SYSTEM_OFF		TLK_TOS_YIELD_FID(0xE003)
+
+/*
+ * SMC function IDs that TLK uses to signal various forms of completions
+ * to the secure payload dispatcher.
+ */
+#define TLK_REQUEST_DONE	(0x32000001 | (ULL(1) << 31))
+#define TLK_PREEMPTED		(0x32000002 | (ULL(1) << 31))
+#define TLK_ENTRY_DONE		(0x32000003 | (ULL(1) << 31))
+#define TLK_VA_TRANSLATE	(0x32000004 | (ULL(1) << 31))
+#define TLK_SUSPEND_DONE	(0x32000005 | (ULL(1) << 31))
+#define TLK_RESUME_DONE		(0x32000006 | (ULL(1) << 31))
+#define TLK_SYSTEM_OFF_DONE	(0x32000007 | (ULL(1) << 31))
+
+/*
+ * Trusted Application specific function IDs
+ */
+#define TLK_OPEN_TA_SESSION	TLK_TA_YIELD_FID(0x1)
+#define TLK_CLOSE_TA_SESSION	TLK_TA_YIELD_FID(0x2)
+#define TLK_TA_LAUNCH_OP	TLK_TA_YIELD_FID(0x3)
+#define TLK_TA_SEND_EVENT	TLK_TA_YIELD_FID(0x4)
+
+/*
+ * Total number of function IDs implemented for services offered to NS clients.
+ */
+#define TLK_NUM_FID		7
+
+/* TLK implementation version numbers */
+#define TLK_VERSION_MAJOR	0x0 /* Major version */
+#define TLK_VERSION_MINOR	0x1 /* Minor version */
+
+/*
+ * Standard Trusted OS Function IDs that fall under Trusted OS call range
+ * according to SMC calling convention
+ */
+#define TOS_CALL_COUNT		0xbf00ff00 /* Number of calls implemented */
+#define TOS_UID			0xbf00ff01 /* Implementation UID */
+#define TOS_CALL_VERSION	0xbf00ff03 /* Trusted OS Call Version */
+
+#endif /* __TLK_H__ */
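
For reference, a sketch (illustration only) evaluating two of the TLK IDs added
above: TLK_TOS_YIELD_FID() leaves bit 31 clear (a yielding SMC), while the
completion codes OR in bit 31 and are therefore fast SMCs. The ULL() definition
below is an assumption matching the usual utils_def.h one.

#include <stdio.h>

#define ULL(_x)			(_x##ULL)
#define TLK_TOS_YIELD_FID(fid)	((fid) | 0x72000000 | (0 << 31))
#define TLK_REGISTER_LOGBUF	TLK_TOS_YIELD_FID(0x1)
#define TLK_REQUEST_DONE	(0x32000001 | (ULL(1) << 31))

int main(void)
{
	printf("TLK_REGISTER_LOGBUF = 0x%llx (bit31=%d)\n",
	       (unsigned long long)TLK_REGISTER_LOGBUF,
	       (int)((TLK_REGISTER_LOGBUF >> 31) & 1));
	printf("TLK_REQUEST_DONE    = 0x%llx (bit31=%d)\n",
	       (unsigned long long)TLK_REQUEST_DONE,
	       (int)((TLK_REQUEST_DONE >> 31) & 1));
	return 0;
}
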
diff --git a/include/bl32/sp_min/platform_sp_min.h b/include/bl32/sp_min/platform_sp_min.h
new file mode 100644
index 0000000..6c7e0cc
--- /dev/null
+++ b/include/bl32/sp_min/platform_sp_min.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_SP_MIN_H__
+#define __PLATFORM_SP_MIN_H__
+
+/*******************************************************************************
+ * Mandatory SP_MIN functions
+ ******************************************************************************/
+void sp_min_early_platform_setup(void *from_bl2,
+		void *plat_params_from_bl2);
+void sp_min_platform_setup(void);
+void sp_min_plat_runtime_setup(void);
+void sp_min_plat_arch_setup(void);
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
+
+/* Platforms that enable SP_MIN_WITH_SECURE_FIQ shall implement this api */
+void sp_min_plat_fiq_handler(uint32_t id);
+
+#endif /* __PLATFORM_SP_MIN_H__ */
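
A hypothetical skeleton (not part of the patch) of what a platform port of the
mandatory hooks declared above might look like. Everything other than the
sp_min_* function names is a placeholder: entry_point_info_t is stubbed as an
incomplete type and the comments only hint at what a real port would do.

/* Placeholder for the real entry_point_info_t from ep_info.h */
typedef struct entry_point_info entry_point_info_t;

static entry_point_info_t *bl33_ep_info;	/* would be filled from BL2 args */

void sp_min_early_platform_setup(void *from_bl2, void *plat_params_from_bl2)
{
	(void)from_bl2;
	(void)plat_params_from_bl2;
	/* e.g. initialise the console, stash the BL33 entry point info */
}

void sp_min_plat_arch_setup(void)
{
	/* e.g. set up the translation tables and enable the MMU */
}

void sp_min_platform_setup(void)
{
	/* e.g. initialise the GIC and the platform power controller */
}

void sp_min_plat_runtime_setup(void)
{
	/* e.g. switch to the runtime console */
}

entry_point_info_t *sp_min_plat_get_bl33_ep_info(void)
{
	return bl33_ep_info;
}
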
diff --git a/include/bl32/tsp/platform_tsp.h b/include/bl32/tsp/platform_tsp.h
index f6f7391..a5a0223 100644
--- a/include/bl32/tsp/platform_tsp.h
+++ b/include/bl32/tsp/platform_tsp.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __PLATFORM_TSP_H__
diff --git a/include/bl32/tsp/tsp.h b/include/bl32/tsp/tsp.h
index c6578b7..43bba8f 100644
--- a/include/bl32/tsp/tsp.h
+++ b/include/bl32/tsp/tsp.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __TSP_H__
@@ -41,39 +17,30 @@
 #define TSP_SUSPEND_DONE	0xf2000003
 #define TSP_RESUME_DONE		0xf2000004
 #define TSP_PREEMPTED		0xf2000005
+#define TSP_ABORT_DONE		0xf2000007
 #define TSP_SYSTEM_OFF_DONE	0xf2000008
 #define TSP_SYSTEM_RESET_DONE	0xf2000009
 
 /*
- * Function identifiers to handle FIQs through the synchronous handling model.
- * If the TSP was previously interrupted then control has to be returned to
- * the TSPD after handling the interrupt else execution can remain in the TSP.
+ * Function identifiers to handle S-EL1 interrupt through the synchronous
+ * handling model. If the TSP was previously interrupted then control has to
+ * be returned to the TSPD after handling the interrupt else execution can
+ * remain in the TSP.
  */
-#define TSP_HANDLED_S_EL1_FIQ		0xf2000006
-#define TSP_EL3_FIQ			0xf2000007
+#define TSP_HANDLED_S_EL1_INTR		0xf2000006
 
 /* SMC function ID that TSP uses to request service from secure monitor */
 #define TSP_GET_ARGS		0xf2001000
 
 /*
  * Identifiers for various TSP services. Corresponding function IDs (whether
- * fast or standard) are generated by macros defined below
+ * fast or yielding) are generated by macros defined below
  */
 #define TSP_ADD		0x2000
 #define TSP_SUB		0x2001
 #define TSP_MUL		0x2002
 #define TSP_DIV		0x2003
-#define TSP_HANDLE_FIQ_AND_RETURN	0x2004
-
-/*
- * Generate function IDs for TSP services to be used in SMC calls, by
- * appropriately setting bit 31 to differentiate standard and fast SMC calls
- */
-#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0 << 31))
-#define TSP_FAST_FID(fid)	((fid) | 0x72000000 | (1 << 31))
-
-/* SMC function ID to request a previously preempted std smc */
-#define TSP_FID_RESUME		TSP_STD_FID(0x3000)
+#define TSP_HANDLE_SEL1_INTR_AND_RETURN	0x2004
 
 /*
  * Identify a TSP service from function ID filtering the last 16 bits from the
@@ -82,10 +49,26 @@
 #define TSP_BARE_FID(fid)	((fid) & 0xffff)
 
 /*
+ * Generate function IDs for TSP services to be used in SMC calls, by
+ * appropriately setting bit 31 to differentiate yielding and fast SMC calls
+ */
+#define TSP_YIELD_FID(fid)	((TSP_BARE_FID(fid) | 0x72000000))
+#define TSP_FAST_FID(fid)	((TSP_BARE_FID(fid) | 0x72000000) | (1u << 31))
+
+/* SMC function ID to request a previously preempted yielding smc */
+#define TSP_FID_RESUME		TSP_YIELD_FID(0x3000)
+/*
+ * SMC function ID to request abortion of a previously preempted yielding SMC. A
+ * fast SMC is used so that the TSP abort handler does not have to be
+ * reentrant.
+ */
+#define TSP_FID_ABORT		TSP_FAST_FID(0x3001)
+
+/*
  * Total number of function IDs implemented for services offered to NS clients.
  * The function IDs are defined above
  */
-#define TSP_NUM_FID		0x4
+#define TSP_NUM_FID		0x5
 
 /* TSP implementation version numbers */
 #define TSP_VERSION_MAJOR	0x0 /* Major version */
@@ -109,15 +92,16 @@
 typedef uint32_t tsp_vector_isn_t;
 
 typedef struct tsp_vectors {
-	tsp_vector_isn_t std_smc_entry;
+	tsp_vector_isn_t yield_smc_entry;
 	tsp_vector_isn_t fast_smc_entry;
 	tsp_vector_isn_t cpu_on_entry;
 	tsp_vector_isn_t cpu_off_entry;
 	tsp_vector_isn_t cpu_resume_entry;
 	tsp_vector_isn_t cpu_suspend_entry;
-	tsp_vector_isn_t fiq_entry;
+	tsp_vector_isn_t sel1_intr_entry;
 	tsp_vector_isn_t system_off_entry;
 	tsp_vector_isn_t system_reset_entry;
+	tsp_vector_isn_t abort_yield_smc_entry;
 } tsp_vectors_t;
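For reference, a minimal standalone C sketch of how the reworked function ID macros earlier in this header compose SMC function IDs. The little test program is illustrative only and not part of the patch, but the printed values follow directly from the macro definitions above.

/* Illustrative only: expand the new TSP function ID macros by hand. */
#include <stdio.h>

#define TSP_BARE_FID(fid)	((fid) & 0xffff)
#define TSP_YIELD_FID(fid)	((TSP_BARE_FID(fid) | 0x72000000))
#define TSP_FAST_FID(fid)	((TSP_BARE_FID(fid) | 0x72000000) | (1u << 31))

int main(void)
{
	/* Yielding SMCs leave bit 31 clear; fast SMCs set it. */
	printf("TSP_FID_RESUME = 0x%x\n", (unsigned int)TSP_YIELD_FID(0x3000)); /* 0x72003000 */
	printf("TSP_FID_ABORT  = 0x%x\n", (unsigned int)TSP_FAST_FID(0x3001));  /* 0xf2003001 */
	return 0;
}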
 
 
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
new file mode 100644
index 0000000..f573744
--- /dev/null
+++ b/include/common/aarch32/asm_macros.S
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+#include <spinlock.h>
+
+#define WORD_SIZE	4
+
+	/*
+	 * Coprocessor register accessors
+	 */
+	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
+	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
+	.endm
+
+	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
+	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
+	.endm
+
+	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
+	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
+	.endm
+
+	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
+	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
+	.endm
+
+	/* Cache line size helpers */
+	.macro	dcache_line_size  reg, tmp
+	ldcopr	\tmp, CTR
+	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
+	mov	\reg, #WORD_SIZE
+	lsl	\reg, \reg, \tmp
+	.endm
+
+	.macro	icache_line_size  reg, tmp
+	ldcopr	\tmp, CTR
+	and	\tmp, \tmp, #CTR_IMINLINE_MASK
+	mov	\reg, #WORD_SIZE
+	lsl	\reg, \reg, \tmp
+	.endm
+
+	/*
+	 * Declare the exception vector table, enforcing it is aligned on a
+	 * 32 byte boundary.
+	 */
+	.macro vector_base  label
+	.section .vectors, "ax"
+	.align 5
+	\label:
+	.endm
+
+	/*
+	 * This macro calculates the base address of the current CPU's multi-
+	 * processor (MP) stack using the plat_my_core_pos() index, the name of
+	 * the stack storage and the size of each stack.
+	 * Out: r0 = physical address of stack base
+	 * Clobber: r14, r1, r2
+	 */
+	.macro get_my_mp_stack _name, _size
+	bl  plat_my_core_pos
+	ldr r2, =(\_name + \_size)
+	mov r1, #\_size
+	mla r0, r0, r1, r2
+	.endm
+
+	/*
+	 * This macro calculates the base address of a uniprocessor (UP) stack
+	 * using the name of the stack storage and the size of the stack
+	 * Out: r0 = physical address of stack base
+	 */
+	.macro get_up_stack _name, _size
+	ldr r0, =(\_name + \_size)
+	.endm
+
+	/*
+	 * Helper macro to generate the best mov/movw/movt combinations
+	 * according to the value to be moved.
+	 */
+	.macro mov_imm _reg, _val
+		.if ((\_val) & 0xffff0000) == 0
+			mov	\_reg, #(\_val)
+		.else
+			movw	\_reg, #((\_val) & 0xffff)
+			movt	\_reg, #((\_val) >> 16)
+		.endif
+	.endm
+
+	/*
+	 * Macro to mark instances where we're jumping to a function and don't
+	 * expect a return. To provide the function being jumped to with
+	 * additional information, we use 'bl' instruction to jump rather than
+	 * 'b'.
+         *
+	 * Debuggers infer the location of a call from where LR points to, which
+	 * is usually the instruction after 'bl'. If this macro expansion
+	 * happens to be the last location in a function, that'll cause the LR
+	 * to point to a location beyond the function, thereby misleading the
+	 * debugger back trace. We therefore insert a 'nop' after the function
+	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
+	 */
+	.macro no_ret _func:req, skip_nop=0
+	bl	\_func
+#if DEBUG
+	.ifeq \skip_nop
+	nop
+	.endif
+#endif
+	.endm
+
+	/*
+	 * Reserve space for a spin lock in assembly file.
+	 */
+	.macro define_asm_spinlock _name:req
+	.align	SPINLOCK_ASM_ALIGN
+	\_name:
+	.space	SPINLOCK_ASM_SIZE
+	.endm
+
+	/*
+	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
+	 * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
+	 * or top word of `_val` is zero, the corresponding OR operation
+	 * is skipped.
+	 */
+	.macro orr64_imm _reg_l, _reg_h, _val
+		.if (\_val >> 32)
+			orr \_reg_h, \_reg_h, #(\_val >> 32)
+		.endif
+		.if (\_val & 0xffffffff)
+			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+		.endif
+	.endm
+
+	/*
+	 * Helper macro to bitwise-clear bits in `_reg_l` and
+	 * `_reg_h` given a 64 bit immediate `_val`.  The set bits
+	 * in the bottom word of `_val` dictate which bits from
+	 * `_reg_l` should be cleared.  Similarly, the set bits in
+	 * the top word of `_val` dictate which bits from `_reg_h`
+	 * should be cleared.  If either the bottom or top word of
+	 * `_val` is zero, the corresponding BIC operation is skipped.
+	 */
+	.macro bic64_imm _reg_l, _reg_h, _val
+		.if (\_val >> 32)
+			bic \_reg_h, \_reg_h, #(\_val >> 32)
+		.endif
+		.if (\_val & 0xffffffff)
+			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+		.endif
+	.endm
+
+#endif /* __ASM_MACROS_S__ */
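To show how the new AArch32 helpers compose, a brief hedged usage sketch follows; the function name, registers and constants are invented for illustration.

	/* Illustrative AArch32 fragment using the helpers defined above. */
	#include <asm_macros.S>

	func demo_imm_helpers
	/* Value fits in 16 bits, so mov_imm expands to a single mov. */
	mov_imm	r0, 0x1000
	/* Upper half-word is non-zero, so mov_imm expands to movw + movt. */
	mov_imm	r1, 0x80001000
	/* Clear bit 32 of the 64-bit value in r3:r2 (only the BIC on r3 is emitted). */
	bic64_imm r2, r3, 0x100000000
	bx	lr
	endfunc demo_imm_helpers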
diff --git a/include/common/aarch32/assert_macros.S b/include/common/aarch32/assert_macros.S
new file mode 100644
index 0000000..c958810
--- /dev/null
+++ b/include/common/aarch32/assert_macros.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+	/*
+	 * Assembler macro to enable asm_assert. We assume that the stack is
+	 * initialized prior to invoking this macro.
+	 */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+	.pushsection .rodata.str1.1, "aS" ;\
+	.L_assert_filename: ;\
+			.string	__FILE__ ;\
+	.popsection ;\
+.endif ;\
+	b##_cc	300f ;\
+	ldr	r0, =.L_assert_filename ;\
+	ldr	r1, =__LINE__ ;\
+	b	asm_assert;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
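The typical usage pattern for this macro is a comparison followed by an assertion on the resulting condition code. Below is a hedged AArch32 sketch with an invented function name, assuming ENABLE_ASSERTIONS is defined and the stack is already set up.

	/* Illustrative fragment: assert that the pointer in r0 is not NULL. */
	#include <asm_macros.S>
	#include <assert_macros.S>

	func demo_check_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)	/* falls through to asm_assert only if r0 == 0 */
#endif
	bx	lr
	endfunc demo_check_ptr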
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
new file mode 100644
index 0000000..6fc00dd
--- /dev/null
+++ b/include/common/aarch32/el3_common_macros.S
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EL3_COMMON_MACROS_S__
+#define __EL3_COMMON_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	/*
+	 * Helper macro to initialise EL3 registers we care about.
+	 */
+	.macro el3_arch_init_common _exception_vectors
+	/* ---------------------------------------------------------------------
+	 * SCTLR has already been initialised - read current value before
+	 * modifying.
+	 *
+	 * SCTLR.I: Enable the instruction cache.
+	 *
+	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
+	 *  or store one or more registers have an alignment check that the
+	 *  address being accessed is aligned to the size of the data element(s)
+	 *  being accessed.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
+	ldcopr	r0, SCTLR
+	orr	r0, r0, r1
+	stcopr	r0, SCTLR
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors (VBAR/MVBAR).
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =\_exception_vectors
+	stcopr	r0, VBAR
+	stcopr	r0, MVBAR
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise SCR, setting all fields rather than relying on the hw.
+	 *
+	 * SCR.SIF: Enabled so that Secure state instruction fetches from
+	 *  Non-secure memory are not permitted.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
+	stcopr	r0, SCR
+
+	/* -----------------------------------------------------
+	 * Enable Asynchronous data abort exceptions now that
+	 * the exception vectors have been set up.
+	 * -----------------------------------------------------
+	 */
+	cpsie   a
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise NSACR, setting all the fields, except for the
+	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
+	 * fields are architecturally UNKNOWN on reset.
+	 *
+	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
+	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
+	 *  field is set to allow access to Advanced SIMD and floating point
+	 *  features from both Security states.
+	 * ---------------------------------------------------------------------
+	 */
+	ldcopr	r0, NSACR
+	and	r0, r0, #NSACR_IMP_DEF_MASK
+	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
+	stcopr	r0, NSACR
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise CPACR, setting all fields rather than relying on hw. Some
+	 * fields are architecturally UNKNOWN on reset.
+	 *
+	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
+	 *  to trace registers. Set to zero to allow access.
+	 *
+	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
+	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
+	 *  field is set to allow full access from PL0 and PL1 to floating-point
+	 *  and Advanced SIMD features.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
+	stcopr	r0, CPACR
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
+	 * fields are architecturally UNKNOWN on reset and are set to zero
+	 * except for field(s) listed below.
+	 *
+	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
+	 *  from all exception levels.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
+	vmsr	FPEXC, r0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise SDCR, setting all the fields rather than relying on hw.
+	 *
+	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
+	 * Secure EL1 are disabled.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
+	stcopr	r0, SDCR
+
+	.endm
+
+/* -----------------------------------------------------------------------------
+ * This is the super set of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows enabling or
+ * disabling some of these actions.
+ *
+ *  _init_sctlr:
+ *	Whether the macro needs to initialise the SCTLR register including
+ *	configuring the endianness of data accesses.
+ *
+ *  _warm_boot_mailbox:
+ *	Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ *  _secondary_cold_boot:
+ *	Whether the macro needs to identify the CPU that is calling it: primary
+ *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *	the platform initialisations, while the secondaries will be put in a
+ *	platform-specific state in the meantime.
+ *
+ *	If the caller knows this macro will only be called by the primary CPU
+ *	then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *	Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *	Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR_EL3 register.
+ * -----------------------------------------------------------------------------
+ */
+	.macro el3_entrypoint_common					\
+		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_memory, _init_c_runtime, _exception_vectors
+
+	/* Make sure we are in Secure Mode */
+#if ENABLE_ASSERTIONS
+	ldcopr	r0, SCR
+	tst	r0, #SCR_NS_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	.if \_init_sctlr
+		/* -------------------------------------------------------------
+		 * This is the initialisation of SCTLR and so must ensure that
+		 * all fields are explicitly set rather than relying on hw. Some
+		 * fields reset to an IMPLEMENTATION DEFINED value.
+		 *
+		 * SCTLR.TE: Set to zero so that exceptions to an Exception
+		 *  Level executing at PL1 are taken to A32 state.
+		 *
+		 * SCTLR.EE: Set the CPU endianness before doing anything that
+		 *  might involve memory reads or writes. Set to zero to select
+		 *  Little Endian.
+		 *
+		 * SCTLR.V: Set to zero to select the normal exception vectors
+		 *  with base address held in VBAR.
+		 * -------------------------------------------------------------
+		 */
+		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | SCTLR_V_BIT))
+		stcopr	r0, SCTLR
+		isb
+	.endif /* _init_sctlr */
+
+	/* Switch to monitor mode */
+	cps	#MODE32_mon
+	isb
+
+	.if \_warm_boot_mailbox
+		/* -------------------------------------------------------------
+		 * This code will be executed for both warm and cold resets.
+		 * Now is the time to distinguish between the two.
+		 * Query the platform entrypoint address; if it is not zero
+		 * then this is a warm boot, so jump to that address.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_get_my_entrypoint
+		cmp	r0, #0
+		bxne	r0
+	.endif /* _warm_boot_mailbox */
+
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor specific actions upon reset e.g. cache, TLB
+	 * invalidations etc.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	reset_handler
+
+	el3_arch_init_common \_exception_vectors
+
+	.if \_secondary_cold_boot
+		/* -------------------------------------------------------------
+		 * Check if this is a primary or secondary CPU cold boot.
+		 * The primary CPU will set up the platform while the
+		 * secondaries are placed in a platform-specific state until the
+		 * primary CPU performs the necessary actions to bring them out
+		 * of that state and allows entry into the OS.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_is_my_cpu_primary
+		cmp	r0, #0
+		bne	do_primary_cold_boot
+
+		/* This is a cold boot on a secondary CPU */
+		bl	plat_secondary_cold_boot_setup
+		/* plat_secondary_cold_boot_setup() is not supposed to return */
+		no_ret	plat_panic_handler
+
+	do_primary_cold_boot:
+	.endif /* _secondary_cold_boot */
+
+	/* ---------------------------------------------------------------------
+	 * Initialize memory now. Secondary CPU initialization won't get to this
+	 * point.
+	 * ---------------------------------------------------------------------
+	 */
+
+	.if \_init_memory
+		bl	platform_mem_init
+	.endif /* _init_memory */
+
+	/* ---------------------------------------------------------------------
+	 * Init C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
+	 *       - the .bss section;
+	 *       - the coherent memory section (if any).
+	 *   - Relocate the data section from ROM to RAM, if required.
+	 * ---------------------------------------------------------------------
+	 */
+	.if \_init_c_runtime
+#ifdef IMAGE_BL32
+		/* -----------------------------------------------------------------
+		 * Invalidate the RW memory used by the BL32 (SP_MIN) image. This
+		 * includes the data and NOBITS sections. This is done to
+		 * safeguard against possible corruption of this memory by
+		 * dirty cache lines in a system cache as a result of use by
+		 * an earlier boot loader stage.
+		 * -----------------------------------------------------------------
+		 */
+		ldr	r0, =__RW_START__
+		ldr	r1, =__RW_END__
+		sub	r1, r1, r0
+		bl	inv_dcache_range
+#endif /* IMAGE_BL32 */
+
+		ldr	r0, =__BSS_START__
+		ldr	r1, =__BSS_SIZE__
+		bl	zeromem
+
+#if USE_COHERENT_MEM
+		ldr	r0, =__COHERENT_RAM_START__
+		ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+		bl	zeromem
+#endif
+
+#ifdef IMAGE_BL1
+		/* -----------------------------------------------------
+		 * Copy data from ROM to RAM.
+		 * -----------------------------------------------------
+		 */
+		ldr	r0, =__DATA_RAM_START__
+		ldr	r1, =__DATA_ROM_START__
+		ldr	r2, =__DATA_SIZE__
+		bl	memcpy4
+#endif
+	.endif /* _init_c_runtime */
+
+	/* ---------------------------------------------------------------------
+	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+	 * the MMU is enabled. There is no risk of reading stale stack memory
+	 * after enabling the MMU as only the primary CPU is running at the
+	 * moment.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+	.if \_init_c_runtime
+	bl	update_stack_protector_canary
+	.endif /* _init_c_runtime */
+#endif
+	.endm
+
+#endif /* __EL3_COMMON_MACROS_S__ */
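For orientation, a hedged sketch of how an AArch32 EL3 image entrypoint might invoke this macro. The label names and parameter values below are assumptions chosen for illustration, not a prescription of how BL1 or SP_MIN actually configure it.

	/* Illustrative entrypoint built on el3_entrypoint_common. */
	#include <el3_common_macros.S>

	func demo_el3_entrypoint
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=1				\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=demo_vector_table
	/* Platform and runtime service setup would follow here. */
	b	demo_main
	endfunc demo_el3_entrypoint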
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
new file mode 100644
index 0000000..6d6989c
--- /dev/null
+++ b/include/common/aarch64/asm_macros.S
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+#include <spinlock.h>
+
+
+	.macro	func_prologue
+	stp	x29, x30, [sp, #-0x10]!
+	mov	x29,sp
+	.endm
+
+	.macro	func_epilogue
+	ldp	x29, x30, [sp], #0x10
+	.endm
+
+
+	.macro	dcache_line_size  reg, tmp
+	mrs	\tmp, ctr_el0
+	ubfx	\tmp, \tmp, #16, #4
+	mov	\reg, #4
+	lsl	\reg, \reg, \tmp
+	.endm
+
+
+	.macro	icache_line_size  reg, tmp
+	mrs	\tmp, ctr_el0
+	and	\tmp, \tmp, #0xf
+	mov	\reg, #4
+	lsl	\reg, \reg, \tmp
+	.endm
+
+
+	.macro	smc_check  label
+	mrs	x0, esr_el3
+	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	cmp	x0, #EC_AARCH64_SMC
+	b.ne	$label
+	.endm
+
+	/*
+	 * Declare the exception vector table, enforcing it is aligned on a
+	 * 2KB boundary, as required by the ARMv8 architecture.
+	 * Use zero bytes as the fill value stored in the padding bytes so that
+	 * the padding decodes as illegal AArch64 instructions. This increases
+	 * security, robustness and potentially facilitates debugging.
+	 */
+	.macro vector_base  label
+	.section .vectors, "ax"
+	.align 11, 0
+	\label:
+	.endm
+
+	/*
+	 * Create an entry in the exception vector table, enforcing it is
+	 * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
+	 * Use zero bytes as the fill value stored in the padding bytes so that
+	 * the padding decodes as illegal AArch64 instructions. This increases
+	 * security, robustness and potentially facilitates debugging.
+	 */
+	.macro vector_entry  label
+	.cfi_sections .debug_frame
+	.section .vectors, "ax"
+	.align 7, 0
+	.type \label, %function
+	.func \label
+	.cfi_startproc
+	\label:
+	.endm
+
+	/*
+	 * This macro verifies that the given vector doesn't exceed the
+	 * architectural limit of 32 instructions. This is meant to be placed
+	 * immediately after the last instruction in the vector. It takes the
+	 * vector entry as the parameter
+	 */
+	.macro check_vector_size since
+	  .endfunc
+	  .cfi_endproc
+	  .if (. - \since) > (32 * 4)
+	    .error "Vector exceeds 32 instructions"
+	  .endif
+	.endm
+
+#if ENABLE_PLAT_COMPAT
+	/*
+	 * This macro calculates the base address of an MP stack using the
+	 * platform_get_core_pos() index, the name of the stack storage and
+	 * the size of each stack
+	 * In: X0 = MPIDR of CPU whose stack is wanted
+	 * Out: X0 = physical address of stack base
+	 * Clobber: X30, X1, X2
+	 */
+	.macro get_mp_stack _name, _size
+	bl  platform_get_core_pos
+	ldr x2, =(\_name + \_size)
+	mov x1, #\_size
+	madd x0, x0, x1, x2
+	.endm
+#endif
+
+	/*
+	 * This macro calculates the base address of the current CPU's MP stack
+	 * using the plat_my_core_pos() index, the name of the stack storage
+	 * and the size of each stack
+	 * Out: X0 = physical address of stack base
+	 * Clobber: X30, X1, X2
+	 */
+	.macro get_my_mp_stack _name, _size
+	bl  plat_my_core_pos
+	ldr x2, =(\_name + \_size)
+	mov x1, #\_size
+	madd x0, x0, x1, x2
+	.endm
+
+	/*
+	 * This macro calculates the base address of a UP stack using the
+	 * name of the stack storage and the size of the stack
+	 * Out: X0 = physical address of stack base
+	 */
+	.macro get_up_stack _name, _size
+	ldr x0, =(\_name + \_size)
+	.endm
+
+	/*
+	 * Helper macro to generate the best mov/movk combinations according
+	 * the value to be moved. The 16 bits from '_shift' are tested and
+	 * if not zero, they are moved into '_reg' without affecting
+	 * other bits.
+	 */
+	.macro _mov_imm16 _reg, _val, _shift
+		.if (\_val >> \_shift) & 0xffff
+			.if (\_val & (1 << \_shift - 1))
+				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+			.else
+				mov	\_reg, \_val & (0xffff << \_shift)
+			.endif
+		.endif
+	.endm
+
+	/*
+	 * Helper macro to load arbitrary values into 32 or 64-bit registers
+	 * which generates the best mov/movk combinations. Many base addresses
+	 * are 64KB aligned, in which case the macro will skip updating
+	 * bits 15:0.
+	 */
+	.macro mov_imm _reg, _val
+		.if (\_val) == 0
+			mov	\_reg, #0
+		.else
+			_mov_imm16	\_reg, (\_val), 0
+			_mov_imm16	\_reg, (\_val), 16
+			_mov_imm16	\_reg, (\_val), 32
+			_mov_imm16	\_reg, (\_val), 48
+		.endif
+	.endm
+
+	/*
+	 * Macro to mark instances where we're jumping to a function and don't
+	 * expect a return. To provide the function being jumped to with
+	 * additional information, we use 'bl' instruction to jump rather than
+	 * 'b'.
+         *
+	 * Debuggers infer the location of a call from where LR points to, which
+	 * is usually the instruction after 'bl'. If this macro expansion
+	 * happens to be the last location in a function, that'll cause the LR
+	 * to point to a location beyond the function, thereby misleading the
+	 * debugger back trace. We therefore insert a 'nop' after the function
+	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
+	 */
+	.macro no_ret _func:req, skip_nop=0
+	bl	\_func
+#if DEBUG
+	.ifeq \skip_nop
+	nop
+	.endif
+#endif
+	.endm
+
+	/*
+	 * Reserve space for a spin lock in assembly file.
+	 */
+	.macro define_asm_spinlock _name:req
+	.align	SPINLOCK_ASM_ALIGN
+	\_name:
+	.space	SPINLOCK_ASM_SIZE
+	.endm
+
+#endif /* __ASM_MACROS_S__ */
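The vector_base / vector_entry / check_vector_size trio above is normally used together; here is a hedged AArch64 sketch with invented labels.

	/* Illustrative vector table skeleton using the macros above. */
	#include <asm_macros.S>

	vector_base demo_vectors

	vector_entry demo_sync_sp_el0
	/* Each entry may hold at most 32 instructions; park on a self-loop. */
	b	.
	check_vector_size demo_sync_sp_el0

	vector_entry demo_irq_sp_el0
	b	.
	check_vector_size demo_irq_sp_el0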
diff --git a/include/common/aarch64/assert_macros.S b/include/common/aarch64/assert_macros.S
new file mode 100644
index 0000000..4567c69
--- /dev/null
+++ b/include/common/aarch64/assert_macros.S
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+	/*
+	 * Assembler macro to enable asm_assert. Use this macro wherever
+	 * assert is required in assembly. Please note that the macro makes
+	 * use of label '300' to provide the logic and the caller
+	 * should make sure that this label is not used to branch prior
+	 * to calling this macro.
+	 */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+	.pushsection .rodata.str1.1, "aS" ;\
+	.L_assert_filename: ;\
+			.string	__FILE__ ;\
+	.popsection ;\
+.endif ;\
+	b._cc	300f ;\
+	adr	x0, .L_assert_filename ;\
+	mov	x1, __LINE__ ;\
+	b	asm_assert ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
new file mode 100644
index 0000000..34fdaee
--- /dev/null
+++ b/include/common/aarch64/el3_common_macros.S
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EL3_COMMON_MACROS_S__
+#define __EL3_COMMON_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	/*
+	 * Helper macro to initialise EL3 registers we care about.
+	 */
+	.macro el3_arch_init_common _exception_vectors
+	/* ---------------------------------------------------------------------
+	 * SCTLR_EL3 has already been initialised - read current value before
+	 * modifying.
+	 *
+	 * SCTLR_EL3.I: Enable the instruction cache.
+	 *
+	 * SCTLR_EL3.SA: Enable Stack Alignment check. An SP alignment fault
+	 *  exception is generated if a load or store instruction executed at
+	 *  EL3 uses the SP as the base address and the SP is not aligned to a
+	 *  16-byte boundary.
+	 *
+	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
+	 *  load or store one or more registers have an alignment check that the
+	 *  address being accessed is aligned to the size of the data element(s)
+	 *  being accessed.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el3
+	orr	x0, x0, x1
+	msr	sctlr_el3, x0
+	isb
+
+#ifdef IMAGE_BL31
+	/* ---------------------------------------------------------------------
+	 * Initialise the per-cpu cache pointer to the CPU.
+	 * This is done early to enable crash reporting to have access to crash
+	 * stack. Since crash reporting depends on cpu_data to report the
+	 * unhandled exception, not doing so can lead to recursive exceptions
+	 * due to a NULL TPIDR_EL3.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	init_cpu_data_ptr
+#endif /* IMAGE_BL31 */
+
+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el3, x0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
+	 * All fields are architecturally UNKNOWN on reset. The following fields
+	 * do not change during the TF lifetime. The remaining fields are set to
+	 * zero here but are updated ahead of transitioning to a lower EL in the
+	 * function cm_init_context_common().
+	 *
+	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL3.
+	 *
+	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
+	 *  EL2, EL1 and EL0 are not trapped to EL3.
+	 *
+	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
+	 *  Non-secure memory.
+	 *
+	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
+	 *  both Security states and both Execution states.
+	 *
+	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
+	 *  to EL3 when executing at any EL.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, #((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
+			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
+	msr	scr_el3, x0
+
+	/* ---------------------------------------------------------------------
+	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+	 * Some fields are architecturally UNKNOWN on reset.
+	 *
+	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
+	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
+	 *  disabled from all ELs in Secure state.
+	 *
+	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
+	 *  privileged debug from S-EL1.
+	 *
+	 * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in non-secure state and
+	 * disabled in secure state. Accesses to SPE registers at S-EL1 generate
+	 * trap exceptions to EL3.
+	 *
+	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
+	 *  accesses to the powerdown debug registers do not trap to EL3.
+	 *
+	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+	 *  debug registers, other than those registers that are controlled by
+	 *  MDCR_EL3.TDOSA.
+	 *
+	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
+	 *  accesses to all Performance Monitors registers do not trap to EL3.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
+			& ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+	/* Detect if SPE is implemented */
+	mrs	x1, id_aa64dfr0_el1
+	ubfx	x1, x1, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
+	cmp	x1, #0x1
+	b.ne	1f
+
+	/* Enable SPE for use by normal world */
+	orr	x0, x0, #MDCR_NSPB(MDCR_NSPB_EL1)
+1:
+#endif
+
+	msr	mdcr_el3, x0
+
+	/* ---------------------------------------------------------------------
+	 * Enable External Aborts and SError Interrupts now that the exception
+	 * vectors have been set up.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/* ---------------------------------------------------------------------
+	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
+	 * All fields are architecturally UNKNOWN on reset.
+	 *
+	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
+	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
+	 *
+	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
+	 *  trace registers do not trap to EL3.
+	 *
+	 * CPTR_EL3.TFP: Set to zero so that accesses to Advanced SIMD and
+	 *  floating-point functionality do not trap to EL3.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+	msr	cptr_el3, x0
+	.endm
+
+/* -----------------------------------------------------------------------------
+ * This is the super set of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows enabling or
+ * disabling some of these actions.
+ *
+ *  _init_sctlr:
+ *	Whether the macro needs to initialise SCTLR_EL3, including configuring
+ *      the endianness of data accesses.
+ *
+ *  _warm_boot_mailbox:
+ *	Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ *  _secondary_cold_boot:
+ *	Whether the macro needs to identify the CPU that is calling it: primary
+ *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *	the platform initialisations, while the secondaries will be put in a
+ *	platform-specific state in the meantime.
+ *
+ *	If the caller knows this macro will only be called by the primary CPU
+ *	then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *	Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *	Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR_EL3 register.
+ * -----------------------------------------------------------------------------
+ */
+	.macro el3_entrypoint_common					\
+		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_memory, _init_c_runtime, _exception_vectors
+
+	.if \_init_sctlr
+		/* -------------------------------------------------------------
+		 * This is the initialisation of SCTLR_EL3 and so must ensure
+		 * that all fields are explicitly set rather than relying on hw.
+		 * Some fields reset to an IMPLEMENTATION DEFINED value and
+		 * others are architecturally UNKNOWN on reset.
+		 *
+		 * SCTLR.EE: Set the CPU endianness before doing anything that
+		 *  might involve memory reads or writes. Set to zero to select
+		 *  Little Endian.
+		 *
+		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
+		 *  force all memory regions that are writeable to be treated as
+		 *  XN (Execute-never). Set to zero so that this control has no
+		 *  effect on memory access permissions.
+		 *
+		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
+		 *
+		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
+		 * -------------------------------------------------------------
+		 */
+		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+				| SCTLR_SA_BIT | SCTLR_A_BIT))
+		msr	sctlr_el3, x0
+		isb
+	.endif /* _init_sctlr */
+
+	.if \_warm_boot_mailbox
+		/* -------------------------------------------------------------
+		 * This code will be executed for both warm and cold resets.
+		 * Now is the time to distinguish between the two.
+		 * Query the platform entrypoint address; if it is not zero
+		 * then this is a warm boot, so jump to that address.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_get_my_entrypoint
+		cbz	x0, do_cold_boot
+		br	x0
+
+	do_cold_boot:
+	.endif /* _warm_boot_mailbox */
+
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor specific actions upon reset e.g. cache, TLB
+	 * invalidations etc.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	reset_handler
+
+	el3_arch_init_common \_exception_vectors
+
+	.if \_secondary_cold_boot
+		/* -------------------------------------------------------------
+		 * Check if this is a primary or secondary CPU cold boot.
+		 * The primary CPU will set up the platform while the
+		 * secondaries are placed in a platform-specific state until the
+		 * primary CPU performs the necessary actions to bring them out
+		 * of that state and allows entry into the OS.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_is_my_cpu_primary
+		cbnz	w0, do_primary_cold_boot
+
+		/* This is a cold boot on a secondary CPU */
+		bl	plat_secondary_cold_boot_setup
+		/* plat_secondary_cold_boot_setup() is not supposed to return */
+		bl	el3_panic
+
+	do_primary_cold_boot:
+	.endif /* _secondary_cold_boot */
+
+	/* ---------------------------------------------------------------------
+	 * Initialize memory now. Secondary CPU initialization won't get to this
+	 * point.
+	 * ---------------------------------------------------------------------
+	 */
+
+	.if \_init_memory
+		bl	platform_mem_init
+	.endif /* _init_memory */
+
+	/* ---------------------------------------------------------------------
+	 * Init C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
+	 *       - the .bss section;
+	 *       - the coherent memory section (if any).
+	 *   - Relocate the data section from ROM to RAM, if required.
+	 * ---------------------------------------------------------------------
+	 */
+	.if \_init_c_runtime
+#ifdef IMAGE_BL31
+		/* -------------------------------------------------------------
+		 * Invalidate the RW memory used by the BL31 image. This
+		 * includes the data and NOBITS sections. This is done to
+		 * safeguard against possible corruption of this memory by
+		 * dirty cache lines in a system cache as a result of use by
+		 * an earlier boot loader stage.
+		 * -------------------------------------------------------------
+		 */
+		adr	x0, __RW_START__
+		adr	x1, __RW_END__
+		sub	x1, x1, x0
+		bl	inv_dcache_range
+#endif /* IMAGE_BL31 */
+
+		ldr	x0, =__BSS_START__
+		ldr	x1, =__BSS_SIZE__
+		bl	zeromem
+
+#if USE_COHERENT_MEM
+		ldr	x0, =__COHERENT_RAM_START__
+		ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+		bl	zeromem
+#endif
+
+#ifdef IMAGE_BL1
+		ldr	x0, =__DATA_RAM_START__
+		ldr	x1, =__DATA_ROM_START__
+		ldr	x2, =__DATA_SIZE__
+		bl	memcpy16
+#endif
+	.endif /* _init_c_runtime */
+
+	/* ---------------------------------------------------------------------
+	 * Use SP_EL0 for the C runtime stack.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	spsel, #0
+
+	/* ---------------------------------------------------------------------
+	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+	 * the MMU is enabled. There is no risk of reading stale stack memory
+	 * after enabling the MMU as only the primary CPU is running at the
+	 * moment.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+	.if \_init_c_runtime
+	bl	update_stack_protector_canary
+	.endif /* _init_c_runtime */
+#endif
+	.endm
+
+#endif /* __EL3_COMMON_MACROS_S__ */
diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S
deleted file mode 100644
index 238fa82..0000000
--- a/include/common/asm_macros.S
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-
-
-	.macro	func_prologue
-	stp	x29, x30, [sp, #-0x10]!
-	mov	x29,sp
-	.endm
-
-	.macro	func_epilogue
-	ldp	x29, x30, [sp], #0x10
-	.endm
-
-
-	.macro	dcache_line_size  reg, tmp
-	mrs	\tmp, ctr_el0
-	ubfx	\tmp, \tmp, #16, #4
-	mov	\reg, #4
-	lsl	\reg, \reg, \tmp
-	.endm
-
-
-	.macro	icache_line_size  reg, tmp
-	mrs	\tmp, ctr_el0
-	and	\tmp, \tmp, #0xf
-	mov	\reg, #4
-	lsl	\reg, \reg, \tmp
-	.endm
-
-
-	.macro	smc_check  label
-	mrs	x0, esr_el3
-	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
-	cmp	x0, #EC_AARCH64_SMC
-	b.ne	$label
-	.endm
-
-
-	/*
-	 * This macro verifies that the a given vector doesn't exceed the
-	 * architectural limit of 32 instructions. This is meant to be placed
-	 * immedately after the last instruction in the vector. It takes the
-	 * vector entry as the parameter
-	 */
-	.macro check_vector_size since
-	  .if (. - \since) > (32 * 4)
-	    .error "Vector exceeds 32 instructions"
-	  .endif
-	.endm
-
-	/*
-	 * This macro is used to create a function label and place the
-	 * code into a separate text section based on the function name
-	 * to enable elimination of unused code during linking
-	 */
-	.macro func _name
-	.section .text.\_name, "ax"
-	.type \_name, %function
-	\_name:
-	.endm
-
-	/* ---------------------------------------------
-	 * Find the type of reset and jump to handler
-	 * if present. If the handler is null then it is
-	 * a cold boot. The primary cpu will set up the
-	 * platform while the secondaries wait for
-	 * their turn to be woken up
-	 * ---------------------------------------------
-	 */
-	.macro wait_for_entrypoint
-wait_for_entrypoint:
-	mrs	x0, mpidr_el1
-	bl	platform_get_entrypoint
-	cbnz	x0, do_warm_boot
-	mrs	x0, mpidr_el1
-	bl	platform_is_primary_cpu
-	cbnz	x0, do_cold_boot
-
-	/* ---------------------------------------------
-	 * Perform any platform specific secondary cpu
-	 * actions
-	 * ---------------------------------------------
-	 */
-	bl	plat_secondary_cold_boot_setup
-	b	wait_for_entrypoint
-
-	do_warm_boot:
-	/* ---------------------------------------------
-	 * Jump to BL31 for all warm boot init.
-	 * ---------------------------------------------
-	 */
-	blr	x0
-
-	do_cold_boot:
-	.endm
-
-	/*
-	 * This macro declares an array of 1 or more stacks, properly
-	 * aligned and in the requested section
-	 */
-#define STACK_ALIGN	6
-
-	.macro declare_stack _name, _section, _size, _count
-	.if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
-	  .error "Stack size not correctly aligned"
-	.endif
-	.section    \_section, "aw", %nobits
-	.align STACK_ALIGN
-	\_name:
-	.space ((\_count) * (\_size)), 0
-	.endm
-
-	/*
-	 * This macro calculates the base address of an MP stack using the
-	 * platform_get_core_pos() index, the name of the stack storage and
-	 * the size of each stack
-	 * In: X0 = MPIDR of CPU whose stack is wanted
-	 * Out: X0 = physical address of stack base
-	 * Clobber: X30, X1, X2
-	 */
-	.macro get_mp_stack _name, _size
-	bl  platform_get_core_pos
-	ldr x2, =(\_name + \_size)
-	mov x1, #\_size
-	madd x0, x0, x1, x2
-	.endm
-
-	/*
-	 * This macro calculates the base address of a UP stack using the
-	 * name of the stack storage and the size of the stack
-	 * Out: X0 = physical address of stack base
-	 */
-	.macro get_up_stack _name, _size
-	ldr x0, =(\_name + \_size)
-	.endm
-
-	/*
-	 * Helper macro to generate the best mov/movk combinations according
-	 * the value to be moved. The 16 bits from '_shift' are tested and
-	 * if not zero, they are moved into '_reg' without affecting
-	 * other bits.
-	 */
-	.macro _mov_imm16 _reg, _val, _shift
-		.if (\_val >> \_shift) & 0xffff
-			.if (\_val & (1 << \_shift - 1))
-				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
-			.else
-				mov	\_reg, \_val & (0xffff << \_shift)
-			.endif
-		.endif
-	.endm
-
-	/*
-	 * Helper macro to load arbitrary values into 32 or 64-bit registers
-	 * which generates the best mov/movk combinations. Many base addresses
-	 * are 64KB aligned the macro will eliminate updating bits 15:0 in
-	 * that case
-	 */
-	.macro mov_imm _reg, _val
-		.if (\_val) == 0
-			mov	\_reg, #0
-		.else
-			_mov_imm16	\_reg, (\_val), 0
-			_mov_imm16	\_reg, (\_val), 16
-			_mov_imm16	\_reg, (\_val), 32
-			_mov_imm16	\_reg, (\_val), 48
-		.endif
-	.endm
diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S
new file mode 100644
index 0000000..6a02e18
--- /dev/null
+++ b/include/common/asm_macros_common.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ASM_MACROS_COMMON_S__
+#define __ASM_MACROS_COMMON_S__
+
+	/*
+	 * This macro is used to create a function label and place the
+	 * code into a separate text section based on the function name
+	 * to enable elimination of unused code during linking. It also adds
+	 * basic debug information to enable call stack printing most of the
+	 * time. The optional _align parameter can be used to force a
+	 * non-standard alignment (indicated in powers of 2). The default is
+	 * _align=2 because both AArch32 and AArch64 instructions must be
+	 * word aligned. Do *not* try to use a raw .align directive. Since func
+	 * switches to a new section, this would not have the desired effect.
+	 */
+	.macro func _name, _align=2
+	/*
+	 * Add Call Frame Information entry in the .debug_frame section for
+	 * debugger consumption. This enables callstack printing in debuggers.
+	 * This does not use any space in the final loaded binary, only in the
+	 * ELF file.
+	 * Note that a function manipulating the CFA pointer location (i.e. the
+	 * x29 frame pointer on AArch64) should declare it using the
+	 * appropriate .cfi* directives, or be prepared to have a degraded
+	 * debugging experience.
+	 */
+	.cfi_sections .debug_frame
+	.section .text.\_name, "ax"
+	.type \_name, %function
+	.func \_name
+	/*
+	 * .cfi_startproc and .cfi_endproc are needed to output entries in
+	 * .debug_frame
+	 */
+	.cfi_startproc
+	.align \_align
+	\_name:
+	.endm
+
+	/*
+	 * This macro is used to mark the end of a function.
+	 */
+	.macro endfunc _name
+	.endfunc
+	.cfi_endproc
+	.size \_name, . - \_name
+	.endm
+
+	/*
+	 * These macros are used to create function labels for deprecated
+	 * APIs. If ERROR_DEPRECATED is non-zero, the callers of these APIs
+	 * will fail to link, causing a build failure.
+	 */
+#if ERROR_DEPRECATED
+	.macro func_deprecated _name
+	func deprecated\_name
+	.endm
+
+	.macro endfunc_deprecated _name
+	endfunc deprecated\_name
+	.endm
+#else
+	.macro func_deprecated _name
+	func \_name
+	.endm
+
+	.macro endfunc_deprecated _name
+	endfunc \_name
+	.endm
+#endif
+
+	/*
+	 * Helper assembler macro to count trailing zeros. The output is
+	 * populated in the `TZ_COUNT` symbol.
+	 */
+	.macro count_tz _value, _tz_count
+	.if \_value
+	  count_tz "(\_value >> 1)", "(\_tz_count + 1)"
+	.else
+	  .equ TZ_COUNT, (\_tz_count - 1)
+	.endif
+	.endm
+
+	/*
+	 * This macro declares an array of 1 or more stacks, properly
+	 * aligned and in the requested section
+	 */
+#define DEFAULT_STACK_ALIGN	(1 << 6)   /* In case the caller doesn't provide alignment */
+
+	.macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
+	count_tz \_align, 0
+	.if (\_align - (1 << TZ_COUNT))
+	  .error "Incorrect stack alignment specified (Must be a power of 2)."
+	.endif
+	.if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
+	  .error "Stack size not correctly aligned"
+	.endif
+	.section    \_section, "aw", %nobits
+	.align TZ_COUNT
+	\_name:
+	.space ((\_count) * (\_size)), 0
+	.endm
+
+
+#endif /* __ASM_MACROS_COMMON_S__ */
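A short, hedged sketch of how the common macros fit together; the symbol names, section name and sizes are invented for illustration.

	/* Illustrative AArch64 module using the common macros above. */
	#include <asm_macros_common.S>

	/* One 512-byte stack, using the default 64-byte alignment. */
	declare_stack demo_stack, tzfw_normal_stacks, 0x200, 1

	/* Function placed in its own .text.demo_leaf section for linker GC. */
	func demo_leaf
	mov	x0, #0
	ret
	endfunc demo_leaf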
diff --git a/include/common/assert_macros.S b/include/common/assert_macros.S
deleted file mode 100644
index 807972f..0000000
--- a/include/common/assert_macros.S
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-	/*
-	 * Assembler macro to enable asm_assert. Use this macro wherever
-	 * assert is required in assembly. Please note that the macro makes
-	 * use of label '300' to provide the logic and the caller
-	 * should make sure that this label is not used to branch prior
-	 * to calling this macro.
-	 */
-#define ASM_ASSERT(_cc) \
-.ifndef .L_assert_filename ;\
-	.pushsection .rodata.str1.1, "aS" ;\
-	.L_assert_filename: ;\
-			.string	__FILE__ ;\
-	.popsection ;\
-.endif ;\
-	b._cc	300f ;\
-	adr	x0, .L_assert_filename ;\
-	mov	x1, __LINE__ ;\
-	b	asm_assert ;\
-300:
diff --git a/include/common/auth.h b/include/common/auth.h
deleted file mode 100644
index 3c3a6bd..0000000
--- a/include/common/auth.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef AUTH_H_
-#define AUTH_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-/*
- * Authentication infrastructure for Trusted Boot
- *
- * This infrastructure provides an API to access the authentication module. This
- * module will implement the required operations for Trusted Boot by creating an
- * instance of the structure 'auth_mod_t'. This instance must be called
- * 'auth_mod' and must provide the functions to initialize the module and
- * verify the authenticity of the images.
- */
-
-/* Objects (images and certificates) involved in the TBB process */
-enum {
-	AUTH_BL2_IMG_CERT,
-	AUTH_BL2_IMG,
-	AUTH_TRUSTED_KEY_CERT,
-	AUTH_BL30_KEY_CERT,
-	AUTH_BL30_IMG_CERT,
-	AUTH_BL30_IMG,
-	AUTH_BL31_KEY_CERT,
-	AUTH_BL31_IMG_CERT,
-	AUTH_BL31_IMG,
-	AUTH_BL32_KEY_CERT,
-	AUTH_BL32_IMG_CERT,
-	AUTH_BL32_IMG,
-	AUTH_BL33_KEY_CERT,
-	AUTH_BL33_IMG_CERT,
-	AUTH_BL33_IMG,
-	AUTH_NUM_OBJ
-};
-
-/* Authentication module structure */
-typedef struct auth_mod_s {
-	/* [mandatory] Module name. Printed to the log during initialization */
-	const char *name;
-
-	/* [mandatory] Initialize the authentication module */
-	int (*init)(void);
-
-	/* [mandatory] This function will be called to authenticate a new
-	 * object loaded into memory. The obj_id corresponds to one of the
-	 * values in the enumeration above */
-	int (*verify)(unsigned int obj_id, uintptr_t obj_buf, size_t len);
-} auth_mod_t;
-
-/* This variable must be instantiated by the authentication module */
-extern const auth_mod_t auth_mod;
-
-/* Public functions */
-void auth_init(void);
-int auth_verify_obj(unsigned int obj_id, uintptr_t obj_buf, size_t len);
-
-#endif /* AUTH_H_ */
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index 0959c89..15ffc57 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -1,39 +1,14 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __BL_COMMON_H__
 #define __BL_COMMON_H__
 
-#define SECURE		0x0
-#define NON_SECURE	0x1
-#define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE))
+#include <ep_info.h>
+#include <param_header.h>
 
 #define UP	1
 #define DOWN	0
@@ -45,117 +20,91 @@
 #define TOP	0x1
 #define BOTTOM	!TOP
 
-/******************************************************************************
- * Opcode passed in x0 to tell next EL that we want to run an image.
- * Corresponds to the function ID of the only SMC that the BL1 exception
- * handlers service. That's why the chosen value is the first function ID of
- * the ARM SMC64 range.
- *****************************************************************************/
-#define RUN_IMAGE	0xC0000000
+/*
+ * The following are used for image state attributes.
+ * An image can only be in one of the following states.
+ */
+#define IMAGE_STATE_RESET			0
+#define IMAGE_STATE_COPIED			1
+#define IMAGE_STATE_COPYING			2
+#define IMAGE_STATE_AUTHENTICATED		3
+#define IMAGE_STATE_EXECUTED			4
+#define IMAGE_STATE_INTERRUPTED			5
+
+#define IMAGE_ATTRIB_SKIP_LOADING	0x02
+#define IMAGE_ATTRIB_PLAT_SETUP		0x04
+
+#define INVALID_IMAGE_ID		(0xFFFFFFFF)
 
 /*******************************************************************************
- * Constants that allow assembler code to access members of and the
- * 'entry_point_info' structure at their correct offsets.
+ * Constants to indicate type of exception to the common exception handler.
  ******************************************************************************/
-#define ENTRY_POINT_INFO_PC_OFFSET	0x08
-#define ENTRY_POINT_INFO_ARGS_OFFSET	0x18
-
-#define PARAM_EP_SECURITY_MASK    0x1
-#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
-#define SET_SECURITY_STATE(x, security) \
-			((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
-
-#define EP_EE_MASK	0x2
-#define EP_EE_LITTLE	0x0
-#define EP_EE_BIG	0x2
-#define EP_GET_EE(x) (x & EP_EE_MASK)
-#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee))
-
-#define EP_ST_MASK	0x4
-#define EP_ST_DISABLE	0x0
-#define EP_ST_ENABLE	0x4
-#define EP_GET_ST(x) (x & EP_ST_MASK)
-#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee))
-
-#define PARAM_EP     0x01
-#define PARAM_IMAGE_BINARY  0x02
-#define PARAM_BL31       0x03
-
-#define VERSION_1		0x01
-
-#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
-	(_p)->h.type = (uint8_t)(_type); \
-	(_p)->h.version = (uint8_t)(_ver); \
-	(_p)->h.size = (uint16_t)sizeof(*_p); \
-	(_p)->h.attr = (uint32_t)(_attr) ; \
-	} while (0)
-
-/*******************************************************************************
- * Constant that indicates if this is the first version of the reset handler
- * contained in an image. This will be the case when the image is BL1 or when
- * its BL3-1 and RESET_TO_BL31 is true. This constant enables a subsequent
- * version of the reset handler to perform actions that override the ones
- * performed in the first version of the code. This will be required when the
- * first version exists in an un-modifiable image e.g. a BootROM image.
- ******************************************************************************/
-#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
-#define FIRST_RESET_HANDLER_CALL
-#endif
+#define SYNC_EXCEPTION_SP_EL0		0x0
+#define IRQ_SP_EL0			0x1
+#define FIQ_SP_EL0			0x2
+#define SERROR_SP_EL0			0x3
+#define SYNC_EXCEPTION_SP_ELX		0x4
+#define IRQ_SP_ELX			0x5
+#define FIQ_SP_ELX			0x6
+#define SERROR_SP_ELX			0x7
+#define SYNC_EXCEPTION_AARCH64		0x8
+#define IRQ_AARCH64			0x9
+#define FIQ_AARCH64			0xa
+#define SERROR_AARCH64			0xb
+#define SYNC_EXCEPTION_AARCH32		0xc
+#define IRQ_AARCH32			0xd
+#define FIQ_AARCH32			0xe
+#define SERROR_AARCH32			0xf
 
 #ifndef __ASSEMBLY__
-#include <cdefs.h> /* For __dead2 */
 #include <cassert.h>
-#include <stdint.h>
 #include <stddef.h>
+#include <stdint.h>
+#include <types.h>
+#include <utils_def.h> /* To retain compatibility */
+
+/*
+ * Declarations of linker-defined symbols that help determine the memory
+ * layout of the BL images.
+ */
+#if SEPARATE_CODE_AND_RODATA
+extern uintptr_t __TEXT_START__;
+extern uintptr_t __TEXT_END__;
+extern uintptr_t __RODATA_START__;
+extern uintptr_t __RODATA_END__;
+#else
+extern uintptr_t __RO_START__;
+extern uintptr_t __RO_END__;
+#endif
+
+#if defined(IMAGE_BL2)
+extern uintptr_t __BL2_END__;
+#elif defined(IMAGE_BL2U)
+extern uintptr_t __BL2U_END__;
+#elif defined(IMAGE_BL31)
+extern uintptr_t __BL31_END__;
+#elif defined(IMAGE_BL32)
+extern uintptr_t __BL32_END__;
+#endif /* IMAGE_BLX */
+
+#if USE_COHERENT_MEM
+extern uintptr_t __COHERENT_RAM_START__;
+extern uintptr_t __COHERENT_RAM_END__;
+#endif
 
 /*******************************************************************************
  * Structure used for telling the next BL how much of a particular type of
  * memory is available for its use and how much is already used.
  ******************************************************************************/
 typedef struct meminfo {
-	uint64_t total_base;
+	uintptr_t total_base;
 	size_t total_size;
-	uint64_t free_base;
+#if !LOAD_IMAGE_V2
+	uintptr_t free_base;
 	size_t free_size;
+#endif
 } meminfo_t;
 
-typedef struct aapcs64_params {
-	unsigned long arg0;
-	unsigned long arg1;
-	unsigned long arg2;
-	unsigned long arg3;
-	unsigned long arg4;
-	unsigned long arg5;
-	unsigned long arg6;
-	unsigned long arg7;
-} aapcs64_params_t;
-
-/***************************************************************************
- * This structure provides version information and the size of the
- * structure, attributes for the structure it represents
- ***************************************************************************/
-typedef struct param_header {
-	uint8_t type;		/* type of the structure */
-	uint8_t version;    /* version of this structure */
-	uint16_t size;      /* size of this structure in bytes */
-	uint32_t attr;      /* attributes: unused bits SBZ */
-} param_header_t;
-
-/*****************************************************************************
- * This structure represents the superset of information needed while
- * switching exception levels. The only two mechanisms to do so are
- * ERET & SMC. Security state is indicated using bit zero of header
- * attribute
- * NOTE: BL1 expects entrypoint followed by spsr while processing
- * SMC to jump to BL31 from the start of entry_point_info
- *****************************************************************************/
-typedef struct entry_point_info {
-	param_header_t h;
-	uintptr_t pc;
-	uint32_t spsr;
-	aapcs64_params_t args;
-} entry_point_info_t;
-
 /*****************************************************************************
  * Image info binary provides information from the image loader that
  * can be used by the firmware to manage available trusted RAM.
@@ -167,20 +116,72 @@
 	param_header_t h;
 	uintptr_t image_base;   /* physical address of base of image */
 	uint32_t image_size;    /* bytes read from image file */
+#if LOAD_IMAGE_V2
+	uint32_t image_max_size;
+#endif
 } image_info_t;
 
+/*****************************************************************************
+ * The image descriptor struct definition.
+ *****************************************************************************/
+typedef struct image_desc {
+	/* Contains unique image id for the image. */
+	unsigned int image_id;
+	/*
+	 * This member contains the image state information.
+	 * Refer to the IMAGE_STATE_XXX values defined above.
+	 */
+	unsigned int state;
+	uint32_t copied_size;	/* image size copied in blocks */
+	image_info_t image_info;
+	entry_point_info_t ep_info;
+} image_desc_t;
+
+#if LOAD_IMAGE_V2
+/* BL image node in the BL image loading sequence */
+typedef struct bl_load_info_node {
+	unsigned int image_id;
+	image_info_t *image_info;
+	struct bl_load_info_node *next_load_info;
+} bl_load_info_node_t;
+
+/* BL image head node in the BL image loading sequence */
+typedef struct bl_load_info {
+	param_header_t h;
+	bl_load_info_node_t *head;
+} bl_load_info_t;
+
+/* BL image node in the BL image execution sequence */
+typedef struct bl_params_node {
+	unsigned int image_id;
+	image_info_t *image_info;
+	entry_point_info_t *ep_info;
+	struct bl_params_node *next_params_info;
+} bl_params_node_t;
+
+/*
+ * BL image head node in the BL image execution sequence.
+ * It is also used to pass information to the next BL image.
+ */
+typedef struct bl_params {
+	param_header_t h;
+	bl_params_node_t *head;
+} bl_params_t;
+
+#else /* LOAD_IMAGE_V2 */
+
 /*******************************************************************************
  * This structure represents the superset of information that can be passed to
  * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
  * populated only if BL2 detects its presence. A pointer to a structure of this
- * type should be passed in X3 to BL31's cold boot entrypoint
+ * type should be passed in X0 to BL31's cold boot entrypoint.
  *
- * Use of this structure and the X3 parameter is not mandatory: the BL3-1
+ * Use of this structure and the X0 parameter is not mandatory: the BL31
  * platform code can use other mechanisms to provide the necessary information
- * about BL3-2 and BL3-3 to the common and SPD code.
+ * about BL32 and BL33 to the common and SPD code.
  *
- * BL3-1 image information is mandatory if this structure is used. If either of
- * the optional BL3-2 and BL3-3 image information is not provided, this is
+ * BL31 image information is mandatory if this structure is used. If either of
+ * the optional BL32 and BL33 image information is not provided, this is
  * indicated by the respective image_info pointers being zero.
  ******************************************************************************/
 typedef struct bl31_params {
@@ -192,41 +193,43 @@
 	image_info_t *bl33_image_info;
 } bl31_params_t;
 
-
-/*
- * Compile time assertions related to the 'entry_point_info' structure to
- * ensure that the assembler and the compiler view of the offsets of
- * the structure members is the same.
- */
-CASSERT(ENTRY_POINT_INFO_PC_OFFSET ==
-		__builtin_offsetof(entry_point_info_t, pc), \
-		assert_BL31_pc_offset_mismatch);
-
-CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \
-		__builtin_offsetof(entry_point_info_t, args), \
-		assert_BL31_args_offset_mismatch);
-
-CASSERT(sizeof(unsigned long) ==
-		__builtin_offsetof(entry_point_info_t, spsr) - \
-		__builtin_offsetof(entry_point_info_t, pc), \
-		assert_entrypoint_and_spsr_should_be_adjacent);
+#endif /* LOAD_IMAGE_V2 */
 
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
-unsigned long page_align(unsigned long, unsigned);
-void change_security_state(unsigned int);
-unsigned long image_size(const char *);
+size_t image_size(unsigned int image_id);
+
+int is_mem_free(uintptr_t free_base, size_t free_size,
+		uintptr_t addr, size_t size);
+
+#if LOAD_IMAGE_V2
+
+int load_image(unsigned int image_id, image_info_t *image_data);
+int load_auth_image(unsigned int image_id, image_info_t *image_data);
+
+#else
+
+uintptr_t page_align(uintptr_t, unsigned);
 int load_image(meminfo_t *mem_layout,
-	       const char *image_name,
-	       uint64_t image_base,
+	       unsigned int image_id,
+	       uintptr_t image_base,
 	       image_info_t *image_data,
 	       entry_point_info_t *entry_point_info);
+int load_auth_image(meminfo_t *mem_layout,
+		    unsigned int image_id,
+		    uintptr_t image_base,
+		    image_info_t *image_data,
+		    entry_point_info_t *entry_point_info);
+void reserve_mem(uintptr_t *free_base, size_t *free_size,
+		uintptr_t addr, size_t size);
+
+#endif /* LOAD_IMAGE_V2 */
+
 extern const char build_message[];
 extern const char version_string[];
 
-void reserve_mem(uint64_t *free_base, size_t *free_size,
-		uint64_t addr, size_t size);
+void print_entry_point_info(const entry_point_info_t *ep_info);
 
 #endif /*__ASSEMBLY__*/
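
The LOAD_IMAGE_V2 structures above form two singly linked lists, headed by bl_load_info_t (load order) and bl_params_t (execution order). The sketch below is illustrative only and assumes LOAD_IMAGE_V2=1; the helper name and the log text are not part of the patch.

#include <bl_common.h>
#include <debug.h>

/* Illustrative only: walk the BL execution list and report each image. */
static void walk_bl_params(const bl_params_t *params)
{
	const bl_params_node_t *node;

	for (node = params->head; node != NULL; node = node->next_params_info) {
		INFO("image %u: base 0x%lx, entry pc 0x%lx\n",
		     node->image_id,
		     (unsigned long)node->image_info->image_base,
		     (unsigned long)node->ep_info->pc);
	}
}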
 
diff --git a/include/common/debug.h b/include/common/debug.h
index a8dcb8d..3f0f84a 100644
--- a/include/common/debug.h
+++ b/include/common/debug.h
@@ -1,38 +1,12 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __DEBUG_H__
 #define __DEBUG_H__
 
-#include <stdio.h>
-
 /* The log output macros print output to the console. These macros produce
  * compiled log output only if the LOG_LEVEL defined in the makefile (or the
  * make command line) is greater or equal than the level required for that
@@ -49,41 +23,63 @@
 #define LOG_LEVEL_INFO			40
 #define LOG_LEVEL_VERBOSE		50
 
+#ifndef __ASSEMBLY__
+#include <stdarg.h>
+#include <stdio.h>
+
+/*
+ * Define a log marker corresponding to each log level. The marker is
+ * embedded at the start of the format string and is used by tf_log() to
+ * determine the log level of the message.
+ */
+#define LOG_MARKER_ERROR		"\xa"	/* 10 */
+#define LOG_MARKER_NOTICE		"\x14"	/* 20 */
+#define LOG_MARKER_WARNING		"\x1e"	/* 30 */
+#define LOG_MARKER_INFO			"\x28"	/* 40 */
+#define LOG_MARKER_VERBOSE		"\x32"	/* 50 */
 
 #if LOG_LEVEL >= LOG_LEVEL_NOTICE
-# define NOTICE(...)	tf_printf("NOTICE:  " __VA_ARGS__)
+# define NOTICE(...)	tf_log(LOG_MARKER_NOTICE __VA_ARGS__)
 #else
 # define NOTICE(...)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_ERROR
-# define ERROR(...)	tf_printf("ERROR:   " __VA_ARGS__)
+# define ERROR(...)	tf_log(LOG_MARKER_ERROR __VA_ARGS__)
 #else
 # define ERROR(...)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_WARNING
-# define WARN(...)	tf_printf("WARNING: " __VA_ARGS__)
+# define WARN(...)	tf_log(LOG_MARKER_WARNING __VA_ARGS__)
 #else
 # define WARN(...)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_INFO
-# define INFO(...)	tf_printf("INFO:    " __VA_ARGS__)
+# define INFO(...)	tf_log(LOG_MARKER_INFO __VA_ARGS__)
 #else
 # define INFO(...)
 #endif
 
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-# define VERBOSE(...)	tf_printf("VERBOSE: " __VA_ARGS__)
+# define VERBOSE(...)	tf_log(LOG_MARKER_VERBOSE __VA_ARGS__)
 #else
 # define VERBOSE(...)
 #endif
 
-
 void __dead2 do_panic(void);
 #define panic()	do_panic()
 
-void tf_printf(const char *fmt, ...);
+/* Function called when stack protection check code detects a corrupted stack */
+void __dead2 __stack_chk_fail(void);
 
+void tf_log(const char *fmt, ...) __printflike(1, 2);
+void tf_printf(const char *fmt, ...) __printflike(1, 2);
+int tf_snprintf(char *s, size_t n, const char *fmt, ...) __printflike(3, 4);
+void tf_vprintf(const char *fmt, va_list args);
+void tf_string_print(const char *str);
+void tf_log_set_max_level(unsigned int log_level);
+
+#endif /* __ASSEMBLY__ */
 #endif /* __DEBUG_H__ */
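
The reworked macros rely on string-literal concatenation: the one-byte log marker is glued onto the caller's format string, and tf_log() reads that leading byte to apply the filter set by tf_log_set_max_level(). A small usage sketch; the message text is invented.

#include <debug.h>

void report_result(int rc)
{
	/* ERROR(...) expands to tf_log("\xa" "boot step failed ...") */
	if (rc != 0)
		ERROR("boot step failed, rc=%d\n", rc);
	else
		INFO("boot step completed\n");
}

void quiet_down(void)
{
	/* Drop INFO/VERBOSE output at runtime without rebuilding. */
	tf_log_set_max_level(LOG_LEVEL_WARNING);
}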
diff --git a/include/common/desc_image_load.h b/include/common/desc_image_load.h
new file mode 100644
index 0000000..79f2bd7
--- /dev/null
+++ b/include/common/desc_image_load.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __DESC_IMAGE_LOAD_H__
+#define __DESC_IMAGE_LOAD_H__
+
+#include <bl_common.h>
+
+#if LOAD_IMAGE_V2
+/* The following structure is used to store BL entry point and image info. */
+typedef struct bl_mem_params_node {
+	unsigned int image_id;
+	image_info_t image_info;
+	entry_point_info_t ep_info;
+	unsigned int next_handoff_image_id;
+	bl_load_info_node_t load_node_mem;
+	bl_params_node_t params_node_mem;
+} bl_mem_params_node_t;
+
+/*
+ * Macro to register a list of BL image descriptors,
+ * defined as an array of bl_mem_params_node_t.
+ */
+#define REGISTER_BL_IMAGE_DESCS(_img_desc)				\
+	bl_mem_params_node_t *bl_mem_params_desc_ptr = &_img_desc[0];	\
+	unsigned int bl_mem_params_desc_num = ARRAY_SIZE(_img_desc);
+
+/* BL image loading utility functions */
+void flush_bl_params_desc(void);
+int get_bl_params_node_index(unsigned int image_id);
+bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id);
+bl_load_info_t *get_bl_load_info_from_mem_params_desc(void);
+bl_params_t *get_next_bl_params_from_mem_params_desc(void);
+
+
+#endif /* LOAD_IMAGE_V2 */
+#endif /* __DESC_IMAGE_LOAD_H__ */
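
REGISTER_BL_IMAGE_DESCS only defines the pointer/count pair that the generic loader walks. A hypothetical one-entry platform table might look like the sketch below (assumes LOAD_IMAGE_V2=1; the attributes and hand-off chain are placeholders, not taken from this patch).

#include <desc_image_load.h>
#include <tbbr_img_def.h>

static bl_mem_params_node_t plat_bl_params[] = {
	{
		.image_id = BL31_IMAGE_ID,
		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
				      entry_point_info_t, SECURE | EXECUTABLE),
		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
				      image_info_t, 0),
		.next_handoff_image_id = BL33_IMAGE_ID,
	},
};

/* Exposes bl_mem_params_desc_ptr and bl_mem_params_desc_num to the loader. */
REGISTER_BL_IMAGE_DESCS(plat_bl_params)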
diff --git a/include/common/ep_info.h b/include/common/ep_info.h
new file mode 100644
index 0000000..3f6213f
--- /dev/null
+++ b/include/common/ep_info.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EP_INFO_H__
+#define __EP_INFO_H__
+
+#include <param_header.h>
+#include <utils_def.h>
+
+#define SECURE		U(0x0)
+#define NON_SECURE	U(0x1)
+#define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE))
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of and the
+ * 'entry_point_info' structure at their correct offsets.
+ ******************************************************************************/
+#define ENTRY_POINT_INFO_PC_OFFSET	U(0x08)
+#ifdef AARCH32
+#define ENTRY_POINT_INFO_ARGS_OFFSET	U(0x10)
+#else
+#define ENTRY_POINT_INFO_ARGS_OFFSET	U(0x18)
+#endif
+
+/* The following are used to set/get image attributes. */
+#define PARAM_EP_SECURITY_MASK		U(0x1)
+
+#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
+#define SET_SECURITY_STATE(x, security) \
+			((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
+
+#define EP_EE_MASK	U(0x2)
+#define EP_EE_SHIFT	1
+#define EP_EE_LITTLE	U(0x0)
+#define EP_EE_BIG	U(0x2)
+#define EP_GET_EE(x) (x & EP_EE_MASK)
+#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee))
+
+#define EP_ST_MASK	U(0x4)
+#define EP_ST_DISABLE	U(0x0)
+#define EP_ST_ENABLE	U(0x4)
+#define EP_GET_ST(x) (x & EP_ST_MASK)
+#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee))
+
+#define EP_EXE_MASK	U(0x8)
+#define NON_EXECUTABLE	U(0x0)
+#define EXECUTABLE	U(0x8)
+#define EP_GET_EXE(x) (x & EP_EXE_MASK)
+#define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee))
+
+#define EP_FIRST_EXE_MASK	U(0x10)
+#define EP_FIRST_EXE		U(0x10)
+#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK)
+#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee))
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <types.h>
+
+typedef struct aapcs64_params {
+	u_register_t arg0;
+	u_register_t arg1;
+	u_register_t arg2;
+	u_register_t arg3;
+	u_register_t arg4;
+	u_register_t arg5;
+	u_register_t arg6;
+	u_register_t arg7;
+} aapcs64_params_t;
+
+typedef struct aapcs32_params {
+	u_register_t arg0;
+	u_register_t arg1;
+	u_register_t arg2;
+	u_register_t arg3;
+} aapcs32_params_t;
+
+/*****************************************************************************
+ * This structure represents the superset of information needed while
+ * switching exception levels. The only two mechanisms to do so are
+ * ERET & SMC. Security state is indicated using bit zero of header
+ * attribute
+ * NOTE: BL1 expects entrypoint followed by spsr at an offset from the start
+ * of this structure defined by the macro `ENTRY_POINT_INFO_PC_OFFSET` while
+ * processing SMC to jump to BL31.
+ *****************************************************************************/
+typedef struct entry_point_info {
+	param_header_t h;
+	uintptr_t pc;
+	uint32_t spsr;
+#ifdef AARCH32
+	aapcs32_params_t args;
+#else
+	aapcs64_params_t args;
+#endif
+} entry_point_info_t;
+
+/*
+ * Compile time assertions related to the 'entry_point_info' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(ENTRY_POINT_INFO_PC_OFFSET ==
+		__builtin_offsetof(entry_point_info_t, pc), \
+		assert_BL31_pc_offset_mismatch);
+
+CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \
+		__builtin_offsetof(entry_point_info_t, args), \
+		assert_BL31_args_offset_mismatch);
+
+CASSERT(sizeof(uintptr_t) ==
+		__builtin_offsetof(entry_point_info_t, spsr) - \
+		__builtin_offsetof(entry_point_info_t, pc), \
+		assert_entrypoint_and_spsr_should_be_adjacent);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __EP_INFO_H__ */
+
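
The EP_* and *_SECURITY_STATE helpers all operate on the attr word of the param_header embedded at the start of entry_point_info_t. A minimal sketch for a hypothetical little-endian, non-secure entry point; the entry address is a placeholder supplied by the caller.

#include <ep_info.h>

static void fill_ns_ep(entry_point_info_t *ep, uintptr_t entry)
{
	ep->pc = entry;				/* placeholder entry address */
	SET_SECURITY_STATE(ep->h.attr, NON_SECURE);
	EP_SET_EE(ep->h.attr, EP_EE_LITTLE);	/* little-endian register state */
	EP_SET_ST(ep->h.attr, EP_ST_DISABLE);	/* no secure timer access */
}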
diff --git a/include/common/firmware_image_package.h b/include/common/firmware_image_package.h
deleted file mode 100644
index 8fb669e..0000000
--- a/include/common/firmware_image_package.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FIRMWARE_IMAGE_PACKAGE_H__
-#define __FIRMWARE_IMAGE_PACKAGE_H__
-
-#include <stdint.h>
-#include <uuid.h>
-
-/* This is used as a signature to validate the blob header */
-#define TOC_HEADER_NAME	0xAA640001
-
-
-/* ToC Entry UUIDs */
-#define UUID_TRUSTED_BOOT_FIRMWARE_BL2 \
-	{0x0becf95f, 0x224d, 0x4d3e, 0xa5, 0x44, {0xc3, 0x9d, 0x81, 0xc7, 0x3f, 0x0a} }
-#define UUID_SCP_FIRMWARE_BL30 \
-	{0x3dfd6697, 0xbe89, 0x49e8, 0xae, 0x5d, {0x78, 0xa1, 0x40, 0x60, 0x82, 0x13} }
-#define UUID_EL3_RUNTIME_FIRMWARE_BL31 \
-	{0x6d08d447, 0xfe4c, 0x4698, 0x9b, 0x95, {0x29, 0x50, 0xcb, 0xbd, 0x5a, 0x00} }
-#define UUID_SECURE_PAYLOAD_BL32 \
-	{0x89e1d005, 0xdc53, 0x4713, 0x8d, 0x2b, {0x50, 0x0a, 0x4b, 0x7a, 0x3e, 0x38} }
-#define UUID_NON_TRUSTED_FIRMWARE_BL33 \
-	{0xa7eed0d6, 0xeafc, 0x4bd5, 0x97, 0x82, {0x99, 0x34, 0xf2, 0x34, 0xb6, 0xe4} }
-/* Key certificates */
-#define UUID_ROT_KEY_CERT \
-	{0x721d2d86, 0x60f8, 0x11e4, 0x92, 0x0b, {0x8b, 0xe7, 0x62, 0x16, 0x0f, 0x24} }
-#define UUID_TRUSTED_KEY_CERT \
-	{0x90e87e82, 0x60f8, 0x11e4, 0xa1, 0xb4, {0x77, 0x7a, 0x21, 0xb4, 0xf9, 0x4c} }
-#define UUID_NON_TRUSTED_WORLD_KEY_CERT \
-	{0x3d87671c, 0x635f, 0x11e4, 0x97, 0x8d, {0x27, 0xc0, 0xc7, 0x14, 0x8a, 0xbd} }
-#define UUID_SCP_FIRMWARE_BL30_KEY_CERT \
-	{0xa1214202, 0x60f8, 0x11e4, 0x8d, 0x9b, {0xf3, 0x3c, 0x0e, 0x15, 0xa0, 0x14} }
-#define UUID_EL3_RUNTIME_FIRMWARE_BL31_KEY_CERT \
-	{0xccbeb88a, 0x60f9, 0x11e4, 0x9a, 0xd0, {0xeb, 0x48, 0x22, 0xd8, 0xdc, 0xf8} }
-#define UUID_SECURE_PAYLOAD_BL32_KEY_CERT \
-	{0x03d67794, 0x60fb, 0x11e4, 0x85, 0xdd, {0xb7, 0x10, 0x5b, 0x8c, 0xee, 0x04} }
-#define UUID_NON_TRUSTED_FIRMWARE_BL33_KEY_CERT \
-	{0x2a83d58a, 0x60fb, 0x11e4, 0x8a, 0xaf, {0xdf, 0x30, 0xbb, 0xc4, 0x98, 0x59} }
-/* Content certificates */
-#define UUID_TRUSTED_BOOT_FIRMWARE_BL2_CERT \
-	{0xea69e2d6, 0x635d, 0x11e4, 0x8d, 0x8c, {0x9f, 0xba, 0xbe, 0x99, 0x56, 0xa5} }
-#define UUID_SCP_FIRMWARE_BL30_CERT \
-	{0x046fbe44, 0x635e, 0x11e4, 0xb2, 0x8b, {0x73, 0xd8, 0xea, 0xae, 0x96, 0x56} }
-#define UUID_EL3_RUNTIME_FIRMWARE_BL31_CERT \
-	{0x200cb2e2, 0x635e, 0x11e4, 0x9c, 0xe8, {0xab, 0xcc, 0xf9, 0x2b, 0xb6, 0x66} }
-#define UUID_SECURE_PAYLOAD_BL32_CERT \
-	{0x11449fa4, 0x635e, 0x11e4, 0x87, 0x28, {0x3f, 0x05, 0x72, 0x2a, 0xf3, 0x3d} }
-#define UUID_NON_TRUSTED_FIRMWARE_BL33_CERT \
-	{0xf3c1c48e, 0x635d, 0x11e4, 0xa7, 0xa9, {0x87, 0xee, 0x40, 0xb2, 0x3f, 0xa7} }
-
-typedef struct fip_toc_header {
-	uint32_t	name;
-	uint32_t	serial_number;
-	uint64_t	flags;
-} fip_toc_header_t;
-
-typedef struct fip_toc_entry {
-	uuid_t		uuid;
-	uint64_t	offset_address;
-	uint64_t	size;
-	uint64_t	flags;
-} fip_toc_entry_t;
-
-#endif /* __FIRMWARE_IMAGE_PACKAGE_H__ */
diff --git a/include/common/interrupt_props.h b/include/common/interrupt_props.h
new file mode 100644
index 0000000..9786b40
--- /dev/null
+++ b/include/common/interrupt_props.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __INTERRUPT_PROPS_H__
+#define __INTERRUPT_PROPS_H__
+
+#ifndef __ASSEMBLY__
+
+/* Create an interrupt property descriptor from various interrupt properties */
+#define INTR_PROP_DESC(num, pri, grp, cfg) \
+	{ \
+		.intr_num = num, \
+		.intr_pri = pri, \
+		.intr_grp = grp, \
+		.intr_cfg = cfg, \
+	}
+
+typedef struct interrupt_prop {
+	unsigned int intr_num:10;
+	unsigned int intr_pri:8;
+	unsigned int intr_grp:2;
+	unsigned int intr_cfg:2;
+} interrupt_prop_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __INTERRUPT_PROPS_H__ */
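
interrupt_prop_t packs the interrupt number, priority, group and trigger configuration into one word-sized descriptor, and INTR_PROP_DESC builds one entry of the array a platform hands to the GIC driver. In the sketch below every value is a placeholder, not taken from this patch.

#include <interrupt_props.h>

/* Hypothetical secure interrupt list; all values are placeholders. */
static const interrupt_prop_t plat_sec_interrupts[] = {
	INTR_PROP_DESC(29, 0x10, 1, 0),	/* e.g. a secure timer PPI */
	INTR_PROP_DESC(72, 0x10, 1, 0),	/* e.g. a platform SPI */
};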
diff --git a/include/common/param_header.h b/include/common/param_header.h
new file mode 100644
index 0000000..90d59b3
--- /dev/null
+++ b/include/common/param_header.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PARAM_HEADER_H__
+#define __PARAM_HEADER_H__
+
+/* Param header types */
+#define PARAM_EP		0x01
+#define PARAM_IMAGE_BINARY	0x02
+#define PARAM_BL31		0x03
+#define PARAM_BL_LOAD_INFO	0x04
+#define PARAM_BL_PARAMS		0x05
+#define PARAM_PSCI_LIB_ARGS	0x06
+
+/* Param header version */
+#define VERSION_1	0x01
+#define VERSION_2	0x02
+
+#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
+	(_p)->h.type = (uint8_t)(_type); \
+	(_p)->h.version = (uint8_t)(_ver); \
+	(_p)->h.size = (uint16_t)sizeof(*_p); \
+	(_p)->h.attr = (uint32_t)(_attr) ; \
+	} while (0)
+
+/* The following is used to populate structure members statically. */
+#define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr)	\
+	._p.h.type = (uint8_t)(_type), \
+	._p.h.version = (uint8_t)(_ver), \
+	._p.h.size = (uint16_t)sizeof(_p_type), \
+	._p.h.attr = (uint32_t)(_attr)
+
+#ifndef __ASSEMBLY__
+
+#include <types.h>
+
+/***************************************************************************
+ * This structure provides version information, the size of the structure
+ * it describes, and its attributes.
+ ***************************************************************************/
+typedef struct param_header {
+	uint8_t type;		/* type of the structure */
+	uint8_t version;    /* version of this structure */
+	uint16_t size;      /* size of this structure in bytes */
+	uint32_t attr;      /* attributes: unused bits SBZ */
+} param_header_t;
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PARAM_HEADER_H__ */
+
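
SET_PARAM_HEAD fills the common header at runtime, while SET_STATIC_PARAM_HEAD does the same through designated initialisers for compile-time tables. A minimal runtime sketch, assuming the entry_point_info_t type from ep_info.h (added elsewhere in this patch).

#include <bl_common.h>	/* pulls in param_header.h and ep_info.h */

static void tag_secure_ep(entry_point_info_t *ep)
{
	/* Mark the structure as a version 1 entry-point descriptor, secure. */
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, SECURE);
}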
diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h
new file mode 100644
index 0000000..e179e4b
--- /dev/null
+++ b/include/common/runtime_svc.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RUNTIME_SVC_H__
+#define __RUNTIME_SVC_H__
+
+#include <bl_common.h>		/* to include exception types */
+#include <smcc_helpers.h>	/* to include SMCC definitions */
+
+
+/*******************************************************************************
+ * Structure definition, typedefs & constants for the runtime service framework
+ ******************************************************************************/
+
+/*
+ * Constants to allow the assembler to access a runtime service
+ * descriptor.
+ */
+#ifdef AARCH32
+#define RT_SVC_SIZE_LOG2	4
+#define RT_SVC_DESC_INIT	8
+#define RT_SVC_DESC_HANDLE	12
+#else
+#define RT_SVC_SIZE_LOG2	5
+#define RT_SVC_DESC_INIT	16
+#define RT_SVC_DESC_HANDLE	24
+#endif /* AARCH32 */
+#define SIZEOF_RT_SVC_DESC	(1 << RT_SVC_SIZE_LOG2)
+
+
+/*
+ * The function identifier has 6 bits for the owning entity number and a
+ * single bit for the type of SMC call. Taken together, these values limit
+ * the maximum number of runtime services to 128.
+ */
+#define MAX_RT_SVCS		128
+
+#ifndef __ASSEMBLY__
+
+/* Prototype for runtime service initializing function */
+typedef int32_t (*rt_svc_init_t)(void);
+
+/*
+ * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
+ * x4 are as passed by the caller. The rest of the SMC arguments and the
+ * context can be accessed via the handle pointer. The cookie parameter is
+ * reserved for future use.
+ */
+typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
+				  u_register_t x1,
+				  u_register_t x2,
+				  u_register_t x3,
+				  u_register_t x4,
+				  void *cookie,
+				  void *handle,
+				  u_register_t flags);
+typedef struct rt_svc_desc {
+	uint8_t start_oen;
+	uint8_t end_oen;
+	uint8_t call_type;
+	const char *name;
+	rt_svc_init_t init;
+	rt_svc_handle_t handle;
+} rt_svc_desc_t;
+
+/*
+ * Convenience macro to declare a service descriptor
+ */
+#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \
+	static const rt_svc_desc_t __svc_desc_ ## _name \
+		__section("rt_svc_descs") __used = { \
+			.start_oen = _start, \
+			.end_oen = _end, \
+			.call_type = _type, \
+			.name = #_name, \
+			.init = _setup, \
+			.handle = _smch }
+
+/*
+ * Compile time assertions related to the 'rt_svc_desc' structure to:
+ * 1. ensure that the assembler and the compiler view of the size
+ *    of the structure are the same.
+ * 2. ensure that the assembler and the compiler see the initialisation
+ *    routine at the same offset.
+ * 3. ensure that the assembler and the compiler see the handler
+ *    routine at the same offset.
+ */
+CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \
+	assert_sizeof_rt_svc_desc_mismatch);
+CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \
+	assert_rt_svc_desc_init_offset_mismatch);
+CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
+	assert_rt_svc_desc_handle_offset_mismatch);
+
+
+/*
+ * This macro combines the call type and the owning entity number corresponding
+ * to a runtime service to generate a unique owning entity number. This unique
+ * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry
+ * contains the index of the service descriptor in the 'rt_svc_descs' array.
+ */
+#define get_unique_oen(oen, call_type)	((oen & FUNCID_OEN_MASK) |	\
+					((call_type & FUNCID_TYPE_MASK) \
+					 << FUNCID_OEN_WIDTH))
+
+/*
+ * This macro generates the unique owning entity number from the SMC Function
+ * ID.  This unique oen is used to access an entry in the
+ * 'rt_svc_descs_indices' array to invoke the corresponding runtime service
+ * handler during SMC handling.
+ */
+#define get_unique_oen_from_smc_fid(fid)		\
+	get_unique_oen(((fid) >> FUNCID_OEN_SHIFT),	\
+			((fid) >> FUNCID_TYPE_SHIFT))
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void runtime_svc_init(void);
+uintptr_t handle_runtime_svc(uint32_t smc_fid, void *cookie, void *handle,
+						unsigned int flags);
+extern uintptr_t __RT_SVC_DESCS_START__;
+extern uintptr_t __RT_SVC_DESCS_END__;
+void init_crash_reporting(void);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __RUNTIME_SVC_H__ */
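
DECLARE_RT_SVC places a descriptor in the 'rt_svc_descs' linker section, which the framework indexes by the unique OEN computed above. A hedged sketch of registering a hypothetical service follows; OEN_SIP_START, OEN_SIP_END, SMC_TYPE_FAST and SMC_RET1 are assumed to come from the SMCC headers, they are not defined in this hunk.

#include <runtime_svc.h>
#include <smcc_helpers.h>

static int32_t demo_svc_setup(void)
{
	return 0;	/* nothing to initialise in this sketch */
}

static uintptr_t demo_svc_handler(uint32_t smc_fid, u_register_t x1,
				  u_register_t x2, u_register_t x3,
				  u_register_t x4, void *cookie,
				  void *handle, u_register_t flags)
{
	SMC_RET1(handle, 0);	/* assumed helper from smcc_helpers.h */
}

/* OEN_SIP_START/OEN_SIP_END and SMC_TYPE_FAST are assumed SMCC constants. */
DECLARE_RT_SVC(
	demo_svc,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_FAST,
	demo_svc_setup,
	demo_svc_handler
);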
diff --git a/include/common/tbbr/cot_def.h b/include/common/tbbr/cot_def.h
new file mode 100644
index 0000000..4a2a8ef
--- /dev/null
+++ b/include/common/tbbr/cot_def.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __COT_DEF_H__
+#define __COT_DEF_H__
+
+/* TBBR CoT definitions */
+
+#define COT_MAX_VERIFIED_PARAMS		4
+
+#endif /* __COT_DEF_H__ */
diff --git a/include/common/tbbr/tbbr_img_def.h b/include/common/tbbr/tbbr_img_def.h
new file mode 100644
index 0000000..3e68b64
--- /dev/null
+++ b/include/common/tbbr/tbbr_img_def.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TBBR_IMG_DEF_H__
+#define __TBBR_IMG_DEF_H__
+
+/* Firmware Image Package */
+#define FIP_IMAGE_ID			0
+
+/* Trusted Boot Firmware BL2 */
+#define BL2_IMAGE_ID			1
+
+/* SCP Firmware SCP_BL2 */
+#define SCP_BL2_IMAGE_ID		2
+
+/* EL3 Runtime Firmware BL31 */
+#define BL31_IMAGE_ID			3
+
+/* Secure Payload BL32 (Trusted OS) */
+#define BL32_IMAGE_ID			4
+
+/* Non-Trusted Firmware BL33 */
+#define BL33_IMAGE_ID			5
+
+/* Certificates */
+#define TRUSTED_BOOT_FW_CERT_ID		6
+#define TRUSTED_KEY_CERT_ID		7
+
+#define SCP_FW_KEY_CERT_ID		8
+#define SOC_FW_KEY_CERT_ID		9
+#define TRUSTED_OS_FW_KEY_CERT_ID	10
+#define NON_TRUSTED_FW_KEY_CERT_ID	11
+
+#define SCP_FW_CONTENT_CERT_ID		12
+#define SOC_FW_CONTENT_CERT_ID		13
+#define TRUSTED_OS_FW_CONTENT_CERT_ID	14
+#define NON_TRUSTED_FW_CONTENT_CERT_ID	15
+
+/* Non-Trusted ROM Firmware NS_BL1U */
+#define NS_BL1U_IMAGE_ID		16
+
+/* Trusted FWU Certificate */
+#define FWU_CERT_ID			17
+
+/* Trusted FWU SCP Firmware SCP_BL2U */
+#define SCP_BL2U_IMAGE_ID		18
+
+/* Trusted FWU Boot Firmware BL2U */
+#define BL2U_IMAGE_ID			19
+
+/* Non-Trusted FWU Firmware NS_BL2U */
+#define NS_BL2U_IMAGE_ID		20
+
+/* Secure Payload BL32_EXTRA1 (Trusted OS Extra1) */
+#define BL32_EXTRA1_IMAGE_ID		21
+
+/* Secure Payload BL32_EXTRA2 (Trusted OS Extra2) */
+#define BL32_EXTRA2_IMAGE_ID		22
+
+#endif /* __TBBR_IMG_DEF_H__ */
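
With LOAD_IMAGE_V2 these identifiers are what the loader prototypes in bl_common.h take in place of the old file-name strings. A minimal sketch; the caller is assumed to have initialised the image_info_t, and error handling is trimmed.

#include <bl_common.h>
#include <debug.h>
#include <tbbr_img_def.h>

static int load_bl31_image(image_info_t *bl31_info)
{
	int err = load_auth_image(BL31_IMAGE_ID, bl31_info);

	if (err != 0)
		ERROR("Failed to load BL31 (rc=%d)\n", err);
	return err;
}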
diff --git a/include/drivers/arm/arm_gic.h b/include/drivers/arm/arm_gic.h
index 9ab1a95..019159f 100644
--- a/include/drivers/arm/arm_gic.h
+++ b/include/drivers/arm/arm_gic.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __ARM_GIC_H__
@@ -36,22 +12,22 @@
 /*******************************************************************************
  * Function declarations
  ******************************************************************************/
-void arm_gic_init(unsigned int gicc_base,
-		unsigned int gicd_base,
-		unsigned long gicr_base,
-		const unsigned int *irq_sec_ptr,
-		unsigned int num_irqs);
-void arm_gic_setup(void);
-void arm_gic_cpuif_deactivate(void);
-void arm_gic_cpuif_setup(void);
-void arm_gic_pcpu_distif_setup(void);
+void arm_gic_init(uintptr_t gicc_base,
+		  uintptr_t gicd_base,
+		  uintptr_t gicr_base,
+		  const unsigned int *irq_sec_ptr,
+		  unsigned int num_irqs) __deprecated;
+void arm_gic_setup(void) __deprecated;
+void arm_gic_cpuif_deactivate(void) __deprecated;
+void arm_gic_cpuif_setup(void) __deprecated;
+void arm_gic_pcpu_distif_setup(void) __deprecated;
 
 uint32_t arm_gic_interrupt_type_to_line(uint32_t type,
-				uint32_t security_state);
-uint32_t arm_gic_get_pending_interrupt_type(void);
-uint32_t arm_gic_get_pending_interrupt_id(void);
-uint32_t arm_gic_acknowledge_interrupt(void);
-void arm_gic_end_of_interrupt(uint32_t id);
-uint32_t arm_gic_get_interrupt_type(uint32_t id);
+				uint32_t security_state) __deprecated;
+uint32_t arm_gic_get_pending_interrupt_type(void) __deprecated;
+uint32_t arm_gic_get_pending_interrupt_id(void) __deprecated;
+uint32_t arm_gic_acknowledge_interrupt(void) __deprecated;
+void arm_gic_end_of_interrupt(uint32_t id) __deprecated;
+uint32_t arm_gic_get_interrupt_type(uint32_t id) __deprecated;
 
 #endif /* __GIC_H__ */
diff --git a/include/drivers/arm/arm_gicv3_common.h b/include/drivers/arm/arm_gicv3_common.h
new file mode 100644
index 0000000..8970e3f
--- /dev/null
+++ b/include/drivers/arm/arm_gicv3_common.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ARM_GICV3_COMMON_H__
+#define __ARM_GICV3_COMMON_H__
+
+/*******************************************************************************
+ * GIC500/GIC600 Re-distributor interface registers & constants
+ ******************************************************************************/
+
+/* GICR_WAKER implementation-defined bit definitions */
+#define	WAKER_SL_SHIFT		0
+#define	WAKER_QSC_SHIFT		31
+
+#define WAKER_SL_BIT		(1U << WAKER_SL_SHIFT)
+#define WAKER_QSC_BIT		(1U << WAKER_QSC_SHIFT)
+
+#endif /* __ARM_GICV3_COMMON_H__ */
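
WAKER_SL_BIT and WAKER_QSC_BIT implement a request/acknowledge handshake when quiescing a GIC-500/GIC-600 redistributor: software sets the Sleep bit and then polls Quiescent. The sketch below assumes the generic mmio accessors from mmio.h and an architectural GICR_WAKER offset of 0x14, neither of which is defined in this header.

#include <arm_gicv3_common.h>
#include <mmio.h>

#define GICR_WAKER_OFFSET	0x14U	/* architectural offset (assumption here) */

/* Ask the redistributor to go quiescent and wait for the acknowledgement. */
static void gicr_request_quiescence(uintptr_t gicr_base)
{
	uint32_t waker = mmio_read_32(gicr_base + GICR_WAKER_OFFSET);

	mmio_write_32(gicr_base + GICR_WAKER_OFFSET, waker | WAKER_SL_BIT);
	while ((mmio_read_32(gicr_base + GICR_WAKER_OFFSET) & WAKER_QSC_BIT) == 0U)
		;
}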
diff --git a/include/drivers/arm/cci.h b/include/drivers/arm/cci.h
new file mode 100644
index 0000000..1def6a8
--- /dev/null
+++ b/include/drivers/arm/cci.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CCI_H__
+#define __CCI_H__
+
+/* Slave interface offsets from PERIPHBASE */
+#define SLAVE_IFACE6_OFFSET		0x7000
+#define SLAVE_IFACE5_OFFSET		0x6000
+#define SLAVE_IFACE4_OFFSET		0x5000
+#define SLAVE_IFACE3_OFFSET		0x4000
+#define SLAVE_IFACE2_OFFSET		0x3000
+#define SLAVE_IFACE1_OFFSET		0x2000
+#define SLAVE_IFACE0_OFFSET		0x1000
+#define SLAVE_IFACE_OFFSET(index)	(SLAVE_IFACE0_OFFSET +	\
+					(0x1000 * (index)))
+
+/* Slave interface event and count register offsets from PERIPHBASE */
+#define EVENT_SELECT7_OFFSET		0x80000
+#define EVENT_SELECT6_OFFSET		0x70000
+#define EVENT_SELECT5_OFFSET		0x60000
+#define EVENT_SELECT4_OFFSET		0x50000
+#define EVENT_SELECT3_OFFSET		0x40000
+#define EVENT_SELECT2_OFFSET		0x30000
+#define EVENT_SELECT1_OFFSET		0x20000
+#define EVENT_SELECT0_OFFSET		0x10000
+#define EVENT_OFFSET(index)		(EVENT_SELECT0_OFFSET +	\
+					(0x10000 * (index)))
+
+/* Control and ID register offsets */
+#define CTRL_OVERRIDE_REG		0x0
+#define SECURE_ACCESS_REG		0x8
+#define STATUS_REG			0xc
+#define IMPRECISE_ERR_REG		0x10
+#define PERFMON_CTRL_REG		0x100
+#define IFACE_MON_CTRL_REG		0x104
+
+/* Component and peripheral ID registers */
+#define PERIPHERAL_ID0			0xFE0
+#define PERIPHERAL_ID1			0xFE4
+#define PERIPHERAL_ID2			0xFE8
+#define PERIPHERAL_ID3			0xFEC
+#define PERIPHERAL_ID4			0xFD0
+#define PERIPHERAL_ID5			0xFD4
+#define PERIPHERAL_ID6			0xFD8
+#define PERIPHERAL_ID7			0xFDC
+
+#define COMPONENT_ID0			0xFF0
+#define COMPONENT_ID1			0xFF4
+#define COMPONENT_ID2			0xFF8
+#define COMPONENT_ID3			0xFFC
+#define COMPONENT_ID4			0x1000
+#define COMPONENT_ID5			0x1004
+#define COMPONENT_ID6			0x1008
+#define COMPONENT_ID7			0x100C
+
+/* Slave interface register offsets */
+#define SNOOP_CTRL_REG			0x0
+#define SH_OVERRIDE_REG			0x4
+#define READ_CHNL_QOS_VAL_OVERRIDE_REG	0x100
+#define WRITE_CHNL_QOS_VAL_OVERRIDE_REG	0x104
+#define MAX_OT_REG			0x110
+
+/* Snoop Control register bit definitions */
+#define DVM_EN_BIT			(1 << 1)
+#define SNOOP_EN_BIT			(1 << 0)
+#define SUPPORT_SNOOPS			(1 << 30)
+#define SUPPORT_DVM			(1 << 31)
+
+/* Status register bit definitions */
+#define CHANGE_PENDING_BIT		(1 << 0)
+
+/* Event and count register offsets */
+#define EVENT_SELECT_REG		0x0
+#define EVENT_COUNT_REG			0x4
+#define COUNT_CNTRL_REG			0x8
+#define COUNT_OVERFLOW_REG		0xC
+
+/* Slave interface monitor registers */
+#define INT_MON_REG_SI0			0x90000
+#define INT_MON_REG_SI1			0x90004
+#define INT_MON_REG_SI2			0x90008
+#define INT_MON_REG_SI3			0x9000C
+#define INT_MON_REG_SI4			0x90010
+#define INT_MON_REG_SI5			0x90014
+#define INT_MON_REG_SI6			0x90018
+
+/* Master interface monitor registers */
+#define INT_MON_REG_MI0			0x90100
+#define INT_MON_REG_MI1			0x90104
+#define INT_MON_REG_MI2			0x90108
+#define INT_MON_REG_MI3			0x9010c
+#define INT_MON_REG_MI4			0x90110
+#define INT_MON_REG_MI5			0x90114
+
+#define SLAVE_IF_UNUSED			-1
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/* Function declarations */
+
+/*
+ * The ARM CCI driver needs the following:
+ * 1. Base address of the CCI product
+ * 2. An array mapping AMBA 4 master IDs to ACE/ACE-Lite slave
+ *    interfaces.
+ * 3. Size of the array.
+ *
+ * SLAVE_IF_UNUSED should be used in the map to indicate that no AMBA 4
+ * master exists for that interface.
+ */
+void cci_init(uintptr_t base, const int *map, unsigned int num_cci_masters);
+
+void cci_enable_snoop_dvm_reqs(unsigned int master_id);
+void cci_disable_snoop_dvm_reqs(unsigned int master_id);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CCI_H__ */
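
cci_init() consumes the per-master map described in the comment above. A sketch for an imaginary two-cluster platform; the base address, master IDs and slave interface numbers are placeholders.

#include <cci.h>

#define PLAT_CCI_BASE	0x2c090000UL	/* placeholder PERIPHBASE */

/* Index = master id, value = the slave interface it is wired to. */
static const int plat_cci_map[] = {
	3,			/* master 0: cluster 0 on slave interface 3 */
	4,			/* master 1: cluster 1 on slave interface 4 */
	SLAVE_IF_UNUSED,	/* master 2: nothing connected */
};

void plat_cci_setup(void)
{
	cci_init(PLAT_CCI_BASE, plat_cci_map,
		 sizeof(plat_cci_map) / sizeof(plat_cci_map[0]));
	cci_enable_snoop_dvm_reqs(0);	/* enable coherency for master 0 */
}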
diff --git a/include/drivers/arm/cci400.h b/include/drivers/arm/cci400.h
index 7756bdf..e11dad4 100644
--- a/include/drivers/arm/cci400.h
+++ b/include/drivers/arm/cci400.h
@@ -1,36 +1,20 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CCI_400_H__
 #define __CCI_400_H__
 
+/**************************************************************
+ * THIS DRIVER IS DEPRECATED. Please use the driver in cci.h
+ **************************************************************/
+#if ERROR_DEPRECATED
+#error " The CCI-400 specific driver is deprecated."
+#endif
+
+
 /* Slave interface offsets from PERIPHBASE */
 #define SLAVE_IFACE4_OFFSET		0x5000
 #define SLAVE_IFACE3_OFFSET		0x4000
@@ -68,6 +52,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <stdint.h>
+
 /* Function declarations */
 
 /*
@@ -79,12 +65,12 @@
  * affinity instance of the mpidr representing the cluster. A negative cluster
  * index indicates that no cluster is present on that slave interface.
  */
-void cci_init(unsigned long cci_base,
+void cci_init(uintptr_t cci_base,
 		int slave_iface3_cluster_ix,
-		int slave_iface4_cluster_ix);
+		int slave_iface4_cluster_ix) __deprecated;
 
-void cci_enable_cluster_coherency(unsigned long mpidr);
-void cci_disable_cluster_coherency(unsigned long mpidr);
+void cci_enable_cluster_coherency(unsigned long mpidr) __deprecated;
+void cci_disable_cluster_coherency(unsigned long mpidr) __deprecated;
 
 #endif /* __ASSEMBLY__ */
 #endif /* __CCI_400_H__ */
diff --git a/include/drivers/arm/ccn.h b/include/drivers/arm/ccn.h
new file mode 100644
index 0000000..d740867
--- /dev/null
+++ b/include/drivers/arm/ccn.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CCN_H__
+#define __CCN_H__
+
+/*
+ * This macro defines the maximum number of master interfaces that reside on
+ * Request nodes which the CCN driver can accommodate. The driver APIs to add
+ * and remove Request nodes from snoop/dvm domains take a bit map of master
+ * interfaces as inputs. The largest C data type that can be used is a 64-bit
+ * unsigned integer. Hence the value of 64. The platform will have to ensure
+ * that the master interfaces are numbered from 0-63.
+ */
+#define CCN_MAX_RN_MASTERS	64
+
+/*
+ * The following constants define the various run modes that the platform can
+ * request the CCN driver to place the L3 cache in. These map to the
+ * programmable P-State values in a HN-F P-state register.
+ */
+#define CCN_L3_RUN_MODE_NOL3	0x0	/* HNF_PM_NOL3 */
+#define CCN_L3_RUN_MODE_SFONLY	0x1	/* HNF_PM_SFONLY */
+#define CCN_L3_RUN_MODE_HAM	0x2	/* HNF_PM_HALF */
+#define CCN_L3_RUN_MODE_FAM	0x3	/* HNF_PM_FULL */
+
+/* part 0 IDs for various CCN variants */
+#define CCN_502_PART0_ID	0x30
+#define CCN_504_PART0_ID	0x26
+#define CCN_505_PART0_ID	0x27
+#define CCN_508_PART0_ID	0x28
+#define CCN_512_PART0_ID	0x29
+
+/*
+ * The following macro takes the value returned from a read of a HN-F P-state
+ * status register and returns the retention state value.
+ */
+#define CCN_GET_RETENTION_STATE(pstate)	((pstate >> 4) & 0x3)
+
+/*
+ * The following macro takes the value returned from a read of a HN-F P-state
+ * status register and returns the run state value.
+ */
+#define CCN_GET_RUN_STATE(pstate)	(pstate & 0xf)
+
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+/*
+ * This structure describes some of the implementation defined attributes of the
+ * CCN IP. It is used by the platform port to specify these attributes in order
+ * to initialise the CCN driver. The attributes are described below.
+ *
+ * 1. The 'num_masters' field specifies the total number of master interfaces
+ *    resident on Request nodes.
+ *
+ * 2. The 'master_to_rn_id_map' field is a pointer to an array in which each
+ *    index corresponds to a master interface and its value corresponds to the
+ *    Request node on which the master interface resides.
+ *    This field is not simply defined as an array of size CCN_MAX_RN_MASTERS.
+ *    In reality, a platform will have far fewer master interfaces than
+ *    CCN_MAX_RN_MASTERS. With an array of this size, it would also have to
+ *    set the unused entries to a suitable value. Zeroing the array would not
+ *    be enough since 0 is also a valid node id. Hence, such an array is not
+ *    used.
+ *
+ * 3. The 'periphbase' field is the base address of the programmer's view of the
+ *    CCN IP.
+ */
+typedef struct ccn_desc {
+	unsigned int num_masters;
+	const unsigned char *master_to_rn_id_map;
+	uintptr_t periphbase;
+} ccn_desc_t;
+
+
+void ccn_init(const ccn_desc_t *plat_ccn_desc);
+void ccn_enter_snoop_dvm_domain(unsigned long long master_iface_map);
+void ccn_exit_snoop_dvm_domain(unsigned long long master_iface_map);
+void ccn_enter_dvm_domain(unsigned long long master_iface_map);
+void ccn_exit_dvm_domain(unsigned long long master_iface_map);
+void ccn_set_l3_run_mode(unsigned int mode);
+void ccn_program_sys_addrmap(unsigned int sn0_id,
+		 unsigned int sn1_id,
+		 unsigned int sn2_id,
+		 unsigned int top_addr_bit0,
+		 unsigned int top_addr_bit1,
+		 unsigned char three_sn_en);
+unsigned int ccn_get_l3_run_mode(void);
+int ccn_get_part0_id(uintptr_t periphbase);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CCN_H__ */
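
ccn_init() consumes the descriptor documented above. A hedged sketch for an imaginary platform with two request-node masters; the node IDs and PERIPHBASE are placeholders.

#include <ccn.h>

/* Index = master interface, value = the request node it resides on. */
static const unsigned char plat_rn_id_map[] = {
	1,	/* master interface 0 on request node 1 (placeholder) */
	5,	/* master interface 1 on request node 5 (placeholder) */
};

static const ccn_desc_t plat_ccn_desc = {
	.num_masters = sizeof(plat_rn_id_map),
	.master_to_rn_id_map = plat_rn_id_map,
	.periphbase = 0x2e000000,	/* placeholder PERIPHBASE */
};

void plat_ccn_setup(void)
{
	ccn_init(&plat_ccn_desc);
	/* Bring master interface 0 into the snoop and DVM domains. */
	ccn_enter_snoop_dvm_domain(1ULL << 0);
}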
diff --git a/include/drivers/arm/cryptocell/cc_crypto_boot_defs.h b/include/drivers/arm/cryptocell/cc_crypto_boot_defs.h
new file mode 100644
index 0000000..2cb8938
--- /dev/null
+++ b/include/drivers/arm/cryptocell/cc_crypto_boot_defs.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _CC_CRYPTO_BOOT_DEFS_H
+#define _CC_CRYPTO_BOOT_DEFS_H
+
+/*! @file
+@brief This file contains SBROM definitions
+*/
+
+/*! Version counters value. */
+typedef enum {
+
+	CC_SW_VERSION_COUNTER1 = 1,	/*!< Counter 1 - trusted version. */
+	CC_SW_VERSION_COUNTER2,		/*!< Counter 2 - non-trusted version. */
+
+	CC_SW_VERSION_MAX      = 0x7FFFFFFF
+
+} CCSbSwVersionId_t;
+
+/* HASH boot key definition */
+typedef enum {
+	CC_SB_HASH_BOOT_KEY_0_128B 	= 0,		/*!< 128-bit truncated SHA256 digest of public key 0. */
+	CC_SB_HASH_BOOT_KEY_1_128B	= 1,		/*!< 128-bit truncated SHA256 digest of public key 1. */
+	CC_SB_HASH_BOOT_KEY_256B	= 2,		/*!< 256-bit SHA256 digest of public key. */
+	CC_SB_HASH_BOOT_NOT_USED	= 0xFF,
+	CC_SB_HASH_MAX_NUM 		= 0x7FFFFFFF,	/*!\internal use external 128-bit truncated SHA256 digest */
+} CCSbPubKeyIndexType_t;
+
+
+#endif
diff --git a/include/drivers/arm/cryptocell/cc_pal_sb_plat.h b/include/drivers/arm/cryptocell/cc_pal_sb_plat.h
new file mode 100644
index 0000000..212a710
--- /dev/null
+++ b/include/drivers/arm/cryptocell/cc_pal_sb_plat.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*!
+@file
+@brief This file contains the platform-dependent definitions that are used in the SBROM code.
+*/
+
+#ifndef _CC_PAL_SB_PLAT_H
+#define _CC_PAL_SB_PLAT_H
+
+#include "cc_pal_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*! Definition of DMA address type, can be 32 bits or 64 bits according to CryptoCell's HW. */
+typedef uint64_t		CCDmaAddr_t;
+/*! Definition of CryptoCell address type, can be 32 bits or 64 bits according to platform. */
+typedef uintptr_t		CCAddr_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/cc_pal_types.h b/include/drivers/arm/cryptocell/cc_pal_types.h
new file mode 100644
index 0000000..8c09b23
--- /dev/null
+++ b/include/drivers/arm/cryptocell/cc_pal_types.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CC_PAL_TYPES_H
+#define CC_PAL_TYPES_H
+
+/*!
+@file
+@brief This file contains platform-dependent definitions and types.
+*/
+
+#include "cc_pal_types_plat.h"
+
+typedef enum {
+	CC_FALSE = 0,
+	CC_TRUE = 1
+} CCBool;
+
+#define CC_SUCCESS		0UL
+#define CC_FAIL			1UL
+
+#define CC_1K_SIZE_IN_BYTES	1024
+#define CC_BITS_IN_BYTE		8
+#define CC_BITS_IN_32BIT_WORD	32
+#define CC_32BIT_WORD_SIZE	(sizeof(uint32_t))
+
+#define CC_OK			CC_SUCCESS
+
+#define CC_UNUSED_PARAM(prm)	((void)prm)
+
+#define CC_MAX_UINT32_VAL	(0xFFFFFFFF)
+
+#define CALC_FULL_BYTES(numBits)		(((numBits) + (CC_BITS_IN_BYTE - 1))/CC_BITS_IN_BYTE)
+#define CALC_FULL_32BIT_WORDS(numBits)		(((numBits) + (CC_BITS_IN_32BIT_WORD - 1))/CC_BITS_IN_32BIT_WORD)
+#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes)	(((sizeBytes) + CC_32BIT_WORD_SIZE - 1)/CC_32BIT_WORD_SIZE)
+
+#endif
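
The CALC_* helpers are plain ceiling divisions: a 2048-bit modulus needs 256 bytes and 64 32-bit words, and a 17-bit value still rounds up to 3 full bytes. A compile-time check of that arithmetic, using the CASSERT macro from this tree and assuming the CC_BITS_IN_32BIT_WORD spelling fix above.

#include <cassert.h>
#include "cc_pal_types.h"

CASSERT(CALC_FULL_BYTES(2048) == 256, assert_2048_bits_is_256_bytes);
CASSERT(CALC_FULL_32BIT_WORDS(2048) == 64, assert_2048_bits_is_64_words);
CASSERT(CALC_FULL_BYTES(17) == 3, assert_17_bits_rounds_to_3_bytes);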
diff --git a/include/drivers/arm/cryptocell/cc_pal_types_plat.h b/include/drivers/arm/cryptocell/cc_pal_types_plat.h
new file mode 100644
index 0000000..8410024
--- /dev/null
+++ b/include/drivers/arm/cryptocell/cc_pal_types_plat.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*! @file
+@brief This file contains basic type definitions that are platform-dependent.
+*/
+#ifndef _CC_PAL_TYPES_PLAT_H
+#define _CC_PAL_TYPES_PLAT_H
+/* Host-specific types for standard (ISO C99) compliant platforms */
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef uint32_t CCStatus;
+
+#define CCError_t	CCStatus
+#define CC_INFINITE	0xFFFFFFFF
+
+#define CEXPORT_C
+#define CIMPORT_C
+
+#endif /*_CC_PAL_TYPES_PLAT_H*/
diff --git a/include/drivers/arm/cryptocell/cc_sec_defs.h b/include/drivers/arm/cryptocell/cc_sec_defs.h
new file mode 100644
index 0000000..d419218
--- /dev/null
+++ b/include/drivers/arm/cryptocell/cc_sec_defs.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _CC_SEC_DEFS_H
+#define _CC_SEC_DEFS_H
+
+/*!
+@file
+@brief This file contains general hash definitions and types.
+*/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*! The hash block size in words. */
+#define HASH_BLOCK_SIZE_IN_WORDS             16
+/*! The SHA-2 hash result size in words. */
+#define HASH_RESULT_SIZE_IN_WORDS            8
+#define HASH_RESULT_SIZE_IN_BYTES            32
+
+/*! Definition for hash result array. */
+typedef uint32_t CCHashResult_t[HASH_RESULT_SIZE_IN_WORDS];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/crypto_driver.h b/include/drivers/arm/cryptocell/crypto_driver.h
new file mode 100644
index 0000000..18104dd
--- /dev/null
+++ b/include/drivers/arm/cryptocell/crypto_driver.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _CRYPTO_DRIVER_H
+#define _CRYPTO_DRIVER_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "cc_pal_sb_plat.h"
+#include "cc_sec_defs.h"
+
+/*----------------------------
+      PUBLIC FUNCTIONS
+-----------------------------------*/
+/*!
+ * @brief This function provides the integrated hash functionality.
+ *
+ * @param[in] hwBaseAddress	- CryptoCell base address
+ * @param[in] inputDataAddr	- DMA address of the data to hash
+ * @param[in] BlockSize		- size of the input data block
+ * @param[out] hashResult	- the HASH result.
+ *
+ */
+CCError_t SBROM_CryptoHash(unsigned long hwBaseAddress, CCDmaAddr_t inputDataAddr, uint32_t BlockSize,
+				CCHashResult_t hashResult);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/nvm.h b/include/drivers/arm/cryptocell/nvm.h
new file mode 100644
index 0000000..a70289f
--- /dev/null
+++ b/include/drivers/arm/cryptocell/nvm.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _NVM__H
+#define _NVM__H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "cc_crypto_boot_defs.h"
+#include "cc_pal_types.h"
+#include "cc_sec_defs.h"
+
+/*------------------------------------
+    DEFINES
+-------------------------------------*/
+
+/**
+ * @brief This function reads the LCS from the SRAM/NVM
+ *
+ * @param[in] hwBaseAddress  -  CryptoCell base address
+ *
+ * @param[in/out] lcs_ptr  - pointer to memory to store the LCS
+ *
+ * @return CCError_t - On success, CC_OK is returned; on failure, a value from NVM_error.h.
+ */
+CCError_t NVM_GetLCS(unsigned long hwBaseAddress, uint32_t *lcs_ptr);
+
+/**
+ * @brief The NVM_ReadHASHPubKey function is an NVM interface function -
+ *        The function retrieves the HASH of the device Public key from the SRAM/NVM
+ *
+ * @param[in] hwBaseAddress -  CryptoCell base address
+ *
+ * @param[in] pubKeyIndex -  Index of HASH in the OTP
+ *
+ * @param[out] PubKeyHASH   -  the public key HASH.
+ *
+ * @param[in] hashSizeInWords -  hash size (valid values: 4W, 8W)
+ *
+ * @return CCError_t - On success, CC_OK is returned; on failure, a value from NVM_error.h.
+ */
+
+CCError_t NVM_ReadHASHPubKey(unsigned long hwBaseAddress, CCSbPubKeyIndexType_t pubKeyIndex, CCHashResult_t PubKeyHASH, uint32_t hashSizeInWords);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/nvm_otp.h b/include/drivers/arm/cryptocell/nvm_otp.h
new file mode 100644
index 0000000..390d62b
--- /dev/null
+++ b/include/drivers/arm/cryptocell/nvm_otp.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _NVM_OTP_H
+#define _NVM_OTP_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "cc_crypto_boot_defs.h"
+#include "cc_pal_types.h"
+
+/*------------------------------------
+    DEFINES
+-------------------------------------*/
+
+
+
+/**
+ * @brief The NVM_GetSwVersion function is an NVM interface function that
+ *        retrieves the SW version from the SRAM/NVM.
+ *        In the OTP case, up to 16 anti-rollback counters (taken from the certificate) are supported.
+ *
+ * @param[in] hwBaseAddress -  CryptoCell base address
+ *
+ * @param[in] counterId -  relevant only for OTP (valid values: 1,2)
+ *
+ * @param[out] swVersion   -  the minimum SW version
+ *
+ * @return CCError_t - On success CC_OK is returned; on failure, a value from NVM_error.h.
+ */
+CCError_t NVM_GetSwVersion(unsigned long hwBaseAddress, CCSbSwVersionId_t counterId, uint32_t *swVersion);
+
+
+/**
+ * @brief The NVM_SetSwVersion function is an NVM interface function that
+ *        writes the SW version into the SRAM/NVM.
+ *        In the OTP case, up to 16 anti-rollback counters (taken from the certificate) are supported.
+ *
+ * @param[in] hwBaseAddress -  CryptoCell base address
+ *
+ * @param[in] counterId -  relevant only for OTP (valid values: 1,2)
+ *
+ * @param[in] swVersion   -  the minimum SW version
+ *
+ * @return CCError_t - On success CC_OK is returned; on failure, a value from NVM_error.h.
+ */
+CCError_t NVM_SetSwVersion(unsigned long hwBaseAddress, CCSbSwVersionId_t counterId, uint32_t swVersion);
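+
+/*
+ * Usage sketch: a minimal anti-rollback update. 'certVersion' is assumed to
+ * come from an already verified certificate; the stored minimum version is
+ * only ever moved forward. The helper name is hypothetical.
+ */
+static inline CCError_t example_update_rollback_counter(unsigned long hwBaseAddress,
+							 CCSbSwVersionId_t counterId,
+							 uint32_t certVersion)
+{
+	uint32_t minVersion;
+	CCError_t error;
+
+	error = NVM_GetSwVersion(hwBaseAddress, counterId, &minVersion);
+	if (error != CC_OK)
+		return error;
+
+	/* Only advance the stored counter when the image is newer. */
+	if (certVersion > minVersion)
+		error = NVM_SetSwVersion(hwBaseAddress, counterId, certVersion);
+
+	return error;
+}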
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/rsa.h b/include/drivers/arm/cryptocell/rsa.h
new file mode 100644
index 0000000..cd9925b
--- /dev/null
+++ b/include/drivers/arm/cryptocell/rsa.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RSA_H
+#define RSA_H
+
+/*
+ * All the includes that are needed for code using this module to
+ * compile correctly should be #included here.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "cc_pal_types.h"
+
+/************************ Defines ******************************/
+
+/* The modulus size in bits */
+#define RSA_MOD_SIZE_IN_BITS				2048UL
+#define RSA_MOD_SIZE_IN_BYTES				(CALC_FULL_BYTES(RSA_MOD_SIZE_IN_BITS))
+#define RSA_MOD_SIZE_IN_WORDS				(CALC_FULL_32BIT_WORDS(RSA_MOD_SIZE_IN_BITS))
+#define RSA_MOD_SIZE_IN_256BITS				(RSA_MOD_SIZE_IN_WORDS/8)
+#define RSA_EXP_SIZE_IN_BITS				17UL
+#define RSA_EXP_SIZE_IN_BYTES				(CALC_FULL_BYTES(RSA_EXP_SIZE_IN_BITS))
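+
+/*
+ * For example, with the values above RSA_MOD_SIZE_IN_BYTES is 256,
+ * RSA_MOD_SIZE_IN_WORDS is 64 and RSA_MOD_SIZE_IN_256BITS is 8, while the
+ * 17-bit public exponent (e.g. 65537) occupies 3 bytes.
+ */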
+
+/* size of buffer for Barrett modulus tag NP, used in PKA algorithms */
+#define RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_BITS	132
+#define RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_BYTES	(CALC_FULL_BYTES(RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_BITS))
+#define RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_WORDS	(CALC_FULL_32BIT_WORDS(RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_BITS))
+
+/*
+ * @brief RSA_CalcNp calculates the Np (Barrett tag) value and saves it into Np_ptr.
+ *
+ * @param[in] hwBaseAddress -	HW base address. Relevant for the HW
+ *				implementation; ignored for SW.
+ * @param[in] N_ptr         -	The pointer to the modulus buffer.
+ * @param[out] Np_ptr       -	Pointer to the Np vector buffer. Its size must be >= 160.
+ */
+void RSA_CalcNp(unsigned long hwBaseAddress,
+		uint32_t *N_ptr,
+		uint32_t *Np_ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/sbrom_bsv_api.h b/include/drivers/arm/cryptocell/sbrom_bsv_api.h
new file mode 100644
index 0000000..de83546
--- /dev/null
+++ b/include/drivers/arm/cryptocell/sbrom_bsv_api.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _SBROM_BSV_API_H
+#define _SBROM_BSV_API_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*! @file
+@brief This file contains all SBROM library APIs and definitions.
+*/
+#include "cc_pal_types.h"
+
+/* Life cycle state definitions */
+#define CC_BSV_CHIP_MANUFACTURE_LCS		0x0 /*!< CM lifecycle value. */
+#define CC_BSV_DEVICE_MANUFACTURE_LCS		0x1 /*!< DM lifecycle value. */
+#define CC_BSV_SECURITY_DISABLED_LCS		0x3 /*!< SD lifecycle value. */
+#define CC_BSV_SECURE_LCS			0x5 /*!< Secure lifecycle value. */
+#define CC_BSV_RMA_LCS				0x7 /*!< RMA lifecycle value. */
+
+/*-----------------------------------
+      PUBLIC FUNCTIONS
+-----------------------------------*/
+
+/*!
+@brief This function should be the first ARM TrustZone CryptoCell TEE SBROM library API called.
+It verifies the HW product and version numbers.
+
+@return CC_OK	On success.
+@return A non-zero value from sbrom_bsv_error.h on failure.
+*/
+CCError_t CC_BsvSbromInit(
+	unsigned long hwBaseAddress 	/*!< [in] HW registers base address. */
+	);
+
+
+/*!
+@brief This function can be used for checking the LCS value, after CC_BsvLcsGetAndInit was called by the Boot ROM.
+
+@return CC_OK	On success.
+@return A non-zero value from sbrom_bsv_error.h on failure.
+*/
+CCError_t CC_BsvLcsGet(
+	unsigned long hwBaseAddress,	/*!< [in] HW registers base address. */
+	uint32_t *pLcs			/*!< [out] Returned lifecycle state. */
+	);
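+
+/*
+ * Usage sketch: an LCS gate built on CC_BsvLcsGet. The helper name is
+ * hypothetical; it returns non-zero only when the device reports the Secure
+ * or RMA lifecycle state.
+ */
+static inline int example_lcs_is_secure_or_rma(unsigned long hwBaseAddress)
+{
+	uint32_t lcs;
+
+	if (CC_BsvLcsGet(hwBaseAddress, &lcs) != CC_OK)
+		return 0;
+
+	return (lcs == CC_BSV_SECURE_LCS) || (lcs == CC_BSV_RMA_LCS);
+}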
+
+/*!
+@brief This function retrieves the HW security lifecycle state, performs validity checks,
+and performs additional initialization when the LCS is RMA (sets the Kce to a fixed value).
+\note	An invalid LCS results in an error being returned.
+In this case, the customer's code must completely disable the device.
+
+@return CC_OK	On success.
+@return A non-zero value from sbrom_bsv_error.h on failure.
+*/
+CCError_t CC_BsvLcsGetAndInit(
+	unsigned long hwBaseAddress,	/*!< [in] HW registers base address. */
+	uint32_t *pLcs		/*!< [out] Returned lifecycle state. */
+	);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/secureboot_base_func.h b/include/drivers/arm/cryptocell/secureboot_base_func.h
new file mode 100644
index 0000000..6db596e
--- /dev/null
+++ b/include/drivers/arm/cryptocell/secureboot_base_func.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _SECURE_BOOT_BASE_FUNC_H
+#define _SECURE_BOOT_BASE_FUNC_H
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "cc_pal_types.h"
+#include "secureboot_gen_defs.h"
+
+
+/*-----------------------------------
+      PUBLIC FUNCTIONS
+-----------------------------------*/
+
+/**
+ * @brief This function calculates the HASH over the given data and then verifies
+ *	  the RSA signature on that hashed data.
+ *
+ * @param[in] hwBaseAddress -  CryptoCell base address
+ * @param[in] pData - pointer to the data to be verified
+ * @param[in] pNParams - a pointer to the public key parameters
+ * @param[in] pSignature - a pointer to the signature structure
+ * @param[in] sizeOfData - size of the data to calculate the HASH on (in bytes)
+ * @param[in] RSAAlg - RSA algorithm to use
+ *
+ * @return CCError_t - On success the value CC_OK is returned,
+ *         on failure - a value from BootImagesVerifier_error.h
+ */
+CCError_t CCSbVerifySignature(unsigned long hwBaseAddress,
+				uint32_t *pData,
+				CCSbNParams_t *pNParams,
+				CCSbSignature_t *pSignature,
+				uint32_t sizeOfData,
+				CCSbRsaAlg_t RSAAlg);
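+
+/*
+ * Usage sketch: verifying an RSA-PSS 2048 signature over an image blob. The
+ * wrapper name is hypothetical; the key parameters and signature would
+ * normally come from a content certificate.
+ */
+static inline CCError_t example_verify_image(unsigned long hwBaseAddress,
+					     uint32_t *pImage, uint32_t imageSize,
+					     CCSbNParams_t *pNParams,
+					     CCSbSignature_t *pSignature)
+{
+	return CCSbVerifySignature(hwBaseAddress, pImage, pNParams, pSignature,
+				   imageSize, RSA_PSS_2048);
+}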
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/secureboot_gen_defs.h b/include/drivers/arm/cryptocell/secureboot_gen_defs.h
new file mode 100644
index 0000000..68b9ef8
--- /dev/null
+++ b/include/drivers/arm/cryptocell/secureboot_gen_defs.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _SECURE_BOOT_GEN_DEFS_H
+#define _SECURE_BOOT_GEN_DEFS_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*! @file
+@brief This file contains all of the definitions and structures that are used for the secure boot.
+*/
+
+#include "cc_pal_sb_plat.h"
+#include "cc_sec_defs.h"
+
+
+/* General definitions */
+/***********************/
+
+/*RSA definitions*/
+#define SB_RSA_MOD_SIZE_IN_WORDS		 64
+#define SB_RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_WORDS 5
+
+
+/*! Public key data structure. */
+typedef struct {
+	uint32_t N[SB_RSA_MOD_SIZE_IN_WORDS];				/*!< N public key, big endian representation. */
+	uint32_t Np[SB_RSA_HW_PKI_PKA_BARRETT_MOD_TAG_SIZE_IN_WORDS];	/*!< Np (Barrett n' value). */
+} CCSbNParams_t;
+
+/*! Signature structure. */
+typedef struct {
+	uint32_t sig[SB_RSA_MOD_SIZE_IN_WORDS];				/*!< RSA PSS signature. */
+} CCSbSignature_t;
+
+
+/********* Supported algorithms definitions ***********/
+
+/*! RSA supported algorithms */
+typedef enum {
+	RSA_PSS_2048           = 0x01,			/*!< RSA-PSS with a 2048-bit modulus, over a SHA-256 hash */
+	RSA_PKCS15_2048	       = 0x02,			/*!< RSA PKCS#1 v1.5 with a 2048-bit modulus */
+	RSA_Last               = 0x7FFFFFFF
+} CCSbRsaAlg_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/cryptocell/util.h b/include/drivers/arm/cryptocell/util.h
new file mode 100644
index 0000000..18fb599
--- /dev/null
+++ b/include/drivers/arm/cryptocell/util.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+/*
+ * All the includes that are needed for code using this module to
+ * compile correctly should be #included here.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/************************ Defines ******************************/
+
+/* Inverts the bytes in a word - used for output from HASH */
+#ifdef BIG__ENDIAN
+#define UTIL_INVERSE_UINT32_BYTES(val)	(val)
+#else
+#define UTIL_INVERSE_UINT32_BYTES(val) \
+	(((val) >> 24) | (((val) & 0x00FF0000) >> 8) | (((val) & 0x0000FF00) << 8) | (((val) & 0x000000FF) << 24))
+#endif
+
+/* Inverts the bytes in a word - used for input data for HASH */
+#ifdef BIG__ENDIAN
+#define UTIL_REVERT_UINT32_BYTES(val) \
+	(((val) >> 24) | (((val) & 0x00FF0000) >> 8) | (((val) & 0x0000FF00) << 8) | (((val) & 0x000000FF) << 24))
+#else
+#define UTIL_REVERT_UINT32_BYTES(val)	(val)
+#endif
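+
+/*
+ * For example, on a little-endian build UTIL_INVERSE_UINT32_BYTES(0x11223344)
+ * evaluates to 0x44332211 while UTIL_REVERT_UINT32_BYTES leaves the value
+ * unchanged; the roles swap on a BIG__ENDIAN build.
+ */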
+
+ /* ------------------------------------------------------------
+ **
+ * @brief This function performs a reversed-byte copy from one buffer to another.
+ *
+ * @param[out] dst_ptr - The pointer to the destination buffer.
+ * @param[in]  src_ptr - The pointer to the source buffer.
+ * @param[in]  size    - The size in bytes.
+ *
+ */
+
+void UTIL_ReverseMemCopy(uint8_t *dst_ptr, uint8_t *src_ptr, uint32_t size);
+
+
+ /* ------------------------------------------------------------
+  **
+  * @brief This function performs an in-place reversed-byte copy on the specified buffer.
+  *
+  *        On a 6-byte buffer:
+  *
+  *        buff[5] <---> buff[0]
+  *        buff[4] <---> buff[1]
+  *        buff[3] <---> buff[2]
+  *
+  * @param[in,out] buff_ptr - The buffer to reverse.
+  * @param[in]     size     - The buffer size in bytes.
+  *
+  */
+void UTIL_ReverseBuff(uint8_t *buff_ptr, uint32_t size);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/drivers/arm/gic_common.h b/include/drivers/arm/gic_common.h
new file mode 100644
index 0000000..9e126a8
--- /dev/null
+++ b/include/drivers/arm/gic_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_COMMON_H__
+#define __GIC_COMMON_H__
+
+/*******************************************************************************
+ * GIC Distributor interface general definitions
+ ******************************************************************************/
+/* Constants to categorise interrupts */
+#define MIN_SGI_ID		0
+#define MIN_SEC_SGI_ID		8
+#define MIN_PPI_ID		16
+#define MIN_SPI_ID		32
+#define MAX_SPI_ID		1019
+
+#define TOTAL_SPI_INTR_NUM	(MAX_SPI_ID - MIN_SPI_ID + 1)
+#define TOTAL_PCPU_INTR_NUM	(MIN_SPI_ID - MIN_SGI_ID)
+
+/* Mask for the priority field common to all GIC interfaces */
+#define GIC_PRI_MASK			0xff
+
+/* Mask for the configuration field common to all GIC interfaces */
+#define GIC_CFG_MASK			0x3
+
+/* Constant to indicate a spurious interrupt in all GIC versions */
+#define GIC_SPURIOUS_INTERRUPT		1023
+
+/* Interrupt configurations */
+#define GIC_INTR_CFG_LEVEL		0
+#define GIC_INTR_CFG_EDGE		1
+
+/* Constants to categorise priorities */
+#define GIC_HIGHEST_SEC_PRIORITY	0
+#define GIC_LOWEST_SEC_PRIORITY		127
+#define GIC_HIGHEST_NS_PRIORITY		128
+#define GIC_LOWEST_NS_PRIORITY		254 /* 255 would disable an interrupt */
+
+/*******************************************************************************
+ * GIC Distributor interface register offsets that are common to GICv3 & GICv2
+ ******************************************************************************/
+#define GICD_CTLR		0x0
+#define GICD_TYPER		0x4
+#define GICD_IIDR		0x8
+#define GICD_IGROUPR		0x80
+#define GICD_ISENABLER		0x100
+#define GICD_ICENABLER		0x180
+#define GICD_ISPENDR		0x200
+#define GICD_ICPENDR		0x280
+#define GICD_ISACTIVER		0x300
+#define GICD_ICACTIVER		0x380
+#define GICD_IPRIORITYR		0x400
+#define GICD_ICFGR		0xc00
+#define GICD_NSACR		0xe00
+
+/* GICD_CTLR bit definitions */
+#define CTLR_ENABLE_G0_SHIFT		0
+#define CTLR_ENABLE_G0_MASK		0x1
+#define CTLR_ENABLE_G0_BIT		(1 << CTLR_ENABLE_G0_SHIFT)
+
+
+/*******************************************************************************
+ * GIC Distributor interface register constants that are common to GICv3 & GICv2
+ ******************************************************************************/
+#define PIDR2_ARCH_REV_SHIFT	4
+#define PIDR2_ARCH_REV_MASK	0xf
+
+/* GICv3 revision as reported by the PIDR2 register */
+#define ARCH_REV_GICV3		0x3
+/* GICv2 revision as reported by the PIDR2 register */
+#define ARCH_REV_GICV2		0x2
+
+#define IGROUPR_SHIFT		5
+#define ISENABLER_SHIFT		5
+#define ICENABLER_SHIFT		ISENABLER_SHIFT
+#define ISPENDR_SHIFT		5
+#define ICPENDR_SHIFT		ISPENDR_SHIFT
+#define ISACTIVER_SHIFT		5
+#define ICACTIVER_SHIFT		ISACTIVER_SHIFT
+#define IPRIORITYR_SHIFT	2
+#define ITARGETSR_SHIFT		2
+#define ICFGR_SHIFT		4
+#define NSACR_SHIFT		4
+
+/* GICD_TYPER shifts and masks */
+#define TYPER_IT_LINES_NO_SHIFT	0
+#define TYPER_IT_LINES_NO_MASK	0x1f
+
+/* Value used to initialize Normal world interrupt priorities four at a time */
+#define GICD_IPRIORITYR_DEF_VAL			\
+	(GIC_HIGHEST_NS_PRIORITY	|	\
+	(GIC_HIGHEST_NS_PRIORITY << 8)	|	\
+	(GIC_HIGHEST_NS_PRIORITY << 16)	|	\
+	(GIC_HIGHEST_NS_PRIORITY << 24))
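+
+/*
+ * For example, with GIC_HIGHEST_NS_PRIORITY equal to 0x80 the value above
+ * evaluates to 0x80808080, i.e. the same priority replicated into all four
+ * byte-wide fields of a GICD_IPRIORITYR register.
+ */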
+
+#endif /* __GIC_COMMON_H__ */
diff --git a/include/drivers/arm/gic_v2.h b/include/drivers/arm/gic_v2.h
index a2d3eee..258b898 100644
--- a/include/drivers/arm/gic_v2.h
+++ b/include/drivers/arm/gic_v2.h
@@ -1,205 +1,164 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __GIC_V2_H__
 #define __GIC_V2_H__
 
+/* The macros required here are additional to those in gic_common.h. */
+#include <gic_common.h>
 
-#define GIC400_NUM_SPIS		480
-#define MAX_PPIS		14
-#define MAX_SGIS		16
+/******************************************************************************
+ * THIS DRIVER IS DEPRECATED. For GICv2 systems, use the driver in gicv2.h
+ * and for GICv3 systems, use the driver in gicv3.h.
+ *****************************************************************************/
+#if ERROR_DEPRECATED
+#error " The legacy ARM GIC driver is deprecated."
+#endif
 
-#define MIN_SGI_ID		0
-#define MIN_PPI_ID		16
-#define MIN_SPI_ID		32
+#define GIC400_NUM_SPIS		U(480)
+#define MAX_PPIS		U(14)
+#define MAX_SGIS		U(16)
 
-#define GRP0			0
-#define GRP1			1
-#define GIC_PRI_MASK		0xff
-#define GIC_HIGHEST_SEC_PRIORITY 0
-#define GIC_LOWEST_SEC_PRIORITY	127
-#define GIC_HIGHEST_NS_PRIORITY	128
-#define GIC_LOWEST_NS_PRIORITY	254 /* 255 would disable an interrupt */
-#define GIC_SPURIOUS_INTERRUPT	1023
-#define GIC_TARGET_CPU_MASK	0xff
 
-#define ENABLE_GRP0		(1 << 0)
-#define ENABLE_GRP1		(1 << 1)
+#define GRP0			U(0)
+#define GRP1			U(1)
+#define GIC_TARGET_CPU_MASK	U(0xff)
+
+#define ENABLE_GRP0		(U(1) << 0)
+#define ENABLE_GRP1		(U(1) << 1)
 
 /* Distributor interface definitions */
-#define GICD_CTLR		0x0
-#define GICD_TYPER		0x4
-#define GICD_IGROUPR		0x80
-#define GICD_ISENABLER		0x100
-#define GICD_ICENABLER		0x180
-#define GICD_ISPENDR		0x200
-#define GICD_ICPENDR		0x280
-#define GICD_ISACTIVER		0x300
-#define GICD_ICACTIVER		0x380
-#define GICD_IPRIORITYR		0x400
-#define GICD_ITARGETSR		0x800
-#define GICD_ICFGR		0xC00
-#define GICD_SGIR		0xF00
-#define GICD_CPENDSGIR		0xF10
-#define GICD_SPENDSGIR		0xF20
+#define GICD_ITARGETSR		U(0x800)
+#define GICD_SGIR		U(0xF00)
+#define GICD_CPENDSGIR		U(0xF10)
+#define GICD_SPENDSGIR		U(0xF20)
 
-#define IGROUPR_SHIFT		5
-#define ISENABLER_SHIFT		5
-#define ICENABLER_SHIFT		ISENABLER_SHIFT
-#define ISPENDR_SHIFT		5
-#define ICPENDR_SHIFT		ISPENDR_SHIFT
-#define ISACTIVER_SHIFT		5
-#define ICACTIVER_SHIFT		ISACTIVER_SHIFT
-#define IPRIORITYR_SHIFT	2
-#define ITARGETSR_SHIFT		2
-#define ICFGR_SHIFT		4
-#define CPENDSGIR_SHIFT		2
+#define CPENDSGIR_SHIFT		U(2)
 #define SPENDSGIR_SHIFT		CPENDSGIR_SHIFT
 
 /* GICD_TYPER bit definitions */
-#define IT_LINES_NO_MASK	0x1f
+#define IT_LINES_NO_MASK	U(0x1f)
 
 /* Physical CPU Interface registers */
-#define GICC_CTLR		0x0
-#define GICC_PMR		0x4
-#define GICC_BPR		0x8
-#define GICC_IAR		0xC
-#define GICC_EOIR		0x10
-#define GICC_RPR		0x14
-#define GICC_HPPIR		0x18
-#define GICC_AHPPIR		0x28
-#define GICC_IIDR		0xFC
-#define GICC_DIR		0x1000
+#define GICC_CTLR		U(0x0)
+#define GICC_PMR		U(0x4)
+#define GICC_BPR		U(0x8)
+#define GICC_IAR		U(0xC)
+#define GICC_EOIR		U(0x10)
+#define GICC_RPR		U(0x14)
+#define GICC_HPPIR		U(0x18)
+#define GICC_AHPPIR		U(0x28)
+#define GICC_IIDR		U(0xFC)
+#define GICC_DIR		U(0x1000)
 #define GICC_PRIODROP           GICC_EOIR
 
+/* Common CPU Interface definitions */
+#define INT_ID_MASK		U(0x3ff)
+
 /* GICC_CTLR bit definitions */
-#define EOI_MODE_NS		(1 << 10)
-#define EOI_MODE_S		(1 << 9)
-#define IRQ_BYP_DIS_GRP1	(1 << 8)
-#define FIQ_BYP_DIS_GRP1	(1 << 7)
-#define IRQ_BYP_DIS_GRP0	(1 << 6)
-#define FIQ_BYP_DIS_GRP0	(1 << 5)
-#define CBPR			(1 << 4)
-#define FIQ_EN			(1 << 3)
-#define ACK_CTL			(1 << 2)
+#define EOI_MODE_NS		(U(1) << 10)
+#define EOI_MODE_S		(U(1) << 9)
+#define IRQ_BYP_DIS_GRP1	(U(1) << 8)
+#define FIQ_BYP_DIS_GRP1	(U(1) << 7)
+#define IRQ_BYP_DIS_GRP0	(U(1) << 6)
+#define FIQ_BYP_DIS_GRP0	(U(1) << 5)
+#define CBPR			(U(1) << 4)
+#define FIQ_EN			(U(1) << 3)
+#define ACK_CTL			(U(1) << 2)
 
 /* GICC_IIDR bit masks and shifts */
-#define GICC_IIDR_PID_SHIFT	20
-#define GICC_IIDR_ARCH_SHIFT	16
-#define GICC_IIDR_REV_SHIFT	12
-#define GICC_IIDR_IMP_SHIFT	0
+#define GICC_IIDR_PID_SHIFT	U(20)
+#define GICC_IIDR_ARCH_SHIFT	U(16)
+#define GICC_IIDR_REV_SHIFT	U(12)
+#define GICC_IIDR_IMP_SHIFT	U(0)
 
-#define GICC_IIDR_PID_MASK	0xfff
-#define GICC_IIDR_ARCH_MASK	0xf
-#define GICC_IIDR_REV_MASK	0xf
-#define GICC_IIDR_IMP_MASK	0xfff
+#define GICC_IIDR_PID_MASK	U(0xfff)
+#define GICC_IIDR_ARCH_MASK	U(0xf)
+#define GICC_IIDR_REV_MASK	U(0xf)
+#define GICC_IIDR_IMP_MASK	U(0xfff)
 
 /* HYP view virtual CPU Interface registers */
-#define GICH_CTL		0x0
-#define GICH_VTR		0x4
-#define GICH_ELRSR0		0x30
-#define GICH_ELRSR1		0x34
-#define GICH_APR0		0xF0
-#define GICH_LR_BASE		0x100
+#define GICH_CTL		U(0x0)
+#define GICH_VTR		U(0x4)
+#define GICH_ELRSR0		U(0x30)
+#define GICH_ELRSR1		U(0x34)
+#define GICH_APR0		U(0xF0)
+#define GICH_LR_BASE		U(0x100)
 
 /* Virtual CPU Interface registers */
-#define GICV_CTL		0x0
-#define GICV_PRIMASK		0x4
-#define GICV_BP			0x8
-#define GICV_INTACK		0xC
-#define GICV_EOI		0x10
-#define GICV_RUNNINGPRI		0x14
-#define GICV_HIGHESTPEND	0x18
-#define GICV_DEACTIVATE		0x1000
+#define GICV_CTL		U(0x0)
+#define GICV_PRIMASK		U(0x4)
+#define GICV_BP			U(0x8)
+#define GICV_INTACK		U(0xC)
+#define GICV_EOI		U(0x10)
+#define GICV_RUNNINGPRI		U(0x14)
+#define GICV_HIGHESTPEND	U(0x18)
+#define GICV_DEACTIVATE		U(0x1000)
 
 #ifndef __ASSEMBLY__
 
 #include <mmio.h>
-
+#include <stdint.h>
 
 /*******************************************************************************
  * GIC Distributor function prototypes
  ******************************************************************************/
 
-unsigned int gicd_read_igroupr(unsigned int, unsigned int);
-unsigned int gicd_read_isenabler(unsigned int, unsigned int);
-unsigned int gicd_read_icenabler(unsigned int, unsigned int);
-unsigned int gicd_read_ispendr(unsigned int, unsigned int);
-unsigned int gicd_read_icpendr(unsigned int, unsigned int);
-unsigned int gicd_read_isactiver(unsigned int, unsigned int);
-unsigned int gicd_read_icactiver(unsigned int, unsigned int);
-unsigned int gicd_read_ipriorityr(unsigned int, unsigned int);
-unsigned int gicd_read_itargetsr(unsigned int, unsigned int);
-unsigned int gicd_read_icfgr(unsigned int, unsigned int);
-unsigned int gicd_read_cpendsgir(unsigned int, unsigned int);
-unsigned int gicd_read_spendsgir(unsigned int, unsigned int);
-void gicd_write_igroupr(unsigned int, unsigned int, unsigned int);
-void gicd_write_isenabler(unsigned int, unsigned int, unsigned int);
-void gicd_write_icenabler(unsigned int, unsigned int, unsigned int);
-void gicd_write_ispendr(unsigned int, unsigned int, unsigned int);
-void gicd_write_icpendr(unsigned int, unsigned int, unsigned int);
-void gicd_write_isactiver(unsigned int, unsigned int, unsigned int);
-void gicd_write_icactiver(unsigned int, unsigned int, unsigned int);
-void gicd_write_ipriorityr(unsigned int, unsigned int, unsigned int);
-void gicd_write_itargetsr(unsigned int, unsigned int, unsigned int);
-void gicd_write_icfgr(unsigned int, unsigned int, unsigned int);
-void gicd_write_cpendsgir(unsigned int, unsigned int, unsigned int);
-void gicd_write_spendsgir(unsigned int, unsigned int, unsigned int);
-unsigned int gicd_get_igroupr(unsigned int, unsigned int);
-void gicd_set_igroupr(unsigned int, unsigned int);
-void gicd_clr_igroupr(unsigned int, unsigned int);
-void gicd_set_isenabler(unsigned int, unsigned int);
-void gicd_set_icenabler(unsigned int, unsigned int);
-void gicd_set_ispendr(unsigned int, unsigned int);
-void gicd_set_icpendr(unsigned int, unsigned int);
-void gicd_set_isactiver(unsigned int, unsigned int);
-void gicd_set_icactiver(unsigned int, unsigned int);
-void gicd_set_ipriorityr(unsigned int, unsigned int, unsigned int);
-void gicd_set_itargetsr(unsigned int, unsigned int, unsigned int);
+unsigned int gicd_read_igroupr(uintptr_t, unsigned int);
+unsigned int gicd_read_isenabler(uintptr_t, unsigned int);
+unsigned int gicd_read_icenabler(uintptr_t, unsigned int);
+unsigned int gicd_read_ispendr(uintptr_t, unsigned int);
+unsigned int gicd_read_icpendr(uintptr_t, unsigned int);
+unsigned int gicd_read_isactiver(uintptr_t, unsigned int);
+unsigned int gicd_read_icactiver(uintptr_t, unsigned int);
+unsigned int gicd_read_ipriorityr(uintptr_t, unsigned int);
+unsigned int gicd_read_itargetsr(uintptr_t, unsigned int);
+unsigned int gicd_read_icfgr(uintptr_t, unsigned int);
+unsigned int gicd_read_cpendsgir(uintptr_t, unsigned int);
+unsigned int gicd_read_spendsgir(uintptr_t, unsigned int);
+void gicd_write_igroupr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_isenabler(uintptr_t, unsigned int, unsigned int);
+void gicd_write_icenabler(uintptr_t, unsigned int, unsigned int);
+void gicd_write_ispendr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_icpendr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_isactiver(uintptr_t, unsigned int, unsigned int);
+void gicd_write_icactiver(uintptr_t, unsigned int, unsigned int);
+void gicd_write_ipriorityr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_itargetsr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_icfgr(uintptr_t, unsigned int, unsigned int);
+void gicd_write_cpendsgir(uintptr_t, unsigned int, unsigned int);
+void gicd_write_spendsgir(uintptr_t, unsigned int, unsigned int);
+unsigned int gicd_get_igroupr(uintptr_t, unsigned int);
+void gicd_set_igroupr(uintptr_t, unsigned int);
+void gicd_clr_igroupr(uintptr_t, unsigned int);
+void gicd_set_isenabler(uintptr_t, unsigned int);
+void gicd_set_icenabler(uintptr_t, unsigned int);
+void gicd_set_ispendr(uintptr_t, unsigned int);
+void gicd_set_icpendr(uintptr_t, unsigned int);
+void gicd_set_isactiver(uintptr_t, unsigned int);
+void gicd_set_icactiver(uintptr_t, unsigned int);
+void gicd_set_ipriorityr(uintptr_t, unsigned int, unsigned int);
+void gicd_set_itargetsr(uintptr_t, unsigned int, unsigned int);
 
 
 /*******************************************************************************
  * GIC Distributor interface accessors for reading entire registers
  ******************************************************************************/
 
-static inline unsigned int gicd_read_ctlr(unsigned int base)
+static inline unsigned int gicd_read_ctlr(uintptr_t base)
 {
 	return mmio_read_32(base + GICD_CTLR);
 }
 
-static inline unsigned int gicd_read_typer(unsigned int base)
+static inline unsigned int gicd_read_typer(uintptr_t base)
 {
 	return mmio_read_32(base + GICD_TYPER);
 }
 
-static inline unsigned int gicd_read_sgir(unsigned int base)
+static inline unsigned int gicd_read_sgir(uintptr_t base)
 {
 	return mmio_read_32(base + GICD_SGIR);
 }
@@ -209,12 +168,12 @@
  * GIC Distributor interface accessors for writing entire registers
  ******************************************************************************/
 
-static inline void gicd_write_ctlr(unsigned int base, unsigned int val)
+static inline void gicd_write_ctlr(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICD_CTLR, val);
 }
 
-static inline void gicd_write_sgir(unsigned int base, unsigned int val)
+static inline void gicd_write_sgir(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICD_SGIR, val);
 }
@@ -224,47 +183,47 @@
  * GIC CPU interface accessors for reading entire registers
  ******************************************************************************/
 
-static inline unsigned int gicc_read_ctlr(unsigned int base)
+static inline unsigned int gicc_read_ctlr(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_CTLR);
 }
 
-static inline unsigned int gicc_read_pmr(unsigned int base)
+static inline unsigned int gicc_read_pmr(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_PMR);
 }
 
-static inline unsigned int gicc_read_BPR(unsigned int base)
+static inline unsigned int gicc_read_BPR(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_BPR);
 }
 
-static inline unsigned int gicc_read_IAR(unsigned int base)
+static inline unsigned int gicc_read_IAR(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_IAR);
 }
 
-static inline unsigned int gicc_read_EOIR(unsigned int base)
+static inline unsigned int gicc_read_EOIR(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_EOIR);
 }
 
-static inline unsigned int gicc_read_hppir(unsigned int base)
+static inline unsigned int gicc_read_hppir(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_HPPIR);
 }
 
-static inline unsigned int gicc_read_ahppir(unsigned int base)
+static inline unsigned int gicc_read_ahppir(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_AHPPIR);
 }
 
-static inline unsigned int gicc_read_dir(unsigned int base)
+static inline unsigned int gicc_read_dir(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_DIR);
 }
 
-static inline unsigned int gicc_read_iidr(unsigned int base)
+static inline unsigned int gicc_read_iidr(uintptr_t base)
 {
 	return mmio_read_32(base + GICC_IIDR);
 }
@@ -274,38 +233,38 @@
  * GIC CPU interface accessors for writing entire registers
  ******************************************************************************/
 
-static inline void gicc_write_ctlr(unsigned int base, unsigned int val)
+static inline void gicc_write_ctlr(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_CTLR, val);
 }
 
-static inline void gicc_write_pmr(unsigned int base, unsigned int val)
+static inline void gicc_write_pmr(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_PMR, val);
 }
 
-static inline void gicc_write_BPR(unsigned int base, unsigned int val)
+static inline void gicc_write_BPR(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_BPR, val);
 }
 
 
-static inline void gicc_write_IAR(unsigned int base, unsigned int val)
+static inline void gicc_write_IAR(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_IAR, val);
 }
 
-static inline void gicc_write_EOIR(unsigned int base, unsigned int val)
+static inline void gicc_write_EOIR(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_EOIR, val);
 }
 
-static inline void gicc_write_hppir(unsigned int base, unsigned int val)
+static inline void gicc_write_hppir(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_HPPIR, val);
 }
 
-static inline void gicc_write_dir(unsigned int base, unsigned int val)
+static inline void gicc_write_dir(uintptr_t base, unsigned int val)
 {
 	mmio_write_32(base + GICC_DIR, val);
 }
diff --git a/include/drivers/arm/gic_v3.h b/include/drivers/arm/gic_v3.h
index c410626..02f9006 100644
--- a/include/drivers/arm/gic_v3.h
+++ b/include/drivers/arm/gic_v3.h
@@ -1,38 +1,23 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __GIC_V3_H__
 #define __GIC_V3_H__
 
+/******************************************************************************
+ * THIS DRIVER IS DEPRECATED. For GICv2 systems, use the driver in gicv2.h
+ * and for GICv3 systems, use the driver in gicv3.h.
+ *****************************************************************************/
+#if ERROR_DEPRECATED
+#error " The legacy ARM GIC driver is deprecated."
+#endif
+
 #include <mmio.h>
 #include <stdint.h>
+#include <types.h>
 
 
 /* GICv3 Re-distributor interface registers & shifts */
@@ -41,17 +26,17 @@
 #define GICR_WAKER		0x14
 
 /* GICR_WAKER bit definitions */
-#define WAKER_CA		(1UL << 2)
-#define WAKER_PS		(1UL << 1)
+#define WAKER_CA		(U(1) << 2)
+#define WAKER_PS		(U(1) << 1)
 
 /* GICR_TYPER bit definitions */
 #define GICR_TYPER_AFF_SHIFT	32
 #define GICR_TYPER_AFF_MASK	0xffffffff
-#define GICR_TYPER_LAST		(1UL << 4)
+#define GICR_TYPER_LAST		(U(1) << 4)
 
 /* GICv3 ICC_SRE register bit definitions*/
-#define ICC_SRE_EN		(1UL << 3)
-#define ICC_SRE_SRE		(1UL << 0)
+#define ICC_SRE_EN		(U(1) << 3)
+#define ICC_SRE_SRE		(U(1) << 0)
 
 /*******************************************************************************
  * GICv3 defintions
@@ -66,7 +51,7 @@
 /*******************************************************************************
  * Function prototypes
  ******************************************************************************/
-uintptr_t gicv3_get_rdist(uintptr_t gicr_base, uint64_t mpidr);
+uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr);
 
 /*******************************************************************************
  * GIC Redistributor interface accessors
diff --git a/include/drivers/arm/gicv2.h b/include/drivers/arm/gicv2.h
new file mode 100644
index 0000000..6e8322e
--- /dev/null
+++ b/include/drivers/arm/gicv2.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GICV2_H__
+#define __GICV2_H__
+
+/*******************************************************************************
+ * GICv2 miscellaneous definitions
+ ******************************************************************************/
+
+/* Interrupt group definitions */
+#define GICV2_INTR_GROUP0	0
+#define GICV2_INTR_GROUP1	1
+
+/* Interrupt IDs reported by the HPPIR and IAR registers */
+#define PENDING_G1_INTID	1022
+
+/* GICv2 can only target up to 8 PEs */
+#define GICV2_MAX_TARGET_PE	8
+
+/*******************************************************************************
+ * GICv2 specific Distributor interface register offsets and constants.
+ ******************************************************************************/
+#define GICD_ITARGETSR		0x800
+#define GICD_SGIR		0xF00
+#define GICD_CPENDSGIR		0xF10
+#define GICD_SPENDSGIR		0xF20
+#define GICD_PIDR2_GICV2	0xFE8
+
+#define ITARGETSR_SHIFT		2
+#define GIC_TARGET_CPU_MASK	0xff
+
+#define CPENDSGIR_SHIFT		2
+#define SPENDSGIR_SHIFT		CPENDSGIR_SHIFT
+
+#define SGIR_TGTLSTFLT_SHIFT	24
+#define SGIR_TGTLSTFLT_MASK	0x3
+#define SGIR_TGTLST_SHIFT	16
+#define SGIR_TGTLST_MASK	0xff
+#define SGIR_INTID_MASK		0xf
+
+#define SGIR_TGT_SPECIFIC	0
+
+#define GICV2_SGIR_VALUE(tgt_lst_flt, tgt, intid) \
+	((((tgt_lst_flt) & SGIR_TGTLSTFLT_MASK) << SGIR_TGTLSTFLT_SHIFT) | \
+	 (((tgt) & SGIR_TGTLST_MASK) << SGIR_TGTLST_SHIFT) | \
+	 ((intid) & SGIR_INTID_MASK))
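+
+/*
+ * For example, GICV2_SGIR_VALUE(SGIR_TGT_SPECIFIC, 1 << 3, 5) evaluates to
+ * 0x00080005, i.e. SGI 5 targeted at the CPU interface selected by bit 3 of
+ * the target list.
+ */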
+
+/*******************************************************************************
+ * GICv2 specific CPU interface register offsets and constants.
+ ******************************************************************************/
+/* Physical CPU Interface registers */
+#define GICC_CTLR		0x0
+#define GICC_PMR		0x4
+#define GICC_BPR		0x8
+#define GICC_IAR		0xC
+#define GICC_EOIR		0x10
+#define GICC_RPR		0x14
+#define GICC_HPPIR		0x18
+#define GICC_AHPPIR		0x28
+#define GICC_IIDR		0xFC
+#define GICC_DIR		0x1000
+#define GICC_PRIODROP		GICC_EOIR
+
+/* GICC_CTLR bit definitions */
+#define EOI_MODE_NS		(1 << 10)
+#define EOI_MODE_S		(1 << 9)
+#define IRQ_BYP_DIS_GRP1	(1 << 8)
+#define FIQ_BYP_DIS_GRP1	(1 << 7)
+#define IRQ_BYP_DIS_GRP0	(1 << 6)
+#define FIQ_BYP_DIS_GRP0	(1 << 5)
+#define CBPR			(1 << 4)
+#define FIQ_EN_SHIFT		3
+#define FIQ_EN_BIT		(1 << FIQ_EN_SHIFT)
+#define ACK_CTL			(1 << 2)
+
+/* GICC_IIDR bit masks and shifts */
+#define GICC_IIDR_PID_SHIFT	20
+#define GICC_IIDR_ARCH_SHIFT	16
+#define GICC_IIDR_REV_SHIFT	12
+#define GICC_IIDR_IMP_SHIFT	0
+
+#define GICC_IIDR_PID_MASK	0xfff
+#define GICC_IIDR_ARCH_MASK	0xf
+#define GICC_IIDR_REV_MASK	0xf
+#define GICC_IIDR_IMP_MASK	0xfff
+
+/* HYP view virtual CPU Interface registers */
+#define GICH_CTL		0x0
+#define GICH_VTR		0x4
+#define GICH_ELRSR0		0x30
+#define GICH_ELRSR1		0x34
+#define GICH_APR0		0xF0
+#define GICH_LR_BASE		0x100
+
+/* Virtual CPU Interface registers */
+#define GICV_CTL		0x0
+#define GICV_PRIMASK		0x4
+#define GICV_BP			0x8
+#define GICV_INTACK		0xC
+#define GICV_EOI		0x10
+#define GICV_RUNNINGPRI		0x14
+#define GICV_HIGHESTPEND	0x18
+#define GICV_DEACTIVATE		0x1000
+
+/* GICD_CTLR bit definitions */
+#define CTLR_ENABLE_G1_SHIFT		1
+#define CTLR_ENABLE_G1_MASK		0x1
+#define CTLR_ENABLE_G1_BIT		(1 << CTLR_ENABLE_G1_SHIFT)
+
+/* Interrupt ID mask for HPPIR, AHPPIR, IAR and AIAR CPU Interface registers */
+#define INT_ID_MASK		0x3ff
+
+#ifndef __ASSEMBLY__
+
+#include <interrupt_props.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * This structure describes some of the implementation defined attributes of
+ * the GICv2 IP. It is used by the platform port to specify these attributes
+ * in order to initialize the GICv2 driver. The attributes are described
+ * below.
+ *
+ * The 'gicd_base' field contains the base address of the Distributor interface
+ * programmer's view.
+ *
+ * The 'gicc_base' field contains the base address of the CPU Interface
+ * programmer's view.
+ *
+ * The 'g0_interrupt_array' field is a pointer to an array in which each entry
+ * corresponds to an ID of a Group 0 interrupt. This field is ignored when
+ * 'interrupt_props' field is used. This field is deprecated.
+ *
+ * The 'g0_interrupt_num' field contains the number of entries in the
+ * 'g0_interrupt_array'. This field is ignored when 'interrupt_props' field is
+ * used. This field is deprecated.
+ *
+ * The 'target_masks' field is a pointer to an array containing 'target_masks_num'
+ * elements. The GIC driver will populate the array with the per-PE target mask
+ * to use when targeting interrupts.
+ *
+ * The 'interrupt_props' field is a pointer to an array that enumerates secure
+ * interrupts and their properties. If this field is not NULL, the
+ * 'g0_interrupt_array' field is ignored.
+ *
+ * The 'interrupt_props_num' field contains the number of entries in the
+ * 'interrupt_props' array. If this field is non-zero, 'g0_interrupt_num' is
+ * ignored.
+ ******************************************************************************/
+typedef struct gicv2_driver_data {
+	uintptr_t gicd_base;
+	uintptr_t gicc_base;
+#if !ERROR_DEPRECATED
+	unsigned int g0_interrupt_num;
+	const unsigned int *g0_interrupt_array;
+#endif
+	unsigned int *target_masks;
+	unsigned int target_masks_num;
+	const interrupt_prop_t *interrupt_props;
+	unsigned int interrupt_props_num;
+} gicv2_driver_data_t;
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data);
+void gicv2_distif_init(void);
+void gicv2_pcpu_distif_init(void);
+void gicv2_cpuif_enable(void);
+void gicv2_cpuif_disable(void);
+unsigned int gicv2_is_fiq_enabled(void);
+unsigned int gicv2_get_pending_interrupt_type(void);
+unsigned int gicv2_get_pending_interrupt_id(void);
+unsigned int gicv2_acknowledge_interrupt(void);
+void gicv2_end_of_interrupt(unsigned int id);
+unsigned int gicv2_get_interrupt_group(unsigned int id);
+unsigned int gicv2_get_running_priority(void);
+void gicv2_set_pe_target_mask(unsigned int proc_num);
+unsigned int gicv2_get_interrupt_active(unsigned int id);
+void gicv2_enable_interrupt(unsigned int id);
+void gicv2_disable_interrupt(unsigned int id);
+void gicv2_set_interrupt_priority(unsigned int id, unsigned int priority);
+void gicv2_set_interrupt_type(unsigned int id, unsigned int type);
+void gicv2_raise_sgi(int sgi_num, int proc_num);
+void gicv2_set_spi_routing(unsigned int id, int proc_num);
+void gicv2_set_interrupt_pending(unsigned int id);
+void gicv2_clear_interrupt_pending(unsigned int id);
+unsigned int gicv2_set_pmr(unsigned int mask);
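+
+/*
+ * Usage sketch (compiled out): how a platform port might describe and bring
+ * up the GICv2 driver. The base addresses and names are hypothetical, and the
+ * secure interrupt properties list is omitted for brevity.
+ */
+#if 0
+static unsigned int example_target_masks[GICV2_MAX_TARGET_PE];
+
+static const gicv2_driver_data_t example_gic_data = {
+	.gicd_base = 0x2f000000UL,		/* hypothetical Distributor base */
+	.gicc_base = 0x2c000000UL,		/* hypothetical CPU interface base */
+	.target_masks = example_target_masks,
+	.target_masks_num = GICV2_MAX_TARGET_PE,
+	/* .interrupt_props/.interrupt_props_num describe the platform's
+	 * secure interrupts and must be filled in by a real port. */
+};
+
+static void example_gic_setup(void)
+{
+	gicv2_driver_init(&example_gic_data);	/* latch and validate driver data */
+	gicv2_distif_init();			/* global Distributor initialisation */
+	gicv2_pcpu_distif_init();		/* per-CPU banked Distributor state */
+	gicv2_cpuif_enable();			/* enable this CPU's interface */
+}
+#endif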
+
+#endif /* __ASSEMBLY__ */
+#endif /* __GICV2_H__ */
diff --git a/include/drivers/arm/gicv3.h b/include/drivers/arm/gicv3.h
new file mode 100644
index 0000000..b2e4d4c
--- /dev/null
+++ b/include/drivers/arm/gicv3.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GICV3_H__
+#define __GICV3_H__
+
+/*******************************************************************************
+ * GICv3 miscellaneous definitions
+ ******************************************************************************/
+/* Interrupt group definitions */
+#define INTR_GROUP1S		0
+#define INTR_GROUP0		1
+#define INTR_GROUP1NS		2
+
+/* Interrupt IDs reported by the HPPIR and IAR registers */
+#define PENDING_G1S_INTID	1020
+#define PENDING_G1NS_INTID	1021
+
+/* Constant to categorize LPI interrupt */
+#define MIN_LPI_ID		8192
+
+/* GICv3 can only target up to 16 PEs with SGI */
+#define GICV3_MAX_SGI_TARGETS	16
+
+/*******************************************************************************
+ * GICv3 specific Distributor interface register offsets and constants.
+ ******************************************************************************/
+#define GICD_STATUSR		0x10
+#define GICD_SETSPI_NSR		0x40
+#define GICD_CLRSPI_NSR		0x48
+#define GICD_SETSPI_SR		0x50
+#define GICD_CLRSPI_SR		0x50
+#define GICD_IGRPMODR		0xd00
+/*
+ * The GICD_IROUTER<n> register is at 0x6000 + 8n, where n is the interrupt ID
+ * and n >= 32, so the lowest implemented register is at offset 0x6100.
+ */
+#define GICD_IROUTER		0x6000
+#define GICD_PIDR2_GICV3	0xffe8
+
+#define IGRPMODR_SHIFT		5
+
+/* GICD_CTLR bit definitions */
+#define CTLR_ENABLE_G1NS_SHIFT		1
+#define CTLR_ENABLE_G1S_SHIFT		2
+#define CTLR_ARE_S_SHIFT		4
+#define CTLR_ARE_NS_SHIFT		5
+#define CTLR_DS_SHIFT			6
+#define CTLR_E1NWF_SHIFT		7
+#define GICD_CTLR_RWP_SHIFT		31
+
+#define CTLR_ENABLE_G1NS_MASK		0x1
+#define CTLR_ENABLE_G1S_MASK		0x1
+#define CTLR_ARE_S_MASK			0x1
+#define CTLR_ARE_NS_MASK		0x1
+#define CTLR_DS_MASK			0x1
+#define CTLR_E1NWF_MASK			0x1
+#define GICD_CTLR_RWP_MASK		0x1
+
+#define CTLR_ENABLE_G1NS_BIT		(1 << CTLR_ENABLE_G1NS_SHIFT)
+#define CTLR_ENABLE_G1S_BIT		(1 << CTLR_ENABLE_G1S_SHIFT)
+#define CTLR_ARE_S_BIT			(1 << CTLR_ARE_S_SHIFT)
+#define CTLR_ARE_NS_BIT			(1 << CTLR_ARE_NS_SHIFT)
+#define CTLR_DS_BIT			(1 << CTLR_DS_SHIFT)
+#define CTLR_E1NWF_BIT			(1 << CTLR_E1NWF_SHIFT)
+#define GICD_CTLR_RWP_BIT		(1 << GICD_CTLR_RWP_SHIFT)
+
+/* GICD_IROUTER shifts and masks */
+#define IROUTER_SHIFT		0
+#define IROUTER_IRM_SHIFT	31
+#define IROUTER_IRM_MASK	0x1
+
+#define GICV3_IRM_PE		0
+#define GICV3_IRM_ANY		1
+
+#define NUM_OF_DIST_REGS	30
+
+/*******************************************************************************
+ * GICv3 Re-distributor interface registers & constants
+ ******************************************************************************/
+#define GICR_PCPUBASE_SHIFT	0x11
+#define GICR_SGIBASE_OFFSET	(1 << 0x10)	/* 64 KB */
+#define GICR_CTLR		0x0
+#define GICR_TYPER		0x08
+#define GICR_WAKER		0x14
+#define GICR_PROPBASER		0x70
+#define GICR_PENDBASER		0x78
+#define GICR_IGROUPR0		(GICR_SGIBASE_OFFSET + 0x80)
+#define GICR_ISENABLER0		(GICR_SGIBASE_OFFSET + 0x100)
+#define GICR_ICENABLER0		(GICR_SGIBASE_OFFSET + 0x180)
+#define GICR_ISPENDR0		(GICR_SGIBASE_OFFSET + 0x200)
+#define GICR_ICPENDR0		(GICR_SGIBASE_OFFSET + 0x280)
+#define GICR_ISACTIVER0		(GICR_SGIBASE_OFFSET + 0x300)
+#define GICR_ICACTIVER0		(GICR_SGIBASE_OFFSET + 0x380)
+#define GICR_IPRIORITYR		(GICR_SGIBASE_OFFSET + 0x400)
+#define GICR_ICFGR0		(GICR_SGIBASE_OFFSET + 0xc00)
+#define GICR_ICFGR1		(GICR_SGIBASE_OFFSET + 0xc04)
+#define GICR_IGRPMODR0		(GICR_SGIBASE_OFFSET + 0xd00)
+#define GICR_NSACR		(GICR_SGIBASE_OFFSET + 0xe00)
+
+/* GICR_CTLR bit definitions */
+#define GICR_CTLR_UWP_SHIFT	31
+#define GICR_CTLR_UWP_MASK	0x1
+#define GICR_CTLR_UWP_BIT	(1U << GICR_CTLR_UWP_SHIFT)
+#define GICR_CTLR_RWP_SHIFT	3
+#define GICR_CTLR_RWP_MASK	0x1
+#define GICR_CTLR_RWP_BIT	(1U << GICR_CTLR_RWP_SHIFT)
+#define GICR_CTLR_EN_LPIS_BIT	(1U << 0)
+
+/* GICR_WAKER bit definitions */
+#define WAKER_CA_SHIFT		2
+#define WAKER_PS_SHIFT		1
+
+#define WAKER_CA_MASK		0x1
+#define WAKER_PS_MASK		0x1
+
+#define WAKER_CA_BIT		(1 << WAKER_CA_SHIFT)
+#define WAKER_PS_BIT		(1 << WAKER_PS_SHIFT)
+
+/* GICR_TYPER bit definitions */
+#define TYPER_AFF_VAL_SHIFT	32
+#define TYPER_PROC_NUM_SHIFT	8
+#define TYPER_LAST_SHIFT	4
+
+#define TYPER_AFF_VAL_MASK	0xffffffff
+#define TYPER_PROC_NUM_MASK	0xffff
+#define TYPER_LAST_MASK		0x1
+
+#define TYPER_LAST_BIT		(1 << TYPER_LAST_SHIFT)
+
+#define NUM_OF_REDIST_REGS	30
+
+/*******************************************************************************
+ * GICv3 CPU interface registers & constants
+ ******************************************************************************/
+/* ICC_SRE bit definitions*/
+#define ICC_SRE_EN_BIT		(1 << 3)
+#define ICC_SRE_DIB_BIT		(1 << 2)
+#define ICC_SRE_DFB_BIT		(1 << 1)
+#define ICC_SRE_SRE_BIT		(1 << 0)
+
+/* ICC_IGRPEN1_EL3 bit definitions */
+#define IGRPEN1_EL3_ENABLE_G1NS_SHIFT	0
+#define IGRPEN1_EL3_ENABLE_G1S_SHIFT	1
+
+#define IGRPEN1_EL3_ENABLE_G1NS_BIT	(1 << IGRPEN1_EL3_ENABLE_G1NS_SHIFT)
+#define IGRPEN1_EL3_ENABLE_G1S_BIT	(1 << IGRPEN1_EL3_ENABLE_G1S_SHIFT)
+
+/* ICC_IGRPEN0_EL1 bit definitions */
+#define IGRPEN1_EL1_ENABLE_G0_SHIFT	0
+#define IGRPEN1_EL1_ENABLE_G0_BIT	(1 << IGRPEN1_EL1_ENABLE_G0_SHIFT)
+
+/* ICC_HPPIR0_EL1 bit definitions */
+#define HPPIR0_EL1_INTID_SHIFT		0
+#define HPPIR0_EL1_INTID_MASK		0xffffff
+
+/* ICC_HPPIR1_EL1 bit definitions */
+#define HPPIR1_EL1_INTID_SHIFT		0
+#define HPPIR1_EL1_INTID_MASK		0xffffff
+
+/* ICC_IAR0_EL1 bit definitions */
+#define IAR0_EL1_INTID_SHIFT		0
+#define IAR0_EL1_INTID_MASK		0xffffff
+
+/* ICC_IAR1_EL1 bit definitions */
+#define IAR1_EL1_INTID_SHIFT		0
+#define IAR1_EL1_INTID_MASK		0xffffff
+
+/* ICC SGI macros */
+#define SGIR_TGT_MASK			0xffff
+#define SGIR_AFF1_SHIFT			16
+#define SGIR_INTID_SHIFT		24
+#define SGIR_INTID_MASK			0xf
+#define SGIR_AFF2_SHIFT			32
+#define SGIR_IRM_SHIFT			40
+#define SGIR_IRM_MASK			0x1
+#define SGIR_AFF3_SHIFT			48
+#define SGIR_AFF_MASK			0xf
+
+#define SGIR_IRM_TO_AFF			0
+
+#define GICV3_SGIR_VALUE(aff3, aff2, aff1, intid, irm, tgt) \
+	((((uint64_t) (aff3) & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) | \
+	 (((uint64_t) (irm) & SGIR_IRM_MASK) << SGIR_IRM_SHIFT) | \
+	 (((uint64_t) (aff2) & SGIR_AFF_MASK) << SGIR_AFF2_SHIFT) | \
+	 (((intid) & SGIR_INTID_MASK) << SGIR_INTID_SHIFT) | \
+	 (((aff1) & SGIR_AFF_MASK) << SGIR_AFF1_SHIFT) | \
+	 ((tgt) & SGIR_TGT_MASK))
+
+/*****************************************************************************
+ * GICv3 ITS registers and constants
+ *****************************************************************************/
+
+#define GITS_CTLR			0x0
+#define GITS_IIDR			0x4
+#define GITS_TYPER			0x8
+#define GITS_CBASER			0x80
+#define GITS_CWRITER			0x88
+#define GITS_CREADR			0x90
+#define GITS_BASER			0x100
+
+/* GITS_CTLR bit definitions */
+#define GITS_CTLR_ENABLED_BIT		1
+#define GITS_CTLR_QUIESCENT_SHIFT	31
+#define GITS_CTLR_QUIESCENT_BIT		(1U << GITS_CTLR_QUIESCENT_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <stdint.h>
+#include <types.h>
+#include <utils_def.h>
+
+#define gicv3_is_intr_id_special_identifier(id)	\
+	(((id) >= PENDING_G1S_INTID) && ((id) <= GIC_SPURIOUS_INTERRUPT))
+
+/*******************************************************************************
+ * Helper GICv3 macros for SEL1
+ ******************************************************************************/
+#define gicv3_acknowledge_interrupt_sel1()	read_icc_iar1_el1() &\
+							IAR1_EL1_INTID_MASK
+#define gicv3_get_pending_interrupt_id_sel1()	read_icc_hppir1_el1() &\
+							HPPIR1_EL1_INTID_MASK
+#define gicv3_end_of_interrupt_sel1(id)		write_icc_eoir1_el1(id)
+
+
+/*******************************************************************************
+ * Helper GICv3 macros for EL3
+ ******************************************************************************/
+#define gicv3_acknowledge_interrupt()		read_icc_iar0_el1() &\
+							IAR0_EL1_INTID_MASK
+#define gicv3_end_of_interrupt(id)		write_icc_eoir0_el1(id)
+
+/*
+ * This macro returns the total number of GICD registers corresponding to
+ * the name.
+ */
+#define GICD_NUM_REGS(reg_name)	\
+	DIV_ROUND_UP_2EVAL(TOTAL_SPI_INTR_NUM, (1 << reg_name ## _SHIFT))
+
+#define GICR_NUM_REGS(reg_name)	\
+	DIV_ROUND_UP_2EVAL(TOTAL_PCPU_INTR_NUM, (1 << reg_name ## _SHIFT))
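+
+/*
+ * For example, with TOTAL_SPI_INTR_NUM = 988, GICD_NUM_REGS(ISENABLER)
+ * evaluates to DIV_ROUND_UP(988, 32) = 31 and GICD_NUM_REGS(IPRIORITYR) to
+ * DIV_ROUND_UP(988, 4) = 247.
+ */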
+
+/*******************************************************************************
+ * This structure describes some of the implementation defined attributes of the
+ * GICv3 IP. It is used by the platform port to specify these attributes in order
+ * to initialise the GICv3 driver. The attributes are described below.
+ *
+ * The 'gicd_base' field contains the base address of the Distributor interface
+ * programmer's view.
+ *
+ * The 'gicr_base' field contains the base address of the Re-distributor
+ * interface programmer's view.
+ *
+ * The 'g0_interrupt_array' field is a pointer to an array in which each entry
+ * corresponds to an ID of a Group 0 interrupt. This field is ignored when
+ * 'interrupt_props' field is used. This field is deprecated.
+ *
+ * The 'g0_interrupt_num' field contains the number of entries in the
+ * 'g0_interrupt_array'. This field is ignored when 'interrupt_props' field is
+ * used. This field is deprecated.
+ *
+ * The 'g1s_interrupt_array' field is a pointer to an array in which each entry
+ * corresponds to an ID of a Group 1 interrupt. This field is ignored when
+ * 'interrupt_props' field is used. This field is deprecated.
+ *
+ * The 'g1s_interrupt_num' field contains the number of entries in the
+ * 'g1s_interrupt_array'. This field must be 0 if 'interrupt_props' field is
+ * used. This field is ignored when 'interrupt_props' field is used. This field
+ * is deprecated.
+ *
+ * The 'interrupt_props' field is a pointer to an array that enumerates secure
+ * interrupts and their properties. If this field is not NULL, both
+ * 'g0_interrupt_array' and 'g1s_interrupt_array' fields are ignored.
+ *
+ * The 'interrupt_props_num' field contains the number of entries in the
+ * 'interrupt_props' array. If this field is non-zero, both 'g0_interrupt_num'
+ * and 'g1s_interrupt_num' are ignored.
+ *
+ * The 'rdistif_num' field contains the number of Redistributor interfaces the
+ * GIC implements. This is equal to the number of CPUs or CPU interfaces
+ * instantiated in the GIC.
+ *
+ * The 'rdistif_base_addrs' field is a pointer to an array that has an entry for
+ * storing the base address of the Redistributor interface frame of each CPU in
+ * the system. The size of the array = 'rdistif_num'. The base addresses are
+ * detected during driver initialisation.
+ *
+ * The 'mpidr_to_core_pos' field is a pointer to a hash function which the
+ * driver will use to convert an MPIDR value to a linear core index. This index
+ * will be used for accessing the 'rdistif_base_addrs' array. This is an
+ * optional field. A GICv3 implementation maps each MPIDR to a linear core index
+ * as well. This mapping can be found by reading the "Affinity Value" and
+ * "Processor Number" fields in the GICR_TYPER. It is IMP. DEF. if the
+ * "Processor Numbers" are suitable to index into an array to access core
+ * specific information. If this is not the case, the platform port must provide a
+ * hash function. Otherwise, the "Processor Number" field will be used to access
+ * the array elements.
+ ******************************************************************************/
+typedef unsigned int (*mpidr_hash_fn)(u_register_t mpidr);
+
+typedef struct gicv3_driver_data {
+	uintptr_t gicd_base;
+	uintptr_t gicr_base;
+#if !ERROR_DEPRECATED
+	unsigned int g0_interrupt_num;
+	unsigned int g1s_interrupt_num;
+	const unsigned int *g0_interrupt_array;
+	const unsigned int *g1s_interrupt_array;
+#endif
+	const interrupt_prop_t *interrupt_props;
+	unsigned int interrupt_props_num;
+	unsigned int rdistif_num;
+	uintptr_t *rdistif_base_addrs;
+	mpidr_hash_fn mpidr_to_core_pos;
+} gicv3_driver_data_t;
+
+typedef struct gicv3_redist_ctx {
+	/* 64 bits registers */
+	uint64_t gicr_propbaser;
+	uint64_t gicr_pendbaser;
+
+	/* 32 bits registers */
+	uint32_t gicr_ctlr;
+	uint32_t gicr_igroupr0;
+	uint32_t gicr_isenabler0;
+	uint32_t gicr_ispendr0;
+	uint32_t gicr_isactiver0;
+	uint32_t gicr_ipriorityr[GICR_NUM_REGS(IPRIORITYR)];
+	uint32_t gicr_icfgr0;
+	uint32_t gicr_icfgr1;
+	uint32_t gicr_igrpmodr0;
+	uint32_t gicr_nsacr;
+} gicv3_redist_ctx_t;
+
+typedef struct gicv3_dist_ctx {
+	/* 64 bits registers */
+	uint64_t gicd_irouter[TOTAL_SPI_INTR_NUM];
+
+	/* 32 bits registers */
+	uint32_t gicd_ctlr;
+	uint32_t gicd_igroupr[GICD_NUM_REGS(IGROUPR)];
+	uint32_t gicd_isenabler[GICD_NUM_REGS(ISENABLER)];
+	uint32_t gicd_ispendr[GICD_NUM_REGS(ISPENDR)];
+	uint32_t gicd_isactiver[GICD_NUM_REGS(ISACTIVER)];
+	uint32_t gicd_ipriorityr[GICD_NUM_REGS(IPRIORITYR)];
+	uint32_t gicd_icfgr[GICD_NUM_REGS(ICFGR)];
+	uint32_t gicd_igrpmodr[GICD_NUM_REGS(IGRPMODR)];
+	uint32_t gicd_nsacr[GICD_NUM_REGS(NSACR)];
+} gicv3_dist_ctx_t;
+
+typedef struct gicv3_its_ctx {
+	/* 64 bits registers */
+	uint64_t gits_cbaser;
+	uint64_t gits_cwriter;
+	uint64_t gits_baser[8];
+
+	/* 32 bits registers */
+	uint32_t gits_ctlr;
+} gicv3_its_ctx_t;
+
+/*******************************************************************************
+ * GICv3 EL3 driver API
+ ******************************************************************************/
+void gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data);
+void gicv3_distif_init(void);
+void gicv3_rdistif_init(unsigned int proc_num);
+void gicv3_rdistif_on(unsigned int proc_num);
+void gicv3_rdistif_off(unsigned int proc_num);
+void gicv3_cpuif_enable(unsigned int proc_num);
+void gicv3_cpuif_disable(unsigned int proc_num);
+unsigned int gicv3_get_pending_interrupt_type(void);
+unsigned int gicv3_get_pending_interrupt_id(void);
+unsigned int gicv3_get_interrupt_type(unsigned int id,
+					  unsigned int proc_num);
+void gicv3_distif_init_restore(const gicv3_dist_ctx_t * const dist_ctx);
+void gicv3_distif_save(gicv3_dist_ctx_t * const dist_ctx);
+/*
+ * gicv3_distif_post_restore and gicv3_distif_pre_save must be implemented if
+ * gicv3_distif_save and gicv3_rdistif_init_restore are used. If no
+ * implementation-defined sequence is needed at these steps, an empty function
+ * can be provided.
+ */
+void gicv3_distif_post_restore(unsigned int proc_num);
+void gicv3_distif_pre_save(unsigned int proc_num);
+void gicv3_rdistif_init_restore(unsigned int proc_num, const gicv3_redist_ctx_t * const rdist_ctx);
+void gicv3_rdistif_save(unsigned int proc_num, gicv3_redist_ctx_t * const rdist_ctx);
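+
+/*
+ * For example, a platform that needs no implementation-defined sequence
+ * around Distributor save/restore can provide empty hooks:
+ *
+ *	void gicv3_distif_pre_save(unsigned int proc_num)
+ *	{
+ *	}
+ *
+ *	void gicv3_distif_post_restore(unsigned int proc_num)
+ *	{
+ *	}
+ */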
+void gicv3_its_save_disable(uintptr_t gits_base, gicv3_its_ctx_t * const its_ctx);
+void gicv3_its_restore(uintptr_t gits_base, const gicv3_its_ctx_t * const its_ctx);
+
+unsigned int gicv3_get_running_priority(void);
+unsigned int gicv3_get_interrupt_active(unsigned int id, unsigned int proc_num);
+void gicv3_enable_interrupt(unsigned int id, unsigned int proc_num);
+void gicv3_disable_interrupt(unsigned int id, unsigned int proc_num);
+void gicv3_set_interrupt_priority(unsigned int id, unsigned int proc_num,
+		unsigned int priority);
+void gicv3_set_interrupt_type(unsigned int id, unsigned int proc_num,
+		unsigned int group);
+void gicv3_raise_secure_g0_sgi(int sgi_num, u_register_t target);
+void gicv3_set_spi_routing(unsigned int id, unsigned int irm,
+		u_register_t mpidr);
+void gicv3_set_interrupt_pending(unsigned int id, unsigned int proc_num);
+void gicv3_clear_interrupt_pending(unsigned int id, unsigned int proc_num);
+unsigned int gicv3_set_pmr(unsigned int mask);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __GICV3_H__ */
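
The comment above gicv3_distif_pre_save()/gicv3_distif_post_restore() says an empty function is acceptable when no implementation-defined sequence is needed at those steps. A minimal sketch of how a platform might wire the save/restore API declared in this header into a suspend/resume flow follows; plat_my_core_pos(), the static context storage and the save-redistributor-first ordering are illustrative assumptions, not part of the driver.

#include <gicv3.h>

/* Assumed helper returning the linear index of the calling core. */
unsigned int plat_my_core_pos(void);

/* Context storage preserved across power-down (location is platform-specific). */
static gicv3_dist_ctx_t dist_ctx;
static gicv3_redist_ctx_t rdist_ctx;

/* No implementation-defined sequence is needed on this hypothetical platform. */
void gicv3_distif_pre_save(unsigned int proc_num)
{
}

void gicv3_distif_post_restore(unsigned int proc_num)
{
}

/* Called before entering a power state that loses GIC context. */
static void plat_gic_save(void)
{
	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
	gicv3_distif_save(&dist_ctx);
}

/* Called after waking from that power state. */
static void plat_gic_restore(void)
{
	gicv3_distif_init_restore(&dist_ctx);
	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
}
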
diff --git a/include/drivers/arm/gpio.h b/include/drivers/arm/gpio.h
deleted file mode 100644
index 06a41ad..0000000
--- a/include/drivers/arm/gpio.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __GPIO_H__
-#define __GPIO_H__
-
-extern int gpio_direction_input(unsigned int gpio);
-extern int gpio_direction_output(unsigned int gpio);
-extern int gpio_get_value(unsigned int gpio);
-extern int gpio_set_value(unsigned int gpio, unsigned int value);
-extern int gpio_register_device(unsigned int base);
-
-#endif	/* __GPIO_H__ */
diff --git a/include/drivers/arm/nic_400.h b/include/drivers/arm/nic_400.h
new file mode 100644
index 0000000..740f184
--- /dev/null
+++ b/include/drivers/arm/nic_400.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __NIC_400_H__
+#define __NIC_400_H__
+
+/*
+ * Offset of the security setting register for slave 'n' in the NIC-400
+ * address region control block
+ */
+#define NIC400_ADDR_CTRL_SECURITY_REG(n)	(0x8 + (n) * 4)
+
+#endif /* __NIC_400_H__ */
diff --git a/include/drivers/arm/pl011.h b/include/drivers/arm/pl011.h
index 7c4df62..cd259c5 100644
--- a/include/drivers/arm/pl011.h
+++ b/include/drivers/arm/pl011.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __PL011_H__
@@ -36,17 +12,21 @@
 #define UARTRSR                   0x004
 #define UARTECR                   0x004
 #define UARTFR                    0x018
+#define UARTIMSC                  0x038
+#define UARTRIS                   0x03C
+#define UARTICR                   0x044
+
+/* PL011 registers (outside the SBSA UART specification) */
+#if !PL011_GENERIC_UART
 #define UARTILPR                  0x020
 #define UARTIBRD                  0x024
 #define UARTFBRD                  0x028
 #define UARTLCR_H                 0x02C
 #define UARTCR                    0x030
 #define UARTIFLS                  0x034
-#define UARTIMSC                  0x038
-#define UARTRIS                   0x03C
 #define UARTMIS                   0x040
-#define UARTICR                   0x044
 #define UARTDMACR                 0x048
+#endif /* !PL011_GENERIC_UART */
 
 /* Data status bits */
 #define UART_DATA_ERROR_MASK      0x0F00
@@ -67,8 +47,10 @@
 
 #define PL011_UARTFR_TXFF_BIT	5	/* Transmit FIFO full bit in UARTFR register */
 #define PL011_UARTFR_RXFE_BIT	4	/* Receive FIFO empty bit in UARTFR register */
+#define PL011_UARTFR_BUSY_BIT	3	/* UART busy bit in UARTFR register */
 
 /* Control reg bits */
+#if !PL011_GENERIC_UART
 #define PL011_UARTCR_CTSEN        (1 << 15)	/* CTS hardware flow control enable */
 #define PL011_UARTCR_RTSEN        (1 << 14)	/* RTS hardware flow control enable */
 #define PL011_UARTCR_RTS          (1 << 11)	/* Request to send */
@@ -95,4 +77,6 @@
 #define PL011_UARTLCR_H_PEN       (1 << 1)	/* Parity Enable */
 #define PL011_UARTLCR_H_BRK       (1 << 0)	/* Send break */
 
+#endif /* !PL011_GENERIC_UART */
+
 #endif	/* __PL011_H__ */
diff --git a/include/drivers/arm/pl061_gpio.h b/include/drivers/arm/pl061_gpio.h
new file mode 100644
index 0000000..971a23d
--- /dev/null
+++ b/include/drivers/arm/pl061_gpio.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PL061_GPIO_H__
+#define __PL061_GPIO_H__
+
+#include <gpio.h>
+
+void pl061_gpio_register(uintptr_t base_addr, int gpio_dev);
+void pl061_gpio_init(void);
+
+#endif	/* __PL061_GPIO_H__ */
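
A short sketch of how a platform might hook the PL061 driver above into the common GPIO layer; the base address, the device index and the call order are assumptions for illustration.

#include <pl061_gpio.h>

/* Hypothetical base address of the first PL061 instance. */
#define PLAT_PL061_GPIO0_BASE	0x1C010000

void plat_pl061_setup(void)
{
	pl061_gpio_init();
	/* Register instance 0 with the common GPIO layer (ordering assumed). */
	pl061_gpio_register(PLAT_PL061_GPIO0_BASE, 0);
}
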
diff --git a/include/drivers/arm/smmu_v3.h b/include/drivers/arm/smmu_v3.h
new file mode 100644
index 0000000..b7efde4
--- /dev/null
+++ b/include/drivers/arm/smmu_v3.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMMU_V3_H__
+#define __SMMU_V3_H__
+
+#include <stdint.h>
+
+/* SMMUv3 register offsets from device base */
+#define SMMU_S_IDR1	0x8004
+#define SMMU_S_INIT	0x803c
+
+/* SMMU_S_IDR1 register fields */
+#define SMMU_S_IDR1_SECURE_IMPL_SHIFT	31
+#define SMMU_S_IDR1_SECURE_IMPL_MASK	0x1
+
+/* SMMU_S_INIT register fields */
+#define SMMU_S_INIT_INV_ALL_MASK	0x1
+
+
+int smmuv3_init(uintptr_t smmu_base);
+
+#endif /* __SMMU_V3_H__ */
diff --git a/include/drivers/arm/sp804_delay_timer.h b/include/drivers/arm/sp804_delay_timer.h
new file mode 100644
index 0000000..0cf168b
--- /dev/null
+++ b/include/drivers/arm/sp804_delay_timer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP804_DELAY_TIMER_H__
+#define __SP804_DELAY_TIMER_H__
+
+#include <delay_timer.h>
+#include <stdint.h>
+
+
+uint32_t sp804_get_timer_value(void);
+
+void sp804_timer_ops_init(uintptr_t base_addr, const timer_ops_t *ops);
+
+#define sp804_timer_init(base_addr, clk_mult, clk_div)			\
+	do {								\
+		static const timer_ops_t sp804_timer_ops = {		\
+			sp804_get_timer_value,				\
+			(clk_mult),					\
+			(clk_div)					\
+		};							\
+		sp804_timer_ops_init((base_addr), &sp804_timer_ops);	\
+	} while (0)
+
+#endif /* __SP804_DELAY_TIMER_H__ */
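
As an illustration of the sp804_timer_init() convenience macro above, a hedged sketch follows; the base address and the 35 MHz timer clock (period 1/35 us, hence a 1/35 multiplier/divider pair) are assumptions.

#include <sp804_delay_timer.h>

/* Hypothetical platform values: SP804 clocked at 35 MHz. */
#define PLAT_SP804_TIMER_BASE	0x1C110000
#define PLAT_SP804_CLK_MULT	1
#define PLAT_SP804_CLK_DIV	35

void plat_delay_timer_setup(void)
{
	/* Registers the SP804 as the back end for mdelay()/udelay(). */
	sp804_timer_init(PLAT_SP804_TIMER_BASE,
			 PLAT_SP804_CLK_MULT, PLAT_SP804_CLK_DIV);
}
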
diff --git a/include/drivers/arm/sp805.h b/include/drivers/arm/sp805.h
new file mode 100644
index 0000000..f00bcba
--- /dev/null
+++ b/include/drivers/arm/sp805.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP805_H__
+#define __SP805_H__
+
+/* SP805 register offset */
+#define SP805_WDOG_LOAD_OFF		0x000
+#define SP805_WDOG_CTR_OFF		0x008
+#define SP805_WDOG_LOCK_OFF		0xc00
+
+/* Magic word to unlock the watchdog registers */
+#define WDOG_UNLOCK_KEY			0x1ACCE551
+
+/* Register field definitions */
+#define SP805_CTR_RESEN			(1 << 1)
+#define SP805_CTR_INTEN			(1 << 0)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/* Public high level API */
+
+void sp805_start(uintptr_t base, unsigned long ticks);
+void sp805_stop(uintptr_t base);
+void sp805_refresh(uintptr_t base, unsigned long ticks);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SP805_H__ */
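
A small usage sketch for the SP805 high level API above; the watchdog base address and tick count are assumptions for illustration.

#include <sp805.h>

/* Hypothetical platform values. */
#define PLAT_SP805_WDOG_BASE	0x1C0F0000
#define PLAT_SP805_WDOG_TICKS	0x00FFFFFFUL

void plat_watchdog_example(void)
{
	/* Arm the watchdog, service it once, then turn it off again. */
	sp805_start(PLAT_SP805_WDOG_BASE, PLAT_SP805_WDOG_TICKS);
	sp805_refresh(PLAT_SP805_WDOG_BASE, PLAT_SP805_WDOG_TICKS);
	sp805_stop(PLAT_SP805_WDOG_BASE);
}
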
diff --git a/include/drivers/arm/tzc400.h b/include/drivers/arm/tzc400.h
index d62e67b..038a3ba 100644
--- a/include/drivers/arm/tzc400.h
+++ b/include/drivers/arm/tzc400.h
@@ -1,201 +1,186 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __TZC400_H__
 #define __TZC400_H__
 
-#include <stdint.h>
+#include <tzc_common.h>
 
-#define BUILD_CONFIG_OFF	0x000
-#define ACTION_OFF		0x004
-#define GATE_KEEPER_OFF		0x008
-#define SPECULATION_CTRL_OFF	0x00c
-#define INT_STATUS		0x010
-#define INT_CLEAR		0x014
+#define BUILD_CONFIG_OFF			0x000
+#define GATE_KEEPER_OFF				0x008
+#define SPECULATION_CTRL_OFF			0x00c
+#define INT_STATUS				0x010
+#define INT_CLEAR				0x014
 
-#define FAIL_ADDRESS_LOW_OFF	0x020
-#define FAIL_ADDRESS_HIGH_OFF	0x024
-#define FAIL_CONTROL_OFF	0x028
-#define FAIL_ID			0x02c
+#define FAIL_ADDRESS_LOW_OFF			0x020
+#define FAIL_ADDRESS_HIGH_OFF			0x024
+#define FAIL_CONTROL_OFF			0x028
+#define FAIL_ID					0x02c
 
-#define REGION_BASE_LOW_OFF	0x100
-#define REGION_BASE_HIGH_OFF	0x104
-#define REGION_TOP_LOW_OFF	0x108
-#define REGION_TOP_HIGH_OFF	0x10c
-#define REGION_ATTRIBUTES_OFF	0x110
-#define REGION_ID_ACCESS_OFF	0x114
-#define REGION_NUM_OFF(region)  (0x20 * region)
+/* ID registers not common across different varieties of TZC */
+#define PID5					0xFD4
+#define PID6					0xFD8
+#define PID7					0xFDC
 
-/* ID Registers */
-#define PID0_OFF		0xfe0
-#define PID1_OFF		0xfe4
-#define PID2_OFF		0xfe8
-#define PID3_OFF		0xfec
-#define PID4_OFF		0xfd0
-#define PID5_OFF		0xfd4
-#define PID6_OFF		0xfd8
-#define PID7_OFF		0xfdc
-#define CID0_OFF		0xff0
-#define CID1_OFF		0xff4
-#define CID2_OFF		0xff8
-#define CID3_OFF		0xffc
-
-#define BUILD_CONFIG_NF_SHIFT	24
-#define BUILD_CONFIG_NF_MASK	0x3
-#define BUILD_CONFIG_AW_SHIFT	8
-#define BUILD_CONFIG_AW_MASK	0x3f
-#define BUILD_CONFIG_NR_SHIFT	0
-#define BUILD_CONFIG_NR_MASK	0x1f
-
-/* Not describing the case where regions 1 to 8 overlap */
-#define ACTION_RV_SHIFT		0
-#define ACTION_RV_MASK		0x3
-#define  ACTION_RV_LOWOK	0x0
-#define  ACTION_RV_LOWERR	0x1
-#define  ACTION_RV_HIGHOK	0x2
-#define  ACTION_RV_HIGHERR	0x3
+#define BUILD_CONFIG_NF_SHIFT			24
+#define BUILD_CONFIG_NF_MASK			0x3
+#define BUILD_CONFIG_AW_SHIFT			8
+#define BUILD_CONFIG_AW_MASK			0x3f
+#define BUILD_CONFIG_NR_SHIFT			0
+#define BUILD_CONFIG_NR_MASK			0x1f
 
 /*
  * Number of gate keepers is implementation defined. But we know the max for
  * this device is 4. Get implementation details from BUILD_CONFIG.
  */
-#define GATE_KEEPER_OS_SHIFT	16
-#define GATE_KEEPER_OS_MASK	0xf
-#define GATE_KEEPER_OR_SHIFT	0
-#define GATE_KEEPER_OR_MASK	0xf
-#define GATE_KEEPER_FILTER_MASK	0x1
+#define GATE_KEEPER_OS_SHIFT			16
+#define GATE_KEEPER_OS_MASK			0xf
+#define GATE_KEEPER_OR_SHIFT			0
+#define GATE_KEEPER_OR_MASK			0xf
+#define GATE_KEEPER_FILTER_MASK			0x1
 
 /* Speculation is enabled by default. */
-#define SPECULATION_CTRL_WRITE_DISABLE	(1 << 1)
-#define SPECULATION_CTRL_READ_DISABLE	(1 << 0)
+#define SPECULATION_CTRL_WRITE_DISABLE		(1 << 1)
+#define SPECULATION_CTRL_READ_DISABLE		(1 << 0)
 
 /* Max number of filters allowed is 4. */
-#define INT_STATUS_OVERLAP_SHIFT	16
-#define INT_STATUS_OVERLAP_MASK		0xf
-#define INT_STATUS_OVERRUN_SHIFT	8
-#define INT_STATUS_OVERRUN_MASK		0xf
-#define INT_STATUS_STATUS_SHIFT		0
-#define INT_STATUS_STATUS_MASK		0xf
+#define INT_STATUS_OVERLAP_SHIFT		16
+#define INT_STATUS_OVERLAP_MASK			0xf
+#define INT_STATUS_OVERRUN_SHIFT		8
+#define INT_STATUS_OVERRUN_MASK			0xf
+#define INT_STATUS_STATUS_SHIFT			0
+#define INT_STATUS_STATUS_MASK			0xf
 
-#define INT_CLEAR_CLEAR_SHIFT		0
-#define INT_CLEAR_CLEAR_MASK		0xf
+#define INT_CLEAR_CLEAR_SHIFT			0
+#define INT_CLEAR_CLEAR_MASK			0xf
 
-#define FAIL_CONTROL_DIR_SHIFT		(1 << 24)
-#define  FAIL_CONTROL_DIR_READ		0x0
-#define  FAIL_CONTROL_DIR_WRITE		0x1
-#define FAIL_CONTROL_NS_SHIFT		(1 << 21)
-#define  FAIL_CONTROL_NS_SECURE		0x0
-#define  FAIL_CONTROL_NS_NONSECURE	0x1
-#define FAIL_CONTROL_PRIV_SHIFT		(1 << 20)
-#define  FAIL_CONTROL_PRIV_PRIV		0x0
-#define  FAIL_CONTROL_PRIV_UNPRIV	0x1
+#define FAIL_CONTROL_DIR_SHIFT			(1 << 24)
+#define FAIL_CONTROL_DIR_READ			0x0
+#define FAIL_CONTROL_DIR_WRITE			0x1
+#define FAIL_CONTROL_NS_SHIFT			(1 << 21)
+#define FAIL_CONTROL_NS_SECURE			0x0
+#define FAIL_CONTROL_NS_NONSECURE		0x1
+#define FAIL_CONTROL_PRIV_SHIFT			(1 << 20)
+#define FAIL_CONTROL_PRIV_PRIV			0x0
+#define FAIL_CONTROL_PRIV_UNPRIV		0x1
 
 /*
  * FAIL_ID_ID_MASK depends on AID_WIDTH which is platform specific.
  * Platform should provide the value on initialisation.
  */
-#define FAIL_ID_VNET_SHIFT		24
-#define FAIL_ID_VNET_MASK		0xf
-#define FAIL_ID_ID_SHIFT		0
+#define FAIL_ID_VNET_SHIFT			24
+#define FAIL_ID_VNET_MASK			0xf
+#define FAIL_ID_ID_SHIFT			0
 
-/* Used along with 'tzc_region_attributes_t' below */
-#define REG_ATTR_SEC_SHIFT		30
-#define REG_ATTR_F_EN_SHIFT		0
-#define REG_ATTR_F_EN_MASK		0xf
-#define REG_ATTR_FILTER_BIT(x)		((1 << x) << REG_ATTR_F_EN_SHIFT)
-#define REG_ATTR_FILTER_BIT_ALL		(REG_ATTR_F_EN_MASK << \
-					REG_ATTR_F_EN_SHIFT)
+#define TZC_400_PERIPHERAL_ID			0x460
 
-#define REGION_ID_ACCESS_NSAID_WR_EN_SHIFT	16
-#define REGION_ID_ACCESS_NSAID_RD_EN_SHIFT	0
-#define REGION_ID_ACCESS_NSAID_ID_MASK		0xf
+/* Filter enable bits in a TZC */
+#define TZC_400_REGION_ATTR_F_EN_MASK		0xf
+#define TZC_400_REGION_ATTR_FILTER_BIT(x)	((1 << x)		\
+					<< TZC_REGION_ATTR_F_EN_SHIFT)
+#define TZC_400_REGION_ATTR_FILTER_BIT_ALL				\
+				(TZC_400_REGION_ATTR_F_EN_MASK <<	\
+				TZC_REGION_ATTR_F_EN_SHIFT)
 
+/*
+ * Define some macros for backward compatibility with existing tzc400 clients.
+ */
+#if !ERROR_DEPRECATED
+#define REG_ATTR_FILTER_BIT(x)			((1 << x)		\
+					<< TZC_REGION_ATTR_F_EN_SHIFT)
+#define REG_ATTR_FILTER_BIT_ALL	(TZC_400_REGION_ATTR_F_EN_MASK <<	\
+					TZC_REGION_ATTR_F_EN_SHIFT)
+#endif /* !ERROR_DEPRECATED */
 
-/* Macros for setting Region ID access permissions based on NSAID */
-#define TZC_REGION_ACCESS_RD(id)					\
-		((1 << (id & REGION_ID_ACCESS_NSAID_ID_MASK)) <<	\
-		 REGION_ID_ACCESS_NSAID_RD_EN_SHIFT)
-#define TZC_REGION_ACCESS_WR(id)					\
-		((1 << (id & REGION_ID_ACCESS_NSAID_ID_MASK)) <<	\
-		 REGION_ID_ACCESS_NSAID_WR_EN_SHIFT)
-#define TZC_REGION_ACCESS_RDWR(id)					\
-		(TZC_REGION_ACCESS_RD(id) | TZC_REGION_ACCESS_WR(id))
+/*
+ * All TZC region configuration registers are placed one after another. This
+ * is the size of the block of registers used to program each region.
+ */
+#define TZC_400_REGION_SIZE			0x20
+#define TZC_400_ACTION_OFF			0x4
 
-/* Filters are bit mapped 0 to 3. */
-#define TZC400_COMPONENT_ID	0xb105f00d
+#ifndef __ASSEMBLY__
+
+#include <cdefs.h>
+#include <stdint.h>
 
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
+void tzc400_init(uintptr_t base);
+void tzc400_configure_region0(tzc_region_attributes_t sec_attr,
+			   unsigned int ns_device_access);
+void tzc400_configure_region(unsigned int filters,
+			  int region,
+			  unsigned long long region_base,
+			  unsigned long long region_top,
+			  tzc_region_attributes_t sec_attr,
+			  unsigned int ns_device_access);
+void tzc400_set_action(tzc_action_t action);
+void tzc400_enable_filters(void);
+void tzc400_disable_filters(void);
 
 /*
- * What type of action is expected when an access violation occurs.
- * The memory requested is zeroed. But we can also raise and event to
- * let the system know it happened.
- * We can raise an interrupt(INT) and/or cause an exception(ERR).
- *  TZC_ACTION_NONE    - No interrupt, no Exception
- *  TZC_ACTION_ERR     - No interrupt, raise exception -> sync external
- *                       data abort
- *  TZC_ACTION_INT     - Raise interrupt, no exception
- *  TZC_ACTION_ERR_INT - Raise interrupt, raise exception -> sync
- *                       external data abort
+ * Deprecated APIs
  */
-typedef enum {
-	TZC_ACTION_NONE = 0,
-	TZC_ACTION_ERR = 1,
-	TZC_ACTION_INT = 2,
-	TZC_ACTION_ERR_INT = (TZC_ACTION_ERR | TZC_ACTION_INT)
-} tzc_action_t;
-
-/*
- * Controls secure access to a region. If not enabled secure access is not
- * allowed to region.
- */
-typedef enum {
-	TZC_REGION_S_NONE = 0,
-	TZC_REGION_S_RD = 1,
-	TZC_REGION_S_WR = 2,
-	TZC_REGION_S_RDWR = (TZC_REGION_S_RD | TZC_REGION_S_WR)
-} tzc_region_attributes_t;
-
-
-void tzc_init(uint64_t base);
-void tzc_configure_region(uint32_t filters,
-			uint8_t region,
-			uint64_t region_base,
-			uint64_t region_top,
+static inline void tzc_init(uintptr_t base) __deprecated;
+static inline void tzc_configure_region0(
 			tzc_region_attributes_t sec_attr,
-			uint32_t ns_device_access);
-void tzc_enable_filters(void);
-void tzc_disable_filters(void);
-void tzc_set_action(tzc_action_t action);
+			unsigned int ns_device_access) __deprecated;
+static inline void tzc_configure_region(
+			  unsigned int filters,
+			  int region,
+			  unsigned long long region_base,
+			  unsigned long long region_top,
+			  tzc_region_attributes_t sec_attr,
+			  unsigned int ns_device_access) __deprecated;
+static inline void tzc_set_action(tzc_action_t action) __deprecated;
+static inline void tzc_enable_filters(void) __deprecated;
+static inline void tzc_disable_filters(void) __deprecated;
 
+static inline void tzc_init(uintptr_t base)
+{
+	tzc400_init(base);
+}
+
+static inline void tzc_configure_region0(
+			tzc_region_attributes_t sec_attr,
+			unsigned int ns_device_access)
+{
+	tzc400_configure_region0(sec_attr, ns_device_access);
+}
+
+static inline void tzc_configure_region(
+			  unsigned int filters,
+			  int region,
+			  unsigned long long region_base,
+			  unsigned long long region_top,
+			  tzc_region_attributes_t sec_attr,
+			  unsigned int ns_device_access)
+{
+	tzc400_configure_region(filters, region, region_base,
+			region_top, sec_attr, ns_device_access);
+}
+
+static inline void tzc_set_action(tzc_action_t action)
+{
+	tzc400_set_action(action);
+}
+
+
+static inline void tzc_enable_filters(void)
+{
+	tzc400_enable_filters();
+}
+
+static inline void tzc_disable_filters(void)
+{
+	tzc400_disable_filters();
+}
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* __TZC400__ */
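
To show how the non-deprecated tzc400_* API above fits together, here is a hedged sketch; the controller base, the DRAM window and the NSAID value are assumptions, not taken from any platform.

#include <tzc400.h>

/* Hypothetical platform values. */
#define PLAT_TZC400_BASE	0x2A4A0000
#define PLAT_NS_DRAM_BASE	0x80000000ULL
#define PLAT_NS_DRAM_END	0xFFFFFFFFULL
#define PLAT_NSAID_DEFAULT	0

void plat_tzc400_setup(void)
{
	tzc400_init(PLAT_TZC400_BASE);
	tzc400_disable_filters();

	/* Region 0: secure read/write only, no non-secure access by default. */
	tzc400_configure_region0(TZC_REGION_S_RDWR, 0);

	/* Region 1: DRAM window, secure R/W plus non-secure R/W for one NSAID. */
	tzc400_configure_region(TZC_400_REGION_ATTR_FILTER_BIT_ALL, 1,
				PLAT_NS_DRAM_BASE, PLAT_NS_DRAM_END,
				TZC_REGION_S_RDWR,
				TZC_REGION_ACCESS_RDWR(PLAT_NSAID_DEFAULT));

	/* Abort violating transactions with a synchronous external abort. */
	tzc400_set_action(TZC_ACTION_ERR);
	tzc400_enable_filters();
}
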
diff --git a/include/drivers/arm/tzc_common.h b/include/drivers/arm/tzc_common.h
new file mode 100644
index 0000000..9411b73
--- /dev/null
+++ b/include/drivers/arm/tzc_common.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TZC_COMMON_H__
+#define __TZC_COMMON_H__
+
+/*
+ * Offset of core registers from the start of the base of configuration
+ * registers for each region.
+ */
+
+/* ID Registers */
+#define PID0_OFF					0xfe0
+#define PID1_OFF					0xfe4
+#define PID2_OFF					0xfe8
+#define PID3_OFF					0xfec
+#define PID4_OFF					0xfd0
+#define CID0_OFF					0xff0
+#define CID1_OFF					0xff4
+#define CID2_OFF					0xff8
+#define CID3_OFF					0xffc
+
+/* Bit positions of TZC_ACTION registers */
+#define TZC_ACTION_RV_SHIFT				0
+#define TZC_ACTION_RV_MASK				0x3
+#define TZC_ACTION_RV_LOWOK				0x0
+#define TZC_ACTION_RV_LOWERR				0x1
+#define TZC_ACTION_RV_HIGHOK				0x2
+#define TZC_ACTION_RV_HIGHERR				0x3
+
+/* Used along with 'tzc_region_attributes_t' below */
+#define TZC_REGION_ATTR_S_RD_SHIFT			30
+#define TZC_REGION_ATTR_S_WR_SHIFT			31
+#define TZC_REGION_ATTR_F_EN_SHIFT			0
+#define TZC_REGION_ATTR_SEC_SHIFT			30
+#define TZC_REGION_ATTR_S_RD_MASK			0x1
+#define TZC_REGION_ATTR_S_WR_MASK			0x1
+#define TZC_REGION_ATTR_SEC_MASK			0x3
+
+#define TZC_REGION_ACCESS_WR_EN_SHIFT			16
+#define TZC_REGION_ACCESS_RD_EN_SHIFT			0
+#define TZC_REGION_ACCESS_ID_MASK			0xf
+
+/* Macros for allowing Non-Secure access to a region based on NSAID */
+#define TZC_REGION_ACCESS_RD(nsaid)				\
+	((1 << (nsaid & TZC_REGION_ACCESS_ID_MASK)) <<		\
+	 TZC_REGION_ACCESS_RD_EN_SHIFT)
+#define TZC_REGION_ACCESS_WR(nsaid)				\
+	((1 << (nsaid & TZC_REGION_ACCESS_ID_MASK)) <<		\
+	 TZC_REGION_ACCESS_WR_EN_SHIFT)
+#define TZC_REGION_ACCESS_RDWR(nsaid)				\
+	(TZC_REGION_ACCESS_RD(nsaid) |				\
+	TZC_REGION_ACCESS_WR(nsaid))
+
+#ifndef __ASSEMBLY__
+
+/* Returns the offset of the registers to program for a given region number */
+#define TZC_REGION_OFFSET(region_size, region_no)	\
+				((region_size) * (region_no))
+
+/*
+ * The type of action taken when an access violation occurs.
+ * The requested memory is returned as zero, but an event can also be raised
+ * to let the system know it happened.
+ * We can raise an interrupt (INT) and/or cause an exception (ERR).
+ *  TZC_ACTION_NONE    - No interrupt, no Exception
+ *  TZC_ACTION_ERR     - No interrupt, raise exception -> sync external
+ *                       data abort
+ *  TZC_ACTION_INT     - Raise interrupt, no exception
+ *  TZC_ACTION_ERR_INT - Raise interrupt, raise exception -> sync
+ *                       external data abort
+ */
+typedef enum {
+	TZC_ACTION_NONE = 0,
+	TZC_ACTION_ERR = 1,
+	TZC_ACTION_INT = 2,
+	TZC_ACTION_ERR_INT = (TZC_ACTION_ERR | TZC_ACTION_INT)
+} tzc_action_t;
+
+/*
+ * Controls secure access to a region. If a type of secure access is not
+ * enabled here, it is not allowed to the region.
+ */
+typedef enum {
+	TZC_REGION_S_NONE = 0,
+	TZC_REGION_S_RD = 1,
+	TZC_REGION_S_WR = 2,
+	TZC_REGION_S_RDWR = (TZC_REGION_S_RD | TZC_REGION_S_WR)
+} tzc_region_attributes_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __TZC_COMMON_H__ */
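
As a quick worked example of the NSAID access macros above (the NSAID value is arbitrary):

#include <tzc_common.h>

/*
 * TZC_REGION_ACCESS_RDWR(5) expands to
 *	((1 << 5) << 0) | ((1 << 5) << 16) == 0x00200020,
 * i.e. the read-enable and write-enable bits for NSAID 5.
 */
static const unsigned int nsaid5_rdwr = TZC_REGION_ACCESS_RDWR(5);
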
diff --git a/include/drivers/arm/tzc_dmc500.h b/include/drivers/arm/tzc_dmc500.h
new file mode 100644
index 0000000..2606d1b
--- /dev/null
+++ b/include/drivers/arm/tzc_dmc500.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TZC_DMC500_H__
+#define __TZC_DMC500_H__
+
+#include <tzc_common.h>
+
+#define SI_STATUS_OFFSET				0x000
+#define SI_STATE_CTRL_OFFSET				0x030
+#define SI_FLUSH_CTRL_OFFSET				0x034
+#define SI_INT_CONTROL_OFFSET				0x048
+
+#define SI_INT_STATUS_OFFSET				0x004
+#define SI_TZ_FAIL_ADDRESS_LOW_OFFSET			0x008
+#define SI_TZ_FAIL_ADDRESS_HIGH_OFFSET			0x00c
+#define SI_FAIL_CONTROL_OFFSET				0x010
+#define SI_FAIL_ID_OFFSET				0x014
+#define SI_INT_CLR_OFFSET				0x04c
+
+/*
+ * The DMC-500 has 2 system interfaces, each with a similar set of registers
+ * for configuring that interface.
+ */
+#define SI0_BASE					0x0000
+#define SI1_BASE					0x0200
+
+/* Bit positions of SIx_SI_STATUS */
+#define SI_EMPTY_SHIFT					0x01
+#define SI_STALL_ACK_SHIFT				0x00
+#define SI_EMPTY_MASK					0x01
+#define SI_STALL_ACK_MASK				0x01
+
+/* Bit positions of SIx_SI_INT_STATUS */
+#define PMU_REQ_INT_OVERFLOW_STATUS_SHIFT		18
+#define FAILED_ACCESS_INT_OVERFLOW_STATUS_SHIFT		16
+#define PMU_REQ_INT_STATUS_SHIFT			2
+#define FAILED_ACCESS_INT_INFO_TZ_OVERLAP_STATUS_SHIFT	1
+#define FAILED_ACCESS_INT_STATUS_SHIFT			0
+#define PMU_REQ_INT_OVERFLOW_STATUS_MASK		0x1
+#define FAILED_ACCESS_INT_OVERFLOW_STATUS_MASK		0x1
+#define PMU_REQ_INT_STATUS_MASK				0x1
+#define FAILED_ACCESS_INT_INFO_TZ_OVERLAP_STATUS_MASK	0x1
+#define FAILED_ACCESS_INT_STATUS_MASK			0x1
+
+/* Bit positions of SIx_TZ_FAIL_CONTROL */
+#define DIRECTION_SHIFT					24
+#define NON_SECURE_SHIFT				21
+#define PRIVILEGED_SHIFT				20
+#define FAILED_ACCESS_INT_INFO_RANK_MASKED_SHIFT	3
+#define FAILED_ACCESS_INT_INFO_UNMAPPED_SHIFT		2
+#define FAILED_ACCESS_INT_TZ_FAIL_SHIFT			0x1
+#define FAILED_ACCESS_INT_INFO_OUTSIDE_DEFAULT_SHIFT	0
+#define DIRECTION_MASK					0x1
+#define NON_SECURE_MASK					0x1
+#define PRIVILEGED_MASK					0x1
+#define FAILED_ACCESS_INT_INFO_RANK_MASKED_MASK		0x1
+#define FAILED_ACCESS_INT_INFO_UNMAPPED_MASK		0x1
+#define FAILED_ACCESS_INT_TZ_FAIL_MASK			1
+#define FAILED_ACCESS_INT_INFO_OUTSIDE_DEFAULT_MASK	0x1
+
+/* Bit positions of SIx_FAIL_STATUS */
+#define FAIL_ID_VNET_SHIFT				24
+#define FAIL_ID_ID_SHIFT				0
+#define FAIL_ID_VNET_MASK				0xf
+#define FAIL_ID_ID_MASK					0xffffff
+
+/* Bit positions of SIx_SI_STATE_CONTROL */
+#define SI_STALL_REQ_GO					0x0
+#define SI_STALL_REQ_STALL				0x1
+
+/* Bit positions of SIx_SI_FLUSH_CONTROL */
+#define SI_FLUSH_REQ_INACTIVE				0x0
+#define SI_FLUSH_REQ_ACTIVE				0x1
+#define SI_FLUSH_REQ_MASK				0x1
+
+/* Bit positions of SIx_SI_INT_CONTROL */
+#define PMU_REQ_INT_EN_SHIFT				2
+#define OVERLAP_DETECT_INT_EN_SHIFT			1
+#define FAILED_ACCESS_INT_EN_SHIFT			0
+#define PMU_REQ_INT_EN_MASK				0x1
+#define OVERLAP_DETECT_INT_EN_MASK			0x1
+#define FAILED_ACCESS_INT_EN_MASK			0x1
+#define PMU_REQ_INT_EN					0x1
+#define OVERLAP_DETECT_INT_EN				0x1
+#define FAILED_ACCESS_INT_EN				0x1
+
+/* Bit positions of SIx_SI_INT_CLR */
+#define PMU_REQ_OFLOW_CLR_SHIFT				18
+#define FAILED_ACCESS_OFLOW_CLR_SHIFT			16
+#define PMU_REQ_INT_CLR_SHIFT				2
+#define FAILED_ACCESS_INT_CLR_SHIFT			0
+#define PMU_REQ_OFLOW_CLR_MASK				0x1
+#define FAILED_ACCESS_OFLOW_CLR_MASK			0x1
+#define PMU_REQ_INT_CLR_MASK				0x1
+#define FAILED_ACCESS_INT_CLR_MASK			0x1
+#define PMU_REQ_OFLOW_CLR				0x1
+#define FAILED_ACCESS_OFLOW_CLR				0x1
+#define PMU_REQ_INT_CLR					0x1
+#define FAILED_ACCESS_INT_CLR				0x1
+
+/* Macro to get the base offset of the registers for a system interface */
+#define IFACE_OFFSET(sys_if)	((sys_if) ? SI1_BASE : SI0_BASE)
+
+#define MAX_SYS_IF_COUNT				2
+#define MAX_REGION_VAL					8
+
+/* DMC-500 supports striping across a max of 4 DMC instances */
+#define MAX_DMC_COUNT					4
+
+/* Consists of part_number_1 and part_number_0 */
+#define DMC500_PERIPHERAL_ID				0x0450
+
+/* Filter enable bits in a TZC */
+#define TZC_DMC500_REGION_ATTR_F_EN_MASK		0x1
+
+/* Length of registers for configuring each region */
+#define TZC_DMC500_REGION_SIZE				0x018
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/*
+ * Contains the base addresses of all the DMC instances.
+ */
+typedef struct tzc_dmc500_driver_data {
+	uintptr_t dmc_base[MAX_DMC_COUNT];
+	int dmc_count;
+} tzc_dmc500_driver_data_t;
+
+void tzc_dmc500_driver_init(const tzc_dmc500_driver_data_t *plat_driver_data);
+void tzc_dmc500_configure_region0(tzc_region_attributes_t sec_attr,
+				unsigned int nsaid_permissions);
+void tzc_dmc500_configure_region(int region_no,
+				unsigned long long region_base,
+				unsigned long long region_top,
+				tzc_region_attributes_t sec_attr,
+				unsigned int nsaid_permissions);
+void tzc_dmc500_set_action(tzc_action_t action);
+void tzc_dmc500_config_complete(void);
+int tzc_dmc500_verify_complete(void);
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __TZC_DMC500_H__ */
+
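
A hedged sketch of driving the DMC-500 TZC API above; the DMC base addresses, the DRAM window and the return-value convention assumed for tzc_dmc500_verify_complete() are all illustrative assumptions.

#include <tzc_dmc500.h>

/* Hypothetical platform with two DMC-500 instances. */
static const tzc_dmc500_driver_data_t plat_dmc_data = {
	.dmc_base = { 0x2B820000, 0x2B830000 },
	.dmc_count = 2
};

void plat_dmc_tzc_setup(void)
{
	tzc_dmc500_driver_init(&plat_dmc_data);

	/* Region 0: secure accesses only by default. */
	tzc_dmc500_configure_region0(TZC_REGION_S_RDWR, 0);

	/* Region 1: open a DRAM window to NSAID 0 as well. */
	tzc_dmc500_configure_region(1, 0x80000000ULL, 0xFFFFFFFFULL,
				    TZC_REGION_S_RDWR,
				    TZC_REGION_ACCESS_RDWR(0));

	tzc_dmc500_set_action(TZC_ACTION_ERR);
	tzc_dmc500_config_complete();

	/* Assumed convention: returns 0 once the DMCs report the config done. */
	while (tzc_dmc500_verify_complete() != 0)
		;
}
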
diff --git a/include/drivers/auth/auth_common.h b/include/drivers/auth/auth_common.h
new file mode 100644
index 0000000..058375f
--- /dev/null
+++ b/include/drivers/auth/auth_common.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AUTH_COMMON_H__
+#define __AUTH_COMMON_H__
+
+/*
+ * Authentication framework common types
+ */
+
+/*
+ * Type of parameters that can be extracted from an image and
+ * used for authentication
+ */
+typedef enum auth_param_type_enum {
+	AUTH_PARAM_NONE,
+	AUTH_PARAM_RAW_DATA,		/* Raw image data */
+	AUTH_PARAM_SIG,			/* The image signature */
+	AUTH_PARAM_SIG_ALG,		/* The image signature algorithm */
+	AUTH_PARAM_HASH,		/* A hash (including the algorithm) */
+	AUTH_PARAM_PUB_KEY,		/* A public key */
+	AUTH_PARAM_NV_CTR,		/* A non-volatile counter */
+} auth_param_type_t;
+
+/*
+ * Defines an authentication parameter. The cookie will be interpreted by the
+ * image parser module.
+ */
+typedef struct auth_param_type_desc_s {
+	auth_param_type_t type;
+	void *cookie;
+} auth_param_type_desc_t;
+
+/*
+ * Store a pointer to the authentication parameter and its length
+ */
+typedef struct auth_param_data_desc_s {
+	void *ptr;
+	unsigned int len;
+} auth_param_data_desc_t;
+
+/*
+ * Authentication parameter descriptor, including type and value
+ */
+typedef struct auth_param_desc_s {
+	auth_param_type_desc_t *type_desc;
+	auth_param_data_desc_t data;
+} auth_param_desc_t;
+
+/*
+ * The method type defines how an image is authenticated
+ */
+typedef enum auth_method_type_enum {
+	AUTH_METHOD_NONE = 0,
+	AUTH_METHOD_HASH,	/* Authenticate by hash matching */
+	AUTH_METHOD_SIG,	/* Authenticate by PK operation */
+	AUTH_METHOD_NV_CTR,	/* Authenticate by Non-Volatile Counter */
+	AUTH_METHOD_NUM 	/* Number of methods */
+} auth_method_type_t;
+
+/*
+ * Parameters for authentication by hash matching
+ */
+typedef struct auth_method_param_hash_s {
+	auth_param_type_desc_t *data;	/* Data to hash */
+	auth_param_type_desc_t *hash;	/* Hash to match with */
+} auth_method_param_hash_t;
+
+/*
+ * Parameters for authentication by signature
+ */
+typedef struct auth_method_param_sig_s {
+	auth_param_type_desc_t *pk;	/* Public key */
+	auth_param_type_desc_t *sig;	/* Signature to check */
+	auth_param_type_desc_t *alg;	/* Signature algorithm */
+	auth_param_type_desc_t *data;	/* Data signed */
+} auth_method_param_sig_t;
+
+/*
+ * Parameters for authentication by NV counter
+ */
+typedef struct auth_method_param_nv_ctr_s {
+	auth_param_type_desc_t *cert_nv_ctr;	/* NV counter in certificate */
+	auth_param_type_desc_t *plat_nv_ctr;	/* NV counter in platform */
+} auth_method_param_nv_ctr_t;
+
+/*
+ * Authentication method descriptor
+ */
+typedef struct auth_method_desc_s {
+	auth_method_type_t type;
+	union {
+		auth_method_param_hash_t hash;
+		auth_method_param_sig_t sig;
+		auth_method_param_nv_ctr_t nv_ctr;
+	} param;
+} auth_method_desc_t;
+
+/*
+ * Helper macro to define an authentication parameter type descriptor
+ */
+#define AUTH_PARAM_TYPE_DESC(_type, _cookie) \
+	{ \
+		.type = _type, \
+		.cookie = (void *)_cookie \
+	}
+
+/*
+ * Helper macro to define an authentication parameter data descriptor
+ */
+#define AUTH_PARAM_DATA_DESC(_ptr, _len) \
+	{ \
+		.ptr = (void *)_ptr, \
+		.len = (unsigned int)_len \
+	}
+
+#endif /* __AUTH_COMMON_H__ */
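
The two helper macros above are typically used to build the static descriptors that make up a chain of trust. A hedged sketch, with a made-up OID cookie and descriptor names, might look like this:

#include <auth_common.h>

/* Hypothetical OID handed to the image parser as the cookie. */
static const char tb_fw_hash_oid[] = "1.2.3.4.5";

/* Type descriptors: the raw image data and a hash carried in a certificate. */
static auth_param_type_desc_t raw_data_desc =
	AUTH_PARAM_TYPE_DESC(AUTH_PARAM_RAW_DATA, 0);
static auth_param_type_desc_t tb_fw_hash_desc =
	AUTH_PARAM_TYPE_DESC(AUTH_PARAM_HASH, tb_fw_hash_oid);

/* A method that authenticates an image by matching its hash. */
static const auth_method_desc_t tb_fw_hash_method = {
	.type = AUTH_METHOD_HASH,
	.param.hash = {
		.data = &raw_data_desc,
		.hash = &tb_fw_hash_desc
	}
};
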
diff --git a/include/drivers/auth/auth_mod.h b/include/drivers/auth/auth_mod.h
new file mode 100644
index 0000000..bd65098
--- /dev/null
+++ b/include/drivers/auth/auth_mod.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AUTH_MOD_H__
+#define __AUTH_MOD_H__
+
+#if TRUSTED_BOARD_BOOT
+
+#include <auth_common.h>
+#include <cot_def.h>
+#include <img_parser_mod.h>
+
+/*
+ * Image flags
+ */
+#define IMG_FLAG_AUTHENTICATED		(1 << 0)
+
+
+/*
+ * Authentication image descriptor
+ */
+typedef struct auth_img_desc_s {
+	unsigned int img_id;
+	img_type_t img_type;
+	const struct auth_img_desc_s *parent;
+	auth_method_desc_t img_auth_methods[AUTH_METHOD_NUM];
+	auth_param_desc_t authenticated_data[COT_MAX_VERIFIED_PARAMS];
+} auth_img_desc_t;
+
+/* Public functions */
+void auth_mod_init(void);
+int auth_mod_get_parent_id(unsigned int img_id, unsigned int *parent_id);
+int auth_mod_verify_img(unsigned int img_id,
+			void *img_ptr,
+			unsigned int img_len);
+
+/* Macro to register a CoT defined as an array of auth_img_desc_t */
+#define REGISTER_COT(_cot) \
+	const auth_img_desc_t *const cot_desc_ptr = \
+			(const auth_img_desc_t *const)&_cot[0]; \
+	unsigned int auth_img_flags[sizeof(_cot)/sizeof(_cot[0])]
+
+#endif /* TRUSTED_BOARD_BOOT */
+
+#endif /* __AUTH_MOD_H__ */
diff --git a/include/drivers/auth/crypto_mod.h b/include/drivers/auth/crypto_mod.h
new file mode 100644
index 0000000..08884ab
--- /dev/null
+++ b/include/drivers/auth/crypto_mod.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CRYPTO_MOD_H__
+#define __CRYPTO_MOD_H__
+
+/* Return values */
+enum crypto_ret_value {
+	CRYPTO_SUCCESS = 0,
+	CRYPTO_ERR_INIT,
+	CRYPTO_ERR_HASH,
+	CRYPTO_ERR_SIGNATURE,
+	CRYPTO_ERR_UNKNOWN
+};
+
+/*
+ * Cryptographic library descriptor
+ */
+typedef struct crypto_lib_desc_s {
+	const char *name;
+
+	/* Initialize library. This function is not expected to fail. All errors
+	 * must be handled inside the function, asserting or panicking in case of
+	 * a non-recoverable error. */
+	void (*init)(void);
+
+	/* Verify a digital signature. Return one of the
+	 * 'enum crypto_ret_value' options */
+	int (*verify_signature)(void *data_ptr, unsigned int data_len,
+				void *sig_ptr, unsigned int sig_len,
+				void *sig_alg, unsigned int sig_alg_len,
+				void *pk_ptr, unsigned int pk_len);
+
+	/* Verify a hash. Return one of the 'enum crypto_ret_value' options */
+	int (*verify_hash)(void *data_ptr, unsigned int data_len,
+			   void *digest_info_ptr, unsigned int digest_info_len);
+} crypto_lib_desc_t;
+
+/* Public functions */
+void crypto_mod_init(void);
+int crypto_mod_verify_signature(void *data_ptr, unsigned int data_len,
+				void *sig_ptr, unsigned int sig_len,
+				void *sig_alg, unsigned int sig_alg_len,
+				void *pk_ptr, unsigned int pk_len);
+int crypto_mod_verify_hash(void *data_ptr, unsigned int data_len,
+			   void *digest_info_ptr, unsigned int digest_info_len);
+
+/* Macro to register a cryptographic library */
+#define REGISTER_CRYPTO_LIB(_name, _init, _verify_signature, _verify_hash) \
+	const crypto_lib_desc_t crypto_lib_desc = { \
+		.name = _name, \
+		.init = _init, \
+		.verify_signature = _verify_signature, \
+		.verify_hash = _verify_hash \
+	}
+
+#endif /* __CRYPTO_MOD_H__ */
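
A hedged sketch of a backend registering itself through REGISTER_CRYPTO_LIB(); the backend is a stub and all names are made up, so it only illustrates the shape of the callbacks declared above.

#include <crypto_mod.h>

static void stub_crypto_init(void)
{
	/* Nothing to set up in this stub. */
}

static int stub_verify_signature(void *data_ptr, unsigned int data_len,
				 void *sig_ptr, unsigned int sig_len,
				 void *sig_alg, unsigned int sig_alg_len,
				 void *pk_ptr, unsigned int pk_len)
{
	return CRYPTO_ERR_SIGNATURE;	/* real verification omitted */
}

static int stub_verify_hash(void *data_ptr, unsigned int data_len,
			    void *digest_info_ptr, unsigned int digest_info_len)
{
	return CRYPTO_ERR_HASH;		/* real verification omitted */
}

REGISTER_CRYPTO_LIB("stub-crypto", stub_crypto_init,
		    stub_verify_signature, stub_verify_hash);
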
diff --git a/include/drivers/auth/img_parser_mod.h b/include/drivers/auth/img_parser_mod.h
new file mode 100644
index 0000000..347ed62
--- /dev/null
+++ b/include/drivers/auth/img_parser_mod.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IMG_PARSER_MOD_H__
+#define __IMG_PARSER_MOD_H__
+
+#include <auth_common.h>
+
+/*
+ * Return values
+ */
+enum img_parser_ret_value {
+	IMG_PARSER_OK,
+	IMG_PARSER_ERR,			/* Parser internal error */
+	IMG_PARSER_ERR_FORMAT,		/* Malformed image */
+	IMG_PARSER_ERR_NOT_FOUND	/* Authentication data not found */
+};
+
+/*
+ * Image types. A parser should be instantiated and registered for each type
+ */
+typedef enum img_type_enum {
+	IMG_RAW,			/* Binary image */
+	IMG_PLAT,			/* Platform specific format */
+	IMG_CERT,			/* X509v3 certificate */
+	IMG_MAX_TYPES,
+} img_type_t;
+
+/* Image parser library structure */
+typedef struct img_parser_lib_desc_s {
+	img_type_t img_type;
+	const char *name;
+
+	void (*init)(void);
+	int (*check_integrity)(void *img, unsigned int img_len);
+	int (*get_auth_param)(const auth_param_type_desc_t *type_desc,
+			void *img, unsigned int img_len,
+			void **param, unsigned int *param_len);
+} img_parser_lib_desc_t;
+
+/* Exported functions */
+void img_parser_init(void);
+int img_parser_check_integrity(img_type_t img_type,
+		void *img, unsigned int img_len);
+int img_parser_get_auth_param(img_type_t img_type,
+		const auth_param_type_desc_t *type_desc,
+		void *img, unsigned int img_len,
+		void **param_ptr, unsigned int *param_len);
+
+/* Macro to register an image parser library */
+#define REGISTER_IMG_PARSER_LIB(_type, _name, _init, _check_int, _get_param) \
+	static const img_parser_lib_desc_t __img_parser_lib_desc_##_type \
+	__section(".img_parser_lib_descs") __used = { \
+		.img_type = _type, \
+		.name = _name, \
+		.init = _init, \
+		.check_integrity = _check_int, \
+		.get_auth_param = _get_param \
+	}
+
+#endif /* __IMG_PARSER_MOD_H__ */
diff --git a/include/drivers/auth/mbedtls/mbedtls_common.h b/include/drivers/auth/mbedtls/mbedtls_common.h
new file mode 100644
index 0000000..5d3e198
--- /dev/null
+++ b/include/drivers/auth/mbedtls/mbedtls_common.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MBEDTLS_COMMON_H__
+#define __MBEDTLS_COMMON_H__
+
+void mbedtls_init(void);
+
+#endif /* __MBEDTLS_COMMON_H__ */
diff --git a/include/drivers/auth/mbedtls/mbedtls_config.h b/include/drivers/auth/mbedtls/mbedtls_config.h
new file mode 100644
index 0000000..96587ac
--- /dev/null
+++ b/include/drivers/auth/mbedtls/mbedtls_config.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __MBEDTLS_CONFIG_H__
+#define __MBEDTLS_CONFIG_H__
+
+/*
+ * Key algorithms currently supported on mbed TLS libraries
+ */
+#define TF_MBEDTLS_RSA			1
+#define TF_MBEDTLS_ECDSA		2
+#define TF_MBEDTLS_RSA_AND_ECDSA	3
+
+/*
+ * Configuration file to build mbed TLS with the required features for
+ * Trusted Boot
+ */
+
+#define MBEDTLS_PLATFORM_MEMORY
+#define MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
+/* Prevent mbed TLS from using snprintf so that it can use tf_snprintf. */
+#define MBEDTLS_PLATFORM_SNPRINTF_ALT
+
+#if !ERROR_DEPRECATED
+#define MBEDTLS_PKCS1_V15
+#endif
+#define MBEDTLS_PKCS1_V21
+
+#define MBEDTLS_X509_ALLOW_UNSUPPORTED_CRITICAL_EXTENSION
+#define MBEDTLS_X509_CHECK_KEY_USAGE
+#define MBEDTLS_X509_CHECK_EXTENDED_KEY_USAGE
+
+#define MBEDTLS_ASN1_PARSE_C
+#define MBEDTLS_ASN1_WRITE_C
+
+#define MBEDTLS_BASE64_C
+#define MBEDTLS_BIGNUM_C
+
+#define MBEDTLS_ERROR_C
+#define MBEDTLS_MD_C
+
+#define MBEDTLS_MEMORY_BUFFER_ALLOC_C
+#define MBEDTLS_OID_C
+
+#define MBEDTLS_PK_C
+#define MBEDTLS_PK_PARSE_C
+#define MBEDTLS_PK_WRITE_C
+
+#define MBEDTLS_PLATFORM_C
+
+#if (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_ECDSA)
+#define MBEDTLS_ECDSA_C
+#define MBEDTLS_ECP_C
+#define MBEDTLS_ECP_DP_SECP256R1_ENABLED
+#elif (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA)
+#define MBEDTLS_RSA_C
+#define MBEDTLS_X509_RSASSA_PSS_SUPPORT
+#elif (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)
+#define MBEDTLS_RSA_C
+#define MBEDTLS_X509_RSASSA_PSS_SUPPORT
+#define MBEDTLS_ECDSA_C
+#define MBEDTLS_ECP_C
+#define MBEDTLS_ECP_DP_SECP256R1_ENABLED
+#endif
+
+#define MBEDTLS_SHA256_C
+
+#define MBEDTLS_VERSION_C
+
+#define MBEDTLS_X509_USE_C
+#define MBEDTLS_X509_CRT_PARSE_C
+
+/* MPI / BIGNUM options */
+#define MBEDTLS_MPI_WINDOW_SIZE              2
+#define MBEDTLS_MPI_MAX_SIZE               256
+
+/* Memory buffer allocator options */
+#define MBEDTLS_MEMORY_ALIGN_MULTIPLE        8
+
+#ifndef __ASSEMBLY__
+/* System headers required to build mbed TLS with the current configuration */
+#include <stdlib.h>
+#include "mbedtls/check_config.h"
+#endif
+
+#endif /* __MBEDTLS_CONFIG_H__ */
diff --git a/include/drivers/cadence/cdns_uart.h b/include/drivers/cadence/cdns_uart.h
new file mode 100644
index 0000000..3aadde3
--- /dev/null
+++ b/include/drivers/cadence/cdns_uart.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CADENCE_UART_H__
+#define __CADENCE_UART_H__
+
+/* This driver is very minimal and will only work in QEMU. */
+
+/* CADENCE Registers */
+#define R_UART_CR	0
+#define R_UART_CR_RXRST	(1 << 0) /* RX logic reset */
+#define R_UART_CR_TXRST	(1 << 1) /* TX logic reset */
+#define R_UART_CR_RX_EN	(1 << 2) /* RX enabled */
+#define R_UART_CR_TX_EN	(1 << 4) /* TX enabled */
+
+#define R_UART_SR		0x2C
+#define UART_SR_INTR_REMPTY_BIT	1
+#define UART_SR_INTR_TFUL_BIT	4
+
+#define R_UART_TX	0x30
+#define R_UART_RX	0x30
+
+#endif
diff --git a/include/drivers/console.h b/include/drivers/console.h
index f144ab9..da5cb8f 100644
--- a/include/drivers/console.h
+++ b/include/drivers/console.h
@@ -1,40 +1,20 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CONSOLE_H__
 #define __CONSOLE_H__
 
-int console_init(unsigned long base_addr,
+#include <stdint.h>
+
+int console_init(uintptr_t base_addr,
 		unsigned int uart_clk, unsigned int baud_rate);
+void console_uninit(void);
 int console_putc(int c);
 int console_getc(void);
+int console_flush(void);
 
 #endif /* __CONSOLE_H__ */
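
A small usage sketch for the console API above; the UART base address, clock and baud rate are assumptions for illustration.

#include <console.h>

/* Hypothetical UART parameters. */
#define PLAT_UART_BASE		0x1C090000
#define PLAT_UART_CLK_HZ	24000000
#define PLAT_CONSOLE_BAUDRATE	115200

void plat_console_example(void)
{
	(void)console_init(PLAT_UART_BASE, PLAT_UART_CLK_HZ,
			   PLAT_CONSOLE_BAUDRATE);
	(void)console_putc('#');
	(void)console_flush();
	console_uninit();
}
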
 
diff --git a/include/drivers/delay_timer.h b/include/drivers/delay_timer.h
new file mode 100644
index 0000000..4e44a5e
--- /dev/null
+++ b/include/drivers/delay_timer.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DELAY_TIMER_H__
+#define __DELAY_TIMER_H__
+
+#include <stdint.h>
+
+/********************************************************************
+ * A simple timer driver providing synchronous delay functionality.
+ * The driver must be initialized with a structure that provides a
+ * function pointer to return the timer value and a clock
+ * multiplier/divider. The ratio of the multiplier and the divider is
+ * the clock period in microseconds.
+ ********************************************************************/
+
+typedef struct timer_ops {
+	uint32_t (*get_timer_value)(void);
+	uint32_t clk_mult;
+	uint32_t clk_div;
+} timer_ops_t;
+
+void mdelay(uint32_t msec);
+void udelay(uint32_t usec);
+void timer_init(const timer_ops_t *ops);
+
+
+#endif /* __DELAY_TIMER_H__ */
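
Per the comment above, clk_mult/clk_div is the tick period in microseconds. A hedged sketch for a hypothetical 50 MHz counter (1/50 us per tick) follows; the counter register address and its counting behaviour are placeholders.

#include <delay_timer.h>

/* Hypothetical: read a free-running 50 MHz platform counter. */
static uint32_t plat_read_counter(void)
{
	/* Register address and counting direction are placeholders. */
	return *(volatile uint32_t *)0x1C110004;
}

/* clk_mult / clk_div = 1 / 50 microseconds per tick. */
static const timer_ops_t plat_timer_ops = {
	.get_timer_value	= plat_read_counter,
	.clk_mult		= 1,
	.clk_div		= 50
};

void plat_timer_setup(void)
{
	timer_init(&plat_timer_ops);
	udelay(100);	/* synchronous delays are usable from here on */
}
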
diff --git a/include/drivers/dw_ufs.h b/include/drivers/dw_ufs.h
new file mode 100644
index 0000000..b05c7f5
--- /dev/null
+++ b/include/drivers/dw_ufs.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DW_UFS_H__
+#define __DW_UFS_H__
+
+#include <sys/types.h>
+
+/* Bus Throttling */
+#define BUSTHRTL				0xC0
+/* Outstanding OCP Requests */
+#define OOCPR					0xC4
+/* Fatal Error Interrupt Enable */
+#define FEIE					0xC8
+/* C-Port Direct Access Configuration register */
+#define CDACFG					0xD0
+/* C-Port Direct Access Transmit 1 register */
+#define CDATX1					0xD4
+/* C-Port Direct Access Transmit 2 register */
+#define CDATX2					0xD8
+/* C-Port Direct Access Receive 1 register */
+#define CDARX1					0xDC
+/* C-Port Direct Access Receive 2 register */
+#define CDARX2					0xE0
+/* C-Port Direct Access Status register */
+#define CDASTA					0xE4
+/* UPIU Loopback Configuration register */
+#define LBMCFG					0xF0
+/* UPIU Loopback Status */
+#define LBMSTA					0xF4
+/* Debug register */
+#define DBG					0xF8
+/* HClk Divider register */
+#define HCLKDIV					0xFC
+
+#define TX_HIBERN8TIME_CAP_OFFSET		0x000F
+#define TX_FSM_STATE_OFFSET			0x0041
+#define TX_FSM_STATE_LINE_RESET			7
+#define TX_FSM_STATE_LINE_CFG			6
+#define TX_FSM_STATE_HS_BURST			5
+#define TX_FSM_STATE_LS_BURST			4
+#define TX_FSM_STATE_STALL			3
+#define TX_FSM_STATE_SLEEP			2
+#define TX_FSM_STATE_HIBERN8			1
+#define TX_FSM_STATE_DISABLE			0
+
+#define RX_MIN_ACTIVATETIME_CAP_OFFSET		0x008F
+#define RX_HS_G2_SYNC_LENGTH_CAP_OFFSET		0x0094
+#define RX_HS_G3_SYNC_LENGTH_CAP_OFFSET		0x0095
+
+#define PA_AVAIL_TX_DATA_LANES_OFFSET		0x1520
+#define PA_TX_SKIP_OFFSET			0x155C
+#define PA_TX_SKIP_PERIOD_OFFSET		0x155D
+#define PA_LOCAL_TX_LCC_ENABLE_OFFSET		0x155E
+#define PA_ACTIVE_TX_DATA_LANES_OFFSET		0x1560
+#define PA_CONNECTED_TX_DATA_LANES_OFFSET	0x1561
+#define PA_TX_TRAILING_CLOCKS_OFFSET		0x1564
+#define PA_TX_GEAR_OFFSET			0x1568
+#define PA_TX_TERMINATION_OFFSET		0x1569
+#define PA_HS_SERIES_OFFSET			0x156A
+#define PA_PWR_MODE_OFFSET			0x1571
+#define PA_ACTIVE_RX_DATA_LANES_OFFSET		0x1580
+#define PA_CONNECTED_RX_DATA_LANES_OFFSET	0x1581
+#define PA_RX_PWR_STATUS_OFFSET			0x1582
+#define PA_RX_GEAR_OFFSET			0x1583
+#define PA_RX_TERMINATION_OFFSET		0x1584
+#define PA_SCRAMBLING_OFFSET			0x1585
+#define PA_MAX_RX_PWM_GEAR_OFFSET		0x1586
+#define PA_MAX_RX_HS_GEAR_OFFSET		0x1587
+#define PA_PACP_REQ_TIMEOUT_OFFSET		0x1590
+#define PA_PACP_REQ_EOB_TIMEOUT_OFFSET		0x1591
+#define PA_REMOTE_VER_INFO_OFFSET		0x15A0
+#define PA_LOGICAL_LANE_MAP_OFFSET		0x15A1
+#define PA_TACTIVATE_OFFSET			0x15A8
+#define PA_PWR_MODE_USER_DATA0_OFFSET		0x15B0
+#define PA_PWR_MODE_USER_DATA1_OFFSET		0x15B1
+#define PA_PWR_MODE_USER_DATA2_OFFSET		0x15B2
+#define PA_PWR_MODE_USER_DATA3_OFFSET		0x15B3
+#define PA_PWR_MODE_USER_DATA4_OFFSET		0x15B4
+#define PA_PWR_MODE_USER_DATA5_OFFSET		0x15B5
+
+#define DL_TC0_TX_FC_THRESHOLD_OFFSET		0x2040
+#define DL_AFC0_CREDIT_THRESHOLD_OFFSET		0x2044
+#define DL_TC0_OUT_ACK_THRESHOLD_OFFSET		0x2045
+
+#define DME_FC0_PROTECTION_TIMEOUT_OFFSET	0xD041
+#define DME_TC0_REPLAY_TIMEOUT_OFFSET		0xD042
+#define DME_AFC0_REQ_TIMEOUT_OFFSET		0xD043
+#define DME_FC1_PROTECTION_TIMEOUT_OFFSET	0xD044
+#define DME_TC1_REPLAY_TIMEOUT_OFFSET		0xD045
+#define DME_AFC1_REQ_TIMEOUT_OFFSET		0xD046
+
+#define VS_MPHY_CFG_UPDT_OFFSET			0xD085
+#define VS_MK2_EXTN_SUPPORT_OFFSET		0xD0AB
+#define VS_MPHY_DISABLE_OFFSET			0xD0C1
+#define VS_MPHY_DISABLE_MPHYDIS			(1 << 0)
+
+typedef struct dw_ufs_params {
+	uintptr_t		reg_base;
+	uintptr_t		desc_base;
+	size_t			desc_size;
+	unsigned long		flags;
+} dw_ufs_params_t;
+
+int dw_ufs_init(dw_ufs_params_t *params);
+
+#endif /* __DW_UFS_H__ */
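
A minimal sketch of filling in dw_ufs_params_t and calling dw_ufs_init(); the register base, descriptor memory and flags value are assumptions, not taken from any platform.

#include <dw_ufs.h>

/* Hypothetical register base and descriptor memory for the UFS host. */
static dw_ufs_params_t plat_ufs_params = {
	.reg_base  = 0xFF3B0000,
	.desc_base = 0x20000000,
	.desc_size = 0x00200000,
	.flags     = 0
};

void plat_ufs_setup(void)
{
	(void)dw_ufs_init(&plat_ufs_params);
}
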
diff --git a/include/drivers/emmc.h b/include/drivers/emmc.h
new file mode 100644
index 0000000..921f4cf
--- /dev/null
+++ b/include/drivers/emmc.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EMMC_H__
+#define __EMMC_H__
+
+#include <stdint.h>
+
+#define EMMC_BLOCK_SIZE			512
+#define EMMC_BLOCK_MASK			(EMMC_BLOCK_SIZE - 1)
+#define EMMC_BOOT_CLK_RATE		(400 * 1000)
+
+#define EMMC_CMD0			0
+#define EMMC_CMD1			1
+#define EMMC_CMD2			2
+#define EMMC_CMD3			3
+#define EMMC_CMD6			6
+#define EMMC_CMD7			7
+#define EMMC_CMD8			8
+#define EMMC_CMD9			9
+#define EMMC_CMD12			12
+#define EMMC_CMD13			13
+#define EMMC_CMD17			17
+#define EMMC_CMD18			18
+#define EMMC_CMD23			23
+#define EMMC_CMD24			24
+#define EMMC_CMD25			25
+#define EMMC_CMD35			35
+#define EMMC_CMD36			36
+#define EMMC_CMD38			38
+
+#define OCR_POWERUP			(1 << 31)
+#define OCR_BYTE_MODE			(0 << 29)
+#define OCR_SECTOR_MODE			(2 << 29)
+#define OCR_ACCESS_MODE_MASK		(3 << 29)
+#define OCR_VDD_MIN_2V7			(0x1ff << 15)
+#define OCR_VDD_MIN_2V0			(0x7f << 8)
+#define OCR_VDD_MIN_1V7			(1 << 7)
+
+#define EMMC_RESPONSE_R1		1
+#define EMMC_RESPONSE_R1B		1
+#define EMMC_RESPONSE_R2		4
+#define EMMC_RESPONSE_R3		1
+#define EMMC_RESPONSE_R4		1
+#define EMMC_RESPONSE_R5		1
+
+#define EMMC_FIX_RCA			6	/* > 1 */
+#define RCA_SHIFT_OFFSET		16
+
+#define CMD_EXTCSD_PARTITION_CONFIG	179
+#define CMD_EXTCSD_BUS_WIDTH		183
+#define CMD_EXTCSD_HS_TIMING		185
+
+#define PART_CFG_BOOT_PARTITION1_ENABLE	(1 << 3)
+#define PART_CFG_PARTITION1_ACCESS	(1 << 0)
+
+/* values in EXT CSD register */
+#define EMMC_BUS_WIDTH_1		0
+#define EMMC_BUS_WIDTH_4		1
+#define EMMC_BUS_WIDTH_8		2
+#define EMMC_BOOT_MODE_BACKWARD		(0 << 3)
+#define EMMC_BOOT_MODE_HS_TIMING	(1 << 3)
+#define EMMC_BOOT_MODE_DDR		(2 << 3)
+
+#define EXTCSD_SET_CMD			(0 << 24)
+#define EXTCSD_SET_BITS			(1 << 24)
+#define EXTCSD_CLR_BITS			(2 << 24)
+#define EXTCSD_WRITE_BYTES		(3 << 24)
+#define EXTCSD_CMD(x)			(((x) & 0xff) << 16)
+#define EXTCSD_VALUE(x)			(((x) & 0xff) << 8)
+
+#define STATUS_CURRENT_STATE(x)		(((x) & 0xf) << 9)
+#define STATUS_READY_FOR_DATA		(1 << 8)
+#define STATUS_SWITCH_ERROR		(1 << 7)
+#define EMMC_GET_STATE(x)		(((x) >> 9) & 0xf)
+#define EMMC_STATE_IDLE			0
+#define EMMC_STATE_READY		1
+#define EMMC_STATE_IDENT		2
+#define EMMC_STATE_STBY			3
+#define EMMC_STATE_TRAN			4
+#define EMMC_STATE_DATA			5
+#define EMMC_STATE_RCV			6
+#define EMMC_STATE_PRG			7
+#define EMMC_STATE_DIS			8
+#define EMMC_STATE_BTST			9
+#define EMMC_STATE_SLP			10
+
+#define EMMC_FLAG_CMD23			(1 << 0)
+
+typedef struct emmc_cmd {
+	unsigned int	cmd_idx;
+	unsigned int	cmd_arg;
+	unsigned int	resp_type;
+	unsigned int	resp_data[4];
+} emmc_cmd_t;
+
+typedef struct emmc_ops {
+	void (*init)(void);
+	int (*send_cmd)(emmc_cmd_t *cmd);
+	int (*set_ios)(int clk, int width);
+	int (*prepare)(int lba, uintptr_t buf, size_t size);
+	int (*read)(int lba, uintptr_t buf, size_t size);
+	int (*write)(int lba, const uintptr_t buf, size_t size);
+} emmc_ops_t;
+
+typedef struct emmc_csd {
+	unsigned int		not_used:		1;
+	unsigned int		crc:			7;
+	unsigned int		ecc:			2;
+	unsigned int		file_format:		2;
+	unsigned int		tmp_write_protect:	1;
+	unsigned int		perm_write_protect:	1;
+	unsigned int		copy:			1;
+	unsigned int		file_format_grp:	1;
+
+	unsigned int 		reserved_1:		5;
+	unsigned int 		write_bl_partial:	1;
+	unsigned int 		write_bl_len:		4;
+	unsigned int 		r2w_factor:		3;
+	unsigned int 		default_ecc:		2;
+	unsigned int 		wp_grp_enable:		1;
+
+	unsigned int		wp_grp_size:		5;
+	unsigned int		erase_grp_mult:		5;
+	unsigned int		erase_grp_size:		5;
+	unsigned int		c_size_mult:		3;
+	unsigned int		vdd_w_curr_max:		3;
+	unsigned int		vdd_w_curr_min:		3;
+	unsigned int		vdd_r_curr_max:		3;
+	unsigned int		vdd_r_curr_min:		3;
+	unsigned int		c_size_low:		2;
+
+	unsigned int		c_size_high:		10;
+	unsigned int		reserved_2:		2;
+	unsigned int		dsr_imp:		1;
+	unsigned int		read_blk_misalign:	1;
+	unsigned int		write_blk_misalign:	1;
+	unsigned int		read_bl_partial:	1;
+	unsigned int		read_bl_len:		4;
+	unsigned int		ccc:			12;
+
+	unsigned int		tran_speed:		8;
+	unsigned int		nsac:			8;
+	unsigned int		taac:			8;
+	unsigned int		reserved_3:		2;
+	unsigned int		spec_vers:		4;
+	unsigned int		csd_structure:		2;
+} emmc_csd_t;
+
+size_t emmc_read_blocks(int lba, uintptr_t buf, size_t size);
+size_t emmc_write_blocks(int lba, const uintptr_t buf, size_t size);
+size_t emmc_erase_blocks(int lba, size_t size);
+size_t emmc_rpmb_read_blocks(int lba, uintptr_t buf, size_t size);
+size_t emmc_rpmb_write_blocks(int lba, const uintptr_t buf, size_t size);
+size_t emmc_rpmb_erase_blocks(int lba, size_t size);
+void emmc_init(const emmc_ops_t *ops, int clk, int bus_width,
+	       unsigned int flags);
+
+#endif	/* __EMMC_H__ */
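
A hedged sketch of bringing up the eMMC layer above; the platform emmc_ops_t implementation is assumed to exist elsewhere, and the buffer alignment requirement is an assumption.

#include <emmc.h>

/* Platform eMMC host driver, implemented elsewhere (assumed name). */
extern const emmc_ops_t plat_emmc_ops;

/* One block of scratch space; the alignment requirement is an assumption. */
static unsigned char block_buf[EMMC_BLOCK_SIZE]
	__attribute__((aligned(EMMC_BLOCK_SIZE)));

void plat_emmc_example(void)
{
	/* 4-bit bus at the 400 kHz boot clock, with CMD23 supported. */
	emmc_init(&plat_emmc_ops, EMMC_BOOT_CLK_RATE, EMMC_BUS_WIDTH_4,
		  EMMC_FLAG_CMD23);

	/* Read one block from LBA 0. */
	(void)emmc_read_blocks(0, (uintptr_t)block_buf, EMMC_BLOCK_SIZE);
}
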
diff --git a/include/drivers/fastboot.h b/include/drivers/fastboot.h
deleted file mode 100644
index d5bf965..0000000
--- a/include/drivers/fastboot.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2015, Linaro Ltd. and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FASTBOOT_H__
-#define __FASTBOOT_H__
-
-#include <sys/types.h>
-
-typedef struct sparse_header {
-	uint32_t	magic;
-	uint16_t	major_version;
-	uint16_t	minor_version;
-	uint16_t	file_hdr_sz;
-	uint16_t	chunk_hdr_sz;
-	uint32_t	blk_sz;
-	uint32_t	total_blks;
-	uint32_t	total_chunks;
-	uint32_t	image_checksum;
-} sparse_header_t;
-
-#define SPARSE_HEADER_MAGIC	0xed26ff3a
-
-#define CHUNK_TYPE_RAW		0xCAC1
-#define CHUNK_TYPE_FILL		0xCAC2
-#define CHUNK_TYPE_DONT_CARE	0xCAC3
-#define CHUNK_TYPE_CRC32	0xCAC4
-
-typedef struct chunk_header {
-	uint16_t	chunk_type;     /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */
-	uint16_t	reserved1;
-	uint32_t	chunk_sz;       /* in blocks in output image */
-	uint32_t	total_sz;       /* in bytes of chunk input file including chunk header and data */
-} chunk_header_t;
-
-#endif /* __FASTBOOT_H__ */
diff --git a/include/drivers/generic_delay_timer.h b/include/drivers/generic_delay_timer.h
new file mode 100644
index 0000000..1c53a72
--- /dev/null
+++ b/include/drivers/generic_delay_timer.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GENERIC_DELAY_TIMER_H__
+#define __GENERIC_DELAY_TIMER_H__
+
+#include <stdint.h>
+
+void generic_delay_timer_init_args(uint32_t mult, uint32_t div);
+
+void generic_delay_timer_init(void);
+
+#endif /* __GENERIC_DELAY_TIMER_H__ */
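For illustration, a sketch of the two ways a platform can register this delay timer, assuming the mult/div pair scales counter ticks to microseconds (the convention used by the timer framework); the 19.2 MHz figure is purely an example.

/* Illustrative sketch only. */
#include <generic_delay_timer.h>

void plat_delay_timer_setup(void)
{
	/* Derive the scaling factors from the architectural counter (CNTFRQ). */
	generic_delay_timer_init();

	/*
	 * Alternatively, pass an explicit multiplier/divider. For a 19.2 MHz
	 * counter, 5/96 converts ticks to microseconds (19.2 ticks per us):
	 *
	 *	generic_delay_timer_init_args(5, 96);
	 */
}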
diff --git a/include/drivers/gpio.h b/include/drivers/gpio.h
new file mode 100644
index 0000000..5722051
--- /dev/null
+++ b/include/drivers/gpio.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GPIO_H__
+#define __GPIO_H__
+
+#define GPIO_DIR_OUT		0
+#define GPIO_DIR_IN		1
+
+#define GPIO_LEVEL_LOW		0
+#define GPIO_LEVEL_HIGH		1
+
+#define GPIO_PULL_NONE		0
+#define GPIO_PULL_UP		1
+#define GPIO_PULL_DOWN		2
+
+typedef struct gpio_ops {
+	int (*get_direction)(int gpio);
+	void (*set_direction)(int gpio, int direction);
+	int (*get_value)(int gpio);
+	void (*set_value)(int gpio, int value);
+	void (*set_pull)(int gpio, int pull);
+	int (*get_pull)(int gpio);
+} gpio_ops_t;
+
+int gpio_get_direction(int gpio);
+void gpio_set_direction(int gpio, int direction);
+int gpio_get_value(int gpio);
+void gpio_set_value(int gpio, int value);
+void gpio_set_pull(int gpio, int pull);
+int gpio_get_pull(int gpio);
+void gpio_init(const gpio_ops_t *ops);
+
+#endif	/* __GPIO_H__ */
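For illustration, a sketch of how a platform driver plugs into this GPIO framework: it fills a gpio_ops_t with its own back-end functions, registers it with gpio_init(), and callers then use the generic gpio_* wrappers. All plat_* names and the pin number are hypothetical.

/* Illustrative sketch only; the plat_* back-end functions are stubs. */
#include <gpio.h>

static int plat_gpio_get_direction(int gpio) { return GPIO_DIR_IN; }
static void plat_gpio_set_direction(int gpio, int direction) { /* program direction register */ }
static int plat_gpio_get_value(int gpio) { return GPIO_LEVEL_LOW; }
static void plat_gpio_set_value(int gpio, int value) { /* program data register */ }
static void plat_gpio_set_pull(int gpio, int pull) { /* program pull register */ }
static int plat_gpio_get_pull(int gpio) { return GPIO_PULL_NONE; }

static const gpio_ops_t plat_gpio_ops = {
	.get_direction	= plat_gpio_get_direction,
	.set_direction	= plat_gpio_set_direction,
	.get_value	= plat_gpio_get_value,
	.set_value	= plat_gpio_set_value,
	.set_pull	= plat_gpio_set_pull,
	.get_pull	= plat_gpio_get_pull,
};

void plat_gpio_example(void)
{
	gpio_init(&plat_gpio_ops);

	gpio_set_direction(42, GPIO_DIR_OUT);	/* pin 42 is arbitrary */
	gpio_set_value(42, GPIO_LEVEL_HIGH);
}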
diff --git a/include/drivers/io/io_block.h b/include/drivers/io/io_block.h
index a0a8558..4f3ab39 100644
--- a/include/drivers/io/io_block.h
+++ b/include/drivers/io/io_block.h
@@ -1,44 +1,27 @@
 /*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_BLOCK_H__
 #define __IO_BLOCK_H__
 
-struct io_dev_connector;
+#include <io_storage.h>
 
-struct block_ops {
-	int	(*init)(void);
-	int	(*read)(unsigned long, unsigned long, size_t, uint32_t);
-	int	(*write)(unsigned long, unsigned long, size_t, uint32_t);
-};
+/* block devices ops */
+typedef struct io_block_ops {
+	size_t	(*read)(int lba, uintptr_t buf, size_t size);
+	size_t	(*write)(int lba, const uintptr_t buf, size_t size);
+} io_block_ops_t;
+
+typedef struct io_block_dev_spec {
+	io_block_spec_t	buffer;
+	io_block_ops_t	ops;
+	size_t		block_size;
+} io_block_dev_spec_t;
+
+struct io_dev_connector;
 
 int register_io_dev_block(const struct io_dev_connector **dev_con);
 
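For illustration, a sketch that wires the eMMC block functions from emmc.h into this new io_block layer and opens the device through the generic IO framework declared in io_storage.h; the buffer window addresses and block size are hypothetical platform choices.

/* Illustrative sketch only; the buffer window addresses are hypothetical. */
#include <emmc.h>
#include <io_block.h>
#include <io_driver.h>
#include <io_storage.h>

static io_block_dev_spec_t emmc_dev_spec = {
	/* Scratch window the block driver may use for partial-block accesses. */
	.buffer = {
		.offset	= 0x10000000,
		.length	= 0x1000,
	},
	.ops = {
		.read	= emmc_read_blocks,
		.write	= emmc_write_blocks,
	},
	.block_size = 512,
};

static const struct io_dev_connector *emmc_dev_con;
static uintptr_t emmc_dev_handle;

int plat_io_setup(void)
{
	int rc = register_io_dev_block(&emmc_dev_con);

	if (rc == 0)
		rc = io_dev_open(emmc_dev_con, (uintptr_t)&emmc_dev_spec,
				 &emmc_dev_handle);
	return rc;
}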
diff --git a/include/drivers/io/io_driver.h b/include/drivers/io/io_driver.h
index adb38b0..8306407 100644
--- a/include/drivers/io/io_driver.h
+++ b/include/drivers/io/io_driver.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_DRIVER_H__
diff --git a/include/drivers/io/io_dummy.h b/include/drivers/io/io_dummy.h
new file mode 100644
index 0000000..abe77ec
--- /dev/null
+++ b/include/drivers/io/io_dummy.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_DUMMY_H__
+#define __IO_DUMMY_H__
+
+int register_io_dev_dummy(const struct io_dev_connector **dev_con);
+
+#endif /* __IO_DUMMY_H__ */
diff --git a/include/drivers/io/io_fip.h b/include/drivers/io/io_fip.h
index 90b2fd0..1131cc7 100644
--- a/include/drivers/io/io_fip.h
+++ b/include/drivers/io/io_fip.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014 ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_FIP_H__
diff --git a/include/drivers/io/io_memmap.h b/include/drivers/io/io_memmap.h
index 7ee60fe..70e4e55 100644
--- a/include/drivers/io/io_memmap.h
+++ b/include/drivers/io/io_memmap.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_MEMMAP_H__
diff --git a/include/drivers/io/io_semihosting.h b/include/drivers/io/io_semihosting.h
index 8902a6f..a917f5a 100644
--- a/include/drivers/io/io_semihosting.h
+++ b/include/drivers/io/io_semihosting.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_SH_H__
diff --git a/include/drivers/io/io_storage.h b/include/drivers/io/io_storage.h
index 1c2d26d..50907ff 100644
--- a/include/drivers/io/io_storage.h
+++ b/include/drivers/io/io_storage.h
@@ -1,38 +1,16 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __IO_H__
 #define __IO_H__
 
+#include <errno.h>
 #include <stdint.h>
 #include <stdio.h> /* For ssize_t */
+#include <uuid.h>
 
 
 /* Device type which can be used to enable policy decisions about which device
@@ -41,6 +19,7 @@
 	IO_TYPE_INVALID,
 	IO_TYPE_SEMIHOSTING,
 	IO_TYPE_MEMMAP,
+	IO_TYPE_DUMMY,
 	IO_TYPE_FIRMWARE_IMAGE_PACKAGE,
 	IO_TYPE_BLOCK,
 	IO_TYPE_MAX
@@ -68,6 +47,11 @@
 	unsigned int mode;
 } io_file_spec_t;
 
+/* UUID specification - used to refer to data accessed using UUIDs (e.g. FIP
+ * images) */
+typedef struct io_uuid_spec {
+	const uuid_t uuid;
+} io_uuid_spec_t;
 
 /* Block specification - used to refer to data on a device supporting
  * block-like entities */
@@ -83,13 +67,6 @@
 #define IO_MODE_RW	(1 << 1)
 
 
-/* Return codes reported by 'io_*' APIs */
-#define IO_SUCCESS		(0)
-#define IO_FAIL			(-1)
-#define IO_NOT_SUPPORTED	(-2)
-#define IO_RESOURCES_EXHAUSTED	(-3)
-
-
 /* Open a connection to a device */
 int io_dev_open(const struct io_dev_connector *dev_con,
 		const uintptr_t dev_spec,
diff --git a/include/drivers/partition/gpt.h b/include/drivers/partition/gpt.h
new file mode 100644
index 0000000..7a22d9e
--- /dev/null
+++ b/include/drivers/partition/gpt.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GPT_H__
+#define __GPT_H__
+
+#include <partition.h>
+
+#define PARTITION_TYPE_GPT		0xee
+#define GPT_HEADER_OFFSET		PARTITION_BLOCK_SIZE
+#define GPT_ENTRY_OFFSET		(GPT_HEADER_OFFSET +		\
+					 PARTITION_BLOCK_SIZE)
+#define GUID_LEN			16
+
+#define GPT_SIGNATURE			"EFI PART"
+
+typedef struct gpt_entry {
+	unsigned char		type_uuid[GUID_LEN];
+	unsigned char		unique_uuid[GUID_LEN];
+	unsigned long long	first_lba;
+	unsigned long long	last_lba;
+	unsigned long long	attr;
+	unsigned short		name[EFI_NAMELEN];
+} gpt_entry_t;
+
+typedef struct gpt_header {
+	unsigned char		signature[8];
+	unsigned int		revision;
+	unsigned int		size;
+	unsigned int		header_crc;
+	unsigned int		reserved;
+	unsigned long long	current_lba;
+	unsigned long long	backup_lba;
+	unsigned long long	first_lba;
+	unsigned long long	last_lba;
+	unsigned char		disk_uuid[16];
+	/* starting LBA of array of partition entries */
+	unsigned long long	part_lba;
+	/* number of partition entries in array */
+	unsigned int		list_num;
+	/* size of a single partition entry (usually 128) */
+	unsigned int		part_size;
+	unsigned int		part_crc;
+} gpt_header_t;
+
+int parse_gpt_entry(gpt_entry_t *gpt_entry, partition_entry_t *entry);
+
+#endif	/* __GPT_H__ */
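For illustration, two small helpers sketching how the GPT structures above are typically consumed: validating the header signature and converting one raw entry into the generic partition_entry_t. The raw data is assumed to have been read from storage already.

/* Illustrative sketch only. */
#include <gpt.h>
#include <string.h>

/* "EFI PART" occupies the first 8 bytes of the GPT header. */
static int gpt_header_valid(const gpt_header_t *header)
{
	return memcmp(header->signature, GPT_SIGNATURE,
		      sizeof(GPT_SIGNATURE) - 1) == 0;
}

/* Convert one raw on-disk entry into the generic partition entry format. */
static int gpt_entry_to_partition(gpt_entry_t *raw, partition_entry_t *out)
{
	/* parse_gpt_entry() is expected to reject unused (all-zero) slots. */
	return parse_gpt_entry(raw, out);
}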
diff --git a/include/drivers/partition/mbr.h b/include/drivers/partition/mbr.h
new file mode 100644
index 0000000..1cacb78
--- /dev/null
+++ b/include/drivers/partition/mbr.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MBR_H__
+#define __MBR_H__
+
+#define MBR_OFFSET			0
+
+#define MBR_PRIMARY_ENTRY_OFFSET	0x1be
+#define MBR_PRIMARY_ENTRY_SIZE		0x10
+#define MBR_PRIMARY_ENTRY_NUMBER	4
+#define MBR_CHS_ADDRESS_LEN		3
+
+#define MBR_SIGNATURE_FIRST		0x55
+#define MBR_SIGNATURE_SECOND		0xAA
+
+typedef struct mbr_entry {
+	unsigned char		status;
+	unsigned char		first_sector[MBR_CHS_ADDRESS_LEN];
+	unsigned char		type;
+	unsigned char		last_sector[MBR_CHS_ADDRESS_LEN];
+	unsigned int		first_lba;
+	unsigned int		sector_nums;
+} mbr_entry_t;
+
+#endif	/* __MBR_H__ */
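For illustration, a helper sketching the usual first check on a boot sector: the two signature bytes at offsets 510 and 511 of the 512-byte sector must match before the four primary entries at MBR_PRIMARY_ENTRY_OFFSET are trusted.

/* Illustrative sketch only; the caller supplies a 512-byte boot sector. */
#include <mbr.h>

static int mbr_signature_valid(const unsigned char *sector)
{
	return (sector[510] == MBR_SIGNATURE_FIRST) &&
	       (sector[511] == MBR_SIGNATURE_SECOND);
}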
diff --git a/include/drivers/partition/partition.h b/include/drivers/partition/partition.h
new file mode 100644
index 0000000..4951f8c
--- /dev/null
+++ b/include/drivers/partition/partition.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PARTITION_H__
+#define __PARTITION_H__
+
+#include <cassert.h>
+#include <types.h>
+
+#if !PLAT_PARTITION_MAX_ENTRIES
+# define PLAT_PARTITION_MAX_ENTRIES	128
+#endif	/* PLAT_PARTITION_MAX_ENTRIES */
+
+CASSERT(PLAT_PARTITION_MAX_ENTRIES <= 128, assert_plat_partition_max_entries);
+
+#define PARTITION_BLOCK_SIZE		512
+
+#define EFI_NAMELEN			36
+
+typedef struct partition_entry {
+	uint64_t		start;
+	uint64_t		length;
+	char			name[EFI_NAMELEN];
+} partition_entry_t;
+
+typedef struct partition_entry_list {
+	partition_entry_t	list[PLAT_PARTITION_MAX_ENTRIES];
+	int			entry_count;
+} partition_entry_list_t;
+
+int load_partition_table(unsigned int image_id);
+const partition_entry_t *get_partition_entry(const char *name);
+const partition_entry_list_t *get_partition_entry_list(void);
+void partition_init(unsigned int image_id);
+
+#endif	/* __PARTITION_H__ */
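For illustration, a sketch of the expected call sequence: parse the partition table once for a given image ID, then look entries up by their GPT name. The image ID macro and the "boot" partition name are hypothetical.

/* Illustrative sketch only; PLAT_GPT_IMAGE_ID and "boot" are hypothetical. */
#include <partition.h>
#include <stdint.h>

#define PLAT_GPT_IMAGE_ID	0	/* hypothetical image ID for the GPT device */

int plat_find_boot_partition(uint64_t *offset, uint64_t *length)
{
	const partition_entry_t *entry;

	partition_init(PLAT_GPT_IMAGE_ID);

	entry = get_partition_entry("boot");
	if (entry == NULL)
		return -1;

	*offset = entry->start;
	*length = entry->length;
	return 0;
}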
diff --git a/include/drivers/synopsys/dw_mmc.h b/include/drivers/synopsys/dw_mmc.h
new file mode 100644
index 0000000..4e6b348
--- /dev/null
+++ b/include/drivers/synopsys/dw_mmc.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DW_MMC_H__
+#define __DW_MMC_H__
+
+typedef struct dw_mmc_params {
+	uintptr_t	reg_base;
+	uintptr_t	desc_base;
+	size_t		desc_size;
+	int		clk_rate;
+	int		bus_width;
+	unsigned int	flags;
+} dw_mmc_params_t;
+
+void dw_mmc_init(dw_mmc_params_t *params);
+
+#endif	/* __DW_MMC_H__ */
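For illustration, a sketch of initialising the Synopsys DesignWare MMC driver; the addresses, the clock rate and the bus-width constant (which comes from emmc.h, outside this hunk) are hypothetical platform values.

/* Illustrative sketch only; addresses and rates are hypothetical. */
#include <dw_mmc.h>
#include <emmc.h>
#include <stdint.h>

void plat_dw_mmc_setup(void)
{
	dw_mmc_params_t params = {
		.reg_base	= 0xF723D000,	/* controller MMIO base */
		.desc_base	= 0x10010000,	/* IDMAC descriptor area */
		.desc_size	= 0x00001000,
		.clk_rate	= 24 * 1000 * 1000,
		.bus_width	= EMMC_BUS_WIDTH_8,	/* from emmc.h */
		.flags		= 0,
	};

	dw_mmc_init(&params);
}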
diff --git a/include/drivers/ti/uart/uart_16550.h b/include/drivers/ti/uart/uart_16550.h
new file mode 100644
index 0000000..f258d45
--- /dev/null
+++ b/include/drivers/ti/uart/uart_16550.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UART_16550_H__
+#define __UART_16550_H__
+
+/* UART16550 Registers */
+#define UARTTX			0x0
+#define UARTRX			0x0
+#define UARTDLL			0x0
+#define UARTIER			0x4
+#define UARTDLLM		0x4
+#define UARTIIR			0x8
+#define UARTFCR			0x8
+#define UARTLCR			0xc
+#define UARTMCR			0x10
+#define UARTLSR			0x14
+#define UARTMSR			0x18
+#define UARTSPR			0x1c
+#define UARTCSR			0x20
+#define UARTRXFIFOCFG		0x24
+#define UARTMIE			0x28
+#define UARTVNDR		0x2c
+#define UARTASR			0x3c
+
+/* FIFO Control Register bits */
+#define UARTFCR_FIFOMD_16450	(0 << 6)
+#define UARTFCR_FIFOMD_16550	(1 << 6)
+#define UARTFCR_RXTRIG_1	(0 << 6)
+#define UARTFCR_RXTRIG_4	(1 << 6)
+#define UARTFCR_RXTRIG_8	(2 << 6)
+#define UARTFCR_RXTRIG_16	(3 << 6)
+#define UARTFCR_TXTRIG_1	(0 << 4)
+#define UARTFCR_TXTRIG_4	(1 << 4)
+#define UARTFCR_TXTRIG_8	(2 << 4)
+#define UARTFCR_TXTRIG_16	(3 << 4)
+#define UARTFCR_DMAEN		(1 << 3)	/* Enable DMA mode */
+#define UARTFCR_TXCLR		(1 << 2)	/* Clear contents of Tx FIFO */
+#define UARTFCR_RXCLR		(1 << 1)	/* Clear contents of Rx FIFO */
+#define UARTFCR_FIFOEN		(1 << 0)	/* Enable the Tx/Rx FIFO */
+
+/* Line Control Register bits */
+#define UARTLCR_DLAB		(1 << 7)	/* Divisor Latch Access */
+#define UARTLCR_SETB		(1 << 6)	/* Set BREAK Condition */
+#define UARTLCR_SETP		(1 << 5)	/* Set Parity to LCR[4] */
+#define UARTLCR_EVEN		(1 << 4)	/* Even Parity Format */
+#define UARTLCR_PAR		(1 << 3)	/* Parity */
+#define UARTLCR_STOP		(1 << 2)	/* Stop Bit */
+#define UARTLCR_WORDSZ_5	0		/* Word Length of 5 */
+#define UARTLCR_WORDSZ_6	1		/* Word Length of 6 */
+#define UARTLCR_WORDSZ_7	2		/* Word Length of 7 */
+#define UARTLCR_WORDSZ_8	3		/* Word Length of 8 */
+
+/* Line Status Register bits */
+#define UARTLSR_RXFIFOEMT	(1 << 9)	/* Rx Fifo Empty */
+#define UARTLSR_TXFIFOFULL	(1 << 8)	/* Tx Fifo Full */
+#define UARTLSR_RXFIFOERR	(1 << 7)	/* Rx Fifo Error */
+#define UARTLSR_TEMT		(1 << 6)	/* Tx Shift Register Empty */
+#define UARTLSR_THRE		(1 << 5)	/* Tx Holding Register Empty */
+#define UARTLSR_BRK		(1 << 4)	/* Break Condition Detected */
+#define UARTLSR_FERR		(1 << 3)	/* Framing Error */
+#define UARTLSR_PERR		(1 << 3)	/* Parity Error */
+#define UARTLSR_OVRF		(1 << 2)	/* Rx Overrun Error */
+#define UARTLSR_RDR_BIT		(0)		/* Rx Data Ready Bit */
+#define UARTLSR_RDR		(1 << UARTLSR_RDR_BIT)	/* Rx Data Ready */
+
+#endif	/* __UART_16550_H__ */
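For illustration, a polled-transmit helper built only from the register offsets and LSR bit above; the base address and the use of the 32-bit MMIO accessors are assumptions about the platform.

/* Illustrative sketch only; the UART base address is hypothetical. */
#include <mmio.h>
#include <uart_16550.h>

#define PLAT_UART_BASE		0xF7113000

static void plat_uart_putc(char c)
{
	/* Wait for the transmit holding register to drain, then send. */
	while ((mmio_read_32(PLAT_UART_BASE + UARTLSR) & UARTLSR_THRE) == 0U)
		;
	mmio_write_32(PLAT_UART_BASE + UARTTX, (uint32_t)c);
}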
diff --git a/include/drivers/ufs.h b/include/drivers/ufs.h
new file mode 100644
index 0000000..3a4f1c7
--- /dev/null
+++ b/include/drivers/ufs.h
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UFS_H__
+#define __UFS_H__
+
+/* register map of UFSHCI */
+/* Controller Capabilities */
+#define CAP				0x00
+#define CAP_NUTRS_MASK			0x1F
+
+/* UFS Version */
+#define VER				0x08
+/* Host Controller Identification - Product ID */
+#define HCDDID				0x10
+/* Host Controller Identification Descriptor - Manufacturer ID */
+#define HCPMID				0x14
+/* Auto-Hibernate Idle Timer */
+#define AHIT				0x18
+/* Interrupt Status */
+#define IS				0x20
+/* Interrupt Enable */
+#define IE				0x24
+/* System Bus Fatal Error Status */
+#define UFS_INT_SBFES			(1 << 17)
+/* Host Controller Fatal Error Status */
+#define UFS_INT_HCFES			(1 << 16)
+/* UTP Error Status */
+#define UFS_INT_UTPES			(1 << 12)
+/* Device Fatal Error Status */
+#define UFS_INT_DFES			(1 << 11)
+/* UIC Command Completion Status */
+#define UFS_INT_UCCS			(1 << 10)
+/* UTP Task Management Request Completion Status */
+#define UFS_INT_UTMRCS			(1 << 9)
+/* UIC Link Startup Status */
+#define UFS_INT_ULSS			(1 << 8)
+/* UIC Link Lost Status */
+#define UFS_INT_ULLS			(1 << 7)
+/* UIC Hibernate Enter Status */
+#define UFS_INT_UHES			(1 << 6)
+/* UIC Hibernate Exit Status */
+#define UFS_INT_UHXS			(1 << 5)
+/* UIC Power Mode Status */
+#define UFS_INT_UPMS			(1 << 4)
+/* UIC Test Mode Status */
+#define UFS_INT_UTMS			(1 << 3)
+/* UIC Error */
+#define UFS_INT_UE			(1 << 2)
+/* UIC DME_ENDPOINTRESET Indication */
+#define UFS_INT_UDEPRI			(1 << 1)
+/* UTP Transfer Request Completion Status */
+#define UFS_INT_UTRCS			(1 << 0)
+
+/* Host Controller Status */
+#define HCS				0x30
+#define HCS_UPMCRS_MASK			(7 << 8)
+#define HCS_PWR_LOCAL			(1 << 8)
+#define HCS_UCRDY			(1 << 3)
+#define HCS_UTMRLRDY			(1 << 2)
+#define HCS_UTRLRDY			(1 << 1)
+#define HCS_DP				(1 << 0)
+
+/* Host Controller Enable */
+#define HCE				0x34
+#define HCE_ENABLE			1
+
+/* Host UIC Error Code PHY Adapter Layer */
+#define UECPA				0x38
+/* Host UIC Error Code Data Link Layer */
+#define UECDL				0x3C
+/* Host UIC Error Code Network Layer */
+#define UECN				0x40
+/* Host UIC Error Code Transport Layer */
+#define UECT				0x44
+/* Host UIC Error Code */
+#define UECDME				0x48
+/* UTP Transfer Request Interrupt Aggregation Control Register */
+#define UTRIACR				0x4C
+#define UTRIACR_IAEN			(1 << 31)
+#define UTRIACR_IAPWEN			(1 << 24)
+#define UTRIACR_IASB			(1 << 20)
+#define UTRIACR_CTR			(1 << 16)
+#define UTRIACR_IACTH(x)		(((x) & 0x1F) << 8)
+#define UTRIACR_IATOVAL(x)		((x) & 0xFF)
+
+/* UTP Transfer Request List Base Address */
+#define UTRLBA				0x50
+/* UTP Transfer Request List Base Address Upper 32-bits */
+#define UTRLBAU				0x54
+/* UTP Transfer Request List Door Bell Register */
+#define UTRLDBR				0x58
+/* UTP Transfer Request List Clear Register */
+#define UTRLCLR				0x5C
+/* UTP Transfer Request List Run Stop Register */
+#define UTRLRSR				0x60
+#define UTMRLBA				0x70
+#define UTMRLBAU			0x74
+#define UTMRLDBR			0x78
+#define UTMRLCLR			0x7C
+#define UTMRLRSR			0x80
+/* UIC Command */
+#define UICCMD				0x90
+/* UIC Command Argument 1 */
+#define UCMDARG1			0x94
+/* UIC Command Argument 2 */
+#define UCMDARG2			0x98
+/* UIC Command Argument 3 */
+#define UCMDARG3			0x9C
+
+#define UFS_BLOCK_SHIFT			12		/* 4KB */
+#define UFS_BLOCK_SIZE			(1 << UFS_BLOCK_SHIFT)
+#define UFS_BLOCK_MASK			(UFS_BLOCK_SIZE - 1)
+#define UFS_MAX_LUNS			8
+
+/* UTP Transfer Request Descriptor */
+/* Command Type */
+#define CT_UFS_STORAGE			1
+#define CT_SCSI				0
+
+/* Data Direction */
+#define DD_OUT				2		/* Device --> Host */
+#define DD_IN				1		/* Host --> Device */
+#define DD_NO_DATA_TRANSFER		0
+
+#define UTP_TRD_SIZE			32
+
+/* Transaction Type */
+#define TRANS_TYPE_HD			(1 << 7)	/* E2ECRC */
+#define TRANS_TYPE_DD			(1 << 6)
+#define TRANS_TYPE_CODE_MASK		0x3F
+#define QUERY_RESPONSE_UPIU		(0x36 << 0)
+#define READY_TO_TRANSACTION_UPIU	(0x31 << 0)
+#define DATA_IN_UPIU			(0x22 << 0)
+#define RESPONSE_UPIU			(0x21 << 0)
+#define NOP_IN_UPIU			(0x20 << 0)
+#define QUERY_REQUEST_UPIU		(0x16 << 0)
+#define DATA_OUT_UPIU			(0x02 << 0)
+#define CMD_UPIU			(0x01 << 0)
+#define NOP_OUT_UPIU			(0x00 << 0)
+
+#define OCS_SUCCESS			0x0
+#define OCS_INVALID_FUNC_ATTRIBUTE	0x1
+#define OCS_MISMATCH_REQUEST_SIZE	0x2
+#define OCS_MISMATCH_RESPONSE_SIZE	0x3
+#define OCS_PEER_COMMUNICATION_FAILURE	0x4
+#define OCS_ABORTED			0x5
+#define OCS_FATAL_ERROR			0x6
+#define OCS_MASK			0xF
+
+/* UIC Command */
+#define DME_GET				0x01
+#define DME_SET				0x02
+#define DME_PEER_GET			0x03
+#define DME_PEER_SET			0x04
+#define DME_POWERON			0x10
+#define DME_POWEROFF			0x11
+#define DME_ENABLE			0x12
+#define DME_RESET			0x14
+#define DME_ENDPOINTRESET		0x15
+#define DME_LINKSTARTUP			0x16
+#define DME_HIBERNATE_ENTER		0x17
+#define DME_HIBERNATE_EXIT		0x18
+#define DME_TEST_MODE			0x1A
+
+#define GEN_SELECTOR_IDX(x)		((x) & 0xFFFF)
+
+#define CONFIG_RESULT_CODE_MASK		0xFF
+
+#define CDBCMD_TEST_UNIT_READY		0x00
+#define CDBCMD_READ_6			0x08
+#define CDBCMD_WRITE_6			0x0A
+#define CDBCMD_START_STOP_UNIT		0x1B
+#define CDBCMD_READ_CAPACITY_10		0x25
+#define CDBCMD_READ_10			0x28
+#define CDBCMD_WRITE_10			0x2A
+#define CDBCMD_READ_16			0x88
+#define CDBCMD_WRITE_16			0x8A
+#define CDBCMD_READ_CAPACITY_16		0x9E
+#define CDBCMD_REPORT_LUNS		0xA0
+
+#define UPIU_FLAGS_R			(1 << 6)
+#define UPIU_FLAGS_W			(1 << 5)
+#define UPIU_FLAGS_ATTR_MASK		(3 << 0)
+#define UPIU_FLAGS_ATTR_S		(0 << 0)	/* Simple */
+#define UPIU_FLAGS_ATTR_O		(1 << 0)	/* Ordered */
+#define UPIU_FLAGS_ATTR_HQ		(2 << 0)	/* Head of Queue */
+#define UPIU_FLAGS_ATTR_ACA		(3 << 0)
+#define UPIU_FLAGS_O			(1 << 6)
+#define UPIU_FLAGS_U			(1 << 5)
+#define UPIU_FLAGS_D			(1 << 4)
+
+#define QUERY_FUNC_STD_READ		0x01
+#define QUERY_FUNC_STD_WRITE		0x81
+
+#define QUERY_NOP			0x00
+#define QUERY_READ_DESC			0x01
+#define QUERY_WRITE_DESC		0x02
+#define QUERY_READ_ATTR			0x03
+#define QUERY_WRITE_ATTR		0x04
+#define QUERY_READ_FLAG			0x05
+#define QUERY_SET_FLAG			0x06
+#define QUERY_CLEAR_FLAG		0x07
+#define QUERY_TOGGLE_FLAG		0x08
+
+#define RW_WITHOUT_CACHE		0x18
+
+#define DESC_TYPE_DEVICE		0x00
+#define DESC_TYPE_CONFIGURATION		0x01
+#define DESC_TYPE_UNIT			0x02
+#define DESC_TYPE_INTERCONNECT		0x04
+#define DESC_TYPE_STRING		0x05
+
+#define ATTR_CUR_PWR_MODE		0x02	/* bCurrentPowerMode */
+#define ATTR_ACTIVECC			0x03	/* bActiveICCLevel */
+
+#define DEVICE_DESCRIPTOR_LEN		0x40
+#define UNIT_DESCRIPTOR_LEN		0x23
+
+#define QUERY_RESP_SUCCESS		0x00
+#define QUERY_RESP_OPCODE		0xFE
+#define QUERY_RESP_GENERAL_FAIL		0xFF
+
+#define SENSE_KEY_NO_SENSE		0x00
+#define SENSE_KEY_RECOVERED_ERROR	0x01
+#define SENSE_KEY_NOT_READY		0x02
+#define SENSE_KEY_MEDIUM_ERROR		0x03
+#define SENSE_KEY_HARDWARE_ERROR	0x04
+#define SENSE_KEY_ILLEGAL_REQUEST	0x05
+#define SENSE_KEY_UNIT_ATTENTION	0x06
+#define SENSE_KEY_DATA_PROTECT		0x07
+#define SENSE_KEY_BLANK_CHECK		0x08
+#define SENSE_KEY_VENDOR_SPECIFIC	0x09
+#define SENSE_KEY_COPY_ABORTED		0x0A
+#define SENSE_KEY_ABORTED_COMMAND	0x0B
+#define SENSE_KEY_VOLUME_OVERFLOW	0x0D
+#define SENSE_KEY_MISCOMPARE		0x0E
+
+#define SENSE_DATA_VALID		0x70
+#define SENSE_DATA_LENGTH		18
+
+#define READ_CAPACITY_LENGTH		8
+
+#define FLAG_DEVICE_INIT		0x01
+
+/* UFS Driver Flags */
+#define UFS_FLAGS_SKIPINIT		(1 << 0)
+
+typedef struct sense_data {
+	uint8_t		resp_code : 7;
+	uint8_t		valid : 1;
+	uint8_t		reserved0;
+	uint8_t		sense_key : 4;
+	uint8_t		reserved1 : 1;
+	uint8_t		ili : 1;
+	uint8_t		eom : 1;
+	uint8_t		file_mark : 1;
+	uint8_t		info[4];
+	uint8_t		asl;
+	uint8_t		cmd_spec_len[4];
+	uint8_t		asc;
+	uint8_t		ascq;
+	uint8_t		fruc;
+	uint8_t		sense_key_spec0 : 7;
+	uint8_t		sksv : 1;
+	uint8_t		sense_key_spec1;
+	uint8_t		sense_key_spec2;
+} sense_data_t;
+
+/* UTP Transfer Request Descriptor */
+typedef struct utrd_header {
+	uint32_t	reserved0 : 24;
+	uint32_t	i : 1;		/* interrupt */
+	uint32_t	dd : 2;		/* data direction */
+	uint32_t	reserved1 : 1;
+	uint32_t	ct : 4;		/* command type */
+	uint32_t	reserved2;
+	uint32_t	ocs : 8;	/* Overall Command Status */
+	uint32_t	reserved3 : 24;
+	uint32_t	reserved4;
+	uint32_t	ucdba;		/* aligned to 128-byte */
+	uint32_t	ucdbau;		/* Upper 32-bits */
+	uint32_t	rul : 16;	/* Response UPIU Length */
+	uint32_t	ruo : 16;	/* Response UPIU Offset */
+	uint32_t	prdtl : 16;	/* PRDT Length */
+	uint32_t	prdto : 16;	/* PRDT Offset */
+} utrd_header_t;	/* 8 words with little endian */
+
+/* UTP Task Management Request Descriptor */
+typedef struct utp_utmrd {
+	/* 4 words with little endian */
+	uint32_t	reserved0 : 24;
+	uint32_t	i : 1;		/* interrupt */
+	uint32_t	reserved1 : 7;
+	uint32_t	reserved2;
+	uint32_t	ocs : 8;	/* Overall Command Status */
+	uint32_t	reserved3 : 24;
+	uint32_t	reserved4;
+
+	/* followed by 8 words UPIU with big endian */
+
+	/* followed by 8 words Response UPIU with big endian */
+} utp_utmrd_t;
+
+/* NOP OUT UPIU */
+typedef struct nop_out_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		reserved0;
+	uint8_t		task_tag;
+	uint8_t		reserved1;
+	uint8_t		reserved2;
+	uint8_t		reserved3;
+	uint8_t		reserved4;
+	uint8_t		total_ehs_len;
+	uint8_t		reserved5;
+	uint16_t	data_segment_len;
+	uint32_t	reserved6;
+	uint32_t	reserved7;
+	uint32_t	reserved8;
+	uint32_t	reserved9;
+	uint32_t	reserved10;
+	uint32_t	e2ecrc;
+} nop_out_upiu_t;	/* 36 bytes with big endian */
+
+/* NOP IN UPIU */
+typedef struct nop_in_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		reserved0;
+	uint8_t		task_tag;
+	uint8_t		reserved1;
+	uint8_t		reserved2;
+	uint8_t		response;
+	uint8_t		reserved3;
+	uint8_t		total_ehs_len;
+	uint8_t		dev_info;
+	uint16_t	data_segment_len;
+	uint32_t	reserved4;
+	uint32_t	reserved5;
+	uint32_t	reserved6;
+	uint32_t	reserved7;
+	uint32_t	reserved8;
+	uint32_t	e2ecrc;
+} nop_in_upiu_t;	/* 36 bytes with big endian */
+
+/* Command UPIU */
+typedef struct cmd_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		lun;
+	uint8_t		task_tag;
+	uint8_t		cmd_set_type;
+	uint8_t		reserved0;
+	uint8_t		reserved1;
+	uint8_t		reserved2;
+	uint8_t		total_ehs_len;
+	uint8_t		reserved3;
+	uint16_t	data_segment_len;
+	uint32_t	exp_data_trans_len;
+	/*
+	 * A CDB has a fixed length of 16 bytes or a variable length
+	 * of 12 to 260 bytes.
+	 */
+	uint8_t		cdb[16];	/* little endian */
+} cmd_upiu_t;	/* 32 bytes with big endian except for cdb[] */
+
+typedef struct query_desc {
+	uint8_t		opcode;
+	uint8_t		idn;
+	uint8_t		index;
+	uint8_t		selector;
+	uint8_t		reserved0[2];
+	uint16_t	length;
+	uint32_t	reserved2[2];
+} query_desc_t;		/* 16 bytes with big endian */
+
+typedef struct query_flag {
+	uint8_t		opcode;
+	uint8_t		idn;
+	uint8_t		index;
+	uint8_t		selector;
+	uint8_t		reserved0[7];
+	uint8_t		value;
+	uint32_t	reserved8;
+} query_flag_t;		/* 16 bytes with big endian */
+
+typedef struct query_attr {
+	uint8_t		opcode;
+	uint8_t		idn;
+	uint8_t		index;
+	uint8_t		selector;
+	uint8_t		reserved0[4];
+	uint32_t	value;	/* little endian */
+	uint32_t	reserved4;
+} query_attr_t;		/* 16 bytes with big endian except for value */
+
+/* Query Request UPIU */
+typedef struct query_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		reserved0;
+	uint8_t		task_tag;
+	uint8_t		reserved1;
+	uint8_t		query_func;
+	uint8_t		reserved2;
+	uint8_t		reserved3;
+	uint8_t		total_ehs_len;
+	uint8_t		reserved4;
+	uint16_t	data_segment_len;
+	/* Transaction Specific Fields */
+	union {
+		query_desc_t	desc;
+		query_flag_t	flag;
+		query_attr_t	attr;
+	} ts;
+	uint32_t	reserved5;
+} query_upiu_t; /* 32 bytes with big endian */
+
+/* Query Response UPIU */
+typedef struct query_resp_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		reserved0;
+	uint8_t		task_tag;
+	uint8_t		reserved1;
+	uint8_t		query_func;
+	uint8_t		query_resp;
+	uint8_t		reserved2;
+	uint8_t		total_ehs_len;
+	uint8_t		dev_info;
+	uint16_t	data_segment_len;
+	union {
+		query_desc_t	desc;
+		query_flag_t	flag;
+		query_attr_t	attr;
+	} ts;
+	uint32_t	reserved3;
+} query_resp_upiu_t;	/* 32 bytes with big endian */
+
+/* Response UPIU */
+typedef struct resp_upiu {
+	uint8_t		trans_type;
+	uint8_t		flags;
+	uint8_t		lun;
+	uint8_t		task_tag;
+	uint8_t		cmd_set_type;
+	uint8_t		reserved0;
+	uint8_t		reserved1;
+	uint8_t		status;
+	uint8_t		total_ehs_len;
+	uint8_t		dev_info;
+	uint16_t	data_segment_len;
+	uint32_t	res_trans_cnt;	/* Residual Transfer Count */
+	uint32_t	reserved2[4];
+	uint16_t	sense_data_len;
+	union {
+		uint8_t		sense_data[18];
+		sense_data_t	sense;
+	} sd;
+} resp_upiu_t;		/* 52 bytes with big endian */
+
+typedef struct cmd_info {
+	uintptr_t	buf;
+	size_t		length;
+	int		lba;
+	uint8_t		op;
+	uint8_t		direction;
+	uint8_t		lun;
+} cmd_info_t;
+
+typedef struct utp_utrd {
+	uintptr_t	header;		/* utrd_header_t */
+	uintptr_t	upiu;
+	uintptr_t	resp_upiu;
+	uintptr_t	prdt;
+	size_t		size_upiu;
+	size_t		size_resp_upiu;
+	size_t		size_prdt;
+	int		task_tag;
+} utp_utrd_t;
+
+/* Physical Region Description Table */
+typedef struct prdt {
+	uint32_t	dba;		/* Data Base Address */
+	uint32_t	dbau;		/* Data Base Address Upper 32-bits */
+	uint32_t	reserved0;
+	uint32_t	dbc : 18;	/* Data Byte Count */
+	uint32_t	reserved1 : 14;
+} prdt_t;
+
+typedef struct uic_cmd {
+	uint32_t	op;
+	uint32_t	arg1;
+	uint32_t	arg2;
+	uint32_t	arg3;
+} uic_cmd_t;
+
+typedef struct ufs_params {
+	uintptr_t	reg_base;
+	uintptr_t	desc_base;
+	size_t		desc_size;
+	unsigned long	flags;
+} ufs_params_t;
+
+typedef struct ufs_ops {
+	int		(*phy_init)(ufs_params_t *params);
+	int		(*phy_set_pwr_mode)(ufs_params_t *params);
+} ufs_ops_t;
+
+int ufshc_send_uic_cmd(uintptr_t base, uic_cmd_t *cmd);
+int ufshc_dme_get(unsigned int attr, unsigned int idx, unsigned int *val);
+int ufshc_dme_set(unsigned int attr, unsigned int idx, unsigned int val);
+
+unsigned int ufs_read_attr(int idn);
+void ufs_write_attr(int idn, unsigned int value);
+unsigned int ufs_read_flag(int idn);
+void ufs_set_flag(int idn);
+void ufs_clear_flag(int idn);
+void ufs_read_desc(int idn, int index, uintptr_t buf, size_t size);
+void ufs_write_desc(int idn, int index, uintptr_t buf, size_t size);
+size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size);
+size_t ufs_write_blocks(int lun, int lba, const uintptr_t buf, size_t size);
+int ufs_init(const ufs_ops_t *ops, ufs_params_t *params);
+
+#endif /* __UFS_H__ */
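For illustration, a sketch of driver bring-up against this interface: the platform supplies its PHY hooks through ufs_ops_t, describes the controller in ufs_params_t, then reads blocks through the LUN-based API. All plat_* names and addresses are hypothetical.

/* Illustrative sketch only; the hooks are stubs and the addresses are made up. */
#include <stdint.h>
#include <ufs.h>

static int plat_ufs_phy_init(ufs_params_t *params) { return 0; }
static int plat_ufs_phy_set_pwr_mode(ufs_params_t *params) { return 0; }

static const ufs_ops_t plat_ufs_ops = {
	.phy_init		= plat_ufs_phy_init,
	.phy_set_pwr_mode	= plat_ufs_phy_set_pwr_mode,
};

static ufs_params_t plat_ufs_params = {
	.reg_base	= 0xFF3B0000,	/* UFSHCI register base */
	.desc_base	= 0x20000000,	/* descriptor/command memory */
	.desc_size	= 0x00200000,
	.flags		= 0,
};

static unsigned char ufs_buf[UFS_BLOCK_SIZE];

void plat_ufs_example(void)
{
	ufs_init(&plat_ufs_ops, &plat_ufs_params);

	/* Read one 4KB block from LUN 0, LBA 0. */
	ufs_read_blocks(0, 0, (uintptr_t)ufs_buf, sizeof(ufs_buf));
}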
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
new file mode 100644
index 0000000..3846bec
--- /dev/null
+++ b/include/lib/aarch32/arch.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_H__
+#define __ARCH_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK		0xff
+#define MIDR_IMPL_SHIFT		24
+#define MIDR_VAR_SHIFT		20
+#define MIDR_VAR_BITS		4
+#define MIDR_REV_SHIFT		0
+#define MIDR_REV_BITS		4
+#define MIDR_PN_MASK		0xfff
+#define MIDR_PN_SHIFT		4
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK		(1 << 24)
+#define MPIDR_CPU_MASK		MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK	(MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS	8
+#define MPIDR_AFFLVL_MASK	0xff
+#define MPIDR_AFFLVL_SHIFT	3
+#define MPIDR_AFF0_SHIFT	0
+#define MPIDR_AFF1_SHIFT	8
+#define MPIDR_AFF2_SHIFT	16
+#define MPIDR_AFFINITY_MASK	0x00ffffff
+#define MPIDR_AFFLVL0		0
+#define MPIDR_AFFLVL1		1
+#define MPIDR_AFFLVL2		2
+
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+		(((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr)	0
+
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ */
+#define MPIDR_MAX_AFFLVL	2
+
+/* Data Cache set/way op type defines */
+#define DC_OP_ISW			0x0
+#define DC_OP_CISW			0x1
+#define DC_OP_CSW			0x2
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF			0x000
+#define CNTFID_OFF			0x020
+
+#define CNTCR_EN			(1 << 0)
+#define CNTCR_HDBG			(1 << 1)
+#define CNTCR_FCREQ(x)			((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT		21
+#define LOC_SHIFT		24
+#define CLIDR_FIELD_WIDTH	3
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT		1
+
+/* ID_PFR1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT	12
+#define ID_PFR1_VIRTEXT_MASK	0xf
+#define GET_VIRT_EXT(id)	(((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+				 & ID_PFR1_VIRTEXT_MASK)
+#define ID_PFR1_GIC_SHIFT	28
+#define ID_PFR1_GIC_MASK	0xf
+
+/* SCTLR definitions */
+#define SCTLR_RES1	((1 << 23) | (1 << 22) | (1 << 11) | (1 << 4) | \
+			(1 << 3))
+#define SCTLR_M_BIT		(1 << 0)
+#define SCTLR_A_BIT		(1 << 1)
+#define SCTLR_C_BIT		(1 << 2)
+#define SCTLR_CP15BEN_BIT	(1 << 5)
+#define SCTLR_ITD_BIT		(1 << 7)
+#define SCTLR_I_BIT		(1 << 12)
+#define SCTLR_V_BIT		(1 << 13)
+#define SCTLR_NTWI_BIT		(1 << 16)
+#define SCTLR_NTWE_BIT		(1 << 18)
+#define SCTLR_WXN_BIT		(1 << 19)
+#define SCTLR_UWXN_BIT		(1 << 20)
+#define SCTLR_EE_BIT		(1 << 25)
+#define SCTLR_TRE_BIT		(1 << 28)
+#define SCTLR_AFE_BIT		(1 << 29)
+#define SCTLR_TE_BIT		(1 << 30)
+#define SCTLR_RESET_VAL         (SCTLR_RES1 | SCTLR_NTWE_BIT |		\
+				SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
+
+/* SDCR definitions */
+#define SDCR_SPD(x)		((x) << 14)
+#define SDCR_SPD_LEGACY		0x0
+#define SDCR_SPD_DISABLE	0x2
+#define SDCR_SPD_ENABLE		0x3
+#define SDCR_RESET_VAL		0x0
+
+#if !ERROR_DEPRECATED
+#define SDCR_DEF_VAL		SDCR_SPD(SDCR_SPD_DISABLE)
+#endif
+
+/* HSCTLR definitions */
+#define HSCTLR_RES1 	((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22)	\
+			| (1 << 18) | (1 << 16) | (1 << 11) | (1 << 4)	\
+			| (1 << 3))
+#define HSCTLR_M_BIT		(1 << 0)
+#define HSCTLR_A_BIT		(1 << 1)
+#define HSCTLR_C_BIT		(1 << 2)
+#define HSCTLR_CP15BEN_BIT	(1 << 5)
+#define HSCTLR_ITD_BIT		(1 << 7)
+#define HSCTLR_SED_BIT		(1 << 8)
+#define HSCTLR_I_BIT		(1 << 12)
+#define HSCTLR_WXN_BIT		(1 << 19)
+#define HSCTLR_EE_BIT		(1 << 25)
+#define HSCTLR_TE_BIT		(1 << 30)
+
+/* CPACR definitions */
+#define CPACR_FPEN(x)	((x) << 20)
+#define CPACR_FP_TRAP_PL0	0x1
+#define CPACR_FP_TRAP_ALL	0x2
+#define CPACR_FP_TRAP_NONE	0x3
+
+/* SCR definitions */
+#define SCR_TWE_BIT		(1 << 13)
+#define SCR_TWI_BIT		(1 << 12)
+#define SCR_SIF_BIT		(1 << 9)
+#define SCR_HCE_BIT		(1 << 8)
+#define SCR_SCD_BIT		(1 << 7)
+#define SCR_NET_BIT		(1 << 6)
+#define SCR_AW_BIT		(1 << 5)
+#define SCR_FW_BIT		(1 << 4)
+#define SCR_EA_BIT		(1 << 3)
+#define SCR_FIQ_BIT		(1 << 2)
+#define SCR_IRQ_BIT		(1 << 1)
+#define SCR_NS_BIT		(1 << 0)
+#define SCR_VALID_BIT_MASK	0x33ff
+#define SCR_RESET_VAL		0x0
+
+#define GET_NS_BIT(scr)		((scr) & SCR_NS_BIT)
+
+/* HCR definitions */
+#define HCR_AMO_BIT		(1 << 5)
+#define HCR_IMO_BIT		(1 << 4)
+#define HCR_FMO_BIT		(1 << 3)
+#define HCR_RESET_VAL		0x0
+
+/* CNTHCTL definitions */
+#define CNTHCTL_RESET_VAL	0x0
+#define PL1PCEN_BIT		(1 << 1)
+#define PL1PCTEN_BIT		(1 << 0)
+
+/* CNTKCTL definitions */
+#define PL0PTEN_BIT		(1 << 9)
+#define PL0VTEN_BIT		(1 << 8)
+#define PL0PCTEN_BIT		(1 << 0)
+#define PL0VCTEN_BIT		(1 << 1)
+#define EVNTEN_BIT		(1 << 2)
+#define EVNTDIR_BIT		(1 << 3)
+#define EVNTI_SHIFT		4
+#define EVNTI_MASK		0xf
+
+/* HCPTR definitions */
+#define HCPTR_RES1		((1 << 13) | (1<<12) | 0x3ff)
+#define TCPAC_BIT		(1 << 31)
+#define TTA_BIT			(1 << 20)
+#define TCP11_BIT		(1 << 11)
+#define TCP10_BIT		(1 << 10)
+#define HCPTR_RESET_VAL		HCPTR_RES1
+
+/* VTTBR definitions */
+#define VTTBR_RESET_VAL		ULL(0x0)
+#define VTTBR_VMID_MASK		ULL(0xff)
+#define VTTBR_VMID_SHIFT	48
+#define VTTBR_BADDR_MASK	0xffffffffffff
+#define VTTBR_BADDR_SHIFT	0
+
+/* HDCR definitions */
+#define HDCR_RESET_VAL		0x0
+
+/* HSTR definitions */
+#define HSTR_RESET_VAL		0x0
+
+/* CNTHP_CTL definitions */
+#define CNTHP_CTL_RESET_VAL	0x0
+
+/* NSACR definitions */
+#define NSASEDIS_BIT		(1 << 15)
+#define NSTRCDIS_BIT		(1 << 20)
+/* NOTE: the misspelt NASCR_* names below are kept only for compatibility */
+#if !ERROR_DEPRECATED
+#define NASCR_CP11_BIT		(1 << 11)
+#define NASCR_CP10_BIT		(1 << 10)
+#endif
+#define NSACR_CP11_BIT		(1 << 11)
+#define NSACR_CP10_BIT		(1 << 10)
+#define NSACR_IMP_DEF_MASK	(0x7 << 16)
+#define NSACR_ENABLE_FP_ACCESS	(NSACR_CP11_BIT | NSACR_CP10_BIT)
+#define NSACR_RESET_VAL		0x0
+
+/* CPACR definitions */
+#define ASEDIS_BIT		(1 << 31)
+#define TRCDIS_BIT		(1 << 28)
+#define CPACR_CP11_SHIFT	22
+#define CPACR_CP10_SHIFT	20
+#define CPACR_ENABLE_FP_ACCESS	(0x3 << CPACR_CP11_SHIFT |\
+					0x3 << CPACR_CP10_SHIFT)
+#define CPACR_RESET_VAL         0x0
+
+/* FPEXC definitions */
+#define FPEXC_RES1		((1 << 10) | (1 << 9) | (1 << 8))
+#define FPEXC_EN_BIT		(1 << 30)
+#define FPEXC_RESET_VAL		FPEXC_RES1
+
+/* SPSR/CPSR definitions */
+#define SPSR_FIQ_BIT		(1 << 0)
+#define SPSR_IRQ_BIT		(1 << 1)
+#define SPSR_ABT_BIT		(1 << 2)
+#define SPSR_AIF_SHIFT		6
+#define SPSR_AIF_MASK		0x7
+
+#define SPSR_E_SHIFT		9
+#define SPSR_E_MASK		0x1
+#define SPSR_E_LITTLE		0
+#define SPSR_E_BIG		1
+
+#define SPSR_T_SHIFT		5
+#define SPSR_T_MASK		0x1
+#define SPSR_T_ARM		0
+#define SPSR_T_THUMB		1
+
+#define SPSR_MODE_SHIFT		0
+#define SPSR_MODE_MASK		0x7
+
+
+#define DISABLE_ALL_EXCEPTIONS \
+		(SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
+
+/*
+ * TTBCR definitions
+ */
+/* The ARM Trusted Firmware uses the long descriptor format */
+#define TTBCR_EAE_BIT		(1 << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE		(0x0 << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE	(0x2 << 28)
+#define TTBCR_SH1_INNER_SHAREABLE	(0x3 << 28)
+
+#define TTBCR_RGN1_OUTER_NC	(0x0 << 26)
+#define TTBCR_RGN1_OUTER_WBA	(0x1 << 26)
+#define TTBCR_RGN1_OUTER_WT	(0x2 << 26)
+#define TTBCR_RGN1_OUTER_WBNA	(0x3 << 26)
+
+#define TTBCR_RGN1_INNER_NC	(0x0 << 24)
+#define TTBCR_RGN1_INNER_WBA	(0x1 << 24)
+#define TTBCR_RGN1_INNER_WT	(0x2 << 24)
+#define TTBCR_RGN1_INNER_WBNA	(0x3 << 24)
+
+#define TTBCR_EPD1_BIT		(1 << 23)
+#define TTBCR_A1_BIT		(1 << 22)
+
+#define TTBCR_T1SZ_SHIFT	16
+#define TTBCR_T1SZ_MASK		(0x7)
+#define TTBCR_TxSZ_MIN		0
+#define TTBCR_TxSZ_MAX		7
+
+#define TTBCR_SH0_NON_SHAREABLE		(0x0 << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE	(0x2 << 12)
+#define TTBCR_SH0_INNER_SHAREABLE	(0x3 << 12)
+
+#define TTBCR_RGN0_OUTER_NC	(0x0 << 10)
+#define TTBCR_RGN0_OUTER_WBA	(0x1 << 10)
+#define TTBCR_RGN0_OUTER_WT	(0x2 << 10)
+#define TTBCR_RGN0_OUTER_WBNA	(0x3 << 10)
+
+#define TTBCR_RGN0_INNER_NC	(0x0 << 8)
+#define TTBCR_RGN0_INNER_WBA	(0x1 << 8)
+#define TTBCR_RGN0_INNER_WT	(0x2 << 8)
+#define TTBCR_RGN0_INNER_WBNA	(0x3 << 8)
+
+#define TTBCR_EPD0_BIT		(1 << 7)
+#define TTBCR_T0SZ_SHIFT	0
+#define TTBCR_T0SZ_MASK		(0x7)
+
+#define MODE_RW_SHIFT		0x4
+#define MODE_RW_MASK		0x1
+#define MODE_RW_32		0x1
+
+#define MODE32_SHIFT		0
+#define MODE32_MASK		0x1f
+#define MODE32_usr		0x10
+#define MODE32_fiq		0x11
+#define MODE32_irq		0x12
+#define MODE32_svc		0x13
+#define MODE32_mon		0x16
+#define MODE32_abt		0x17
+#define MODE32_hyp		0x1a
+#define MODE32_und		0x1b
+#define MODE32_sys		0x1f
+
+#define GET_M32(mode)		(((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_MODE32(mode, isa, endian, aif)		\
+	(MODE_RW_32 << MODE_RW_SHIFT |			\
+	((mode) & MODE32_MASK) << MODE32_SHIFT |	\
+	((isa) & SPSR_T_MASK) << SPSR_T_SHIFT |		\
+	((endian) & SPSR_E_MASK) << SPSR_E_SHIFT |	\
+	((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
+
+/*
+ * TTBR definitions
+ */
+#define TTBR_CNP_BIT		0x1
+
+/*
+ * CTR definitions
+ */
+#define CTR_CWG_SHIFT		24
+#define CTR_CWG_MASK		0xf
+#define CTR_ERG_SHIFT		20
+#define CTR_ERG_MASK		0xf
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_DMINLINE_WIDTH	4
+#define CTR_DMINLINE_MASK	((1 << 4) - 1)
+#define CTR_L1IP_SHIFT		14
+#define CTR_L1IP_MASK		0x3
+#define CTR_IMINLINE_SHIFT	0
+#define CTR_IMINLINE_MASK	0xf
+
+#define MAX_CACHE_LINE_SIZE	0x800 /* 2KB */
+
+/* PMCR definitions */
+#define PMCR_N_SHIFT		11
+#define PMCR_N_MASK		0x1f
+#define PMCR_N_BITS		(PMCR_N_MASK << PMCR_N_SHIFT)
+#define PMCR_LC_BIT		(1 << 6)
+#define PMCR_DP_BIT		(1 << 5)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT		0
+#define TLBI_ADDR_MASK		0xFFFFF000
+#define TLBI_ADDR(x)		(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTNSAR			0x4
+#define CNTNSAR_NS_SHIFT(x)	(x)
+
+#define CNTACR_BASE(x)		(0x40 + ((x) << 2))
+#define CNTACR_RPCT_SHIFT	0x0
+#define CNTACR_RVCT_SHIFT	0x1
+#define CNTACR_RFRQ_SHIFT	0x2
+#define CNTACR_RVOFF_SHIFT	0x3
+#define CNTACR_RWVT_SHIFT	0x4
+#define CNTACR_RWPT_SHIFT	0x5
+
+/* MAIR macros */
+#define MAIR0_ATTR_SET(attr, index)	((attr) << ((index) << 3))
+#define MAIR1_ATTR_SET(attr, index)	((attr) << (((index) - 3) << 3))
+
+/* System register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define SCR		p15, 0, c1, c1, 0
+#define SCTLR		p15, 0, c1, c0, 0
+#define SDCR		p15, 0, c1, c3, 1
+#define MPIDR		p15, 0, c0, c0, 5
+#define MIDR		p15, 0, c0, c0, 0
+#define VBAR		p15, 0, c12, c0, 0
+#define MVBAR		p15, 0, c12, c0, 1
+#define NSACR		p15, 0, c1, c1, 2
+#define CPACR		p15, 0, c1, c0, 2
+#define DCCIMVAC	p15, 0, c7, c14, 1
+#define DCCMVAC		p15, 0, c7, c10, 1
+#define DCIMVAC		p15, 0, c7, c6, 1
+#define DCCISW		p15, 0, c7, c14, 2
+#define DCCSW		p15, 0, c7, c10, 2
+#define DCISW		p15, 0, c7, c6, 2
+#define CTR		p15, 0, c0, c0, 1
+#define CNTFRQ		p15, 0, c14, c0, 0
+#define ID_PFR1		p15, 0, c0, c1, 1
+#define MAIR0		p15, 0, c10, c2, 0
+#define MAIR1		p15, 0, c10, c2, 1
+#define TTBCR		p15, 0, c2, c0, 2
+#define TTBR0		p15, 0, c2, c0, 0
+#define TTBR1		p15, 0, c2, c0, 1
+#define TLBIALL		p15, 0, c8, c7, 0
+#define TLBIALLIS	p15, 0, c8, c3, 0
+#define TLBIMVA		p15, 0, c8, c7, 1
+#define TLBIMVAA	p15, 0, c8, c7, 3
+#define TLBIMVAAIS	p15, 0, c8, c3, 3
+#define BPIALLIS	p15, 0, c7, c1, 6
+#define HSCTLR		p15, 4, c1, c0, 0
+#define HCR		p15, 4, c1, c1, 0
+#define HCPTR		p15, 4, c1, c1, 2
+#define HSTR		p15, 4, c1, c1, 3
+#define CNTHCTL		p15, 4, c14, c1, 0
+#define CNTKCTL		p15, 0, c14, c1, 0
+#define VPIDR		p15, 4, c0, c0, 0
+#define VMPIDR		p15, 4, c0, c0, 5
+#define ISR		p15, 0, c12, c1, 0
+#define CLIDR		p15, 1, c0, c0, 1
+#define CSSELR		p15, 2, c0, c0, 0
+#define CCSIDR		p15, 1, c0, c0, 0
+#define DBGOSDLR	p14, 0, c1, c3, 4
+
+/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define HDCR		p15, 4, c1, c1, 1
+#define PMCR		p15, 0, c9, c12, 0
+#define CNTHP_CTL	p15, 4, c14, c2, 1
+
+/* GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define ICC_IAR1	p15, 0, c12, c12, 0
+#define ICC_IAR0	p15, 0, c12, c8, 0
+#define ICC_EOIR1	p15, 0, c12, c12, 1
+#define ICC_EOIR0	p15, 0, c12, c8, 1
+#define ICC_HPPIR1	p15, 0, c12, c12, 2
+#define ICC_HPPIR0	p15, 0, c12, c8, 2
+#define ICC_BPR1	p15, 0, c12, c12, 3
+#define ICC_BPR0	p15, 0, c12, c8, 3
+#define ICC_DIR		p15, 0, c12, c11, 1
+#define ICC_PMR		p15, 0, c4, c6, 0
+#define ICC_RPR		p15, 0, c12, c11, 3
+#define ICC_CTLR	p15, 0, c12, c12, 4
+#define ICC_MCTLR	p15, 6, c12, c12, 4
+#define ICC_SRE		p15, 0, c12, c12, 5
+#define ICC_HSRE	p15, 4, c12, c9, 5
+#define ICC_MSRE	p15, 6, c12, c12, 5
+#define ICC_IGRPEN0	p15, 0, c12, c12, 6
+#define ICC_IGRPEN1	p15, 0, c12, c12, 7
+#define ICC_MGRPEN1	p15, 6, c12, c12, 7
+
+/* 64 bit system register defines. The format is: coproc, opt1, CRm */
+#define TTBR0_64	p15, 0, c2
+#define TTBR1_64	p15, 1, c2
+#define CNTVOFF_64	p15, 4, c14
+#define VTTBR_64	p15, 6, c2
+#define CNTPCT_64	p15, 0, c14
+
+/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
+#define ICC_SGI1R_EL1_64	p15, 0, c12
+#define ICC_ASGI1R_EL1_64	p15, 1, c12
+#define ICC_SGI0R_EL1_64	p15, 2, c12
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE		U(0x0)
+#define MAIR_DEV_nGnRE		U(0x4)
+#define MAIR_DEV_nGRE		U(0x8)
+#define MAIR_DEV_GRE		U(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ *  WT:	 Write Through
+ *  WB:	 Write Back
+ *  NC:	 Non-Cacheable
+ *
+ * Transient Hint
+ *  NTR: Non-Transient
+ *  TR:	 Transient
+ *
+ * Allocation Policy
+ *  RA:	 Read Allocate
+ *  WA:	 Write Allocate
+ *  RWA: Read and Write Allocate
+ *  NA:	 No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA	U(0x1)
+#define MAIR_NORM_WT_TR_RA	U(0x2)
+#define MAIR_NORM_WT_TR_RWA	U(0x3)
+#define MAIR_NORM_NC		U(0x4)
+#define MAIR_NORM_WB_TR_WA	U(0x5)
+#define MAIR_NORM_WB_TR_RA	U(0x6)
+#define MAIR_NORM_WB_TR_RWA	U(0x7)
+#define MAIR_NORM_WT_NTR_NA	U(0x8)
+#define MAIR_NORM_WT_NTR_WA	U(0x9)
+#define MAIR_NORM_WT_NTR_RA	U(0xa)
+#define MAIR_NORM_WT_NTR_RWA	U(0xb)
+#define MAIR_NORM_WB_NTR_NA	U(0xc)
+#define MAIR_NORM_WB_NTR_WA	U(0xd)
+#define MAIR_NORM_WB_NTR_RA	U(0xe)
+#define MAIR_NORM_WB_NTR_RWA	U(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT	4
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)	((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+
+#endif /* __ARCH_H__ */
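For illustration, two inline helpers sketching typical use of the macros above: composing an SPSR for a 32-bit Supervisor-mode entry and pulling the affinity fields out of an MPIDR value. The particular mode/ISA/endianness choices are examples only.

/* Illustrative sketch only. */
#include <arch.h>

/* SVC mode, ARM (not Thumb) state, little-endian, all async exceptions masked. */
static inline unsigned int example_entry_spsr(void)
{
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
}

/* Split an MPIDR into its cluster (Aff1) and CPU (Aff0) fields. */
static inline void example_split_mpidr(unsigned long mpidr,
				       unsigned int *cluster,
				       unsigned int *cpu)
{
	*cluster = MPIDR_AFFLVL1_VAL(mpidr);
	*cpu = MPIDR_AFFLVL0_VAL(mpidr);
}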
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
new file mode 100644
index 0000000..469e9b0
--- /dev/null
+++ b/include/lib/aarch32/arch_helpers.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARCH_HELPERS_H__
+#define __ARCH_HELPERS_H__
+
+#include <arch.h>	/* for additional register definitions */
+#include <stdint.h>
+#include <sys/types.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2)	\
+static inline void write_## _name(u_register_t v)			\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2)	\
+static inline u_register_t read_ ## _name(void)				\
+{									\
+	u_register_t v;							\
+	__asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
+	return v;							\
+}
+
+/*
+ *  The undocumented %Q and %R extended asm operands are used to implement
+ *  the 64 bit `mrrc` and `mcrr` instructions below. For GCC versions older
+ *  than 4.6 this works only on Little Endian systems; GCC 4.6 and above
+ *  generates the correct instruction encoding for both Little Endian and
+ *  Big Endian systems.
+ */
+#if !(__clang__ || __GNUC__ > (4) || __GNUC__ == (4) && __GNUC_MINOR__ >= (6))
+#error "clang or GCC 4.6 or above is required to build AArch32 Trusted Firmware"
+#endif
+
+#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm)		\
+static inline void write64_## _name(uint64_t v)				\
+{									\
+	__asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm)		\
+static inline uint64_t read64_## _name(void)				\
+{	uint64_t v;							\
+	__asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
+	return v;							\
+}
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)			\
+static inline u_register_t read_ ## _name(void)				\
+{									\
+	u_register_t v;							\
+	__asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));		\
+	return v;							\
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)			\
+static inline void write_ ## _name(u_register_t v)			\
+{									\
+	__asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));	\
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)		\
+static inline void write_ ## _name(const u_register_t v)		\
+{									\
+	__asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v));	\
+}
+
+/* Define read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
+
+/* Define read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)			\
+	_DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define 64 bit read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC_64(_name, ...) 			\
+	_DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...) 				\
+	_DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)		\
+	_DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name)					\
+	_DEFINE_SYSREG_READ_FUNC(_name, _name)				\
+	_DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/**********************************************************************
+ * Macros to create inline functions for tlbi operations
+ *********************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that
+ * implements the workaround for errata 813419 of Cortex-A57
+ */
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)		\
+static inline void tlbi##_op(void)					\
+{									\
+	u_register_t v = 0;						\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+	__asm__ volatile ("dsb ish");\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
+static inline void tlbi##_op(u_register_t v)				\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+	__asm__ volatile ("dsb ish");\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+#else
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)		\
+static inline void tlbi##_op(void)					\
+{									\
+	u_register_t v = 0;						\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
+static inline void tlbi##_op(u_register_t v)				\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+#endif /* ERRATA_A57_813419 */
+
+#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)		\
+static inline void bpi##_op(void)					\
+{									\
+	u_register_t v = 0;						\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for simple TLBI operation */
+#define DEFINE_TLBIOP_FUNC(_op, ...)					\
+	_DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
+
+/* Define function for TLBI operation with register parameter */
+#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...)				\
+	_DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/* Define function for simple BPI operation */
+#define DEFINE_BPIOP_FUNC(_op, ...)					\
+	_DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for DC operations
+ *********************************************************************/
+#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)	\
+static inline void dc##_op(u_register_t v)				\
+{									\
+	__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for DC operation with register parameter */
+#define DEFINE_DCOP_PARAM_FUNC(_op, ...)				\
+	_DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op)						\
+static inline void _op(void)						\
+{									\
+	__asm__ (#_op);							\
+}
+
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)				\
+static inline void _op ## _type(void)					\
+{									\
+	__asm__ (#_op " " #_type);					\
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)			\
+static inline void _op ## _type(u_register_t v)				\
+{									\
+	 __asm__ (#_op " " #_type ", %0" : : "r" (v));			\
+}
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_secure(void);
+void disable_mmu_icache_secure(void);
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_FUNC(isb)
+
+void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
+		 uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
+
+DEFINE_SYSREG_RW_FUNCS(spsr)
+DEFINE_SYSREG_RW_FUNCS(cpsr)
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
+DEFINE_COPROCR_READ_FUNC(midr, MIDR)
+DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(isr, ISR)
+DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
+DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
+
+DEFINE_COPROCR_RW_FUNCS(scr, SCR)
+DEFINE_COPROCR_RW_FUNCS(ctr, CTR)
+DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
+DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
+DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
+DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
+DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
+DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
+DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
+DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
+DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
+DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
+DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
+DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
+DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
+DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
+DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
+DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
+DEFINE_COPROCR_RW_FUNCS(hstr, HSTR)
+
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
+DEFINE_COPROCR_RW_FUNCS(icc_rpr_el1, ICC_RPR)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
+DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
+DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+DEFINE_COPROCR_RW_FUNCS_64(icc_sgi0r_el1, ICC_SGI0R_EL1_64)
+
+DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
+DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+
+/*
+ * TLBI operation prototypes
+ */
+DEFINE_TLBIOP_FUNC(all, TLBIALL)
+DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
+DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+
+/*
+ * BPI operation prototypes.
+ */
+DEFINE_BPIOP_FUNC(allis, BPIALLIS)
+
+/*
+ * DC operation prototypes
+ */
+DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+
+/* Previously defined accessor functions with incomplete register names  */
+#define dsb()			dsbsy()
+
+#define IS_IN_SECURE() \
+	(GET_NS_BIT(read_scr()) == 0)
+
+/*
+ * If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3
+ */
+#define IS_IN_EL3() \
+	((GET_M32(read_cpsr()) == MODE32_mon) ||	\
+		(IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr)))
+
+/* Macros for compatibility with AArch64 system registers */
+#define read_mpidr_el1()	read_mpidr()
+
+#define read_scr_el3()		read_scr()
+#define write_scr_el3(_v)	write_scr(_v)
+
+#define read_hcr_el2()		read_hcr()
+#define write_hcr_el2(_v)	write_hcr(_v)
+
+#define read_cpacr_el1()	read_cpacr()
+#define write_cpacr_el1(_v)	write_cpacr(_v)
+
+#define read_cntfrq_el0()	read_cntfrq()
+#define write_cntfrq_el0(_v)	write_cntfrq(_v)
+#define read_isr_el1()		read_isr()
+
+#define read_cntpct_el0()	read64_cntpct()
+
+#define read_ctr_el0()		read_ctr()
+
+#define write_icc_sgi0r_el1(_v) \
+		write64_icc_sgi0r_el1(_v)
+
+#endif /* __ARCH_HELPERS_H__ */
diff --git a/include/lib/aarch32/smcc_helpers.h b/include/lib/aarch32/smcc_helpers.h
new file mode 100644
index 0000000..53f1aa4
--- /dev/null
+++ b/include/lib/aarch32/smcc_helpers.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+/* These are offsets to registers in smc_ctx_t */
+#define SMC_CTX_GPREG_R0	0x0
+#define SMC_CTX_GPREG_R1	0x4
+#define SMC_CTX_GPREG_R2	0x8
+#define SMC_CTX_GPREG_R3	0xC
+#define SMC_CTX_GPREG_R4	0x10
+#define SMC_CTX_GPREG_R5	0x14
+#define SMC_CTX_SP_USR		0x34
+#define SMC_CTX_SPSR_MON	0x78
+#define SMC_CTX_SP_MON		0x7C
+#define SMC_CTX_LR_MON		0x80
+#define SMC_CTX_SCR		0x84
+#define SMC_CTX_PMCR		0x88
+#define SMC_CTX_SIZE		0x8C
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <types.h>
+
+/*
+ * The generic structure used to save arguments and callee-saved registers
+ * during an SMC. This structure is also used to store the return values on
+ * completion of the SMC service.
+ */
+typedef struct smc_ctx {
+	u_register_t r0;
+	u_register_t r1;
+	u_register_t r2;
+	u_register_t r3;
+	u_register_t r4;
+	u_register_t r5;
+	u_register_t r6;
+	u_register_t r7;
+	u_register_t r8;
+	u_register_t r9;
+	u_register_t r10;
+	u_register_t r11;
+	u_register_t r12;
+	/* spsr_usr doesn't exist */
+	u_register_t sp_usr;
+	u_register_t lr_usr;
+	u_register_t spsr_irq;
+	u_register_t sp_irq;
+	u_register_t lr_irq;
+	u_register_t spsr_fiq;
+	u_register_t sp_fiq;
+	u_register_t lr_fiq;
+	u_register_t spsr_svc;
+	u_register_t sp_svc;
+	u_register_t lr_svc;
+	u_register_t spsr_abt;
+	u_register_t sp_abt;
+	u_register_t lr_abt;
+	u_register_t spsr_und;
+	u_register_t sp_und;
+	u_register_t lr_und;
+	u_register_t spsr_mon;
+	/*
+	 * `sp_mon` will point to the C runtime stack in monitor mode. But,
+	 * prior to exiting from the SMC, it is made to point to the `smc_ctx_t`
+	 * so that the context can be accessed easily on the next SMC entry.
+	 */
+	u_register_t sp_mon;
+	u_register_t lr_mon;
+	u_register_t scr;
+	u_register_t pmcr;
+} smc_ctx_t;
+
+/*
+ * Compile time assertions related to the 'smc_context' structure to
+ * ensure that the assembler's and the compiler's views of the offsets of
+ * the structure members are the same.
+ */
+CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
+	assert_smc_ctx_greg_r0_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
+	assert_smc_ctx_greg_r1_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
+	assert_smc_ctx_greg_r2_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
+	assert_smc_ctx_greg_r3_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
+	assert_smc_ctx_greg_r4_offset_mismatch);
+CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
+	assert_smc_ctx_sp_usr_offset_mismatch);
+CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
+	assert_smc_ctx_lr_mon_offset_mismatch);
+CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
+	assert_smc_ctx_spsr_mon_offset_mismatch);
+
+CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) {				\
+	return (uintptr_t)(_h);			\
+}
+#define SMC_RET1(_h, _r0) {			\
+	((smc_ctx_t *)(_h))->r0 = (_r0);	\
+	SMC_RET0(_h);				\
+}
+#define SMC_RET2(_h, _r0, _r1) {		\
+	((smc_ctx_t *)(_h))->r1 = (_r1);	\
+	SMC_RET1(_h, (_r0));			\
+}
+#define SMC_RET3(_h, _r0, _r1, _r2) {		\
+	((smc_ctx_t *)(_h))->r2 = (_r2);	\
+	SMC_RET2(_h, (_r0), (_r1));		\
+}
+#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {	\
+	((smc_ctx_t *)(_h))->r3 = (_r3);	\
+	SMC_RET3(_h, (_r0), (_r1), (_r2));	\
+}
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+	SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0], \
+			 ((const uint32_t *) &(_uuid))[1], \
+			 ((const uint32_t *) &(_uuid))[2], \
+			 ((const uint32_t *) &(_uuid))[3])
+
+/*
+ * Helper macro to retrieve the SMC parameters from smc_ctx_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {	\
+		_r1 = ((smc_ctx_t *)_hdl)->r1;		\
+		_r2 = ((smc_ctx_t *)_hdl)->r2;		\
+		_r3 = ((smc_ctx_t *)_hdl)->r3;		\
+		_r4 = ((smc_ctx_t *)_hdl)->r4;		\
+		}
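
A minimal sketch of how a handler might combine get_smc_params_from_ctx() with the SMC_RETx macros; the handler name, its signature and the zero status value are illustrative assumptions rather than anything mandated by this header.

/* Illustrative handler: the name and signature are hypothetical */
static uintptr_t example_smc_handler(uint32_t smc_fid, void *handle)
{
	u_register_t x1, x2, x3, x4;

	/* Pull the SMC arguments out of the saved smc_ctx_t */
	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	/* ... service the request using x1 - x4 ... */

	/* Return a zero status in r0 and a result in r1 */
	SMC_RET2(handle, 0, x1 + x2);
}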
+
+/* ------------------------------------------------------------------------
+ * Helper APIs for setting and retrieving appropriate `smc_ctx_t`.
+ * These functions need to be implemented by the BL including this library.
+ * ------------------------------------------------------------------------
+ */
+
+/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
+void *smc_get_ctx(unsigned int security_state);
+
+/* Set the next `smc_ctx_t` corresponding to the security state. */
+void smc_set_next_ctx(unsigned int security_state);
+
+/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
+void *smc_get_next_ctx(void);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
diff --git a/include/lib/aarch32/smcc_macros.S b/include/lib/aarch32/smcc_macros.S
new file mode 100644
index 0000000..cf26175
--- /dev/null
+++ b/include/lib/aarch32/smcc_macros.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __SMCC_MACROS_S__
+#define __SMCC_MACROS_S__
+
+#include <arch.h>
+
+/*
+ * Macro to save the general purpose registers (r0 - r12), the banked
+ * spsr, lr and sp registers, and the `scr` register to the SMC context on
+ * entry due to an SMC call. The `lr` of the current mode (monitor) is
+ * expected to be saved already. The `sp` must point to the `smc_ctx_t` to
+ * save to. Additionally, the 'pmcr' register is saved as it is updated
+ * whilst executing in the secure world.
+ */
+	.macro smcc_save_gp_mode_regs
+	/* Save r0 - r12 in the SMC context */
+	stm	sp, {r0-r12}
+	mov	r0, sp
+	add	r0, r0, #SMC_CTX_SP_USR
+
+	/* Save the banked registers including the current SPSR and LR */
+	mrs	r4, sp_usr
+	mrs	r5, lr_usr
+	mrs	r6, spsr_irq
+	mrs	r7, sp_irq
+	mrs	r8, lr_irq
+	mrs	r9, spsr_fiq
+	mrs	r10, sp_fiq
+	mrs	r11, lr_fiq
+	mrs	r12, spsr_svc
+	stm	r0!, {r4-r12}
+
+	mrs	r4, sp_svc
+	mrs	r5, lr_svc
+	mrs	r6, spsr_abt
+	mrs	r7, sp_abt
+	mrs	r8, lr_abt
+	mrs	r9, spsr_und
+	mrs	r10, sp_und
+	mrs	r11, lr_und
+	mrs	r12, spsr
+	stm	r0!, {r4-r12}
+
+	/* lr_mon is already saved by caller */
+	ldcopr	r4, SCR
+	str	r4, [sp, #SMC_CTX_SCR]
+	ldcopr	r4, PMCR
+	str	r4, [sp, #SMC_CTX_PMCR]
+	.endm
+
+/*
+ * Macro to restore the `smc_ctx_t`, which includes the General purpose
+ * registers and banked mode registers, and exit from the monitor mode.
+ * r0 must point to the `smc_ctx_t` to restore from.
+ */
+	.macro monitor_exit
+	/*
+	 * Save the current sp into the SMC context and then make sp
+	 * point to that context, so it can be used when handling the
+	 * next SMC.
+	 */
+	str	sp, [r0, #SMC_CTX_SP_MON]
+	mov	sp, r0
+
+	/*
+	 * Restore SCR first so that we access the right banked register
+	 * when the other mode registers are restored.
+	 */
+	ldr	r1, [r0, #SMC_CTX_SCR]
+	stcopr	r1, SCR
+	isb
+
+	/*
+	 * Restore the PMCR register.
+	 */
+	ldr	r1, [r0, #SMC_CTX_PMCR]
+	stcopr	r1, PMCR
+
+	/* Restore the banked registers including the current SPSR */
+	add	r1, r0, #SMC_CTX_SP_USR
+	ldm	r1!, {r4-r12}
+	msr	sp_usr, r4
+	msr	lr_usr, r5
+	msr	spsr_irq, r6
+	msr	sp_irq, r7
+	msr	lr_irq, r8
+	msr	spsr_fiq, r9
+	msr	sp_fiq, r10
+	msr	lr_fiq, r11
+	msr	spsr_svc, r12
+
+	ldm	r1!, {r4-r12}
+	msr	sp_svc, r4
+	msr	lr_svc, r5
+	msr	spsr_abt, r6
+	msr	sp_abt, r7
+	msr	lr_abt, r8
+	msr	spsr_und, r9
+	msr	sp_und, r10
+	msr	lr_und, r11
+	/*
+	 * Use the `_fsxc` suffix explicitly to instruct the assembler
+	 * to update all 32 bits of the SPSR. Otherwise the assembler
+	 * defaults to the `_fc` suffix, which only modifies the
+	 * f (bits [31:24]) and c (bits [7:0]) fields of the SPSR.
+	 */
+	msr	spsr_fsxc, r12
+
+	/* Restore the LR */
+	ldr	lr, [r0, #SMC_CTX_LR_MON]
+
+	/* Restore the rest of the general purpose registers */
+	ldm	r0, {r0-r12}
+	eret
+	.endm
+
+#endif /* __SMCC_MACROS_S__ */
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 5291684..997e3a2 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,75 +1,63 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __ARCH_H__
 #define __ARCH_H__
 
+#include <utils_def.h>
 
 /*******************************************************************************
  * MIDR bit definitions
  ******************************************************************************/
-#define MIDR_IMPL_MASK		0xff
-#define MIDR_IMPL_SHIFT		0x18
-#define MIDR_VAR_SHIFT		20
-#define MIDR_VAR_BITS		4
-#define MIDR_REV_SHIFT		0
-#define MIDR_REV_BITS		4
-#define MIDR_PN_MASK		0xfff
-#define MIDR_PN_SHIFT		0x4
+#define MIDR_IMPL_MASK		U(0xff)
+#define MIDR_IMPL_SHIFT		U(0x18)
+#define MIDR_VAR_SHIFT		U(20)
+#define MIDR_VAR_BITS		U(4)
+#define MIDR_VAR_MASK		U(0xf)
+#define MIDR_REV_SHIFT		U(0)
+#define MIDR_REV_BITS		U(4)
+#define MIDR_REV_MASK		U(0xf)
+#define MIDR_PN_MASK		U(0xfff)
+#define MIDR_PN_SHIFT		U(0x4)
 
 /*******************************************************************************
  * MPIDR macros
  ******************************************************************************/
+#define MPIDR_MT_MASK		(U(1) << 24)
 #define MPIDR_CPU_MASK		MPIDR_AFFLVL_MASK
-#define MPIDR_CLUSTER_MASK	MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS
-#define MPIDR_AFFINITY_BITS	8
-#define MPIDR_AFFLVL_MASK	0xff
-#define MPIDR_AFF0_SHIFT	0
-#define MPIDR_AFF1_SHIFT	8
-#define MPIDR_AFF2_SHIFT	16
-#define MPIDR_AFF3_SHIFT	32
-#define MPIDR_AFFINITY_MASK	0xff00ffffff
-#define MPIDR_AFFLVL_SHIFT	3
-#define MPIDR_AFFLVL0		0
-#define MPIDR_AFFLVL1		1
-#define MPIDR_AFFLVL2		2
-#define MPIDR_AFFLVL3		3
+#define MPIDR_CLUSTER_MASK	(MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS	U(8)
+#define MPIDR_AFFLVL_MASK	U(0xff)
+#define MPIDR_AFF0_SHIFT	U(0)
+#define MPIDR_AFF1_SHIFT	U(8)
+#define MPIDR_AFF2_SHIFT	U(16)
+#define MPIDR_AFF3_SHIFT	U(32)
+#define MPIDR_AFFINITY_MASK	U(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT	U(3)
+#define MPIDR_AFFLVL0		U(0)
+#define MPIDR_AFFLVL1		U(1)
+#define MPIDR_AFFLVL2		U(2)
+#define MPIDR_AFFLVL3		U(3)
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+		((mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+		((mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+		((mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+		((mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
 /*
  * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
  * add one while using this macro to define array sizes.
  * TODO: Support only the first 3 affinity levels for now.
  */
-#define MPIDR_MAX_AFFLVL	2
+#define MPIDR_MAX_AFFLVL	U(2)
 
 /* Constant to highlight the assumption that MPIDR allocation starts from 0 */
-#define FIRST_MPIDR		0
+#define FIRST_MPIDR		U(0)
 
 /*******************************************************************************
  * Definitions for CPU system register interface to GICv3
@@ -80,201 +68,318 @@
 #define ICC_CTLR_EL1    S3_0_C12_C12_4
 #define ICC_CTLR_EL3    S3_6_C12_C12_4
 #define ICC_PMR_EL1     S3_0_C4_C6_0
+#define ICC_RPR_EL1     S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_c12_c12_7
+#define ICC_IGRPEN0_EL1 S3_0_c12_c12_6
+#define ICC_HPPIR0_EL1  S3_0_c12_c8_2
+#define ICC_HPPIR1_EL1  S3_0_c12_c12_2
+#define ICC_IAR0_EL1    S3_0_c12_c8_0
+#define ICC_IAR1_EL1    S3_0_c12_c12_0
+#define ICC_EOIR0_EL1   S3_0_c12_c8_1
+#define ICC_EOIR1_EL1   S3_0_c12_c12_1
+#define ICC_SGI0R_EL1	S3_0_c12_c11_7
 
 /*******************************************************************************
  * Generic timer memory mapped registers & offsets
  ******************************************************************************/
-#define CNTCR_OFF			0x000
-#define CNTFID_OFF			0x020
+#define CNTCR_OFF			U(0x000)
+#define CNTFID_OFF			U(0x020)
 
-#define CNTCR_EN			(1 << 0)
-#define CNTCR_HDBG			(1 << 1)
+#define CNTCR_EN			(U(1) << 0)
+#define CNTCR_HDBG			(U(1) << 1)
 #define CNTCR_FCREQ(x)			((x) << 8)
 
 /*******************************************************************************
  * System register bit definitions
  ******************************************************************************/
 /* CLIDR definitions */
-#define LOUIS_SHIFT		21
-#define LOC_SHIFT		24
-#define CLIDR_FIELD_WIDTH	3
+#define LOUIS_SHIFT		U(21)
+#define LOC_SHIFT		U(24)
+#define CLIDR_FIELD_WIDTH	U(3)
 
 /* CSSELR definitions */
-#define LEVEL_SHIFT		1
+#define LEVEL_SHIFT		U(1)
 
 /* D$ set/way op type defines */
-#define DCISW			0x0
-#define DCCISW			0x1
-#define DCCSW			0x2
+#define DCISW			U(0x0)
+#define DCCISW			U(0x1)
+#define DCCSW			U(0x2)
 
 /* ID_AA64PFR0_EL1 definitions */
-#define ID_AA64PFR0_EL0_SHIFT	0
-#define ID_AA64PFR0_EL1_SHIFT	4
-#define ID_AA64PFR0_EL2_SHIFT	8
-#define ID_AA64PFR0_EL3_SHIFT	12
-#define ID_AA64PFR0_ELX_MASK	0xf
+#define ID_AA64PFR0_EL0_SHIFT	U(0)
+#define ID_AA64PFR0_EL1_SHIFT	U(4)
+#define ID_AA64PFR0_EL2_SHIFT	U(8)
+#define ID_AA64PFR0_EL3_SHIFT	U(12)
+#define ID_AA64PFR0_ELX_MASK	U(0xf)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT	U(32)
+#define ID_AA64DFR0_PMS_LENGTH	U(4)
+#define ID_AA64DFR0_PMS_MASK	U(0xf)
+
+#define EL_IMPL_NONE		U(0)
+#define EL_IMPL_A64ONLY		U(1)
+#define EL_IMPL_A64_A32		U(2)
+
+#define ID_AA64PFR0_GIC_SHIFT	U(24)
+#define ID_AA64PFR0_GIC_WIDTH	U(4)
+#define ID_AA64PFR0_GIC_MASK	((U(1) << ID_AA64PFR0_GIC_WIDTH) - 1)
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_MASK	U(0xf)
+
+#define PARANGE_0000	U(32)
+#define PARANGE_0001	U(36)
+#define PARANGE_0010	U(40)
+#define PARANGE_0011	U(42)
+#define PARANGE_0100	U(44)
+#define PARANGE_0101	U(48)
 
 /* ID_PFR1_EL1 definitions */
-#define ID_PFR1_VIRTEXT_SHIFT	12
-#define ID_PFR1_VIRTEXT_MASK	0xf
+#define ID_PFR1_VIRTEXT_SHIFT	U(12)
+#define ID_PFR1_VIRTEXT_MASK	U(0xf)
 #define GET_VIRT_EXT(id)	((id >> ID_PFR1_VIRTEXT_SHIFT) \
 				 & ID_PFR1_VIRTEXT_MASK)
 
 /* SCTLR definitions */
-#define SCTLR_EL2_RES1  ((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22) | \
-			(1 << 18) | (1 << 16) | (1 << 11) | (1 << 5) |  \
-			(1 << 4))
+#define SCTLR_EL2_RES1	((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+			 (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+			 (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
 
-#define SCTLR_EL1_RES1  ((1 << 29) | (1 << 28) | (1 << 23) | (1 << 22) | \
-			(1 << 11))
+#define SCTLR_EL1_RES1	((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+			 (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
 #define SCTLR_AARCH32_EL1_RES1 \
-			((1 << 23) | (1 << 22) | (1 << 11) | (1 << 4) | \
-			(1 << 3))
+			((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+			 (U(1) << 4) | (U(1) << 3))
 
-#define SCTLR_M_BIT		(1 << 0)
-#define SCTLR_A_BIT		(1 << 1)
-#define SCTLR_C_BIT		(1 << 2)
-#define SCTLR_SA_BIT		(1 << 3)
-#define SCTLR_I_BIT		(1 << 12)
-#define SCTLR_WXN_BIT		(1 << 19)
-#define SCTLR_EE_BIT		(1 << 25)
+#define SCTLR_EL3_RES1	((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+			(U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+			(U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT		(U(1) << 0)
+#define SCTLR_A_BIT		(U(1) << 1)
+#define SCTLR_C_BIT		(U(1) << 2)
+#define SCTLR_SA_BIT		(U(1) << 3)
+#define SCTLR_CP15BEN_BIT	(U(1) << 5)
+#define SCTLR_I_BIT		(U(1) << 12)
+#define SCTLR_NTWI_BIT		(U(1) << 16)
+#define SCTLR_NTWE_BIT		(U(1) << 18)
+#define SCTLR_WXN_BIT		(U(1) << 19)
+#define SCTLR_EE_BIT		(U(1) << 25)
+#define SCTLR_RESET_VAL		SCTLR_EL3_RES1
 
 /* CPACR_El1 definitions */
-#define CPACR_EL1_FPEN(x)	(x << 20)
-#define CPACR_EL1_FP_TRAP_EL0	0x1
-#define CPACR_EL1_FP_TRAP_ALL	0x2
-#define CPACR_EL1_FP_TRAP_NONE	0x3
+#define CPACR_EL1_FPEN(x)	((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0	U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL	U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE	U(0x3)
 
 /* SCR definitions */
-#define SCR_RES1_BITS		((1 << 4) | (1 << 5))
-#define SCR_TWE_BIT		(1 << 13)
-#define SCR_TWI_BIT		(1 << 12)
-#define SCR_ST_BIT		(1 << 11)
-#define SCR_RW_BIT		(1 << 10)
-#define SCR_SIF_BIT		(1 << 9)
-#define SCR_HCE_BIT		(1 << 8)
-#define SCR_SMD_BIT		(1 << 7)
-#define SCR_EA_BIT		(1 << 3)
-#define SCR_FIQ_BIT		(1 << 2)
-#define SCR_IRQ_BIT		(1 << 1)
-#define SCR_NS_BIT		(1 << 0)
-#define SCR_VALID_BIT_MASK	0x2f8f
+#define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
+#define SCR_TWE_BIT		(U(1) << 13)
+#define SCR_TWI_BIT		(U(1) << 12)
+#define SCR_ST_BIT		(U(1) << 11)
+#define SCR_RW_BIT		(U(1) << 10)
+#define SCR_SIF_BIT		(U(1) << 9)
+#define SCR_HCE_BIT		(U(1) << 8)
+#define SCR_SMD_BIT		(U(1) << 7)
+#define SCR_EA_BIT		(U(1) << 3)
+#define SCR_FIQ_BIT		(U(1) << 2)
+#define SCR_IRQ_BIT		(U(1) << 1)
+#define SCR_NS_BIT		(U(1) << 0)
+#define SCR_VALID_BIT_MASK	U(0x2f8f)
+#define SCR_RESET_VAL		SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_SPD32(x)		((x) << 14)
+#define MDCR_SPD32_LEGACY	U(0x0)
+#define MDCR_SPD32_DISABLE	U(0x2)
+#define MDCR_SPD32_ENABLE	U(0x3)
+#define MDCR_SDD_BIT		(U(1) << 16)
+#define MDCR_NSPB(x)		((x) << 12)
+#define MDCR_NSPB_EL1		U(0x3)
+#define MDCR_TDOSA_BIT		(U(1) << 10)
+#define MDCR_TDA_BIT		(U(1) << 9)
+#define MDCR_TPM_BIT		(U(1) << 6)
+#define MDCR_EL3_RESET_VAL	U(0x0)
+
+#if !ERROR_DEPRECATED
+#define MDCR_DEF_VAL		(MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
+#endif
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_TPMS		(U(1) << 14)
+#define MDCR_EL2_E2PB(x)	((x) << 12)
+#define MDCR_EL2_E2PB_EL1	U(0x3)
+#define MDCR_EL2_TDRA_BIT	(U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT	(U(1) << 10)
+#define MDCR_EL2_TDA_BIT	(U(1) << 9)
+#define MDCR_EL2_TDE_BIT	(U(1) << 8)
+#define MDCR_EL2_HPME_BIT	(U(1) << 7)
+#define MDCR_EL2_TPM_BIT	(U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT	(U(1) << 5)
+#define MDCR_EL2_RESET_VAL	U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL	U(0x0)
+#define HSTR_EL2_T_MASK		U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT	(U(1) << 0)
+#define CNTHP_CTL_RESET_VAL	U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL		ULL(0x0)
+#define VTTBR_VMID_MASK		ULL(0xff)
+#define VTTBR_VMID_SHIFT	U(48)
+#define VTTBR_BADDR_MASK	ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT	U(0)
 
 /* HCR definitions */
-#define HCR_RW_BIT		(1ull << 31)
-#define HCR_AMO_BIT		(1 << 5)
-#define HCR_IMO_BIT		(1 << 4)
-#define HCR_FMO_BIT		(1 << 3)
+#define HCR_RW_SHIFT		U(31)
+#define HCR_RW_BIT		(ULL(1) << HCR_RW_SHIFT)
+#define HCR_AMO_BIT		(U(1) << 5)
+#define HCR_IMO_BIT		(U(1) << 4)
+#define HCR_FMO_BIT		(U(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT		U(8)
+#define ISR_I_SHIFT		U(7)
+#define ISR_F_SHIFT		U(6)
 
 /* CNTHCTL_EL2 definitions */
-#define EVNTEN_BIT		(1 << 2)
-#define EL1PCEN_BIT		(1 << 1)
-#define EL1PCTEN_BIT		(1 << 0)
+#define CNTHCTL_RESET_VAL	U(0x0)
+#define EVNTEN_BIT		(U(1) << 2)
+#define EL1PCEN_BIT		(U(1) << 1)
+#define EL1PCTEN_BIT		(U(1) << 0)
 
 /* CNTKCTL_EL1 definitions */
-#define EL0PTEN_BIT		(1 << 9)
-#define EL0VTEN_BIT		(1 << 8)
-#define EL0PCTEN_BIT		(1 << 0)
-#define EL0VCTEN_BIT		(1 << 1)
-#define EVNTEN_BIT		(1 << 2)
-#define EVNTDIR_BIT		(1 << 3)
-#define EVNTI_SHIFT		4
-#define EVNTI_MASK		0xf
+#define EL0PTEN_BIT		(U(1) << 9)
+#define EL0VTEN_BIT		(U(1) << 8)
+#define EL0PCTEN_BIT		(U(1) << 0)
+#define EL0VCTEN_BIT		(U(1) << 1)
+#define EVNTEN_BIT		(U(1) << 2)
+#define EVNTDIR_BIT		(U(1) << 3)
+#define EVNTI_SHIFT		U(4)
+#define EVNTI_MASK		U(0xf)
 
 /* CPTR_EL3 definitions */
-#define TCPAC_BIT		(1 << 31)
-#define TTA_BIT			(1 << 20)
-#define TFP_BIT			(1 << 10)
+#define TCPAC_BIT		(U(1) << 31)
+#define TTA_BIT			(U(1) << 20)
+#define TFP_BIT			(U(1) << 10)
+#define CPTR_EL3_RESET_VAL	U(0x0)
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1		((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
+#define CPTR_EL2_TCPAC_BIT	(U(1) << 31)
+#define CPTR_EL2_TTA_BIT	(U(1) << 20)
+#define CPTR_EL2_TFP_BIT	(U(1) << 10)
+#define CPTR_EL2_RESET_VAL	CPTR_EL2_RES1
 
 /* CPSR/SPSR definitions */
-#define DAIF_FIQ_BIT		(1 << 0)
-#define DAIF_IRQ_BIT		(1 << 1)
-#define DAIF_ABT_BIT		(1 << 2)
-#define DAIF_DBG_BIT		(1 << 3)
-#define SPSR_DAIF_SHIFT		6
-#define SPSR_DAIF_MASK		0xf
+#define DAIF_FIQ_BIT		(U(1) << 0)
+#define DAIF_IRQ_BIT		(U(1) << 1)
+#define DAIF_ABT_BIT		(U(1) << 2)
+#define DAIF_DBG_BIT		(U(1) << 3)
+#define SPSR_DAIF_SHIFT		U(6)
+#define SPSR_DAIF_MASK		U(0xf)
 
-#define SPSR_AIF_SHIFT		6
-#define SPSR_AIF_MASK		0x7
+#define SPSR_AIF_SHIFT		U(6)
+#define SPSR_AIF_MASK		U(0x7)
 
-#define SPSR_E_SHIFT		9
-#define SPSR_E_MASK			0x1
-#define SPSR_E_LITTLE		0x0
-#define SPSR_E_BIG			0x1
+#define SPSR_E_SHIFT		U(9)
+#define SPSR_E_MASK		U(0x1)
+#define SPSR_E_LITTLE		U(0x0)
+#define SPSR_E_BIG		U(0x1)
 
-#define SPSR_T_SHIFT		5
-#define SPSR_T_MASK			0x1
-#define SPSR_T_ARM			0x0
-#define SPSR_T_THUMB		0x1
+#define SPSR_T_SHIFT		U(5)
+#define SPSR_T_MASK		U(0x1)
+#define SPSR_T_ARM		U(0x0)
+#define SPSR_T_THUMB		U(0x1)
 
 #define DISABLE_ALL_EXCEPTIONS \
 		(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
 
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT		(U(1) << 1)
+#define RMR_EL3_AA64_BIT	(U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE	U(0xFFFF0000)
 
 /*
  * TCR definitions
  */
-#define TCR_EL3_RES1		((1UL << 31) | (1UL << 23))
-#define TCR_EL1_IPS_SHIFT	32
-#define TCR_EL3_PS_SHIFT	16
+#define TCR_EL3_RES1		((U(1) << 31) | (U(1) << 23))
+#define TCR_EL1_IPS_SHIFT	U(32)
+#define TCR_EL3_PS_SHIFT	U(16)
+
+#define TCR_TxSZ_MIN		U(16)
+#define TCR_TxSZ_MAX		U(39)
 
 /* (internal) physical address size bits in EL3/EL1 */
-#define TCR_PS_BITS_4GB		(0x0)
-#define TCR_PS_BITS_64GB	(0x1)
-#define TCR_PS_BITS_1TB		(0x2)
-#define TCR_PS_BITS_4TB		(0x3)
-#define TCR_PS_BITS_16TB	(0x4)
-#define TCR_PS_BITS_256TB	(0x5)
+#define TCR_PS_BITS_4GB		U(0x0)
+#define TCR_PS_BITS_64GB	U(0x1)
+#define TCR_PS_BITS_1TB		U(0x2)
+#define TCR_PS_BITS_4TB		U(0x3)
+#define TCR_PS_BITS_16TB	U(0x4)
+#define TCR_PS_BITS_256TB	U(0x5)
 
-#define ADDR_MASK_48_TO_63	0xFFFF000000000000UL
-#define ADDR_MASK_44_TO_47	0x0000F00000000000UL
-#define ADDR_MASK_42_TO_43	0x00000C0000000000UL
-#define ADDR_MASK_40_TO_41	0x0000030000000000UL
-#define ADDR_MASK_36_TO_39	0x000000F000000000UL
-#define ADDR_MASK_32_TO_35	0x0000000F00000000UL
+#define ADDR_MASK_48_TO_63	ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47	ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43	ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41	ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39	ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35	ULL(0x0000000F00000000)
 
-#define TCR_RGN_INNER_NC	(0x0 << 8)
-#define TCR_RGN_INNER_WBA	(0x1 << 8)
-#define TCR_RGN_INNER_WT	(0x2 << 8)
-#define TCR_RGN_INNER_WBNA	(0x3 << 8)
+#define TCR_RGN_INNER_NC	(U(0x0) << 8)
+#define TCR_RGN_INNER_WBA	(U(0x1) << 8)
+#define TCR_RGN_INNER_WT	(U(0x2) << 8)
+#define TCR_RGN_INNER_WBNA	(U(0x3) << 8)
 
-#define TCR_RGN_OUTER_NC	(0x0 << 10)
-#define TCR_RGN_OUTER_WBA	(0x1 << 10)
-#define TCR_RGN_OUTER_WT	(0x2 << 10)
-#define TCR_RGN_OUTER_WBNA	(0x3 << 10)
+#define TCR_RGN_OUTER_NC	(U(0x0) << 10)
+#define TCR_RGN_OUTER_WBA	(U(0x1) << 10)
+#define TCR_RGN_OUTER_WT	(U(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA	(U(0x3) << 10)
 
-#define TCR_SH_NON_SHAREABLE	(0x0 << 12)
-#define TCR_SH_OUTER_SHAREABLE	(0x2 << 12)
-#define TCR_SH_INNER_SHAREABLE	(0x3 << 12)
+#define TCR_SH_NON_SHAREABLE	(U(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE	(U(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE	(U(0x3) << 12)
 
-#define MODE_SP_SHIFT		0x0
-#define MODE_SP_MASK		0x1
-#define MODE_SP_EL0		0x0
-#define MODE_SP_ELX		0x1
+#define TCR_EPD1_BIT		(U(1) << 23)
 
-#define MODE_RW_SHIFT		0x4
-#define MODE_RW_MASK		0x1
-#define MODE_RW_64			0x0
-#define MODE_RW_32			0x1
+#define MODE_SP_SHIFT		U(0x0)
+#define MODE_SP_MASK		U(0x1)
+#define MODE_SP_EL0		U(0x0)
+#define MODE_SP_ELX		U(0x1)
 
-#define MODE_EL_SHIFT		0x2
-#define MODE_EL_MASK		0x3
-#define MODE_EL3		0x3
-#define MODE_EL2		0x2
-#define MODE_EL1		0x1
-#define MODE_EL0		0x0
+#define MODE_RW_SHIFT		U(0x4)
+#define MODE_RW_MASK		U(0x1)
+#define MODE_RW_64		U(0x0)
+#define MODE_RW_32		U(0x1)
 
-#define MODE32_SHIFT		0
-#define MODE32_MASK		0xf
-#define MODE32_usr		0x0
-#define MODE32_fiq		0x1
-#define MODE32_irq		0x2
-#define MODE32_svc		0x3
-#define MODE32_mon		0x6
-#define MODE32_abt		0x7
-#define MODE32_hyp		0xa
-#define MODE32_und		0xb
-#define MODE32_sys		0xf
+#define MODE_EL_SHIFT		U(0x2)
+#define MODE_EL_MASK		U(0x3)
+#define MODE_EL3		U(0x3)
+#define MODE_EL2		U(0x2)
+#define MODE_EL1		U(0x1)
+#define MODE_EL0		U(0x0)
+
+#define MODE32_SHIFT		U(0)
+#define MODE32_MASK		U(0xf)
+#define MODE32_usr		U(0x0)
+#define MODE32_fiq		U(0x1)
+#define MODE32_irq		U(0x2)
+#define MODE32_svc		U(0x3)
+#define MODE32_mon		U(0x6)
+#define MODE32_abt		U(0x7)
+#define MODE32_hyp		U(0xa)
+#define MODE32_und		U(0xb)
+#define MODE32_sys		U(0xf)
 
 #define GET_RW(mode)		(((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
 #define GET_EL(mode)		(((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
@@ -288,155 +393,173 @@
 	((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT)
 
 #define SPSR_MODE32(mode, isa, endian, aif)		\
-	(MODE_RW_32 << MODE_RW_SHIFT |			\
-	((mode) & MODE32_MASK) << MODE32_SHIFT |	\
-	((isa) & SPSR_T_MASK) << SPSR_T_SHIFT |		\
-	((endian) & SPSR_E_MASK) << SPSR_E_SHIFT |	\
-	((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
-
-
-/* Physical timer control register bit fields shifts and masks */
-#define CNTP_CTL_ENABLE_SHIFT   0
-#define CNTP_CTL_IMASK_SHIFT    1
-#define CNTP_CTL_ISTATUS_SHIFT  2
-
-#define CNTP_CTL_ENABLE_MASK    1
-#define CNTP_CTL_IMASK_MASK     1
-#define CNTP_CTL_ISTATUS_MASK   1
-
-#define get_cntp_ctl_enable(x)  ((x >> CNTP_CTL_ENABLE_SHIFT) & \
-					CNTP_CTL_ENABLE_MASK)
-#define get_cntp_ctl_imask(x)   ((x >> CNTP_CTL_IMASK_SHIFT) & \
-					CNTP_CTL_IMASK_MASK)
-#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \
-					CNTP_CTL_ISTATUS_MASK)
-
-#define set_cntp_ctl_enable(x)  (x |= 1 << CNTP_CTL_ENABLE_SHIFT)
-#define set_cntp_ctl_imask(x)   (x |= 1 << CNTP_CTL_IMASK_SHIFT)
-
-#define clr_cntp_ctl_enable(x)  (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
-#define clr_cntp_ctl_imask(x)   (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
-
-/* Miscellaneous MMU related constants */
-#define NUM_2MB_IN_GB		(1 << 9)
-#define NUM_4K_IN_2MB		(1 << 9)
-#define NUM_GB_IN_4GB		(1 << 2)
-
-#define TWO_MB_SHIFT		21
-#define ONE_GB_SHIFT		30
-#define FOUR_KB_SHIFT		12
-
-#define ONE_GB_INDEX(x)		((x) >> ONE_GB_SHIFT)
-#define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
-#define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)
-
-#define INVALID_DESC		0x0
-#define BLOCK_DESC		0x1
-#define TABLE_DESC		0x3
-
-#define FIRST_LEVEL_DESC_N	ONE_GB_SHIFT
-#define SECOND_LEVEL_DESC_N	TWO_MB_SHIFT
-#define THIRD_LEVEL_DESC_N	FOUR_KB_SHIFT
-
-#define LEVEL1			1
-#define LEVEL2			2
-#define LEVEL3			3
-
-#define XN			(1ull << 2)
-#define PXN			(1ull << 1)
-#define CONT_HINT		(1ull << 0)
-
-#define UPPER_ATTRS(x)		(x & 0x7) << 52
-#define NON_GLOBAL		(1 << 9)
-#define ACCESS_FLAG		(1 << 8)
-#define NSH			(0x0 << 6)
-#define OSH			(0x2 << 6)
-#define ISH			(0x3 << 6)
-
-#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
-#define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
-#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
-#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
-
-#define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
-#define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
-
-#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
-#define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
-
-/* Values for number of entries in each MMU translation table */
-#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
-#define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
-#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
-
-/* Values to convert a memory address to an index into a translation table */
-#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
-#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
-#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+	((MODE_RW_32 << MODE_RW_SHIFT) |		\
+	(((mode) & MODE32_MASK) << MODE32_SHIFT) |	\
+	(((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) |	\
+	(((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) |	\
+	(((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
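
As an example of how this macro is consumed, the SPSR programmed before handing control to a 32-bit image in SVC mode could be built as in the sketch below; the mode, ISA and endianness choices are illustrative, and the macro itself masks the exception bits with SPSR_AIF_MASK.

/* Little-endian AArch32 SVC mode, ARM ISA, asynchronous exceptions masked */
uint32_t spsr_for_svc = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
				    DISABLE_ALL_EXCEPTIONS);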
 
 /*
- * AP[1] bit is ignored by hardware and is
- * treated as if it is One in EL2/EL3
+ * TTBR Definitions
  */
-#define AP_RO			(0x1 << 5)
-#define AP_RW			(0x0 << 5)
+#define TTBR_CNP_BIT		0x1
 
-#define NS				(0x1 << 3)
-#define ATTR_SO_INDEX			0x2
-#define ATTR_DEVICE_INDEX		0x1
-#define ATTR_IWBWA_OWBWA_NTR_INDEX	0x0
-#define LOWER_ATTRS(x)			(((x) & 0xfff) << 2)
-#define ATTR_SO				(0x0)
-#define ATTR_DEVICE			(0x4)
-#define ATTR_IWBWA_OWBWA_NTR		(0xff)
-#define MAIR_ATTR_SET(attr, index)	(attr << (index << 3))
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT		U(24)
+#define CTR_CWG_MASK		U(0xf)
+#define CTR_ERG_SHIFT		U(20)
+#define CTR_ERG_MASK		U(0xf)
+#define CTR_DMINLINE_SHIFT	U(16)
+#define CTR_DMINLINE_MASK	U(0xf)
+#define CTR_L1IP_SHIFT		U(14)
+#define CTR_L1IP_MASK		U(0x3)
+#define CTR_IMINLINE_SHIFT	U(0)
+#define CTR_IMINLINE_MASK	U(0xf)
+
+#define MAX_CACHE_LINE_SIZE	U(0x800) /* 2KB */
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT   U(0)
+#define CNTP_CTL_IMASK_SHIFT    U(1)
+#define CNTP_CTL_ISTATUS_SHIFT  U(2)
+
+#define CNTP_CTL_ENABLE_MASK    U(1)
+#define CNTP_CTL_IMASK_MASK     U(1)
+#define CNTP_CTL_ISTATUS_MASK   U(1)
+
+#define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+					CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+					CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+					CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x)  ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x)   ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x)  ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x)   ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
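
For instance, arming the EL1 physical timer from C could look like the sketch below; the accessors come from DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0) in arch_helpers.h and the function name is hypothetical.

static void example_enable_el1_physical_timer(void)
{
	u_register_t ctl = read_cntp_ctl_el0();

	set_cntp_ctl_enable(ctl);	/* ENABLE = 1 */
	clr_cntp_ctl_imask(ctl);	/* IMASK = 0: do not mask the interrupt */
	write_cntp_ctl_el0(ctl);
}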
 
 /* Exception Syndrome register bits and bobs */
-#define ESR_EC_SHIFT			26
-#define ESR_EC_MASK			0x3f
-#define ESR_EC_LENGTH			6
-#define EC_UNKNOWN			0x0
-#define EC_WFE_WFI			0x1
-#define EC_AARCH32_CP15_MRC_MCR		0x3
-#define EC_AARCH32_CP15_MRRC_MCRR	0x4
-#define EC_AARCH32_CP14_MRC_MCR		0x5
-#define EC_AARCH32_CP14_LDC_STC		0x6
-#define EC_FP_SIMD			0x7
-#define EC_AARCH32_CP10_MRC		0x8
-#define EC_AARCH32_CP14_MRRC_MCRR	0xc
-#define EC_ILLEGAL			0xe
-#define EC_AARCH32_SVC			0x11
-#define EC_AARCH32_HVC			0x12
-#define EC_AARCH32_SMC			0x13
-#define EC_AARCH64_SVC			0x15
-#define EC_AARCH64_HVC			0x16
-#define EC_AARCH64_SMC			0x17
-#define EC_AARCH64_SYS			0x18
-#define EC_IABORT_LOWER_EL		0x20
-#define EC_IABORT_CUR_EL		0x21
-#define EC_PC_ALIGN			0x22
-#define EC_DABORT_LOWER_EL		0x24
-#define EC_DABORT_CUR_EL		0x25
-#define EC_SP_ALIGN			0x26
-#define EC_AARCH32_FP			0x28
-#define EC_AARCH64_FP			0x2c
-#define EC_SERROR			0x2f
+#define ESR_EC_SHIFT			U(26)
+#define ESR_EC_MASK			U(0x3f)
+#define ESR_EC_LENGTH			U(6)
+#define EC_UNKNOWN			U(0x0)
+#define EC_WFE_WFI			U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR		U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR	U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR		U(0x5)
+#define EC_AARCH32_CP14_LDC_STC		U(0x6)
+#define EC_FP_SIMD			U(0x7)
+#define EC_AARCH32_CP10_MRC		U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR	U(0xc)
+#define EC_ILLEGAL			U(0xe)
+#define EC_AARCH32_SVC			U(0x11)
+#define EC_AARCH32_HVC			U(0x12)
+#define EC_AARCH32_SMC			U(0x13)
+#define EC_AARCH64_SVC			U(0x15)
+#define EC_AARCH64_HVC			U(0x16)
+#define EC_AARCH64_SMC			U(0x17)
+#define EC_AARCH64_SYS			U(0x18)
+#define EC_IABORT_LOWER_EL		U(0x20)
+#define EC_IABORT_CUR_EL		U(0x21)
+#define EC_PC_ALIGN			U(0x22)
+#define EC_DABORT_LOWER_EL		U(0x24)
+#define EC_DABORT_CUR_EL		U(0x25)
+#define EC_SP_ALIGN			U(0x26)
+#define EC_AARCH32_FP			U(0x28)
+#define EC_AARCH64_FP			U(0x2c)
+#define EC_SERROR			U(0x2f)
 
-#define EC_BITS(x)			(x >> ESR_EC_SHIFT) & ESR_EC_MASK
+#define EC_BITS(x)			(((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT 	U(0x1)
+#define RMR_WARM_RESET_CPU		(U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT		U(12)
+#define TLBI_ADDR_MASK		ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x)		(((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
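
A by-VA TLB invalidation helper would typically shift the address into this operand format before issuing the instruction, as in the sketch below; tlbivaae1is() is generated in arch_helpers.h and the helper name here is hypothetical.

/* Invalidate stage 1, EL1 TLB entries for va in the Inner Shareable domain */
static inline void example_tlbi_va(uintptr_t va)
{
	tlbivaae1is(TLBI_ADDR(va));
}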
 
 /*******************************************************************************
  * Definitions of register offsets and fields in the CNTCTLBase Frame of the
  * system level implementation of the Generic Timer.
  ******************************************************************************/
-#define CNTNSAR			0x4
-#define CNTNSAR_NS_SHIFT(x)	x
+#define CNTNSAR			U(0x4)
+#define CNTNSAR_NS_SHIFT(x)	(x)
 
-#define CNTACR_BASE(x)		(0x40 + (x << 2))
-#define CNTACR_RPCT_SHIFT	0x0
-#define CNTACR_RVCT_SHIFT	0x1
-#define CNTACR_RFRQ_SHIFT	0x2
-#define CNTACR_RVOFF_SHIFT	0x3
-#define CNTACR_RWVT_SHIFT	0x4
-#define CNTACR_RWPT_SHIFT	0x5
+#define CNTACR_BASE(x)		(U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT	U(0x0)
+#define CNTACR_RVCT_SHIFT	U(0x1)
+#define CNTACR_RFRQ_SHIFT	U(0x2)
+#define CNTACR_RVOFF_SHIFT	U(0x3)
+#define CNTACR_RWVT_SHIFT	U(0x4)
+#define CNTACR_RWPT_SHIFT	U(0x5)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL	U(0x0)
+#define PMCR_EL0_N_SHIFT	U(11)
+#define PMCR_EL0_N_MASK		U(0x1f)
+#define PMCR_EL0_N_BITS		(PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT		(U(1) << 6)
+#define PMCR_EL0_DP_BIT		(U(1) << 5)
+#define PMCR_EL0_X_BIT		(U(1) << 4)
+#define PMCR_EL0_D_BIT		(U(1) << 3)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE		ULL(0x0)
+#define MAIR_DEV_nGnRE		ULL(0x4)
+#define MAIR_DEV_nGRE		ULL(0x8)
+#define MAIR_DEV_GRE		ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ *  WT:	 Write Through
+ *  WB:	 Write Back
+ *  NC:	 Non-Cacheable
+ *
+ * Transient Hint
+ *  NTR: Non-Transient
+ *  TR:	 Transient
+ *
+ * Allocation Policy
+ *  RA:	 Read Allocate
+ *  WA:	 Write Allocate
+ *  RWA: Read and Write Allocate
+ *  NA:	 No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA	ULL(0x1)
+#define MAIR_NORM_WT_TR_RA	ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA	ULL(0x3)
+#define MAIR_NORM_NC		ULL(0x4)
+#define MAIR_NORM_WB_TR_WA	ULL(0x5)
+#define MAIR_NORM_WB_TR_RA	ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA	ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA	ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA	ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA	ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA	ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA	ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA	ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA	ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA	ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT	4
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)	((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
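
As a concrete reading of the encodings above, inner and outer Write-Back, non-transient, read/write-allocate memory (the usual attribute for cached normal RAM) works out to 0xff; the macro name in this sketch is purely illustrative.

/* (0xf << MAIR_NORM_OUTER_SHIFT) | 0xf == 0xff */
#define EXAMPLE_MAIR_NORMAL_WB_RWA \
	MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)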
 
 #endif /* __ARCH_H__ */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 65941e6..9c022ab 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __ARCH_HELPERS_H__
@@ -34,6 +10,7 @@
 #include <arch.h>	/* for additional register definitions */
 #include <cdefs.h>	/* For __dead2 */
 #include <stdint.h>
+#include <sys/types.h>
 
 /**********************************************************************
  * Macros which create inline functions to read or write CPU system
@@ -54,11 +31,8 @@
 	__asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));	\
 }
 
-#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)		\
-static inline void write_ ## _name(const uint64_t v)			\
-{									\
-	__asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v));	\
-}
+#define SYSREG_WRITE_CONST(reg_name, v)				\
+	__asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
 
 /* Define read function for system register */
 #define DEFINE_SYSREG_READ_FUNC(_name) 			\
@@ -74,10 +48,13 @@
 	_DEFINE_SYSREG_READ_FUNC(_name, _reg_name)	\
 	_DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
 
-/* Define write function for special system registers */
-#define DEFINE_SYSREG_WRITE_CONST_FUNC(_name)		\
-	_DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _name)
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name)	\
+	_DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
 
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name)	\
+	_DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
 
 /**********************************************************************
  * Macros to create inline functions for system instructions
@@ -105,25 +82,60 @@
 }
 
 /*******************************************************************************
- * Aarch64 translation tables manipulation helper prototypes
-******************************************************************************/
-uint64_t create_table_desc(uint64_t *next_table_ptr);
-uint64_t create_block_desc(uint64_t desc, uint64_t addr, uint32_t level);
-uint64_t create_device_block(uint64_t output_addr, uint32_t level, uint32_t ns);
-uint64_t create_romem_block(uint64_t output_addr, uint32_t level, uint32_t ns);
-uint64_t create_rwmem_block(uint64_t output_addr, uint32_t level, uint32_t ns);
-
-/*******************************************************************************
  * TLB maintenance accessor prototypes
  ******************************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type)\
+static inline void tlbi ## _type(void)			\
+{							\
+	__asm__("tlbi " #_type "\n"			\
+		"dsb ish\n"				\
+		"tlbi " #_type);			\
+}
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for errata 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type)	\
+static inline void tlbi ## _type(uint64_t v)			\
+{								\
+	__asm__("tlbi " #_type ", %0\n"				\
+		"dsb ish\n"					\
+		"tlbi " #_type ", %0" : : "r" (v));		\
+}
+#endif /* ERRATA_A57_813419 */
+
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
+#else
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
 DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+#endif
 DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
 
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
+
 /*******************************************************************************
  * Cache maintenance accessor prototypes
  ******************************************************************************/
@@ -136,32 +148,37 @@
 DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
 DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
 
-void flush_dcache_range(uint64_t, uint64_t);
-void inv_dcache_range(uint64_t, uint64_t);
-void dcsw_op_louis(uint32_t);
-void dcsw_op_all(uint32_t);
+/*******************************************************************************
+ * Address translation accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
 
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_el1(void);
 void disable_mmu_el3(void);
+void disable_mmu_icache_el1(void);
 void disable_mmu_icache_el3(void);
 
 /*******************************************************************************
  * Misc. accessor prototypes
  ******************************************************************************/
 
-DEFINE_SYSREG_WRITE_CONST_FUNC(daifset)
-DEFINE_SYSREG_WRITE_CONST_FUNC(daifclr)
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
 
-#define enable_irq()			write_daifclr(DAIF_IRQ_BIT)
-#define enable_fiq()			write_daifclr(DAIF_FIQ_BIT)
-#define enable_serror()			write_daifclr(DAIF_ABT_BIT)
-#define enable_debug_exceptions()	write_daifclr(DAIF_DBG_BIT)
-#define disable_irq()			write_daifset(DAIF_IRQ_BIT)
-#define disable_fiq()			write_daifset(DAIF_FIQ_BIT)
-#define disable_serror()		write_daifset(DAIF_ABT_BIT)
-#define disable_debug_exceptions()	write_daifset(DAIF_DBG_BIT)
-
+DEFINE_SYSREG_READ_FUNC(par_el1)
 DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
 DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
 DEFINE_SYSREG_READ_FUNC(CurrentEl)
 DEFINE_SYSREG_RW_FUNCS(daif)
 DEFINE_SYSREG_RW_FUNCS(spsr_el1)
@@ -176,8 +193,12 @@
 DEFINE_SYSOP_FUNC(sev)
 DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
 DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
 DEFINE_SYSOP_FUNC(isb)
 
 uint32_t get_afflvl_shift(uint32_t);
@@ -194,6 +215,7 @@
  ******************************************************************************/
 DEFINE_SYSREG_READ_FUNC(midr_el1)
 DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
 
 DEFINE_SYSREG_RW_FUNCS(scr_el3)
 DEFINE_SYSREG_RW_FUNCS(hcr_el2)
@@ -252,6 +274,8 @@
 
 DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
 
+DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
+
 DEFINE_SYSREG_RW_FUNCS(cptr_el2)
 DEFINE_SYSREG_RW_FUNCS(cptr_el3)
 
@@ -269,15 +293,31 @@
 
 DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
 DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
 
 DEFINE_SYSREG_READ_FUNC(isr_el1)
 
-/* GICv3 System Registers */
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
 
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
 
 
 #define IS_IN_EL(x) \
@@ -286,6 +326,14 @@
 #define IS_IN_EL1() IS_IN_EL(1)
 #define IS_IN_EL3() IS_IN_EL(3)
 
+/*
+ * Check whether an EL is implemented, based on the ID_AA64PFR0_EL1 register
+ * fields. The 'el' argument must be one of 1, 2 or 3.
+ */
+#define EL_IMPLEMENTED(el) \
+	((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL##el##_SHIFT) \
+		& ID_AA64PFR0_ELX_MASK)
+
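A typical use is to gate EL2 register initialisation on whether that Exception level exists at all, as in the sketch below; the function name is hypothetical and the HCR_EL2 value written is only an example.

static void example_setup_el2(void)
{
	if (EL_IMPLEMENTED(2) != EL_IMPL_NONE) {
		/* EL2 is present: make EL1 run in AArch64 state (HCR_EL2.RW) */
		write_hcr_el2(HCR_RW_BIT);
	}
}
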
 /* Previously defined accessor functions with incomplete register names  */
 
 #define read_current_el()	read_CurrentEl()
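
Editor's note: as a hedged sketch of how the accessors generated above are normally consumed from C (the caller below is invented for illustration; SCR_HCE_BIT and DAIF_IRQ_BIT are assumed to come from arch.h, and read_scr_el3()/write_scr_el3() from the DEFINE_SYSREG_RW_FUNCS instances in this header):

    /* Hypothetical caller: enable HVC routing only if EL2 is implemented. */
    static void example_setup(void)
    {
    	/* EL_IMPLEMENTED() reads ID_AA64PFR0_EL1 via the accessor above. */
    	if (EL_IMPLEMENTED(2) != 0U)
    		write_scr_el3(read_scr_el3() | SCR_HCE_BIT);

    	/* Mask IRQs at the current EL; write_daifset() is defined above. */
    	write_daifset(DAIF_IRQ_BIT);
    	isb();
    }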
diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h
new file mode 100644
index 0000000..62294d0
--- /dev/null
+++ b/include/lib/aarch64/smcc_helpers.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+#ifndef __ASSEMBLY__
+#include <context.h>
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h)	{					\
+	return (uint64_t) (_h);					\
+}
+#define SMC_RET1(_h, _x0)	{				\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X0), (_x0));	\
+	SMC_RET0(_h);						\
+}
+#define SMC_RET2(_h, _x0, _x1)	{				\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X1), (_x1));	\
+	SMC_RET1(_h, (_x0));					\
+}
+#define SMC_RET3(_h, _x0, _x1, _x2)	{			\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X2), (_x2));	\
+	SMC_RET2(_h, (_x0), (_x1));				\
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3)	{		\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X3), (_x3));	\
+	SMC_RET3(_h, (_x0), (_x1), (_x2));			\
+}
+#define SMC_RET5(_h, _x0, _x1, _x2, _x3, _x4)	{		\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X4), (_x4));	\
+	SMC_RET4(_h, (_x0), (_x1), (_x2), (_x3));		\
+}
+#define SMC_RET6(_h, _x0, _x1, _x2, _x3, _x4, _x5)	{	\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X5), (_x5));	\
+	SMC_RET5(_h, (_x0), (_x1), (_x2), (_x3), (_x4));	\
+}
+#define SMC_RET7(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6)	{	\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X6), (_x6));	\
+	SMC_RET6(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5));	\
+}
+#define SMC_RET8(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6, _x7) {	\
+	write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X7), (_x7));	\
+	SMC_RET7(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5), (_x6));	\
+}
+
+/*
+ * Convenience macros to access general purpose registers using handle provided
+ * to SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_GP(_h, _g)					\
+	read_ctx_reg((get_gpregs_ctx(_h)), (_g))
+#define SMC_SET_GP(_h, _g, _v)					\
+	write_ctx_reg((get_gpregs_ctx(_h)), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using handle provided to
+ * SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_EL3(_h, _e)					\
+	read_ctx_reg((get_el3state_ctx(_h)), (_e))
+#define SMC_SET_EL3(_h, _e, _v)					\
+	write_ctx_reg((get_el3state_ctx(_h)), (_e), (_v))
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid)					\
+	SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0],	\
+			 ((const uint32_t *) &(_uuid))[1],	\
+			 ((const uint32_t *) &(_uuid))[2],	\
+			 ((const uint32_t *) &(_uuid))[3])
+
+/*
+ * Helper macro to retrieve the SMC parameters from cpu_context_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4)	\
+	do {							\
+		const gp_regs_t *regs = get_gpregs_ctx(_hdl);	\
+		_x1 = read_ctx_reg(regs, CTX_GPREG_X1);		\
+		_x2 = read_ctx_reg(regs, CTX_GPREG_X2);		\
+		_x3 = read_ctx_reg(regs, CTX_GPREG_X3);		\
+		_x4 = read_ctx_reg(regs, CTX_GPREG_X4);		\
+	} while (0)
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
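
Editor's note: a minimal sketch of how a runtime-service handler would use these helpers. The handler signature follows the usual TF runtime-service convention, the function ID and service logic are invented, and SMC_OK/SMC_UNK are assumed to come from smcc.h:

    /* Illustrative handler: echo x1 back for one function ID, else report unknown. */
    static uint64_t example_svc_handler(uint32_t smc_fid, uint64_t x1,
    				    uint64_t x2, uint64_t x3, uint64_t x4,
    				    void *cookie, void *handle, uint64_t flags)
    {
    	uint64_t a1, a2, a3, a4;

    	/* Pull the arguments back out of the saved GP-register context. */
    	get_smc_params_from_ctx(handle, a1, a2, a3, a4);

    	if (smc_fid == 0x82000000U)	/* hypothetical function ID */
    		SMC_RET2(handle, SMC_OK, a1);

    	SMC_RET1(handle, SMC_UNK);
    }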
diff --git a/include/lib/aarch64/xlat_tables.h b/include/lib/aarch64/xlat_tables.h
deleted file mode 100644
index 0b5dbdf..0000000
--- a/include/lib/aarch64/xlat_tables.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __XLAT_TABLES_H__
-#define __XLAT_TABLES_H__
-
-
-/*
- * Flags to override default values used to program system registers while
- * enabling the MMU.
- */
-#define DISABLE_DCACHE		(1 << 0)
-
-#ifndef __ASSEMBLY__
-#include <stdint.h>
-
-/* Helper macro to define entries for mmap_region_t. It creates
- * identity mappings for each region.
- */
-#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
-
-/* Helper macro to define entries for mmap_region_t. It allows to
- * re-map address mappings from 'pa' to 'va' for each region.
- */
-#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
-
-/*
- * Flags for building up memory mapping attributes.
- * These are organised so that a clear bit gives a more restrictive  mapping
- * that a set bit, that way a bitwise-and two sets of attributes will never give
- * an attribute which has greater access rights that any of the original
- * attributes.
- */
-typedef enum  {
-	MT_DEVICE	= 0 << 0,
-	MT_MEMORY	= 1 << 0,
-
-	MT_RO		= 0 << 1,
-	MT_RW		= 1 << 1,
-
-	MT_SECURE	= 0 << 2,
-	MT_NS		= 1 << 2
-} mmap_attr_t;
-
-/*
- * Structure for specifying a single region of memory.
- */
-typedef struct mmap_region {
-	unsigned long	base_pa;
-	unsigned long	base_va;
-	unsigned long	size;
-	mmap_attr_t	attr;
-} mmap_region_t;
-
-void mmap_add_region(unsigned long base_pa, unsigned long base_va,
-				unsigned long size, unsigned attr);
-void mmap_add(const mmap_region_t *mm);
-
-void init_xlat_tables(void);
-
-void enable_mmu_el1(uint32_t flags);
-void enable_mmu_el3(uint32_t flags);
-
-#endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_H__ */
diff --git a/include/lib/bakery_lock.h b/include/lib/bakery_lock.h
index 9736f85..d4645c3 100644
--- a/include/lib/bakery_lock.h
+++ b/include/lib/bakery_lock.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __BAKERY_LOCK_H__
@@ -38,22 +14,49 @@
 #ifndef __ASSEMBLY__
 #include <stdint.h>
 
+/*****************************************************************************
+ * Internal helper macros used by the bakery lock implementation.
+ ****************************************************************************/
+/* Convert a ticket to priority */
+#define PRIORITY(t, pos)	(((t) << 8) | (pos))
+
+#define CHOOSING_TICKET		0x1
+#define CHOSEN_TICKET		0x0
+
+#define bakery_is_choosing(info)	(info & 0x1)
+#define bakery_ticket_number(info)	((info >> 1) & 0x7FFF)
+#define make_bakery_data(choosing, number) \
+		(((choosing & 0x1) | (number << 1)) & 0xFFFF)
+
+/*****************************************************************************
+ * External bakery lock interface.
+ ****************************************************************************/
 #if USE_COHERENT_MEM
+/*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
 
 typedef struct bakery_lock {
-	int owner;
-	volatile char entering[BAKERY_LOCK_MAX_CPUS];
-	volatile unsigned number[BAKERY_LOCK_MAX_CPUS];
+	/*
+	 * The lock_data is a bit-field of 2 members:
+	 * Bit[0]       : choosing. This field is set when the CPU is
+	 *                choosing its bakery number.
+	 * Bits[1 - 15] : number. This is the bakery number allocated.
+	 */
+	volatile uint16_t lock_data[BAKERY_LOCK_MAX_CPUS];
 } bakery_lock_t;
 
-#define NO_OWNER (-1)
-
-void bakery_lock_init(bakery_lock_t *bakery);
-void bakery_lock_get(bakery_lock_t *bakery);
-void bakery_lock_release(bakery_lock_t *bakery);
-int bakery_lock_try(bakery_lock_t *bakery);
-
 #else
+/*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks;
+ * the remaining cache lines are allocated by the linker script.
+ */
 
 typedef struct bakery_info {
 	/*
@@ -65,9 +68,18 @@
 	volatile uint16_t lock_data;
 } bakery_info_t;
 
-void bakery_lock_get(unsigned int id, unsigned int offset);
-void bakery_lock_release(unsigned int id, unsigned int offset);
+typedef bakery_info_t bakery_lock_t;
 
 #endif /* __USE_COHERENT_MEM__ */
+
+static inline void bakery_lock_init(bakery_lock_t *bakery) {}
+void bakery_lock_get(bakery_lock_t *bakery);
+void bakery_lock_release(bakery_lock_t *bakery);
+
+#define DEFINE_BAKERY_LOCK(_name) bakery_lock_t _name __section("bakery_lock")
+
+#define DECLARE_BAKERY_LOCK(_name) extern bakery_lock_t _name
+
+
 #endif /* __ASSEMBLY__ */
 #endif /* __BAKERY_LOCK_H__ */
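
Editor's note: with this change both the coherent-memory and normal-memory variants share the same bakery_lock_t pointer interface. A short usage sketch (the lock name and critical section are illustrative only):

    /* Allocate one lock; with !USE_COHERENT_MEM it is placed in the bakery_lock section. */
    DEFINE_BAKERY_LOCK(example_console_lock);

    void example_locked_print(void)
    {
    	bakery_lock_get(&example_console_lock);
    	/* ... critical section, e.g. driving a shared UART ... */
    	bakery_lock_release(&example_console_lock);
    }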
diff --git a/include/lib/cassert.h b/include/lib/cassert.h
index 0e5529d..40cb483 100644
--- a/include/lib/cassert.h
+++ b/include/lib/cassert.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CASSERT_H__
@@ -34,9 +10,12 @@
 /*******************************************************************************
  * Macro to flag a compile time assertion. It uses the preprocessor to generate
  * an invalid C construct if 'cond' evaluates to false.
- * The following  compilation error is triggered if the assertion fails:
+ * The following compilation error is triggered if the assertion fails:
  * "error: size of array 'msg' is negative"
+ * The 'unused' attribute ensures that the unused typedef does not emit a
+ * compiler warning.
  ******************************************************************************/
-#define CASSERT(cond, msg)	typedef char msg[(cond) ? 1 : -1]
+#define CASSERT(cond, msg)	\
+	typedef char msg[(cond) ? 1 : -1] __unused
 
 #endif /* __CASSERT_H__ */
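
Editor's note: CASSERT is typically used at file scope to pin down layout assumptions; a small made-up example of the pattern:

    typedef struct example_entry {
    	uint64_t pa;
    	uint64_t va;
    } example_entry_t;

    /* Fails to compile ("size of array ... is negative") if the layout changes. */
    CASSERT(sizeof(example_entry_t) == 16, assert_example_entry_size);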
diff --git a/include/lib/cpus/aarch32/aem_generic.h b/include/lib/cpus/aarch32/aem_generic.h
new file mode 100644
index 0000000..f5476df
--- /dev/null
+++ b/include/lib/cpus/aarch32/aem_generic.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AEM_GENERIC_H__
+#define __AEM_GENERIC_H__
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR 0x410FD0F0
+
+#endif /* __AEM_GENERIC_H__ */
diff --git a/include/lib/cpus/aarch32/cortex_a32.h b/include/lib/cpus/aarch32/cortex_a32.h
new file mode 100644
index 0000000..4d6826a
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a32.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A32_H__
+#define __CORTEX_A32_H__
+
+/* Cortex-A32 Main ID register for revision 0 */
+#define CORTEX_A32_MIDR				0x410FD010
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ * CPUECTLR_EL1 is an implementation-specific register.
+ ******************************************************************************/
+#define CORTEX_A32_CPUECTLR_EL1			p15, 1, c15
+#define CORTEX_A32_CPUECTLR_SMPEN_BIT		(1 << 6)
+
+#endif /* __CORTEX_A32_H__ */
diff --git a/include/lib/cpus/aarch32/cortex_a53.h b/include/lib/cpus/aarch32/cortex_a53.h
new file mode 100644
index 0000000..8e86df4
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a53.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A53_H__
+#define __CORTEX_A53_H__
+
+/* Cortex-A53 midr for revision 0 */
+#define CORTEX_A53_MIDR 0x410FD030
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2		0x1
+#define RETENTION_ENTRY_TICKS_8		0x2
+#define RETENTION_ENTRY_TICKS_32	0x3
+#define RETENTION_ENTRY_TICKS_64	0x4
+#define RETENTION_ENTRY_TICKS_128	0x5
+#define RETENTION_ENTRY_TICKS_256	0x6
+#define RETENTION_ENTRY_TICKS_512	0x7
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_ECTLR			p15, 1, c15
+
+#define CORTEX_A53_ECTLR_SMP_BIT		(1 << 6)
+
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT	0
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK	(0x7 << CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT)
+
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT	3
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_MASK	(0x7 << CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_MERRSR			p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_CPUACTLR			p15, 0, c15
+
+#define CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT	44
+#define CORTEX_A53_CPUACTLR_ENDCCASCI		(1 << CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT)
+#define CORTEX_A53_CPUACTLR_DTAH		(1 << 24)
+
+/*******************************************************************************
+ * L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ACTLR			p15, 1, c15, c0, 0
+
+#define CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN	(1 << 14)
+#define CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH	(1 << 3)
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ECTLR			p15, 1, c9, c0, 3
+
+#define CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT	0
+#define CORTEX_A53_L2ECTLR_RET_CTRL_MASK	(0x7 << CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2MERRSR			p15, 3, c15
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions so
+ * as not to break platforms that continue using them.
+ */
+#define CORTEX_A53_ACTLR			CORTEX_A53_CPUACTLR
+
+#define CORTEX_A53_ACTLR_ENDCCASCI_SHIFT	CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT
+#define CORTEX_A53_ACTLR_ENDCCASCI		CORTEX_A53_CPUACTLR_ENDCCASCI
+#define CORTEX_A53_ACTLR_DTAH			CORTEX_A53_CPUACTLR_DTAH
+#endif /* !ERROR_DEPRECATED */
+
+#endif /* __CORTEX_A53_H__ */
diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h
new file mode 100644
index 0000000..3fac9c7
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a57.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A57_H__
+#define __CORTEX_A57_H__
+#include <utils_def.h>
+
+/* Cortex-A57 midr for revision 0 */
+#define CORTEX_A57_MIDR 0x410FD070
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2		0x1
+#define RETENTION_ENTRY_TICKS_8		0x2
+#define RETENTION_ENTRY_TICKS_32	0x3
+#define RETENTION_ENTRY_TICKS_64	0x4
+#define RETENTION_ENTRY_TICKS_128	0x5
+#define RETENTION_ENTRY_TICKS_256	0x6
+#define RETENTION_ENTRY_TICKS_512	0x7
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_ECTLR			p15, 1, c15
+
+#define CORTEX_A57_ECTLR_SMP_BIT		(ULL(1) << 6)
+#define CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT	(ULL(1) << 38)
+#define CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK	(ULL(0x3) << 35)
+#define CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK	(ULL(0x3) << 32)
+
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT	0
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK	(ULL(0x7) << CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_CPUMERRSR			p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_CPUACTLR				p15, 0, c15
+
+#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB		(ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE		(ULL(1) << 54)
+#define CORTEX_A57_CPUACTLR_DIS_OVERREAD		(ULL(1) << 52)
+#define CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA		(ULL(1) << 49)
+#define CORTEX_A57_CPUACTLR_DCC_AS_DCCI			(ULL(1) << 44)
+#define CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH		(ULL(1) << 38)
+#define CORTEX_A57_CPUACTLR_DIS_INSTR_PREFETCH		(ULL(1) << 32)
+#define CORTEX_A57_CPUACTLR_DIS_STREAMING		(ULL(3) << 27)
+#define CORTEX_A57_CPUACTLR_DIS_L1_STREAMING		(ULL(3) << 25)
+#define CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR	(ULL(1) << 4)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2CTLR				p15, 1, c9, c0, 2
+
+#define CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT	0
+#define CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT		6
+
+#define CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES		0x2
+#define CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES		0x2
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2ECTLR			p15, 1, c9, c0, 3
+
+#define CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT	0
+#define CORTEX_A57_L2ECTLR_RET_CTRL_MASK	(ULL(0x7) << CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2MERRSR			p15, 3, c15
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions so
+ * as not to break platforms that continue using them.
+ */
+#define CORTEX_A57_ACTLR			CORTEX_A57_CPUACTLR
+
+#define CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB	CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
+#define CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE	CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE
+#define CORTEX_A57_ACTLR_DIS_OVERREAD		CORTEX_A57_CPUACTLR_DIS_OVERREAD
+#define CORTEX_A57_ACTLR_NO_ALLOC_WBWA		CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+#define CORTEX_A57_ACTLR_DCC_AS_DCCI		CORTEX_A57_CPUACTLR_DCC_AS_DCCI
+#define CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH	CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH
+#define CORTEX_A57_ACTLR_DIS_STREAMING		CORTEX_A57_CPUACTLR_DIS_STREAMING
+#define CORTEX_A57_ACTLR_DIS_L1_STREAMING	CORTEX_A57_CPUACTLR_DIS_L1_STREAMING
+#define CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR	CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR
+#endif /* !ERROR_DEPRECATED */
+
+#endif /* __CORTEX_A57_H__ */
diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h
new file mode 100644
index 0000000..f7da1f0
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a72.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A72_H__
+#define __CORTEX_A72_H__
+#include <utils_def.h>
+
+/* Cortex-A72 midr for revision 0 */
+#define CORTEX_A72_MIDR 0x410FD080
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_ECTLR				p15, 1, c15
+
+#define CORTEX_A72_ECTLR_SMP_BIT			(ULL(1) << 6)
+#define CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT		(ULL(1) << 38)
+#define CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK		(ULL(0x3) << 35)
+#define CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK		(ULL(0x3) << 32)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_MERRSR				p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_CPUACTLR				p15, 0, c15
+
+#define CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH	(ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA		(ULL(1) << 49)
+#define CORTEX_A72_CPUACTLR_DCC_AS_DCCI			(ULL(1) << 44)
+#define CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH		(ULL(1) << 32)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2CTLR				p15, 1, c9, c0, 2
+
+#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT	0
+#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT		6
+
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES		0x2
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES		0x1
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES		0x2
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2MERRSR				p15, 3, c15
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions so
+ * as not to break platforms that continue using them.
+ */
+#define CORTEX_A72_ACTLR				CORTEX_A72_CPUACTLR
+
+#define CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH	CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
+#define CORTEX_A72_ACTLR_NO_ALLOC_WBWA			CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA
+#define CORTEX_A72_ACTLR_DCC_AS_DCCI			CORTEX_A72_CPUACTLR_DCC_AS_DCCI
+#endif /* !ERROR_DEPRECATED */
+
+#endif /* __CORTEX_A72_H__ */
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
new file mode 100644
index 0000000..e2e4316
--- /dev/null
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __CPU_MACROS_S__
+#define __CPU_MACROS_S__
+
+#include <arch.h>
+#include <errata_report.h>
+
+#define CPU_IMPL_PN_MASK	(MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+				(MIDR_PN_MASK << MIDR_PN_SHIFT)
+
+/* The number of CPU operations allowed */
+#define CPU_MAX_PWR_DWN_OPS		2
+
+/* Special constant to specify that CPU has no reset function */
+#define CPU_NO_RESET_FUNC		0
+
+/* Word size for 32-bit CPUs */
+#define CPU_WORD_SIZE			4
+
+/*
+ * Whether errata status needs reporting. Errata status is printed in debug
+ * builds for both BL1 and BL32 images.
+ */
+#if (defined(IMAGE_BL1) || defined(IMAGE_BL32)) && DEBUG
+# define REPORT_ERRATA	1
+#else
+# define REPORT_ERRATA	0
+#endif
+
+	/*
+	 * Define the offsets to the fields in cpu_ops structure.
+	 */
+	.struct 0
+CPU_MIDR: /* cpu_ops midr */
+	.space  4
+/* Reset fn is needed during reset */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32)
+CPU_RESET_FUNC: /* cpu_ops reset_func */
+	.space  4
+#endif
+#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
+CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
+	.space  (4 * CPU_MAX_PWR_DWN_OPS)
+#endif
+
+/*
+ * Fields required to print errata status. Only in BL32 does the printing
+ * require mutual exclusion and a printed flag.
+ */
+#if REPORT_ERRATA
+CPU_ERRATA_FUNC: /* CPU errata status printing function */
+	.space  4
+#ifdef IMAGE_BL32
+CPU_ERRATA_LOCK:
+	.space	4
+CPU_ERRATA_PRINTED:
+	.space	4
+#endif
+#endif
+
+CPU_OPS_SIZE = .
+
+	/*
+	 * Write given expressions as words
+	 *
+	 * _count:
+	 *	Write at least _count words. If the given number of expressions
+	 *	is less than _count, repeat the last expression to fill _count
+	 *	words in total
+	 * _rest:
+	 *	Optional list of expressions. _this is for parameter extraction
+	 *	only, and has no significance to the caller
+	 *
+	 * Invoked as:
+	 *	fill_constants 2, foo, bar, blah, ...
+	 */
+	.macro fill_constants _count:req, _this, _rest:vararg
+	  .ifgt \_count
+	    /* Write the current expression */
+	    .ifb \_this
+	      .error "Nothing to fill"
+	    .endif
+	    .word \_this
+
+	    /* Invoke recursively for remaining expressions */
+	    .ifnb \_rest
+	      fill_constants \_count-1, \_rest
+	    .else
+	      fill_constants \_count-1, \_this
+	    .endif
+	  .endif
+	.endm
+
+	/*
+	 * Declare CPU operations
+	 *
+	 * _name:
+	 *	Name of the CPU for which operations are being specified
+	 * _midr:
+	 *	Numeric value expected to read from CPU's MIDR
+	 * _resetfunc:
+	 *	Reset function for the CPU. If there's no CPU reset function,
+	 *	specify CPU_NO_RESET_FUNC
+	 * _power_down_ops:
+	 *	Comma-separated list of functions to perform power-down
+	 *	operations on the CPU. At least one, and up to
+	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
+	 *	Starting at power level 0, these functions shall handle power
+	 *	down at subsequent power levels. If there aren't exactly
+	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
+	 *	used to handle power down at subsequent levels
+	 */
+	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+		_power_down_ops:vararg
+	.section cpu_ops, "a"
+	.align 2
+	.type cpu_ops_\_name, %object
+	.word \_midr
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32)
+	.word \_resetfunc
+#endif
+#ifdef IMAGE_BL32
+1:
+	/* Insert list of functions */
+	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+2:
+	/*
+	 * Error if zero functions, or more than CPU_MAX_PWR_DWN_OPS functions,
+	 * were specified in the list
+	 */
+	.ifeq 2b - 1b
+	  .error "At least one power down function must be specified"
+	.else
+	  .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
+	    .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
+	  .endif
+	.endif
+#endif
+
+#if REPORT_ERRATA
+	.ifndef \_name\()_cpu_str
+	  /*
+	   * Place the errata-reported flag, and the spinlock that arbitrates
+	   * access to it, in the data section.
+	   */
+	  .pushsection .data
+	  define_asm_spinlock \_name\()_errata_lock
+	  \_name\()_errata_reported:
+	  .word	0
+	  .popsection
+
+	  /* Place CPU string in rodata */
+	  .pushsection .rodata
+	  \_name\()_cpu_str:
+	  .asciz "\_name"
+	  .popsection
+	.endif
+
+	/*
+	 * Weakly-bound, optional errata status printing function for CPUs of
+	 * this class.
+	 */
+	.weak \_name\()_errata_report
+	.word \_name\()_errata_report
+
+#ifdef IMAGE_BL32
+	/* Pointers to errata lock and reported flag */
+	.word \_name\()_errata_lock
+	.word \_name\()_errata_reported
+#endif
+#endif
+	.endm
+
+#if REPORT_ERRATA
+	/*
+	 * Print status of a CPU errata
+	 *
+	 * _chosen:
+	 *	Identifier indicating whether or not a CPU errata has been
+	 *	compiled in.
+	 * _cpu:
+	 *	Name of the CPU
+	 * _id:
+	 *	Errata identifier
+	 * _rev_var:
+	 *	Register containing the combined value of the CPU revision and variant
+	 *	- typically the return value of cpu_get_rev_var
+	 */
+	.macro report_errata _chosen, _cpu, _id, _rev_var=r4
+	/* Stash a string with errata ID */
+	.pushsection .rodata
+	\_cpu\()_errata_\_id\()_str:
+	.asciz	"\_id"
+	.popsection
+
+	/* Check whether errata applies */
+	mov	r0, \_rev_var
+	bl	check_errata_\_id
+
+	.ifeq \_chosen
+	/*
+	 * Errata workaround has not been compiled in. If the errata would have
+	 * applied had it been compiled in, print its status as missing.
+	 */
+	cmp	r0, #0
+	movne	r0, #ERRATA_MISSING
+	.endif
+	ldr	r1, =\_cpu\()_cpu_str
+	ldr	r2, =\_cpu\()_errata_\_id\()_str
+	bl	errata_print_msg
+	.endm
+#endif
+
+#endif /* __CPU_MACROS_S__ */
diff --git a/include/lib/cpus/aarch64/aem_generic.h b/include/lib/cpus/aarch64/aem_generic.h
index 2f701d1..ddb235f 100644
--- a/include/lib/cpus/aarch64/aem_generic.h
+++ b/include/lib/cpus/aarch64/aem_generic.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __AEM_GENERIC_H__
diff --git a/include/lib/cpus/aarch64/cortex_a35.h b/include/lib/cpus/aarch64/cortex_a35.h
new file mode 100644
index 0000000..ad0fedc
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a35.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A35_H__
+#define __CORTEX_A35_H__
+
+/* Cortex-A35 Main ID register for revision 0 */
+#define CORTEX_A35_MIDR				0x410FD040
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ * CPUECTLR_EL1 is an implementation-specific register.
+ ******************************************************************************/
+#define CORTEX_A35_CPUECTLR_EL1			S3_1_C15_C2_1
+#define CORTEX_A35_CPUECTLR_SMPEN_BIT		(1 << 6)
+
+#endif /* __CORTEX_A35_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a53.h b/include/lib/cpus/aarch64/cortex_a53.h
index 14821ab..22c6800 100644
--- a/include/lib/cpus/aarch64/cortex_a53.h
+++ b/include/lib/cpus/aarch64/cortex_a53.h
@@ -1,44 +1,91 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CORTEX_A53_H__
 #define __CORTEX_A53_H__
 
 /* Cortex-A53 midr for revision 0 */
-#define CORTEX_A53_MIDR 0x410FD030
+#define CORTEX_A53_MIDR			U(0x410FD030)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2		U(0x1)
+#define RETENTION_ENTRY_TICKS_8		U(0x2)
+#define RETENTION_ENTRY_TICKS_32	U(0x3)
+#define RETENTION_ENTRY_TICKS_64	U(0x4)
+#define RETENTION_ENTRY_TICKS_128	U(0x5)
+#define RETENTION_ENTRY_TICKS_256	U(0x6)
+#define RETENTION_ENTRY_TICKS_512	U(0x7)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define CPUECTLR_EL1			S3_1_C15_C2_1	/* Instruction def. */
+#define CORTEX_A53_ECTLR_EL1				S3_1_C15_C2_1
 
-#define CPUECTLR_SMP_BIT		(1 << 6)
+#define CORTEX_A53_ECTLR_SMP_BIT			(U(1) << 6)
+
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT		U(0)
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK		(U(0x7) << CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT)
+
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT		U(3)
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_MASK		(U(0x7) << CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_MERRSR_EL1				S3_1_C15_C2_2
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_CPUACTLR_EL1				S3_1_C15_C2_0
+
+#define CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT		U(44)
+#define CORTEX_A53_CPUACTLR_EL1_ENDCCASCI		(U(1) << CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT		U(27)
+#define CORTEX_A53_CPUACTLR_EL1_RADIS			(U(3) << CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT		U(25)
+#define CORTEX_A53_CPUACTLR_EL1_L1RADIS			(U(3) << CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT		U(24)
+#define CORTEX_A53_CPUACTLR_EL1_DTAH			(U(1) << CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT)
+
+/*******************************************************************************
+ * L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ACTLR_EL1				S3_1_C15_C0_0
+
+#define CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN		(U(1) << 14)
+#define CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH		(U(1) << 3)
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ECTLR_EL1				S3_1_C11_C0_3
+
+#define CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT		U(0)
+#define CORTEX_A53_L2ECTLR_RET_CTRL_MASK		(U(0x7) << CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2MERRSR_EL1				S3_1_C15_C2_3
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions
+ * so as not to break platforms that continue using them.
+ */
+#define CORTEX_A53_ACTLR_EL1			CORTEX_A53_CPUACTLR_EL1
+
+#define CORTEX_A53_ACTLR_ENDCCASCI_SHIFT	CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT
+#define CORTEX_A53_ACTLR_ENDCCASCI		CORTEX_A53_CPUACTLR_EL1_ENDCCASCI
+#define CORTEX_A53_ACTLR_RADIS_SHIFT		CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT
+#define CORTEX_A53_ACTLR_RADIS			CORTEX_A53_CPUACTLR_EL1_RADIS
+#define CORTEX_A53_ACTLR_L1RADIS_SHIFT		CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT
+#define CORTEX_A53_ACTLR_L1RADIS		CORTEX_A53_CPUACTLR_EL1_L1RADIS
+#define CORTEX_A53_ACTLR_DTAH_SHIFT		CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT
+#define CORTEX_A53_ACTLR_DTAH			CORTEX_A53_CPUACTLR_EL1_DTAH
+#endif /* !ERROR_DEPRECATED */
 
 #endif /* __CORTEX_A53_H__ */
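
Editor's note: these encodings are normally consumed by the per-CPU reset handlers in assembly. As a hedged C-level sketch, the same SMP bit could be driven through the rename-accessor macros from the arch helpers; the accessor name below is an assumption introduced for illustration, not something defined by this patch:

    /* Generates read_/write_cortex_a53_ectlr_el1() for the S3_1_C15_C2_1 encoding. */
    DEFINE_RENAME_SYSREG_RW_FUNCS(cortex_a53_ectlr_el1, CORTEX_A53_ECTLR_EL1)

    static void example_a53_enable_smp(void)
    {
    	write_cortex_a53_ectlr_el1(read_cortex_a53_ectlr_el1() |
    				   CORTEX_A53_ECTLR_SMP_BIT);
    	isb();
    }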
diff --git a/include/lib/cpus/aarch64/cortex_a55.h b/include/lib/cpus/aarch64/cortex_a55.h
new file mode 100644
index 0000000..293f2b2
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a55.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A55_H__
+#define __CORTEX_A55_H__
+
+/* Cortex-A55 MIDR for revision 0 */
+#define CORTEX_A55_MIDR		0x410fd050
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CPUPWRCTLR_EL1		S3_0_C15_C2_7
+#define CORTEX_A55_CPUECTLR_EL1		S3_0_C15_C1_4
+
+/* Definitions of register field mask in CORTEX_A55_CPUPWRCTLR_EL1 */
+#define CORTEX_A55_CORE_PWRDN_EN_MASK	0x1
+
+#endif /* __CORTEX_A55_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
index 6128b16..6c45c06 100644
--- a/include/lib/cpus/aarch64/cortex_a57.h
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -1,66 +1,101 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __CORTEX_A57_H__
 #define __CORTEX_A57_H__
+#include <utils_def.h>
 
 /* Cortex-A57 midr for revision 0 */
-#define CORTEX_A57_MIDR 0x410FD070
+#define CORTEX_A57_MIDR			U(0x410FD070)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2		U(0x1)
+#define RETENTION_ENTRY_TICKS_8		U(0x2)
+#define RETENTION_ENTRY_TICKS_32	U(0x3)
+#define RETENTION_ENTRY_TICKS_64	U(0x4)
+#define RETENTION_ENTRY_TICKS_128	U(0x5)
+#define RETENTION_ENTRY_TICKS_256	U(0x6)
+#define RETENTION_ENTRY_TICKS_512	U(0x7)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
-#define CPUECTLR_EL1			S3_1_C15_C2_1	/* Instruction def. */
+#define CORTEX_A57_ECTLR_EL1			S3_1_C15_C2_1
 
-#define CPUECTLR_SMP_BIT		(1 << 6)
-#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT	(1 << 38)
-#define CPUECTLR_L2_IPFTCH_DIST_MASK	(0x3 << 35)
-#define CPUECTLR_L2_DPFTCH_DIST_MASK	(0x3 << 32)
+#define CORTEX_A57_ECTLR_SMP_BIT		(U(1) << 6)
+#define CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT	(U(1) << 38)
+#define CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK	(U(0x3) << 35)
+#define CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK	(U(0x3) << 32)
+
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT	U(0)
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK	(U(0x7) << CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_MERRSR_EL1			S3_1_C15_C2_2
 
 /*******************************************************************************
  * CPU Auxiliary Control register specific definitions.
  ******************************************************************************/
-#define CPUACTLR_EL1			S3_1_C15_C2_0	/* Instruction def. */
+#define CORTEX_A57_CPUACTLR_EL1				S3_1_C15_C2_0
 
-#define CPUACTLR_NO_ALLOC_WBWA         (1 << 49)
-#define CPUACTLR_DCC_AS_DCCI           (1 << 44)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB	(ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE	(ULL(1) << 54)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD		(ULL(1) << 52)
+#define CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA		(ULL(1) << 49)
+#define CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI		(ULL(1) << 44)
+#define CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH	(ULL(1) << 38)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_INSTR_PREFETCH	(ULL(1) << 32)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING		(ULL(3) << 27)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING	(ULL(3) << 25)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR	(ULL(1) << 4)
 
 /*******************************************************************************
  * L2 Control register specific definitions.
  ******************************************************************************/
-#define L2CTLR_EL1			S3_1_C11_C0_2	/* Instruction def. */
+#define CORTEX_A57_L2CTLR_EL1				S3_1_C11_C0_2
 
-#define L2CTLR_DATA_RAM_LATENCY_SHIFT	0
-#define L2CTLR_TAG_RAM_LATENCY_SHIFT	6
+#define CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT	U(0)
+#define CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT		U(6)
 
-#define L2_DATA_RAM_LATENCY_3_CYCLES	0x2
-#define L2_TAG_RAM_LATENCY_3_CYCLES	0x2
+#define CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES		U(0x2)
+#define CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES		U(0x2)
+
+#define CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT		(U(1) << 21)
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2ECTLR_EL1			S3_1_C11_C0_3
+
+#define CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT	U(0)
+#define CORTEX_A57_L2ECTLR_RET_CTRL_MASK	(U(0x7) << CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2MERRSR_EL1			S3_1_C15_C2_3
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions so
+ * as not to break platforms that continue using them.
+ */
+#define CORTEX_A57_ACTLR_EL1			CORTEX_A57_CPUACTLR_EL1
+
+#define CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB	CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB
+#define CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE	CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE
+#define CORTEX_A57_ACTLR_DIS_OVERREAD		CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD
+#define CORTEX_A57_ACTLR_NO_ALLOC_WBWA		CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+#define CORTEX_A57_ACTLR_DCC_AS_DCCI		CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
+#define CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH	CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH
+#define CORTEX_A57_ACTLR_DIS_STREAMING		CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING
+#define CORTEX_A57_ACTLR_DIS_L1_STREAMING	CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING
+#define CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR	CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR
+#endif /* !ERROR_DEPRECATED */
 
 #endif /* __CORTEX_A57_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h
new file mode 100644
index 0000000..6fbb707
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a72.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A72_H__
+#define __CORTEX_A72_H__
+#include <utils_def.h>
+
+/* Cortex-A72 midr for revision 0 */
+#define CORTEX_A72_MIDR 				0x410FD080
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_ECTLR_EL1				S3_1_C15_C2_1
+
+#define CORTEX_A72_ECTLR_SMP_BIT			(ULL(1) << 6)
+#define CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT		(ULL(1) << 38)
+#define CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK		(ULL(0x3) << 35)
+#define CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK		(ULL(0x3) << 32)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_MERRSR_EL1				S3_1_C15_C2_2
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_CPUACTLR_EL1					S3_1_C15_C2_0
+
+#define CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH	(ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA			(ULL(1) << 49)
+#define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI			(ULL(1) << 44)
+#define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH		(ULL(1) << 32)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2CTLR_EL1				S3_1_C11_C0_2
+
+#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT	0
+#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT		6
+
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES		0x2
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES		0x1
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES		0x2
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2MERRSR_EL1				S3_1_C15_C2_3
+
+#if !ERROR_DEPRECATED
+/*
+ * These registers were previously wrongly named. Provide previous definitions so
+ * as not to break platforms that continue using them.
+ */
+#define CORTEX_A72_ACTLR				CORTEX_A72_CPUACTLR_EL1
+
+#define CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH	CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+#define CORTEX_A72_ACTLR_NO_ALLOC_WBWA			CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA
+#define CORTEX_A72_ACTLR_DCC_AS_DCCI			CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI
+#endif /* !ERROR_DEPRECATED */
+
+#endif /* __CORTEX_A72_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h
new file mode 100644
index 0000000..faff5fe
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a73.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A73_H__
+#define __CORTEX_A73_H__
+
+/* Cortex-A73 midr for revision 0 */
+#define CORTEX_A73_MIDR	0x410FD090
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_CPUECTLR_EL1		S3_1_C15_C2_1	/* Instruction def. */
+
+#define CORTEX_A73_CPUECTLR_SMP_BIT	(1 << 6)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_L2MERRSR_EL1		S3_1_C15_C2_3   /* Instruction def. */
+
+#endif /* __CORTEX_A73_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
new file mode 100644
index 0000000..1ffe20b
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A75_H__
+#define __CORTEX_A75_H__
+
+/* Cortex-A75 MIDR */
+#define CORTEX_A75_MIDR		0x410fd0a0
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A75_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define CORTEX_A75_CPUECTLR_EL1		S3_0_C15_C1_4
+
+/* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */
+#define CORTEX_A75_CORE_PWRDN_EN_MASK	0x1
+
+#endif /* __CORTEX_A75_H__ */
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 089f09c..a8c23e5 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -1,38 +1,36 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
+#ifndef __CPU_MACROS_S__
+#define __CPU_MACROS_S__
 
 #include <arch.h>
+#include <errata_report.h>
 
 #define CPU_IMPL_PN_MASK	(MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
 				(MIDR_PN_MASK << MIDR_PN_SHIFT)
 
+/* The maximum number of power-down operations a CPU can register */
+#define CPU_MAX_PWR_DWN_OPS		2
+
+/* Special constant to specify that CPU has no reset function */
+#define CPU_NO_RESET_FUNC		0
+
+/* Word size for 64-bit CPUs */
+#define CPU_WORD_SIZE			8
+
+/*
+ * Whether errata status needs reporting. Errata status is printed in debug
+ * builds for both BL1 and BL31 images.
+ */
+#if (defined(IMAGE_BL1) || defined(IMAGE_BL31)) && DEBUG
+# define REPORT_ERRATA	1
+#else
+# define REPORT_ERRATA	0
+#endif
+
 	/*
 	 * Define the offsets to the fields in cpu_ops structure.
 	 */
@@ -40,43 +38,190 @@
 CPU_MIDR: /* cpu_ops midr */
 	.space  8
 /* Reset fn is needed in BL at reset vector */
-#if IMAGE_BL1 || IMAGE_BL31
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
 CPU_RESET_FUNC: /* cpu_ops reset_func */
 	.space  8
 #endif
-#if IMAGE_BL31 /* The power down core and cluster is needed only in BL3-1 */
-CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
-	.space  8
-CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
-	.space  8
+#ifdef IMAGE_BL31 /* The power-down functions are needed only in BL31 */
+CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
+	.space  (8 * CPU_MAX_PWR_DWN_OPS)
 #endif
-#if (IMAGE_BL31 && CRASH_REPORTING)
+
+/*
+ * Fields required to print errata status. Only in BL31 does printing
+ * require mutual exclusion and a printed flag.
+ */
+#if REPORT_ERRATA
+CPU_ERRATA_FUNC:
+	.space	8
+#ifdef IMAGE_BL31
+CPU_ERRATA_LOCK:
+	.space	8
+CPU_ERRATA_PRINTED:
+	.space	8
+#endif
+#endif
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
 CPU_REG_DUMP: /* cpu specific register dump for crash reporting */
 	.space  8
 #endif
 CPU_OPS_SIZE = .
 
 	/*
-	 * Convenience macro to declare cpu_ops structure.
-	 * Make sure the structure fields are as per the offsets
-	 * defined above.
+	 * Write given expressions as quad words
+	 *
+	 * _count:
+	 *	Write at least _count quad words. If the given number of
+	 *	expressions is less than _count, repeat the last expression to
+	 *	fill _count quad words in total
+	 * _rest:
+	 *	Optional list of expressions. _this is for parameter extraction
+	 *	only, and has no significance to the caller
+	 *
+	 * Invoked as:
+	 *	fill_constants 2, foo, bar, blah, ...
 	 */
-	.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
-	.section cpu_ops, "a"; .align 3
+	.macro fill_constants _count:req, _this, _rest:vararg
+	  .ifgt \_count
+	    /* Write the current expression */
+	    .ifb \_this
+	      .error "Nothing to fill"
+	    .endif
+	    .quad \_this
+
+	    /* Invoke recursively for remaining expressions */
+	    .ifnb \_rest
+	      fill_constants \_count-1, \_rest
+	    .else
+	      fill_constants \_count-1, \_this
+	    .endif
+	  .endif
+	.endm
+
+	/*
+	 * Declare CPU operations
+	 *
+	 * _name:
+	 *	Name of the CPU for which operations are being specified
+	 * _midr:
+	 *	Numeric value expected to read from CPU's MIDR
+	 * _resetfunc:
+	 *	Reset function for the CPU. If there's no CPU reset function,
+	 *	specify CPU_NO_RESET_FUNC
+	 * _power_down_ops:
+	 *	Comma-separated list of functions to perform power-down
+	 *	operations on the CPU. At least one, and at most
+	 *	CPU_MAX_PWR_DWN_OPS, functions may be specified.
+	 *	Starting at power level 0, these functions shall handle power
+	 *	down at subsequent power levels. If there aren't exactly
+	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
+	 *	used to handle power down at subsequent levels
+	 */
+	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+		_power_down_ops:vararg
+	.section cpu_ops, "a"
+	.align 3
 	.type cpu_ops_\_name, %object
 	.quad \_midr
-#if IMAGE_BL1 || IMAGE_BL31
-	.if \_noresetfunc
-	.quad 0
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
+	.quad \_resetfunc
+#endif
+#ifdef IMAGE_BL31
+1:
+	/* Insert list of functions */
+	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+2:
+	/*
+	 * Error if no functions, or more than CPU_MAX_PWR_DWN_OPS functions,
+	 * were specified in the list
+	 */
+	.ifeq 2b - 1b
+	  .error "At least one power down function must be specified"
 	.else
-	.quad \_name\()_reset_func
+	  .iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
+	    .error "More than CPU_MAX_PWR_DWN_OPS functions specified"
+	  .endif
 	.endif
 #endif
-#if IMAGE_BL31
-	.quad \_name\()_core_pwr_dwn
-	.quad \_name\()_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+	.ifndef \_name\()_cpu_str
+	  /*
+	   * Place the errata reported flag, and the spinlock that arbitrates
+	   * access to it, in the data section.
+	   */
+	  .pushsection .data
+	  define_asm_spinlock \_name\()_errata_lock
+	  \_name\()_errata_reported:
+	  .word	0
+	  .popsection
+
+	  /* Place CPU string in rodata */
+	  .pushsection .rodata
+	  \_name\()_cpu_str:
+	  .asciz "\_name"
+	  .popsection
+	.endif
+
+	/*
+	 * Weakly-bound, optional errata status printing function for CPUs of
+	 * this class.
+	 */
+	.weak \_name\()_errata_report
+	.quad \_name\()_errata_report
+
+#ifdef IMAGE_BL31
+	/* Pointers to errata lock and reported flag */
+	.quad \_name\()_errata_lock
+	.quad \_name\()_errata_reported
 #endif
-#if (IMAGE_BL31 && CRASH_REPORTING)
+#endif
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
 	.quad \_name\()_cpu_reg_dump
 #endif
 	.endm
+
+#if REPORT_ERRATA
+	/*
+	 * Print status of a CPU errata
+	 *
+	 * _chosen:
+	 *	Identifier indicating whether or not a CPU errata has been
+	 *	compiled in.
+	 * _cpu:
+	 *	Name of the CPU
+	 * _id:
+	 *	Errata identifier
+	 * _rev_var:
+	 *	Register containing the combined value of CPU revision and
+	 *	variant - typically the return value of cpu_get_rev_var
+	 */
+	.macro report_errata _chosen, _cpu, _id, _rev_var=x8
+	/* Stash a string with errata ID */
+	.pushsection .rodata
+	\_cpu\()_errata_\_id\()_str:
+	.asciz	"\_id"
+	.popsection
+
+	/* Check whether errata applies */
+	mov	x0, \_rev_var
+	bl	check_errata_\_id
+
+	.ifeq \_chosen
+	/*
+	 * Errata workaround has not been compiled in. If the errata would have
+	 * applied had it been compiled in, print its status as missing.
+	 */
+	cbz	x0, 900f
+	mov	x0, #ERRATA_MISSING
+	.endif
+900:
+	adr	x1, \_cpu\()_cpu_str
+	adr	x2, \_cpu\()_errata_\_id\()_str
+	bl	errata_print_msg
+	.endm
+#endif
+
+#endif /* __CPU_MACROS_S__ */
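The offsets above (CPU_MIDR, CPU_RESET_FUNC, CPU_PWR_DWN_OPS, the errata fields and CPU_REG_DUMP) describe the per-CPU entry that declare_cpu_ops emits into the cpu_ops section. As an informal aid only, one such entry in a debug BL31 build with CRASH_REPORTING enabled corresponds roughly to the following C structure; this struct does not exist in the sources and the assembler offsets above remain authoritative.

struct cpu_ops_entry {				/* illustrative mirror only */
	unsigned long midr;			/* CPU_MIDR */
	void (*reset_func)(void);		/* CPU_RESET_FUNC */
	void (*pwr_dwn_ops[2])(void);		/* CPU_PWR_DWN_OPS, CPU_MAX_PWR_DWN_OPS == 2 */
	void (*errata_report)(void);		/* CPU_ERRATA_FUNC (weakly bound, may be 0) */
	void *errata_lock;			/* CPU_ERRATA_LOCK */
	unsigned int *errata_reported;		/* CPU_ERRATA_PRINTED */
	void (*cpu_reg_dump)(void);		/* CPU_REG_DUMP */
};

Each member occupies eight bytes on AArch64, matching the .space 8 directives above.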
diff --git a/include/lib/cpus/aarch64/denver.h b/include/lib/cpus/aarch64/denver.h
new file mode 100644
index 0000000..d8c4d2e
--- /dev/null
+++ b/include/lib/cpus/aarch64/denver.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DENVER_H__
+#define __DENVER_H__
+
+/* MIDR values for Denver */
+#define DENVER_MIDR_PN0			U(0x4E0F0000)
+#define DENVER_MIDR_PN1			U(0x4E0F0010)
+#define DENVER_MIDR_PN2			U(0x4E0F0020)
+#define DENVER_MIDR_PN3			U(0x4E0F0030)
+#define DENVER_MIDR_PN4			U(0x4E0F0040)
+
+/* Implementer code in the MIDR register */
+#define DENVER_IMPL			U(0x4E)
+
+/* CPU state ids - implementation defined */
+#define DENVER_CPU_STATE_POWER_DOWN	U(0x3)
+
+#ifndef __ASSEMBLY__
+
+/* Disable Dynamic Code Optimisation */
+void denver_disable_dco(void);
+
+#endif
+
+#endif /* __DENVER_H__ */
diff --git a/include/lib/cpus/errata_report.h b/include/lib/cpus/errata_report.h
new file mode 100644
index 0000000..14f2407
--- /dev/null
+++ b/include/lib/cpus/errata_report.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ERRATA_H__
+#define __ERRATA_H__
+
+#ifndef __ASSEMBLY__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <spinlock.h>
+#include <utils_def.h>
+
+#if DEBUG
+void print_errata_status(void);
+#else
+static inline void print_errata_status(void) {}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/* Errata status */
+#define ERRATA_NOT_APPLIES	0
+#define ERRATA_APPLIES		1
+#define ERRATA_MISSING		2
+
+#endif /* __ERRATA_H__ */
+
diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h
new file mode 100644
index 0000000..6447360
--- /dev/null
+++ b/include/lib/el3_runtime/aarch32/context.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_REGS_OFFSET		0x0
+#define CTX_GPREG_R0		0x0
+#define CTX_GPREG_R1		0x4
+#define CTX_GPREG_R2		0x8
+#define CTX_GPREG_R3		0xC
+#define CTX_LR			0x10
+#define CTX_SCR			0x14
+#define CTX_SPSR		0x18
+#define CTX_NS_SCTLR		0x1C
+#define CTX_REGS_END		0x20
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define WORD_SHIFT		2
+#define DEFINE_REG_STRUCT(name, num_regs)	\
+	typedef struct name {			\
+		uint32_t _regs[num_regs];	\
+	}  __aligned(8) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_REG_ALL		(CTX_REGS_END >> WORD_SHIFT)
+
+DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
+
+#undef CTX_REG_ALL
+
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> WORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> WORD_SHIFT]) \
+					 = val)
+typedef struct cpu_context {
+	regs_t regs_ctx;
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_regs_ctx(h)		(&((cpu_context_t *) h)->regs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
+	assert_core_context_regs_offset_mismatch);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
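For illustration, and under the assumption that the caller already owns a cpu_context_t for the outgoing world, the accessor macros above can be used from C as follows; the function name is hypothetical.

/* Stash a return value and the saved SPSR into an AArch32 context */
static void set_return_state(cpu_context_t *ctx, uint32_t r0, uint32_t spsr)
{
	regs_t *regs = get_regs_ctx(ctx);

	write_ctx_reg(regs, CTX_GPREG_R0, r0);
	write_ctx_reg(regs, CTX_SPSR, spsr);
}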
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
new file mode 100644
index 0000000..a89468d
--- /dev/null
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET	U(0x0)
+#define CTX_GPREG_X0		U(0x0)
+#define CTX_GPREG_X1		U(0x8)
+#define CTX_GPREG_X2		U(0x10)
+#define CTX_GPREG_X3		U(0x18)
+#define CTX_GPREG_X4		U(0x20)
+#define CTX_GPREG_X5		U(0x28)
+#define CTX_GPREG_X6		U(0x30)
+#define CTX_GPREG_X7		U(0x38)
+#define CTX_GPREG_X8		U(0x40)
+#define CTX_GPREG_X9		U(0x48)
+#define CTX_GPREG_X10		U(0x50)
+#define CTX_GPREG_X11		U(0x58)
+#define CTX_GPREG_X12		U(0x60)
+#define CTX_GPREG_X13		U(0x68)
+#define CTX_GPREG_X14		U(0x70)
+#define CTX_GPREG_X15		U(0x78)
+#define CTX_GPREG_X16		U(0x80)
+#define CTX_GPREG_X17		U(0x88)
+#define CTX_GPREG_X18		U(0x90)
+#define CTX_GPREG_X19		U(0x98)
+#define CTX_GPREG_X20		U(0xa0)
+#define CTX_GPREG_X21		U(0xa8)
+#define CTX_GPREG_X22		U(0xb0)
+#define CTX_GPREG_X23		U(0xb8)
+#define CTX_GPREG_X24		U(0xc0)
+#define CTX_GPREG_X25		U(0xc8)
+#define CTX_GPREG_X26		U(0xd0)
+#define CTX_GPREG_X27		U(0xd8)
+#define CTX_GPREG_X28		U(0xe0)
+#define CTX_GPREG_X29		U(0xe8)
+#define CTX_GPREG_LR		U(0xf0)
+#define CTX_GPREG_SP_EL0	U(0xf8)
+#define CTX_GPREGS_END		U(0x100)
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are only
+ * 32-bits wide but are stored as 64-bit values for convenience
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3		U(0x0)
+#define CTX_RUNTIME_SP		U(0x8)
+#define CTX_SPSR_EL3		U(0x10)
+#define CTX_ELR_EL3		U(0x18)
+#define CTX_EL3STATE_END	U(0x20)
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32-bits wide but are stored as 64-bit values for
+ * convenience
+ ******************************************************************************/
+#define CTX_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1		U(0x0)
+#define CTX_ELR_EL1		U(0x8)
+#define CTX_SCTLR_EL1		U(0x10)
+#define CTX_ACTLR_EL1		U(0x18)
+#define CTX_CPACR_EL1		U(0x20)
+#define CTX_CSSELR_EL1		U(0x28)
+#define CTX_SP_EL1		U(0x30)
+#define CTX_ESR_EL1		U(0x38)
+#define CTX_TTBR0_EL1		U(0x40)
+#define CTX_TTBR1_EL1		U(0x48)
+#define CTX_MAIR_EL1		U(0x50)
+#define CTX_AMAIR_EL1		U(0x58)
+#define CTX_TCR_EL1		U(0x60)
+#define CTX_TPIDR_EL1		U(0x68)
+#define CTX_TPIDR_EL0		U(0x70)
+#define CTX_TPIDRRO_EL0		U(0x78)
+#define CTX_PAR_EL1		U(0x80)
+#define CTX_FAR_EL1		U(0x88)
+#define CTX_AFSR0_EL1		U(0x90)
+#define CTX_AFSR1_EL1		U(0x98)
+#define CTX_CONTEXTIDR_EL1	U(0xa0)
+#define CTX_VBAR_EL1		U(0xa8)
+#define CTX_PMCR_EL0		U(0xb0)
+
+/*
+ * If the platform is AArch64-only, there is no need to save and restore these
+ * AArch32 registers.
+ */
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_SPSR_ABT		U(0xc0)  /* Align to the next 16 byte boundary */
+#define CTX_SPSR_UND		U(0xc8)
+#define CTX_SPSR_IRQ		U(0xd0)
+#define CTX_SPSR_FIQ		U(0xd8)
+#define CTX_DACR32_EL2		U(0xe0)
+#define CTX_IFSR32_EL2		U(0xe8)
+#define CTX_FP_FPEXC32_EL2	U(0xf0)
+#define CTX_TIMER_SYSREGS_OFF	U(0x100) /* Align to the next 16 byte boundary */
+#else
+#define CTX_TIMER_SYSREGS_OFF	U(0xc0)  /* Align to the next 16 byte boundary */
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0	(CTX_TIMER_SYSREGS_OFF + U(0x0))
+#define CTX_CNTP_CVAL_EL0	(CTX_TIMER_SYSREGS_OFF + U(0x8))
+#define CTX_CNTV_CTL_EL0	(CTX_TIMER_SYSREGS_OFF + U(0x10))
+#define CTX_CNTV_CVAL_EL0	(CTX_TIMER_SYSREGS_OFF + U(0x18))
+#define CTX_CNTKCTL_EL1		(CTX_TIMER_SYSREGS_OFF + U(0x20))
+#define CTX_SYSREGS_END		(CTX_TIMER_SYSREGS_OFF + U(0x30)) /* Align to the next 16 byte boundary */
+#else
+#define CTX_SYSREGS_END		CTX_TIMER_SYSREGS_OFF
+#endif /* NS_TIMER_SWITCH */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREGS_OFFSET	(CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#define CTX_FP_Q0		U(0x0)
+#define CTX_FP_Q1		U(0x10)
+#define CTX_FP_Q2		U(0x20)
+#define CTX_FP_Q3		U(0x30)
+#define CTX_FP_Q4		U(0x40)
+#define CTX_FP_Q5		U(0x50)
+#define CTX_FP_Q6		U(0x60)
+#define CTX_FP_Q7		U(0x70)
+#define CTX_FP_Q8		U(0x80)
+#define CTX_FP_Q9		U(0x90)
+#define CTX_FP_Q10		U(0xa0)
+#define CTX_FP_Q11		U(0xb0)
+#define CTX_FP_Q12		U(0xc0)
+#define CTX_FP_Q13		U(0xd0)
+#define CTX_FP_Q14		U(0xe0)
+#define CTX_FP_Q15		U(0xf0)
+#define CTX_FP_Q16		U(0x100)
+#define CTX_FP_Q17		U(0x110)
+#define CTX_FP_Q18		U(0x120)
+#define CTX_FP_Q19		U(0x130)
+#define CTX_FP_Q20		U(0x140)
+#define CTX_FP_Q21		U(0x150)
+#define CTX_FP_Q22		U(0x160)
+#define CTX_FP_Q23		U(0x170)
+#define CTX_FP_Q24		U(0x180)
+#define CTX_FP_Q25		U(0x190)
+#define CTX_FP_Q26		U(0x1a0)
+#define CTX_FP_Q27		U(0x1b0)
+#define CTX_FP_Q28		U(0x1c0)
+#define CTX_FP_Q29		U(0x1d0)
+#define CTX_FP_Q30		U(0x1e0)
+#define CTX_FP_Q31		U(0x1f0)
+#define CTX_FP_FPSR		U(0x200)
+#define CTX_FP_FPCR		U(0x208)
+#define CTX_FPREGS_END		U(0x210)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>	/* for CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT		U(3)
+#define DEFINE_REG_STRUCT(name, num_regs)	\
+	typedef struct name {			\
+		uint64_t _regs[num_regs];	\
+	}  __aligned(16) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18 and
+ * lr are saved, as the compiler is expected to preserve the remaining
+ * callee-saved registers if the C runtime uses them and the assembler does
+ * not touch them. But in case of a world switch during exception handling,
+ * the callee-saved registers need to be saved as well.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during switches from one security state to
+ * another in EL1.
+ */
+DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> DWORD_SHIFT]) \
+					 = val)
+
+/*
+ * Top-level context structure which is used by EL3 firmware to
+ * preserve the state of a core at EL1 in one of the two security
+ * states and save enough EL3 meta data to be able to return to that
+ * EL and security state. The context management library will be used
+ * to ensure that SP_EL3 always points to an instance of this
+ * structure at exception entry and exit. Each instance will
+ * correspond to either the secure or the non-secure state.
+ */
+typedef struct cpu_context {
+	gp_regs_t gpregs_ctx;
+	el3_state_t el3state_ctx;
+	el1_sys_regs_t sysregs_ctx;
+#if CTX_INCLUDE_FPREGS
+	fp_regs_t fpregs_ctx;
+#endif
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+#define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_sysregs_ctx(h)	(&((cpu_context_t *) h)->sysregs_ctx)
+#define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
+	assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
+	assert_core_context_sys_offset_mismatch);
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
+	assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
+	assert_core_context_el3state_offset_mismatch);
+
+/*
+ * Helper macro to set the general purpose registers that correspond to
+ * parameters in an aapcs_64 call i.e. x0-x7
+ */
+#define set_aapcs_args0(ctx, x0)				do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);	\
+	} while (0)
+#define set_aapcs_args1(ctx, x0, x1)				do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);	\
+		set_aapcs_args0(ctx, x0);				\
+	} while (0)
+#define set_aapcs_args2(ctx, x0, x1, x2)			do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);	\
+		set_aapcs_args1(ctx, x0, x1);				\
+	} while (0)
+#define set_aapcs_args3(ctx, x0, x1, x2, x3)			do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);	\
+		set_aapcs_args2(ctx, x0, x1, x2);			\
+	} while (0)
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)		do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);	\
+		set_aapcs_args3(ctx, x0, x1, x2, x3);			\
+	} while (0)
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)		do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);	\
+		set_aapcs_args4(ctx, x0, x1, x2, x3, x4);		\
+	} while (0)
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);	\
+		set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);		\
+	} while (0)
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);	\
+		set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);	\
+	} while (0)
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sys_regs_t *regs);
+void el1_sysregs_context_save_post_ops(void);
+void el1_sysregs_context_restore(el1_sys_regs_t *regs);
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+
+#undef CTX_SYSREG_ALL
+#if CTX_INCLUDE_FPREGS
+#undef CTX_FPREG_ALL
+#endif
+#undef CTX_GPREG_ALL
+#undef CTX_EL3STATE_ALL
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
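As a minimal sketch of how these accessors combine, a caller that wants the next exception return into a saved context to land at a given entrypoint with two AAPCS64 arguments could fill the context as below; the function and parameter names are illustrative.

static void setup_next_entry(cpu_context_t *ctx, uintptr_t entrypoint,
			     uint32_t spsr, uint64_t arg0, uint64_t arg1)
{
	/* x0/x1 for the entrypoint, via the set_aapcs_args* helpers above */
	set_aapcs_args1(ctx, arg0, arg1);

	/* Exception return address and PSTATE come from the EL3 state */
	write_ctx_reg(get_el3state_ctx(ctx), CTX_ELR_EL3, entrypoint);
	write_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3, spsr);
}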
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
new file mode 100644
index 0000000..eb7a953
--- /dev/null
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CM_H__
+#define __CM_H__
+
+#ifndef AARCH32
+#include <arch.h>
+#include <assert.h>
+#include <stdint.h>
+#endif
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_index(unsigned int cpu_idx,
+			      unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+			     void *context,
+			     unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+
+#ifndef AARCH32
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+			uintptr_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+			  uint32_t bit_pos,
+			  uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+uint32_t cm_get_scr_el3(uint32_t security_state);
+
+
+void cm_init_context(uint64_t mpidr,
+		     const struct entry_point_info *ep) __deprecated;
+
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+			      uint32_t security_state) __deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+			     void *context,
+			     uint32_t security_state) __deprecated;
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. It initializes SP_EL3 to point to a 'cpu_context' set up for
+ * the required security state.
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if ENABLE_ASSERTIONS
+	uint64_t sp_mode;
+
+	/*
+	 * Check that this function is called with SP_EL0 as the stack
+	 * pointer
+	 */
+	__asm__ volatile("mrs	%0, SPSel\n"
+			 : "=r" (sp_mode));
+
+	assert(sp_mode == MODE_SP_EL0);
+#endif /* ENABLE_ASSERTIONS */
+
+	__asm__ volatile("msr	spsel, #1\n"
+			 "mov	sp, %0\n"
+			 "msr	spsel, #0\n"
+			 : : "r" (context));
+}
+
+#else
+void *cm_get_next_context(void);
+void cm_set_next_context(void *context);
+#endif /* AARCH32 */
+
+#endif /* __CM_H__ */
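A typical use of these prototypes, sketched here under the assumption of an EL3 runtime service switching from the secure to the non-secure world on the current CPU (SECURE and NON_SECURE being the usual TF security-state identifiers, defined elsewhere), looks roughly like this:

static void switch_to_normal_world(void)
{
	/* Preserve the outgoing world's EL1 system registers */
	cm_el1_sysregs_context_save(SECURE);

	/* Reinstate the incoming world's EL1 system registers */
	cm_el1_sysregs_context_restore(NON_SECURE);

	/* Point SP_EL3 at the non-secure cpu_context for the coming ERET */
	cm_set_next_eret_context(NON_SECURE);
}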
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
new file mode 100644
index 0000000..bd787ce
--- /dev/null
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */
+
+#ifdef AARCH32
+
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+#define CPU_DATA_CPU_OPS_PTR		0x0
+#define CPU_DATA_CRASH_BUF_OFFSET	0x4
+
+#else /* AARCH32 */
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_BUF_OFFSET	0x18
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE		64
+#define CPU_DATA_CPU_OPS_PTR		0x10
+
+#endif /* AARCH32 */
+
+#if CRASH_REPORTING
+#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
+						CPU_DATA_CRASH_BUF_SIZE)
+#else
+#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
+#endif
+
+/* cpu_data size is the data size rounded up to the platform cache line size */
+#define CPU_DATA_SIZE			(((CPU_DATA_CRASH_BUF_END + \
+					CACHE_WRITEBACK_GRANULE - 1) / \
+						CACHE_WRITEBACK_GRANULE) * \
+							CACHE_WRITEBACK_GRANULE)
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/* Temporary space to store PMF timestamps from assembly code */
+#define CPU_DATA_PMF_TS_COUNT		1
+#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_CRASH_BUF_END
+#define CPU_DATA_PMF_TS0_IDX		0
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <stdint.h>
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
+		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
+
+#if PLAT_PCPU_DATA_SIZE
+#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
+		(cpu_data_t, platform_cpu_data)
+#endif
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   Pointers to non-secure and secure security state contexts
+ *   Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
+ ******************************************************************************/
+typedef struct cpu_data {
+#ifndef AARCH32
+	void *cpu_context[2];
+#endif
+	uintptr_t cpu_ops_ptr;
+#if CRASH_REPORTING
+	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
+#endif
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
+#endif
+	struct psci_cpu_data psci_svc_cpu_data;
+#if PLAT_PCPU_DATA_SIZE
+	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
+#endif
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+#if CRASH_REPORTING
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
+	(cpu_data_t, crash_buf),
+	assert_cpu_data_crash_stack_offset_mismatch);
+#endif
+
+CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
+		assert_cpu_data_size_mismatch);
+
+CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
+		(cpu_data_t, cpu_ops_ptr),
+		assert_cpu_data_cpu_ops_ptr_offset_mismatch);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
+		(cpu_data_t, cpu_data_pmf_ts[0]),
+		assert_cpu_data_pmf_ts0_offset_mismatch);
+#endif
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+
+#ifndef AARCH32
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+	return (cpu_data_t *)read_tpidr_el3();
+}
+#else
+struct cpu_data *_cpu_data(void);
+#endif
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+void init_cpu_ops(void);
+
+#define get_cpu_data(_m)		   _cpu_data()->_m
+#define set_cpu_data(_m, _v)		   _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+/* ((cpu_data_t *)0)->_m is a dummy used to get the size of the struct member _m */
+#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
+						&(_cpu_data()->_m), \
+						sizeof(((cpu_data_t *)0)->_m))
+#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  	  \
+						&(_cpu_data()->_m), \
+						sizeof(((cpu_data_t *)0)->_m))
+#define flush_cpu_data_by_index(_ix, _m)	\
+				   flush_dcache_range((uintptr_t)	  \
+					 &(_cpu_data_by_index(_ix)->_m),  \
+						sizeof(((cpu_data_t *)0)->_m))
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
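Note that the per-CPU accessor macros take a structure member name rather than a pointer. A hedged example, assuming a BL31/AArch64 build and treating index 1 of cpu_context[] as the conventional non-secure slot (the helper name is illustrative):

static void publish_ns_context(void *ns_ctx)
{
	set_cpu_data(cpu_context[1], ns_ctx);

	/* Make the pointer visible to observers that may look with caches off */
	flush_cpu_data(cpu_context[1]);
}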
diff --git a/include/lib/el3_runtime/pubsub.h b/include/lib/el3_runtime/pubsub.h
new file mode 100644
index 0000000..9a85480
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PUBSUB_H__
+#define __PUBSUB_H__
+
+#define __pubsub_start_sym(event)	__pubsub_##event##_start
+#define __pubsub_end_sym(event)		__pubsub_##event##_end
+
+#ifdef __LINKER__
+
+/* For the linker ... */
+
+#define __pubsub_section(event)		__pubsub_##event
+
+/*
+ * REGISTER_PUBSUB_EVENT has a different definition between linker and compiler
+ * contexts. In linker context, this collects pubsub sections for each event,
+ * placing guard symbols around each.
+ */
+#define REGISTER_PUBSUB_EVENT(event) \
+	__pubsub_start_sym(event) = .; \
+	KEEP(*(__pubsub_section(event))); \
+	__pubsub_end_sym(event) = .
+
+#else /* __LINKER__ */
+
+/* For the compiler ... */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cdefs.h>
+#include <stddef.h>
+
+#define __pubsub_section(event)		__section("__pubsub_" #event)
+
+/*
+ * In compiler context, REGISTER_PUBSUB_EVENT declares the per-event symbols
+ * exported by the linker that the other pubsub macros require.
+ */
+#define REGISTER_PUBSUB_EVENT(event) \
+	extern pubsub_cb_t __pubsub_start_sym(event)[]; \
+	extern pubsub_cb_t __pubsub_end_sym(event)[]
+
+/*
+ * Have the function func called back when the specified event happens. This
+ * macro places the function address into the pubsub section, which is picked up
+ * and invoked by the invoke_pubsubs() function via the PUBLISH_EVENT* macros.
+ */
+#define SUBSCRIBE_TO_EVENT(event, func) \
+	pubsub_cb_t __cb_func_##func##event __pubsub_section(event) = func
+
+/*
+ * Iterate over subscribed handlers for a defined event. 'event' is the name of
+ * the event, and 'subscriber' a local variable of type 'pubsub_cb_t *'.
+ */
+#define for_each_subscriber(event, subscriber) \
+	for (subscriber = __pubsub_start_sym(event); \
+			subscriber < __pubsub_end_sym(event); \
+			subscriber++)
+
+/*
+ * Publish a defined event supplying an argument. All subscribed handlers are
+ * invoked, but the return values of the handlers are ignored for now.
+ */
+#define PUBLISH_EVENT_ARG(event, arg) \
+	do { \
+		pubsub_cb_t *subscriber; \
+		for_each_subscriber(event, subscriber) { \
+			(*subscriber)(arg); \
+		} \
+	} while (0)
+
+/* Publish a defined event with NULL argument */
+#define PUBLISH_EVENT(event)	PUBLISH_EVENT_ARG(event, NULL)
+
+/* Subscriber callback type */
+typedef void* (*pubsub_cb_t)(const void *arg);
+
+#endif	/* __LINKER__ */
+#endif	/* __PUBSUB_H__ */
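Putting the macros together, a BL31 component might subscribe to the psci_cpu_on_finish event (registered in pubsub_events.h below) as sketched here; the handler name is illustrative and its return value is currently ignored by the publisher.

static void *count_cpu_on(const void *arg)
{
	(void)arg;		/* NULL when published via PUBLISH_EVENT() */
	/* ... per-CPU bookkeeping ... */
	return NULL;
}
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, count_cpu_on);

/* Publisher side, somewhere on the CPU power-on path:
 *	PUBLISH_EVENT(psci_cpu_on_finish);
 */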
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
new file mode 100644
index 0000000..9cfedb4
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <pubsub.h>
+
+/*
+ * This file defines the list of pubsub events, declared using the
+ * REGISTER_PUBSUB_EVENT() macro.
+ */
+
+/*
+ * Event published after a CPU has been powered up and finished its
+ * initialization.
+ */
+REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+
+#ifdef AARCH64
+/*
+ * These events are published by the AArch64 context management framework
+ * after the secure context is restored/saved via
+ * the cm_el1_sysregs_context_{restore,save}() API.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_secure_world);
+REGISTER_PUBSUB_EVENT(cm_exited_secure_world);
+
+/*
+ * These events are published by the AArch64 context management framework
+ * after the normal context is restored/saved via
+ * the cm_el1_sysregs_context_{restore,save}() API.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_normal_world);
+REGISTER_PUBSUB_EVENT(cm_exited_normal_world);
+#endif /* AARCH64 */
diff --git a/include/lib/libfdt/fdt.h b/include/lib/libfdt/fdt.h
new file mode 100644
index 0000000..c833dc1
--- /dev/null
+++ b/include/lib/libfdt/fdt.h
@@ -0,0 +1,119 @@
+#ifndef _FDT_H
+#define _FDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2016-2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <libfdt_env.h>
+
+
+struct fdt_header {
+	fdt32_t magic;			 /* magic word FDT_MAGIC */
+	fdt32_t totalsize;		 /* total size of DT block */
+	fdt32_t off_dt_struct;		 /* offset to structure */
+	fdt32_t off_dt_strings;		 /* offset to strings */
+	fdt32_t off_mem_rsvmap;		 /* offset to memory reserve map */
+	fdt32_t version;		 /* format version */
+	fdt32_t last_comp_version;	 /* last compatible version */
+
+	/* version 2 fields below */
+	fdt32_t boot_cpuid_phys;	 /* Which physical CPU id we're
+					    booting on */
+	/* version 3 fields below */
+	fdt32_t size_dt_strings;	 /* size of the strings block */
+
+	/* version 17 fields below */
+	fdt32_t size_dt_struct;		 /* size of the structure block */
+};
+
+struct fdt_reserve_entry {
+	fdt64_t address;
+	fdt64_t size;
+};
+
+struct fdt_node_header {
+	fdt32_t tag;
+	char name[];
+};
+
+struct fdt_property {
+	fdt32_t tag;
+	fdt32_t len;
+	fdt32_t nameoff;
+	char data[];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#define FDT_MAGIC	0xd00dfeed	/* 4: version, 4: total size */
+#define FDT_TAGSIZE	sizeof(fdt32_t)
+
+#define FDT_BEGIN_NODE	0x1		/* Start node: full name */
+#define FDT_END_NODE	0x2		/* End node */
+#define FDT_PROP	0x3		/* Property: name off,
+					   size, content */
+#define FDT_NOP		0x4		/* nop */
+#define FDT_END		0x9
+
+#define FDT_V1_SIZE	(7*sizeof(fdt32_t))
+#define FDT_V2_SIZE	(FDT_V1_SIZE + sizeof(fdt32_t))
+#define FDT_V3_SIZE	(FDT_V2_SIZE + sizeof(fdt32_t))
+#define FDT_V16_SIZE	FDT_V3_SIZE
+#define FDT_V17_SIZE	(FDT_V16_SIZE + sizeof(fdt32_t))
+
+#endif /* _FDT_H */
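For orientation only, a minimal header inspection using these constants might look as follows; fdt32_to_cpu() comes from libfdt_env.h, and production code should rely on libfdt's fdt_check_header() instead.

static int looks_like_fdt(const struct fdt_header *hdr)
{
	if (fdt32_to_cpu(hdr->magic) != FDT_MAGIC)
		return 0;

	/* Version 17 is the newest layout described above */
	return fdt32_to_cpu(hdr->last_comp_version) <= 17;
}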
diff --git a/include/lib/libfdt/libfdt.h b/include/lib/libfdt/libfdt.h
new file mode 100644
index 0000000..f662378
--- /dev/null
+++ b/include/lib/libfdt/libfdt.h
@@ -0,0 +1,1766 @@
+#ifndef _LIBFDT_H
+#define _LIBFDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2016-2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <libfdt_env.h>
+#include <fdt.h>
+
+#define FDT_FIRST_SUPPORTED_VERSION	0x10
+#define FDT_LAST_SUPPORTED_VERSION	0x11
+
+/* Error codes: informative error codes */
+#define FDT_ERR_NOTFOUND	1
+	/* FDT_ERR_NOTFOUND: The requested node or property does not exist */
+#define FDT_ERR_EXISTS		2
+	/* FDT_ERR_EXISTS: Attempted to create a node or property which
+	 * already exists */
+#define FDT_ERR_NOSPACE		3
+	/* FDT_ERR_NOSPACE: Operation needed to expand the device
+	 * tree, but its buffer did not have sufficient space to
+	 * contain the expanded tree. Use fdt_open_into() to move the
+	 * device tree to a buffer with more space. */
+
+/* Error codes: codes for bad parameters */
+#define FDT_ERR_BADOFFSET	4
+	/* FDT_ERR_BADOFFSET: Function was passed a structure block
+	 * offset which is out-of-bounds, or which points to an
+	 * unsuitable part of the structure for the operation. */
+#define FDT_ERR_BADPATH		5
+	/* FDT_ERR_BADPATH: Function was passed a badly formatted path
+	 * (e.g. missing a leading / for a function which requires an
+	 * absolute path) */
+#define FDT_ERR_BADPHANDLE	6
+	/* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle
+	 * value.  phandle values of 0 and -1 are not permitted. */
+#define FDT_ERR_BADSTATE	7
+	/* FDT_ERR_BADSTATE: Function was passed an incomplete device
+	 * tree created by the sequential-write functions, which is
+	 * not sufficiently complete for the requested operation. */
+
+/* Error codes: codes for bad device tree blobs */
+#define FDT_ERR_TRUNCATED	8
+	/* FDT_ERR_TRUNCATED: Structure block of the given device tree
+	 * ends without an FDT_END tag. */
+#define FDT_ERR_BADMAGIC	9
+	/* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
+	 * device tree at all - it is missing the flattened device
+	 * tree magic number. */
+#define FDT_ERR_BADVERSION	10
+	/* FDT_ERR_BADVERSION: Given device tree has a version which
+	 * can't be handled by the requested operation.  For
+	 * read-write functions, this may mean that fdt_open_into() is
+	 * required to convert the tree to the expected version. */
+#define FDT_ERR_BADSTRUCTURE	11
+	/* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt
+	 * structure block or other serious error (e.g. misnested
+	 * nodes, or subnodes preceding properties). */
+#define FDT_ERR_BADLAYOUT	12
+	/* FDT_ERR_BADLAYOUT: For read-write functions, the given
+	 * device tree has its sub-blocks in an order that the
+	 * function can't handle (memory reserve map, then structure,
+	 * then strings).  Use fdt_open_into() to reorganize the tree
+	 * into a form suitable for the read-write operations. */
+
+/* "Can't happen" error indicating a bug in libfdt */
+#define FDT_ERR_INTERNAL	13
+	/* FDT_ERR_INTERNAL: libfdt has failed an internal assertion.
+	 * Should never be returned, if it is, it indicates a bug in
+	 * libfdt itself. */
+
+/* Errors in device tree content */
+#define FDT_ERR_BADNCELLS	14
+	/* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells
+	 * or similar property with a bad format or value */
+
+#define FDT_ERR_BADVALUE	15
+	/* FDT_ERR_BADVALUE: Device tree has a property with an unexpected
+	 * value. For example: a property expected to contain a string list
+	 * is not NUL-terminated within the length of its value. */
+
+#define FDT_ERR_MAX		15
+
+/**********************************************************************/
+/* Low-level functions (you probably don't need these)                */
+/**********************************************************************/
+
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen);
+static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
+{
+	return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen);
+}
+
+uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
+
+/**********************************************************************/
+/* Traversal functions                                                */
+/**********************************************************************/
+
+int fdt_next_node(const void *fdt, int offset, int *depth);
+
+/**
+ * fdt_first_subnode() - get offset of first direct subnode
+ *
+ * @fdt:	FDT blob
+ * @offset:	Offset of node to check
+ * @return offset of first subnode, or -FDT_ERR_NOTFOUND if there is none
+ */
+int fdt_first_subnode(const void *fdt, int offset);
+
+/**
+ * fdt_next_subnode() - get offset of next direct subnode
+ *
+ * After first calling fdt_first_subnode(), call this function repeatedly to
+ * get direct subnodes of a parent node.
+ *
+ * @fdt:	FDT blob
+ * @offset:	Offset of previous subnode
+ * @return offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more
+ * subnodes
+ */
+int fdt_next_subnode(const void *fdt, int offset);
+
+/**
+ * fdt_for_each_subnode - iterate over all subnodes of a parent
+ *
+ * @node:	child node (int, lvalue)
+ * @fdt:	FDT blob (const void *)
+ * @parent:	parent node (int)
+ *
+ * This is actually a wrapper around a for loop and would be used like so:
+ *
+ *	fdt_for_each_subnode(node, fdt, parent) {
+ *		Use node
+ *		...
+ *	}
+ *
+ *	if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) {
+ *		Error handling
+ *	}
+ *
+ * Note that this is implemented as a macro and @node is used as the
+ * iterator in the loop. The parent variable can be constant or even a
+ * literal.
+ *
+ */
+#define fdt_for_each_subnode(node, fdt, parent)		\
+	for (node = fdt_first_subnode(fdt, parent);	\
+	     node >= 0;					\
+	     node = fdt_next_subnode(fdt, node))
+
+/**********************************************************************/
+/* General functions                                                  */
+/**********************************************************************/
+
+#define fdt_get_header(fdt, field) \
+	(fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
+#define fdt_magic(fdt)			(fdt_get_header(fdt, magic))
+#define fdt_totalsize(fdt)		(fdt_get_header(fdt, totalsize))
+#define fdt_off_dt_struct(fdt)		(fdt_get_header(fdt, off_dt_struct))
+#define fdt_off_dt_strings(fdt)		(fdt_get_header(fdt, off_dt_strings))
+#define fdt_off_mem_rsvmap(fdt)		(fdt_get_header(fdt, off_mem_rsvmap))
+#define fdt_version(fdt)		(fdt_get_header(fdt, version))
+#define fdt_last_comp_version(fdt)	(fdt_get_header(fdt, last_comp_version))
+#define fdt_boot_cpuid_phys(fdt)	(fdt_get_header(fdt, boot_cpuid_phys))
+#define fdt_size_dt_strings(fdt)	(fdt_get_header(fdt, size_dt_strings))
+#define fdt_size_dt_struct(fdt)		(fdt_get_header(fdt, size_dt_struct))
+
+#define __fdt_set_hdr(name) \
+	static inline void fdt_set_##name(void *fdt, uint32_t val) \
+	{ \
+		struct fdt_header *fdth = (struct fdt_header *)fdt; \
+		fdth->name = cpu_to_fdt32(val); \
+	}
+__fdt_set_hdr(magic)
+__fdt_set_hdr(totalsize)
+__fdt_set_hdr(off_dt_struct)
+__fdt_set_hdr(off_dt_strings)
+__fdt_set_hdr(off_mem_rsvmap)
+__fdt_set_hdr(version)
+__fdt_set_hdr(last_comp_version)
+__fdt_set_hdr(boot_cpuid_phys)
+__fdt_set_hdr(size_dt_strings)
+__fdt_set_hdr(size_dt_struct)
+#undef __fdt_set_hdr
+
+/**
+ * fdt_check_header - sanity check a device tree or possible device tree
+ * @fdt: pointer to data which might be a flattened device tree
+ *
+ * fdt_check_header() checks that the given buffer contains what
+ * appears to be a flattened device tree with sane information in its
+ * header.
+ *
+ * returns:
+ *     0, if the buffer appears to contain a valid device tree
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE, standard meanings, as above
+ */
+int fdt_check_header(const void *fdt);
+
+/**
+ * fdt_move - move a device tree around in memory
+ * @fdt: pointer to the device tree to move
+ * @buf: pointer to memory where the device is to be moved
+ * @bufsize: size of the memory space at buf
+ *
+ * fdt_move() relocates, if possible, the device tree blob located at
+ * fdt to the buffer at buf of size bufsize.  The buffer may overlap
+ * with the existing device tree blob at fdt.  Therefore,
+ *     fdt_move(fdt, fdt, fdt_totalsize(fdt))
+ * should always succeed.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_move(const void *fdt, void *buf, int bufsize);
+
+/**********************************************************************/
+/* Read-only functions                                                */
+/**********************************************************************/
+
+/**
+ * fdt_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ *
+ * fdt_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt.
+ *
+ * returns:
+ *     a pointer to the string, on success
+ *     NULL, if stroffset is out of bounds
+ */
+const char *fdt_string(const void *fdt, int stroffset);
+
+/**
+ * fdt_get_max_phandle - retrieves the highest phandle in a tree
+ * @fdt: pointer to the device tree blob
+ *
+ * fdt_get_max_phandle retrieves the highest phandle in the given
+ * device tree. This will ignore badly formatted phandles, or phandles
+ * with a value of 0 or -1.
+ *
+ * returns:
+ *      the highest phandle on success
+ *      0, if no phandle was found in the device tree
+ *      -1, if an error occurred
+ */
+uint32_t fdt_get_max_phandle(const void *fdt);
+
+/**
+ * fdt_num_mem_rsv - retrieve the number of memory reserve map entries
+ * @fdt: pointer to the device tree blob
+ *
+ * Returns the number of entries in the device tree blob's memory
+ * reservation map.  This does not include the terminating 0,0 entry
+ * or any other (0,0) entries reserved for expansion.
+ *
+ * returns:
+ *     the number of entries
+ */
+int fdt_num_mem_rsv(const void *fdt);
+
+/**
+ * fdt_get_mem_rsv - retrieve one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address, @size: pointers to 64-bit variables
+ *
+ * On success, *address and *size will contain the address and size of
+ * the n-th reserve map entry from the device tree blob, in
+ * native-endian format.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
+
+/**
+ * fdt_subnode_offset_namelen - find a subnode based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_subnode_offset(), but only examine the first
+ * namelen characters of name for matching the subnode name.  This is
+ * useful for finding subnodes based on a portion of a larger string,
+ * such as a full path.
+ */
+int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
+			       const char *name, int namelen);
+/**
+ * fdt_subnode_offset - find a subnode of a given node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_subnode_offset() finds a subnode of the node at structure block
+ * offset parentoffset with the given name.  name may include a unit
+ * address, in which case fdt_subnode_offset() will find the subnode
+ * with that unit address, or the unit address may be omitted, in
+ * which case fdt_subnode_offset() will find an arbitrary subnode
+ * whose name excluding unit address matches the given name.
+ *
+ * returns:
+ *	structure block offset of the requested subnode (>=0), on success
+ *	-FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ *	-FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
+ *		tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_path_offset_namelen - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ * @namelen: number of characters of path to consider
+ *
+ * Identical to fdt_path_offset(), but only consider the first namelen
+ * characters of path as the path name.
+ */
+int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen);
+
+/**
+ * fdt_path_offset - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ *
+ * fdt_path_offset() finds a node of a given path in the device tree.
+ * Each path component may omit the unit address portion, but the
+ * results of this are undefined if any such path component is
+ * ambiguous (that is if there are multiple nodes at the relevant
+ * level matching the given component, differentiated only by unit
+ * address).
+ *
+ * returns:
+ *	structure block offset of the node with the requested path (>=0), on
+ *		success
+ *	-FDT_ERR_BADPATH, given path does not begin with '/' or is invalid
+ *	-FDT_ERR_NOTFOUND, if the requested node does not exist
+ *      -FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_path_offset(const void *fdt, const char *path);
+
+/**
+ * fdt_get_name - retrieve the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the starting node
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_name() retrieves the name (including unit address) of the
+ * device tree node at structure block offset nodeoffset.  If lenp is
+ * non-NULL, the length of this name is also returned, in the integer
+ * pointed to by lenp.
+ *
+ * returns:
+ *	pointer to the node's name, on success
+ *		If lenp is non-NULL, *lenp contains the length of that name
+ *			(>=0)
+ *	NULL, on error
+ *		if lenp is non-NULL *lenp contains an error code (<0):
+ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ *			tag
+ *		-FDT_ERR_BADMAGIC,
+ *		-FDT_ERR_BADVERSION,
+ *		-FDT_ERR_BADSTATE, standard meanings
+ */
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
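Tying the read-only calls above together, a hedged lookup helper (name, path handling and error policy all illustrative) could be:

static const char *node_name_by_path(const void *fdt, const char *path)
{
	int node;

	if (fdt_check_header(fdt) != 0)
		return NULL;

	node = fdt_path_offset(fdt, path);
	if (node < 0)
		return NULL;	/* e.g. -FDT_ERR_NOTFOUND */

	return fdt_get_name(fdt, node, NULL);
}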
+
+/**
+ * fdt_first_property_offset - find the offset of a node's first property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ *
+ * fdt_first_property_offset() finds the first property of the node at
+ * the given structure block offset.
+ *
+ * returns:
+ *	structure block offset of the property (>=0), on success
+ *	-FDT_ERR_NOTFOUND, if the requested node has no properties
+ *	-FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag
+ *      -FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_first_property_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_next_property_offset - step through a node's properties
+ * @fdt: pointer to the device tree blob
+ * @offset: structure block offset of a property
+ *
+ * fdt_next_property_offset() finds the property immediately after the
+ * one at the given structure block offset.  This will be a property
+ * of the same node as the given property.
+ *
+ * returns:
+ *	structure block offset of the next property (>=0), on success
+ *	-FDT_ERR_NOTFOUND, if the given property is the last in its node
+ *	-FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag
+ *      -FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_next_property_offset(const void *fdt, int offset);
+
+/**
+ * fdt_for_each_property_offset - iterate over all properties of a node
+ *
+ * @property_offset:	property offset (int, lvalue)
+ * @fdt:		FDT blob (const void *)
+ * @node:		node offset (int)
+ *
+ * This is actually a wrapper around a for loop and would be used like so:
+ *
+ *	fdt_for_each_property_offset(property, fdt, node) {
+ *		Use property
+ *		...
+ *	}
+ *
+ *	if ((property < 0) && (property != -FDT_ERR_NOTFOUND)) {
+ *		Error handling
+ *	}
+ *
+ * Note that this is implemented as a macro and property is used as
+ * iterator in the loop. The node variable can be constant or even a
+ * literal.
+ */
+#define fdt_for_each_property_offset(property, fdt, node)	\
+	for (property = fdt_first_property_offset(fdt, node);	\
+	     property >= 0;					\
+	     property = fdt_next_property_offset(fdt, property))
+
+/**
+ * fdt_get_property_by_offset - retrieve the property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to retrieve
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property_by_offset() retrieves a pointer to the
+ * fdt_property structure within the device tree blob at the given
+ * offset.  If lenp is non-NULL, the length of the property value is
+ * also returned, in the integer pointed to by lenp.
+ *
+ * returns:
+ *	pointer to the structure representing the property
+ *		if lenp is non-NULL, *lenp contains the length of the property
+ *		value (>=0)
+ *	NULL, on error
+ *		if lenp is non-NULL, *lenp contains an error code (<0):
+ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ *		-FDT_ERR_BADMAGIC,
+ *		-FDT_ERR_BADVERSION,
+ *		-FDT_ERR_BADSTATE,
+ *		-FDT_ERR_BADSTRUCTURE,
+ *		-FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
+						      int offset,
+						      int *lenp);
+
+/**
+ * fdt_get_property_namelen - find a property based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_get_property(), but only examine the first namelen
+ * characters of name for matching the property name.
+ */
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+						    int nodeoffset,
+						    const char *name,
+						    int namelen, int *lenp);
+
+/**
+ * fdt_get_property - find a given property in a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property() retrieves a pointer to the fdt_property
+ * structure within the device tree blob corresponding to the property
+ * named 'name' of the node at offset nodeoffset.  If lenp is
+ * non-NULL, the length of the property value is also returned, in the
+ * integer pointed to by lenp.
+ *
+ * returns:
+ *	pointer to the structure representing the property
+ *		if lenp is non-NULL, *lenp contains the length of the property
+ *		value (>=0)
+ *	NULL, on error
+ *		if lenp is non-NULL, *lenp contains an error code (<0):
+ *		-FDT_ERR_NOTFOUND, node does not have named property
+ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ *			tag
+ *		-FDT_ERR_BADMAGIC,
+ *		-FDT_ERR_BADVERSION,
+ *		-FDT_ERR_BADSTATE,
+ *		-FDT_ERR_BADSTRUCTURE,
+ *		-FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset,
+					    const char *name, int *lenp);
+static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
+						      const char *name,
+						      int *lenp)
+{
+	return (struct fdt_property *)(uintptr_t)
+		fdt_get_property(fdt, nodeoffset, name, lenp);
+}
+
+/**
+ * fdt_getprop_by_offset - retrieve the value of a property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to read
+ * @namep: pointer to a string variable (will be overwritten) or NULL
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop_by_offset() retrieves a pointer to the value of the
+ * property at structure block offset 'offset' (this will be a pointer
+ * to within the device blob itself, not a copy of the value).  If
+ * lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp.  If namep is non-NULL,
+ * the property's name will also be returned in the char * pointed to
+ * by namep (this will be a pointer to within the device tree's string
+ * block, not a new copy of the name).
+ *
+ * returns:
+ *	pointer to the property's value
+ *		if lenp is non-NULL, *lenp contains the length of the property
+ *		value (>=0)
+ *		if namep is non-NULL, *namep contains a pointer to the property
+ *		name.
+ *	NULL, on error
+ *		if lenp is non-NULL, *lenp contains an error code (<0):
+ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ *		-FDT_ERR_BADMAGIC,
+ *		-FDT_ERR_BADVERSION,
+ *		-FDT_ERR_BADSTATE,
+ *		-FDT_ERR_BADSTRUCTURE,
+ *		-FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop_by_offset(const void *fdt, int offset,
+				  const char **namep, int *lenp);
+
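+/*
+ * Illustrative sketch: walking every property of a node by combining
+ * fdt_for_each_property_offset() with fdt_getprop_by_offset():
+ *
+ *	int prop;
+ *
+ *	fdt_for_each_property_offset(prop, fdt, node) {
+ *		int len;
+ *		const char *pname;
+ *		const void *pval;
+ *
+ *		pval = fdt_getprop_by_offset(fdt, prop, &pname, &len);
+ *		if (pval == NULL)
+ *			return len;
+ *		... use pname, pval and len ...
+ *	}
+ */
+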
+/**
+ * fdt_getprop_namelen - get property value based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_getprop(), but only examine the first namelen
+ * characters of name for matching the property name.
+ */
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+				const char *name, int namelen, int *lenp);
+static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset,
+					  const char *name, int namelen,
+					  int *lenp)
+{
+	return (void *)(uintptr_t)fdt_getprop_namelen(fdt, nodeoffset, name,
+						      namelen, lenp);
+}
+
+/**
+ * fdt_getprop - retrieve the value of a given property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop() retrieves a pointer to the value of the property
+ * named 'name' of the node at offset nodeoffset (this will be a
+ * pointer to within the device blob itself, not a copy of the value).
+ * If lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp.
+ *
+ * returns:
+ *	pointer to the property's value
+ *		if lenp is non-NULL, *lenp contains the length of the property
+ *		value (>=0)
+ *	NULL, on error
+ *		if lenp is non-NULL, *lenp contains an error code (<0):
+ *		-FDT_ERR_NOTFOUND, node does not have named property
+ *		-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ *			tag
+ *		-FDT_ERR_BADMAGIC,
+ *		-FDT_ERR_BADVERSION,
+ *		-FDT_ERR_BADSTATE,
+ *		-FDT_ERR_BADSTRUCTURE,
+ *		-FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+			const char *name, int *lenp);
+static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
+				  const char *name, int *lenp)
+{
+	return (void *)(uintptr_t)fdt_getprop(fdt, nodeoffset, name, lenp);
+}
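+
+/*
+ * Illustrative sketch: reading a 32-bit cell property. Values are stored
+ * big-endian in the blob, so fdt32_to_cpu() is needed before use; the
+ * property name is only an example:
+ *
+ *	int len;
+ *	uint32_t freq = 0;
+ *	const fdt32_t *val = fdt_getprop(fdt, node, "clock-frequency", &len);
+ *
+ *	if (val == NULL)
+ *		return len;
+ *	if (len == sizeof(fdt32_t))
+ *		freq = fdt32_to_cpu(*val);
+ */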
+
+/**
+ * fdt_get_phandle - retrieve the phandle of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the node
+ *
+ * fdt_get_phandle() retrieves the phandle of the device tree node at
+ * structure block offset nodeoffset.
+ *
+ * returns:
+ *	the phandle of the node at nodeoffset, on success (!= 0, != -1)
+ *	0, if the node has no phandle, or another error occurs
+ */
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_get_alias_namelen - get alias based on substring
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_get_alias(), but only examine the first namelen
+ * characters of name for matching the alias name.
+ */
+const char *fdt_get_alias_namelen(const void *fdt,
+				  const char *name, int namelen);
+
+/**
+ * fdt_get_alias - retrieve the path referenced by a given alias
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ *
+ * fdt_get_alias() retrieves the value of a given alias.  That is, the
+ * value of the property named 'name' in the node /aliases.
+ *
+ * returns:
+ *	a pointer to the expansion of the alias named 'name', if it exists
+ *	NULL, if the given alias or the /aliases node does not exist
+ */
+const char *fdt_get_alias(const void *fdt, const char *name);
+
+/**
+ * fdt_get_path - determine the full path of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose path to find
+ * @buf: character buffer to contain the returned path (will be overwritten)
+ * @buflen: size of the character buffer at buf
+ *
+ * fdt_get_path() computes the full path of the node at offset
+ * nodeoffset, and records that path in the buffer at buf.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ *	0, on success
+ *		buf contains the absolute path of the node at
+ *		nodeoffset, as a NUL-terminated string.
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1)
+ *		characters and will not fit in the given buffer.
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen);
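+
+/*
+ * Illustrative sketch (the buffer size is arbitrary; -FDT_ERR_NOSPACE is
+ * returned if the path does not fit):
+ *
+ *	char path[256];
+ *	int err = fdt_get_path(fdt, node, path, sizeof(path));
+ *
+ *	if (err < 0)
+ *		return err;
+ */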
+
+/**
+ * fdt_supernode_atdepth_offset - find a specific ancestor of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ * @supernodedepth: depth of the ancestor to find
+ * @nodedepth: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_supernode_atdepth_offset() finds an ancestor of the given node
+ * at a specific depth from the root (where the root itself has depth
+ * 0, its immediate subnodes depth 1 and so forth).  So
+ *	fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL);
+ * will always return 0, the offset of the root node.  If the node at
+ * nodeoffset has depth D, then:
+ *	fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL);
+ * will return nodeoffset itself.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ *	structure block offset of the ancestor of the node at nodeoffset,
+ *		at depth supernodedepth (>=0), on success
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of
+ *		nodeoffset
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+				 int supernodedepth, int *nodedepth);
+
+/**
+ * fdt_node_depth - find the depth of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_node_depth() finds the depth of a given node.  The root node
+ * has depth 0, its immediate subnodes depth 1 and so forth.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ *	depth of the node at nodeoffset (>=0), on success
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_depth(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_parent_offset - find the parent of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_parent_offset() locates the parent node of a given node (that
+ * is, it finds the offset of the node which contains the node at
+ * nodeoffset as a subnode).
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset, *twice*.
+ *
+ * returns:
+ *	structure block offset of the parent of the node at nodeoffset
+ *		(>=0), on success
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_parent_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_node_offset_by_prop_value - find nodes with a given property value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @propname: property name to check
+ * @propval: property value to search for
+ * @proplen: length of the value in propval
+ *
+ * fdt_node_offset_by_prop_value() returns the offset of the first
+ * node after startoffset, which has a property named propname whose
+ * value is of length proplen and has value equal to propval; or if
+ * startoffset is -1, the very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ *	offset = fdt_node_offset_by_prop_value(fdt, -1, propname,
+ *					       propval, proplen);
+ *	while (offset != -FDT_ERR_NOTFOUND) {
+ *		// other code here
+ *		offset = fdt_node_offset_by_prop_value(fdt, offset, propname,
+ *						       propval, proplen);
+ *	}
+ *
+ * Note the -1 in the first call to the function: if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ *	structure block offset of the located node (>= 0, >startoffset),
+ *		 on success
+ *	-FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ *		tree after startoffset
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+				  const char *propname,
+				  const void *propval, int proplen);
+
+/**
+ * fdt_node_offset_by_phandle - find the node with a given phandle
+ * @fdt: pointer to the device tree blob
+ * @phandle: phandle value
+ *
+ * fdt_node_offset_by_phandle() returns the offset of the node
+ * which has the given phandle value.  If there is more than one node
+ * in the tree with the given phandle (an invalid tree), results are
+ * undefined.
+ *
+ * returns:
+ *	structure block offset of the located node (>= 0), on success
+ *	-FDT_ERR_NOTFOUND, no node with that phandle exists
+ *	-FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1)
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle);
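+
+/*
+ * Illustrative sketch: resolving a phandle reference stored in another
+ * node's property (the property name is only an example):
+ *
+ *	const fdt32_t *ph = fdt_getprop(fdt, node, "interrupt-parent", NULL);
+ *
+ *	if (ph != NULL) {
+ *		int parent = fdt_node_offset_by_phandle(fdt,
+ *							fdt32_to_cpu(*ph));
+ *		if (parent < 0)
+ *			return parent;
+ *	}
+ */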
+
+/**
+ * fdt_node_check_compatible: check a node's compatible property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @compatible: string to match against
+ *
+ * fdt_node_check_compatible() returns 0 if the given node contains a
+ * 'compatible' property with the given string as one of its elements,
+ * it returns non-zero otherwise, or on error.
+ *
+ * returns:
+ *	0, if the node has a 'compatible' property listing the given string
+ *	1, if the node has a 'compatible' property, but it does not list
+ *		the given string
+ *	-FDT_ERR_NOTFOUND, if the given node has no 'compatible' property
+ *	-FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+			      const char *compatible);
+
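+/*
+ * Illustrative sketch; note the inverted sense of the return value (0
+ * means "compatible"). The compatible string is only an example:
+ *
+ *	if (fdt_node_check_compatible(fdt, node, "arm,pl011") == 0) {
+ *		... node describes a PL011 UART ...
+ *	}
+ */
+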
+/**
+ * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @compatible: 'compatible' string to match against
+ *
+ * fdt_node_offset_by_compatible() returns the offset of the first
+ * node after startoffset, which has a 'compatible' property which
+ * lists the given compatible string; or if startoffset is -1, the
+ * very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ *	offset = fdt_node_offset_by_compatible(fdt, -1, compatible);
+ *	while (offset != -FDT_ERR_NOTFOUND) {
+ *		// other code here
+ *		offset = fdt_node_offset_by_compatible(fdt, offset, compatible);
+ *	}
+ *
+ * Note the -1 in the first call to the function: if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ *	structure block offset of the located node (>= 0, >startoffset),
+ *		 on success
+ *	-FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ *		tree after startoffset
+ *	-FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+				  const char *compatible);
+
+/**
+ * fdt_stringlist_contains - check a string list property for a string
+ * @strlist: Property containing a list of strings to check
+ * @listlen: Length of property
+ * @str: String to search for
+ *
+ * This is a utility function provided for convenience. The list contains
+ * one or more strings, each terminated by \0, as is found in a device tree
+ * "compatible" property.
+ *
+ * @return: 1 if the string is found in the list, 0 if it is not found or
+ *          the list is invalid
+ */
+int fdt_stringlist_contains(const char *strlist, int listlen, const char *str);
+
+/**
+ * fdt_stringlist_count - count the number of strings in a string list
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ * @return:
+ *   the number of strings in the given property
+ *   -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ *   -FDT_ERR_NOTFOUND if the property does not exist
+ */
+int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property);
+
+/**
+ * fdt_stringlist_search - find a string in a string list and return its index
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ * @string: string to look up in the string list
+ *
+ * Note that it is possible for this function to succeed on property values
+ * that are not NUL-terminated. That's because the function will stop after
+ * finding the first occurrence of @string. This can for example happen with
+ * small-valued cell properties, such as #address-cells, when searching for
+ * the empty string.
+ *
+ * @return:
+ *   the index of the string in the list of strings
+ *   -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ *   -FDT_ERR_NOTFOUND if the property does not exist or does not contain
+ *                     the given string
+ */
+int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property,
+			  const char *string);
+
+/**
+ * fdt_stringlist_get() - obtain the string at a given index in a string list
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ * @index: index of the string to return
+ * @lenp: return location for the string length or an error code on failure
+ *
+ * Note that this will successfully extract strings from properties with
+ * non-NUL-terminated values. For example on small-valued cell properties
+ * this function will return the empty string.
+ *
+ * If non-NULL, the length of the string (on success) or a negative error-code
+ * (on failure) will be stored in the integer pointed to by lenp.
+ *
+ * @return:
+ *   A pointer to the string at the given index in the string list or NULL on
+ *   failure. On success the length of the string will be stored in the memory
+ *   location pointed to by the lenp parameter, if non-NULL. On failure one of
+ *   the following negative error codes will be returned in the lenp parameter
+ *   (if non-NULL):
+ *     -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ *     -FDT_ERR_NOTFOUND if the property does not exist
+ */
+const char *fdt_stringlist_get(const void *fdt, int nodeoffset,
+			       const char *property, int index,
+			       int *lenp);
+
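+/*
+ * Illustrative sketch: iterating a string-list property such as
+ * "clock-names" (the property name is only an example):
+ *
+ *	int i;
+ *	int n = fdt_stringlist_count(fdt, node, "clock-names");
+ *
+ *	if (n < 0)
+ *		return n;
+ *	for (i = 0; i < n; i++) {
+ *		int len;
+ *		const char *s = fdt_stringlist_get(fdt, node, "clock-names",
+ *						   i, &len);
+ *		if (s == NULL)
+ *			return len;
+ *		... use s (length len) ...
+ *	}
+ */
+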
+/**********************************************************************/
+/* Read-only functions (addressing related)                           */
+/**********************************************************************/
+
+/**
+ * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells
+ *
+ * This is the maximum value for #address-cells, #size-cells and
+ * similar properties that will be processed by libfdt.  IEEE 1275
+ * requires that OF implementations handle values up to 4.
+ * Implementations may support larger values, but in practice higher
+ * values aren't used.
+ */
+#define FDT_MAX_NCELLS		4
+
+/**
+ * fdt_address_cells - retrieve address size for a bus represented in the tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address size for
+ *
+ * When the node has a valid #address-cells property, returns its value.
+ *
+ * returns:
+ *	0 <= n < FDT_MAX_NCELLS, on success
+ *      2, if the node has no #address-cells property
+ *      -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ *		#address-cells property
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_address_cells(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_size_cells - retrieve address range size for a bus represented in the
+ *                  tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address range size for
+ *
+ * When the node has a valid #size-cells property, returns its value.
+ *
+ * returns:
+ *	0 <= n < FDT_MAX_NCELLS, on success
+ *      2, if the node has no #size-cells property
+ *      -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ *		#size-cells property
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_size_cells(const void *fdt, int nodeoffset);
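+
+/*
+ * Illustrative sketch: a bus node's #address-cells and #size-cells are
+ * needed to interpret the "reg" properties of its children:
+ *
+ *	int ac = fdt_address_cells(fdt, busnode);
+ *	int sc = fdt_size_cells(fdt, busnode);
+ *
+ *	if (ac < 0 || sc < 0)
+ *		return -1;
+ *	... each "reg" entry of a child is (ac + sc) cells long ...
+ */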
+
+
+/**********************************************************************/
+/* Write-in-place functions                                           */
+/**********************************************************************/
+
+/**
+ * fdt_setprop_inplace_namelen_partial - change a property's value,
+ *                                       but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @namelen: number of characters of name to consider
+ * @idx: index of the property to change in the array
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * Identical to fdt_setprop_inplace(), but modifies the given property
+ * starting from the given index, and using only the first namelen
+ * characters of the name. It is useful when you want to manipulate only
+ * one value of an array and you have a string that doesn't end with \0.
+ */
+int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset,
+					const char *name, int namelen,
+					uint32_t idx, const void *val,
+					int len);
+
+/**
+ * fdt_setprop_inplace - change a property's value, but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * fdt_setprop_inplace() replaces the value of a given property with
+ * the data in val, of length len.  This function cannot change the
+ * size of a property, and so will only work if len is equal to the
+ * current length of the property.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, if len is not equal to the property's current length
+ *	-FDT_ERR_NOTFOUND, node does not have the named property
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+			const void *val, int len);
+
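+/*
+ * Illustrative sketch (the property name and value are only examples;
+ * the property must already exist with length 6 for the call to
+ * succeed):
+ *
+ *	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ *	int err = fdt_setprop_inplace(fdt, node, "local-mac-address",
+ *				      mac, sizeof(mac));
+ *
+ *	if (err < 0)
+ *		return err;
+ */
+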
+/**
+ * fdt_setprop_inplace_u32 - change the value of a 32-bit integer property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to replace the property with
+ *
+ * fdt_setprop_inplace_u32() replaces the value of a given property
+ * with the 32-bit integer value in val, converting val to big-endian
+ * if necessary.  This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 4.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, if the property's length is not equal to 4
+ *	-FDT_ERR_NOTFOUND, node does not have the named property
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset,
+					  const char *name, uint32_t val)
+{
+	fdt32_t tmp = cpu_to_fdt32(val);
+	return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_inplace_u64 - change the value of a 64-bit integer property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to replace the property with
+ *
+ * fdt_setprop_inplace_u64() replaces the value of a given property
+ * with the 64-bit integer value in val, converting val to big-endian
+ * if necessary.  This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 8.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, if the property's length is not equal to 8
+ *	-FDT_ERR_NOTFOUND, node does not have the named property
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset,
+					  const char *name, uint64_t val)
+{
+	fdt64_t tmp = cpu_to_fdt64(val);
+	return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ *
+ * This is an alternative name for fdt_setprop_inplace_u32()
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+					   const char *name, uint32_t val)
+{
+	return fdt_setprop_inplace_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_nop_property - replace a property with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to nop
+ * @name: name of the property to nop
+ *
+ * fdt_nop_property() will replace a given property's representation
+ * in the blob with FDT_NOP tags, effectively removing it from the
+ * tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the property, and will not alter or move any other part of the
+ * tree.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOTFOUND, node does not have the named property
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_nop_node - replace a node (subtree) with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_nop_node() will replace a given node's representation in the
+ * blob, including all its subnodes, if any, with FDT_NOP tags,
+ * effectively removing it from the tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the node and its properties and subnodes, and will not alter or
+ * move any other part of the tree.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Sequential write functions                                         */
+/**********************************************************************/
+
+int fdt_create(void *buf, int bufsize);
+int fdt_resize(void *fdt, void *buf, int bufsize);
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
+int fdt_finish_reservemap(void *fdt);
+int fdt_begin_node(void *fdt, const char *name);
+int fdt_property(void *fdt, const char *name, const void *val, int len);
+static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val)
+{
+	fdt32_t tmp = cpu_to_fdt32(val);
+	return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
+{
+	fdt64_t tmp = cpu_to_fdt64(val);
+	return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+	return fdt_property_u32(fdt, name, val);
+}
+#define fdt_property_string(fdt, name, str) \
+	fdt_property(fdt, name, str, strlen(str)+1)
+int fdt_end_node(void *fdt);
+int fdt_finish(void *fdt);
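+
+/*
+ * The sequential write functions above are called in order; a minimal
+ * sketch of building a tree from scratch looks like the following
+ * (error checks omitted for brevity, each call returns 0 or a
+ * -FDT_ERR_* value; the "compatible" string is only an example):
+ *
+ *	fdt_create(buf, bufsize);
+ *	fdt_finish_reservemap(buf);
+ *	fdt_begin_node(buf, "");
+ *	fdt_property_string(buf, "compatible", "vendor,board");
+ *	fdt_end_node(buf);
+ *	fdt_finish(buf);
+ */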
+
+/**********************************************************************/
+/* Read-write functions                                               */
+/**********************************************************************/
+
+int fdt_create_empty_tree(void *buf, int bufsize);
+int fdt_open_into(const void *fdt, void *buf, int bufsize);
+int fdt_pack(void *fdt);
+
+/**
+ * fdt_add_mem_rsv - add one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address, @size: 64-bit values (native endian)
+ *
+ * Adds a reserve map entry to the given blob reserving a region at
+ * address address of length size.
+ *
+ * This function will insert data into the reserve map and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new reservation entry
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
+
+/**
+ * fdt_del_mem_rsv - remove a memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: entry to remove
+ *
+ * fdt_del_mem_rsv() removes the n-th memory reserve map entry from
+ * the blob.
+ *
+ * This function will delete data from the reservation table and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there
+ *		are less than n+1 reserve map entries)
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_mem_rsv(void *fdt, int n);
+
+/**
+ * fdt_set_name - change the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ * @name: name to give the node
+ *
+ * fdt_set_name() replaces the name (including unit address, if any)
+ * of the given node with the given string.  NOTE: this function can't
+ * efficiently check if the new name is unique amongst the given
+ * node's siblings; results are undefined if this function is invoked
+ * with a name equal to one of the given node's siblings.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob
+ *		to contain the new name
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_set_name(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_setprop - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+		const void *val, int len);
+
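+/*
+ * Illustrative sketch: setting a string-valued property ("status" and
+ * "okay" are only examples; the length includes the terminating NUL):
+ *
+ *	int err = fdt_setprop(fdt, node, "status", "okay", 5);
+ *
+ *	if (err == -FDT_ERR_NOSPACE)
+ *		... grow the blob with fdt_open_into() and retry ...
+ *	else if (err < 0)
+ *		return err;
+ */
+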
+/**
+ * fdt_setprop_u32 - set a property to a 32-bit integer
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_u32() sets the value of the named property in the given
+ * node to the given 32-bit integer value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name,
+				  uint32_t val)
+{
+	fdt32_t tmp = cpu_to_fdt32(val);
+	return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_u64 - set a property to a 64-bit integer
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_u64() sets the value of the named property in the given
+ * node to the given 64-bit integer value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name,
+				  uint64_t val)
+{
+	fdt64_t tmp = cpu_to_fdt64(val);
+	return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_cell - set a property to a single cell value
+ *
+ * This is an alternative name for fdt_setprop_u32()
+ */
+static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
+				   uint32_t val)
+{
+	return fdt_setprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_setprop_string - set a property to a string value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value for the property
+ *
+ * fdt_setprop_string() sets the value of the named property in the
+ * given node to the given string value (using the length of the
+ * string to determine the new length of the property), or creates a
+ * new property with that value if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_string(fdt, nodeoffset, name, str) \
+	fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_appendprop - append to or create a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to append to
+ * @val: pointer to data to append to the property value
+ * @len: length of the data to append to the property value
+ *
+ * fdt_appendprop() appends the value to the named property in the
+ * given node, creating the property if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
+		   const void *val, int len);
+
+/**
+ * fdt_appendprop_u32 - append a 32-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u32() appends the given 32-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u32(void *fdt, int nodeoffset,
+				     const char *name, uint32_t val)
+{
+	fdt32_t tmp = cpu_to_fdt32(val);
+	return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_appendprop_u64 - append a 64-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u64() appends the given 64-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u64(void *fdt, int nodeoffset,
+				     const char *name, uint64_t val)
+{
+	fdt64_t tmp = cpu_to_fdt64(val);
+	return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_appendprop_cell - append a single cell value to a property
+ *
+ * This is an alternative name for fdt_appendprop_u32()
+ */
+static inline int fdt_appendprop_cell(void *fdt, int nodeoffset,
+				      const char *name, uint32_t val)
+{
+	return fdt_appendprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_appendprop_string - append a string to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value to append to the property
+ *
+ * fdt_appendprop_string() appends the given string to the value of
+ * the named property in the given node, or creates a new property
+ * with that value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *		contain the new property value
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_appendprop_string(fdt, nodeoffset, name, str) \
+	fdt_appendprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_delprop - delete a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to delete
+ * @name: name of the property to delete
+ *
+ * fdt_delprop() will delete the given property.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_NOTFOUND, node does not have the named property
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_delprop(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_add_subnode_namelen - creates a new node based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_add_subnode(), but use only the first namelen
+ * characters of name as the name of the new node.  This is useful for
+ * creating subnodes based on a portion of a larger string, such as a
+ * full path.
+ */
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+			    const char *name, int namelen);
+
+/**
+ * fdt_add_subnode - creates a new node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ *
+ * fdt_add_subnode() creates a new node as a subnode of the node at
+ * structure block offset parentoffset, with the given name (which
+ * should include the unit address, if any).
+ *
+ * This function will insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	structure block offset of the created subnode (>=0), on success
+ *	-FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ *	-FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
+ *		tag
+ *	-FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of
+ *		the given name
+ *	-FDT_ERR_NOSPACE, if there is insufficient free space in the
+ *		blob to contain the new node
+ *	-FDT_ERR_BADLAYOUT,
+ *      -FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
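+
+/*
+ * Illustrative sketch: ensuring a /chosen node exists and giving it a
+ * property (the bootargs string is only an example):
+ *
+ *	int chosen = fdt_add_subnode(fdt, 0, "chosen");
+ *
+ *	if (chosen == -FDT_ERR_EXISTS)
+ *		chosen = fdt_path_offset(fdt, "/chosen");
+ *	if (chosen < 0)
+ *		return chosen;
+ *	return fdt_setprop_string(fdt, chosen, "bootargs", "console=ttyAMA0");
+ */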
+
+/**
+ * fdt_del_node - delete a node (subtree)
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to delete
+ *
+ * fdt_del_node() will remove the given node, including all its
+ * subnodes if any, from the blob.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *	0, on success
+ *	-FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *	-FDT_ERR_BADLAYOUT,
+ *	-FDT_ERR_BADMAGIC,
+ *	-FDT_ERR_BADVERSION,
+ *	-FDT_ERR_BADSTATE,
+ *	-FDT_ERR_BADSTRUCTURE,
+ *	-FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Debugging / informational functions                                */
+/**********************************************************************/
+
+const char *fdt_strerror(int errval);
+
+#endif /* _LIBFDT_H */
diff --git a/include/lib/libfdt/libfdt_env.h b/include/lib/libfdt/libfdt_env.h
new file mode 100644
index 0000000..9dea97d
--- /dev/null
+++ b/include/lib/libfdt/libfdt_env.h
@@ -0,0 +1,111 @@
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __CHECKER__
+#define __force __attribute__((force))
+#define __bitwise __attribute__((bitwise))
+#else
+#define __force
+#define __bitwise
+#endif
+
+typedef uint16_t __bitwise fdt16_t;
+typedef uint32_t __bitwise fdt32_t;
+typedef uint64_t __bitwise fdt64_t;
+
+#define EXTRACT_BYTE(x, n)	((unsigned long long)((uint8_t *)&x)[n])
+#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
+#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \
+			 (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3))
+#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \
+			 (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \
+			 (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \
+			 (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7))
+
+static inline uint16_t fdt16_to_cpu(fdt16_t x)
+{
+	return (__force uint16_t)CPU_TO_FDT16(x);
+}
+static inline fdt16_t cpu_to_fdt16(uint16_t x)
+{
+	return (__force fdt16_t)CPU_TO_FDT16(x);
+}
+
+static inline uint32_t fdt32_to_cpu(fdt32_t x)
+{
+	return (__force uint32_t)CPU_TO_FDT32(x);
+}
+static inline fdt32_t cpu_to_fdt32(uint32_t x)
+{
+	return (__force fdt32_t)CPU_TO_FDT32(x);
+}
+
+static inline uint64_t fdt64_to_cpu(fdt64_t x)
+{
+	return (__force uint64_t)CPU_TO_FDT64(x);
+}
+static inline fdt64_t cpu_to_fdt64(uint64_t x)
+{
+	return (__force fdt64_t)CPU_TO_FDT64(x);
+}
+#undef CPU_TO_FDT64
+#undef CPU_TO_FDT32
+#undef CPU_TO_FDT16
+#undef EXTRACT_BYTE
+
+#endif /* _LIBFDT_ENV_H */
diff --git a/include/lib/mmio.h b/include/lib/mmio.h
index 5b72218..880d2c5 100644
--- a/include/lib/mmio.h
+++ b/include/lib/mmio.h
@@ -1,88 +1,69 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __MMIO_H__
 #define __MMIO_H__
 
-#include <arch_helpers.h>
 #include <stdint.h>
 
 static inline void mmio_write_8(uintptr_t addr, uint8_t value)
 {
-	dsb();
-	isb();
 	*(volatile uint8_t*)addr = value;
 }
 
 static inline uint8_t mmio_read_8(uintptr_t addr)
 {
-	uint8_t val;
+	return *(volatile uint8_t*)addr;
+}
 
-	val = *(volatile uint8_t*)addr;
-	dsb();
-	isb();
-	return val;
+static inline void mmio_write_16(uintptr_t addr, uint16_t value)
+{
+	*(volatile uint16_t*)addr = value;
+}
+
+static inline uint16_t mmio_read_16(uintptr_t addr)
+{
+	return *(volatile uint16_t*)addr;
 }
 
 static inline void mmio_write_32(uintptr_t addr, uint32_t value)
 {
-	dsb();
-	isb();
 	*(volatile uint32_t*)addr = value;
 }
 
 static inline uint32_t mmio_read_32(uintptr_t addr)
 {
-	uint32_t val;
-
-	val = *(volatile uint32_t*)addr;
-	dsb();
-	isb();
-	return val;
+	return *(volatile uint32_t*)addr;
 }
 
 static inline void mmio_write_64(uintptr_t addr, uint64_t value)
 {
-	dsb();
-	isb();
 	*(volatile uint64_t*)addr = value;
 }
 
 static inline uint64_t mmio_read_64(uintptr_t addr)
 {
-	uint64_t val;
+	return *(volatile uint64_t*)addr;
+}
 
-	val = *(volatile uint64_t*)addr;
-	dsb();
-	isb();
-	return val;
+static inline void mmio_clrbits_32(uintptr_t addr, uint32_t clear)
+{
+	mmio_write_32(addr, mmio_read_32(addr) & ~clear);
+}
+
+static inline void mmio_setbits_32(uintptr_t addr, uint32_t set)
+{
+	mmio_write_32(addr, mmio_read_32(addr) | set);
+}
+
+static inline void mmio_clrsetbits_32(uintptr_t addr,
+				uint32_t clear,
+				uint32_t set)
+{
+	mmio_write_32(addr, (mmio_read_32(addr) & ~clear) | set);
 }
 
 #endif /* __MMIO_H__ */
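
The new mmio_clrbits_32/mmio_setbits_32/mmio_clrsetbits_32 helpers replace the open-coded read-modify-write sequences drivers used to carry. A hedged sketch of typical usage; the register address and field masks below are made up for illustration.

#include <mmio.h>

/* Hypothetical register layout, for illustration only. */
#define EXAMPLE_CTRL_REG	0x1c090000UL
#define CTRL_MODE_MASK		(0x3U << 4)
#define CTRL_MODE_FAST		(0x2U << 4)
#define CTRL_ENABLE_BIT		(1U << 0)

static void example_enable_fast_mode(void)
{
	/* Replace the MODE field while leaving every other bit untouched. */
	mmio_clrsetbits_32(EXAMPLE_CTRL_REG, CTRL_MODE_MASK, CTRL_MODE_FAST);

	/* Then set the enable bit on its own. */
	mmio_setbits_32(EXAMPLE_CTRL_REG, CTRL_ENABLE_BIT);
}
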
diff --git a/include/lib/optee_utils.h b/include/lib/optee_utils.h
new file mode 100644
index 0000000..3d35b19
--- /dev/null
+++ b/include/lib/optee_utils.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __OPTEE_UTILS_H__
+#define __OPTEE_UTILS_H__
+
+#include <bl_common.h>
+
+int parse_optee_header(entry_point_info_t *header_ep,
+	image_info_t *pager_image_info,
+	image_info_t *paged_image_info);
+
+#endif /* __OPTEE_UTILS_H__ */
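
parse_optee_header() takes the entry point and image descriptors for a combined OP-TEE image and fills in the pager and (optional) paged image information. A rough sketch of how a loader stage might call it, assuming the three structures were already populated by the usual image-loading path; error handling is reduced to a single check.

#include <optee_utils.h>

static int example_load_optee(entry_point_info_t *header_ep,
			      image_info_t *pager_info,
			      image_info_t *paged_info)
{
	int ret = parse_optee_header(header_ep, pager_info, paged_info);

	if (ret != 0)
		return ret;	/* not a recognised OP-TEE header */

	/* header_ep now describes the OP-TEE entry point proper. */
	return 0;
}
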
diff --git a/include/lib/pmf/pmf.h b/include/lib/pmf/pmf.h
new file mode 100644
index 0000000..cdff763
--- /dev/null
+++ b/include/lib/pmf/pmf.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMF_H__
+#define __PMF_H__
+
+#include <cassert.h>
+#include <pmf_helpers.h>
+
+/*
+ * Constants used for/by PMF services.
+ */
+#define PMF_ARM_TIF_IMPL_ID	0x41
+#define PMF_TID_SHIFT		0
+#define PMF_TID_MASK		(0xFF << PMF_TID_SHIFT)
+#define PMF_SVC_ID_SHIFT	10
+#define PMF_SVC_ID_MASK		(0x3F << PMF_SVC_ID_SHIFT)
+#define PMF_IMPL_ID_SHIFT	24
+#define PMF_IMPL_ID_MASK	(0xFFU << PMF_IMPL_ID_SHIFT)
+
+/*
+ * Flags passed to PMF_REGISTER_SERVICE
+ */
+#define PMF_STORE_ENABLE	(1 << 0)
+#define PMF_DUMP_ENABLE		(1 << 1)
+
+/*
+ * Flags passed to PMF_GET_TIMESTAMP_XXX
+ * and PMF_CAPTURE_TIMESTAMP
+ */
+#define PMF_CACHE_MAINT		(1 << 0)
+#define PMF_NO_CACHE_MAINT	0
+
+/*
+ * Defines for PMF SMC function ids.
+ */
+#define PMF_SMC_GET_TIMESTAMP_32	0x82000010
+#define PMF_SMC_GET_TIMESTAMP_64	0xC2000010
+#define PMF_NUM_SMC_CALLS		2
+
+/*
+ * The macros below are used to identify
+ * PMF calls from the SMC function ID.
+ */
+#define PMF_FID_MASK	0xffe0u
+#define PMF_FID_VALUE	0u
+#define is_pmf_fid(_fid)	(((_fid) & PMF_FID_MASK) == PMF_FID_VALUE)
+
+/* Following are the supported PMF service IDs */
+#define PMF_PSCI_STAT_SVC_ID	0
+#define PMF_RT_INSTR_SVC_ID	1
+
+#if ENABLE_PMF
+/*
+ * Convenience macros for capturing time-stamp.
+ */
+#define PMF_DECLARE_CAPTURE_TIMESTAMP(_name)			\
+	void pmf_capture_timestamp_with_cache_maint_ ## _name(	\
+				unsigned int tid,		\
+				unsigned long long ts);		\
+	void pmf_capture_timestamp_ ## _name(			\
+				unsigned int tid,		\
+				unsigned long long ts);
+
+#define PMF_CAPTURE_TIMESTAMP(_name, _tid, _flags)			\
+	do {								\
+		unsigned long long ts = read_cntpct_el0();		\
+		if ((_flags) & PMF_CACHE_MAINT)				\
+			pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), ts);\
+		else							\
+			pmf_capture_timestamp_ ## _name((_tid), ts);	\
+	} while (0)
+
+#define PMF_CAPTURE_AND_GET_TIMESTAMP(_name, _tid, _flags, _tsval)	\
+	do {								\
+		(_tsval) = read_cntpct_el0();				\
+		CASSERT(sizeof(_tsval) == sizeof(unsigned long long), invalid_tsval_size);\
+		if ((_flags) & PMF_CACHE_MAINT)				\
+			pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), (_tsval));\
+		else							\
+			pmf_capture_timestamp_ ## _name((_tid), (_tsval));\
+	} while (0)
+
+#define PMF_WRITE_TIMESTAMP(_name, _tid, _flags, _wrval)		\
+	do {								\
+		CASSERT(sizeof(_wrval) == sizeof(unsigned long long), invalid_wrval_size);\
+		if ((_flags) & PMF_CACHE_MAINT)				\
+			pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), (_wrval));\
+		else							\
+			pmf_capture_timestamp_ ## _name((_tid), (_wrval));\
+	} while (0)
+
+/*
+ * Convenience macros for retrieving time-stamp.
+ */
+#define PMF_DECLARE_GET_TIMESTAMP(_name)			\
+	unsigned long long pmf_get_timestamp_by_index_ ## _name(\
+		unsigned int tid,				\
+		unsigned int cpuid,				\
+		unsigned int flags);				\
+	unsigned long long pmf_get_timestamp_by_mpidr_ ## _name(\
+		unsigned int tid,				\
+		u_register_t mpidr,				\
+		unsigned int flags);
+
+#define PMF_GET_TIMESTAMP_BY_MPIDR(_name, _tid, _mpidr, _flags, _tsval)\
+	_tsval = pmf_get_timestamp_by_mpidr_ ## _name(_tid, _mpidr, _flags)
+
+#define PMF_GET_TIMESTAMP_BY_INDEX(_name, _tid, _cpuid, _flags, _tsval)\
+	_tsval = pmf_get_timestamp_by_index_ ## _name(_tid, _cpuid, _flags)
+
+/* Convenience macros to register a PMF service. */
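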
+/*
+ * This macro is used to register a PMF Service. It allocates PMF memory
+ * and defines default service-specific PMF functions.
+ */
+#define PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags)	\
+	PMF_ALLOCATE_TIMESTAMP_MEMORY(_name, _totalid)		\
+	PMF_DEFINE_CAPTURE_TIMESTAMP(_name, _flags)		\
+	PMF_DEFINE_GET_TIMESTAMP(_name)
+
+/*
+ * This macro is used to register a PMF service, including an
+ * SMC interface to that service.
+ */
+#define PMF_REGISTER_SERVICE_SMC(_name, _svcid, _totalid, _flags)\
+	PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags)	\
+	PMF_DEFINE_SERVICE_DESC(_name, PMF_ARM_TIF_IMPL_ID,	\
+			_svcid, _totalid, NULL,			\
+			pmf_get_timestamp_by_mpidr_ ## _name)
+
+/*
+ * This macro is used to register a PMF service that has an SMC interface
+ * but provides its own service-specific PMF functions.
+ */
+#define PMF_REGISTER_SERVICE_SMC_OWN(_name, _implid, _svcid, _totalid,	\
+		 _init, _getts)						\
+	PMF_DEFINE_SERVICE_DESC(_name, _implid, _svcid, _totalid,	\
+		 _init, _getts)
+
+#else
+
+#define PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags)
+#define PMF_REGISTER_SERVICE_SMC(_name, _svcid, _totalid, _flags)
+#define PMF_REGISTER_SERVICE_SMC_OWN(_name, _implid, _svcid, _totalid,	\
+				_init, _getts)
+#define PMF_DECLARE_CAPTURE_TIMESTAMP(_name)
+#define PMF_DECLARE_GET_TIMESTAMP(_name)
+#define PMF_CAPTURE_TIMESTAMP(_name, _tid, _flags)
+#define PMF_GET_TIMESTAMP_BY_MPIDR(_name, _tid, _mpidr, _flags, _tsval)
+#define PMF_GET_TIMESTAMP_BY_INDEX(_name, _tid, _cpuid, _flags, _tsval)
+
+#endif /* ENABLE_PMF */
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+/* PMF common functions */
+int pmf_get_timestamp_smc(unsigned int tid,
+		u_register_t mpidr,
+		unsigned int flags,
+		unsigned long long *ts);
+int pmf_setup(void);
+uintptr_t pmf_smc_handler(unsigned int smc_fid,
+		u_register_t x1,
+		u_register_t x2,
+		u_register_t x3,
+		u_register_t x4,
+		void *cookie,
+		void *handle,
+		u_register_t flags);
+
+#endif /* __PMF_H__ */
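
The capture and retrieval macro pairs above are the whole client-facing API: a service reserves its timestamp slots once with PMF_REGISTER_SERVICE() and then stamps into them by ID. A hedged sketch under invented names (the service name, service ID and timestamp IDs are made up); it only does anything when the firmware is built with ENABLE_PMF, otherwise the macros compile away.

#include <pmf.h>

/* Hypothetical per-CPU timestamp IDs and service ID for the example. */
#define EXAMPLE_TS_ENTER	0
#define EXAMPLE_TS_EXIT		1
#define EXAMPLE_TS_COUNT	2
#define EXAMPLE_SVC_ID		0x2a

/* Reserves the per-CPU timestamp storage and defines the capture helpers. */
PMF_REGISTER_SERVICE(example_svc, EXAMPLE_SVC_ID, EXAMPLE_TS_COUNT,
		PMF_STORE_ENABLE)

static void example_do_timed_work(void)
{
	PMF_CAPTURE_TIMESTAMP(example_svc, EXAMPLE_TS_ENTER, PMF_NO_CACHE_MAINT);

	/* ... the work being measured ... */

	PMF_CAPTURE_TIMESTAMP(example_svc, EXAMPLE_TS_EXIT, PMF_CACHE_MAINT);
}
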
diff --git a/include/lib/pmf/pmf_asm_macros.S b/include/lib/pmf/pmf_asm_macros.S
new file mode 100644
index 0000000..9ee8199
--- /dev/null
+++ b/include/lib/pmf/pmf_asm_macros.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMF_ASM_MACROS_S__
+#define __PMF_ASM_MACROS_S__
+
+#define PMF_TS_SIZE	8
+
+	/*
+	 * This macro calculates the address of the per-cpu timestamp
+	 * for the given service name and local timestamp id.
+	 * Clobbers: x0 - x9
+	 */
+	.macro pmf_calc_timestamp_addr _name _tid
+	mov	x9, x30
+	bl	plat_my_core_pos
+	mov	x30, x9
+	ldr	x1, =__PERCPU_TIMESTAMP_SIZE__
+	mov	x2, #(\_tid * PMF_TS_SIZE)
+	madd	x0, x0, x1, x2
+	ldr	x1, =pmf_ts_mem_\_name
+	add	x0, x0, x1
+	.endm
+
+#endif /* __PMF_ASM_MACROS_S__ */
diff --git a/include/lib/pmf/pmf_helpers.h b/include/lib/pmf/pmf_helpers.h
new file mode 100644
index 0000000..9984d69
--- /dev/null
+++ b/include/lib/pmf/pmf_helpers.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMF_HELPERS_H__
+#define __PMF_HELPERS_H__
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <platform.h>
+#include <pmf.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * Prototype for PMF service functions.
+ */
+typedef int (*pmf_svc_init_t)(void);
+typedef unsigned long long (*pmf_svc_get_ts_t)(unsigned int tid,
+		 u_register_t mpidr,
+		 unsigned int flags);
+
+/*
+ * This is the definition of the PMF service descriptor.
+ */
+typedef struct pmf_svc_desc {
+	/* Structure version information */
+	param_header_t h;
+
+	/* Name of the PMF service */
+	const char *name;
+
+	/* PMF service config: Implementer id, Service id and total id */
+	unsigned int svc_config;
+
+	/* PMF service initialization handler */
+	pmf_svc_init_t init;
+
+	/* PMF service time-stamp retrieval handler */
+	pmf_svc_get_ts_t get_ts;
+} pmf_svc_desc_t;
+
+/*
+ * Convenience macro to allocate memory for a PMF service.
+ */
+#define PMF_ALLOCATE_TIMESTAMP_MEMORY(_name, _total_id)		\
+	unsigned long long pmf_ts_mem_ ## _name[_total_id]	\
+	__aligned(CACHE_WRITEBACK_GRANULE)			\
+	__section("pmf_timestamp_array")			\
+	__used;
+
+/*
+ * Convenience macro to validate tid index for the given TS array.
+ */
+#define PMF_VALIDATE_TID(_name, _tid)	\
+	assert((_tid & PMF_TID_MASK) < (ARRAY_SIZE(pmf_ts_mem_ ## _name)))
+
+/*
+ * Convenience macros for capturing time-stamp.
+ */
+#define PMF_DEFINE_CAPTURE_TIMESTAMP(_name, _flags)			\
+	void pmf_capture_timestamp_ ## _name(				\
+			unsigned int tid,				\
+			unsigned long long ts)				\
+	{								\
+		CASSERT(_flags, select_proper_config);			\
+		PMF_VALIDATE_TID(_name, tid);				\
+		uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name;	\
+		if ((_flags) & PMF_STORE_ENABLE)			\
+			__pmf_store_timestamp(base_addr, tid, ts);	\
+		if ((_flags) & PMF_DUMP_ENABLE)				\
+			__pmf_dump_timestamp(tid, ts);			\
+	}								\
+	void pmf_capture_timestamp_with_cache_maint_ ## _name(		\
+			unsigned int tid,				\
+			unsigned long long ts)				\
+	{								\
+		CASSERT(_flags, select_proper_config);			\
+		PMF_VALIDATE_TID(_name, tid);				\
+		uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name;	\
+		if ((_flags) & PMF_STORE_ENABLE)			\
+			__pmf_store_timestamp_with_cache_maint(base_addr, tid, ts);\
+		if ((_flags) & PMF_DUMP_ENABLE)				\
+			__pmf_dump_timestamp(tid, ts);			\
+	}
+
+/*
+ * Convenience macros for retrieving time-stamp.
+ */
+#define PMF_DEFINE_GET_TIMESTAMP(_name)					\
+	unsigned long long pmf_get_timestamp_by_index_ ## _name(	\
+		unsigned int tid, unsigned int cpuid, unsigned int flags)\
+	{								\
+		PMF_VALIDATE_TID(_name, tid);				\
+		uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name;	\
+		return __pmf_get_timestamp(base_addr, tid, cpuid, flags);\
+	}								\
+	unsigned long long pmf_get_timestamp_by_mpidr_ ## _name(	\
+		unsigned int tid, u_register_t mpidr, unsigned int flags)\
+	{								\
+		PMF_VALIDATE_TID(_name, tid);				\
+		uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name;	\
+		return __pmf_get_timestamp(base_addr, tid,		\
+			plat_core_pos_by_mpidr(mpidr), flags);		\
+	}
+
+/*
+ * Convenience macro to register a PMF service.
+ * This is needed for services that require SMC handling.
+ */
+#define PMF_DEFINE_SERVICE_DESC(_name, _implid, _svcid, _totalid,	\
+		_init, _getts_by_mpidr) 				\
+	static const pmf_svc_desc_t __pmf_desc_ ## _name 		\
+	__section("pmf_svc_descs") __used = {		 		\
+		.h.type = PARAM_EP, 					\
+		.h.version = VERSION_1, 				\
+		.h.size = sizeof(pmf_svc_desc_t),			\
+		.h.attr = 0,						\
+		.name = #_name, 					\
+		.svc_config = ((((_implid) << PMF_IMPL_ID_SHIFT) &	\
+						PMF_IMPL_ID_MASK) |	\
+				(((_svcid) << PMF_SVC_ID_SHIFT) &	\
+						PMF_SVC_ID_MASK) |	\
+				(((_totalid) << PMF_TID_SHIFT) &	\
+						PMF_TID_MASK)),		\
+		.init = _init,						\
+		.get_ts = _getts_by_mpidr				\
+	};
+
+/* PMF internal functions */
+void __pmf_dump_timestamp(unsigned int tid, unsigned long long ts);
+void __pmf_store_timestamp(uintptr_t base_addr,
+		unsigned int tid,
+		unsigned long long ts);
+void __pmf_store_timestamp_with_cache_maint(uintptr_t base_addr,
+		unsigned int tid,
+		unsigned long long ts);
+unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
+		unsigned int tid,
+		unsigned int cpuid,
+		unsigned int flags);
+#endif /* __PMF_HELPERS_H__ */
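
PMF_DEFINE_SERVICE_DESC() packs the implementer, service and total-ID values into the single svc_config word using the shift/mask constants from pmf.h. A small sketch of the reverse operation, purely to make the encoding explicit; the function names are invented.

#include <pmf.h>

static unsigned int example_svc_config_svc_id(unsigned int svc_config)
{
	return (svc_config & PMF_SVC_ID_MASK) >> PMF_SVC_ID_SHIFT;
}

static unsigned int example_svc_config_total_ids(unsigned int svc_config)
{
	return (svc_config & PMF_TID_MASK) >> PMF_TID_SHIFT;
}
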
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
new file mode 100644
index 0000000..06434f9
--- /dev/null
+++ b/include/lib/psci/psci.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PSCI_H__
+#define __PSCI_H__
+
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <platform_def.h>	/* for PLAT_NUM_PWR_DOMAINS */
+#if ENABLE_PLAT_COMPAT
+#include <psci_compat.h>
+#endif
+#include <psci_lib.h>		/* To maintain compatibility for SPDs */
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Number of power domains whose state this PSCI implementation can track
+ ******************************************************************************/
+#ifdef PLAT_NUM_PWR_DOMAINS
+#define PSCI_NUM_PWR_DOMAINS	PLAT_NUM_PWR_DOMAINS
+#else
+#define PSCI_NUM_PWR_DOMAINS	(U(2) * PLATFORM_CORE_COUNT)
+#endif
+
+#define PSCI_NUM_NON_CPU_PWR_DOMAINS	(PSCI_NUM_PWR_DOMAINS - \
+					 PLATFORM_CORE_COUNT)
+
+/* This is the power level corresponding to a CPU */
+#define PSCI_CPU_PWR_LVL	(0)
+
+/*
+ * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
+ * uses the old power_state parameter format which has 2 bits to specify the
+ * power level, this constant is defined to be 3.
+ */
+#define PSCI_MAX_PWR_LVL	U(3)
+
+/*******************************************************************************
+ * Defines for runtime services function ids
+ ******************************************************************************/
+#define PSCI_VERSION			U(0x84000000)
+#define PSCI_CPU_SUSPEND_AARCH32	U(0x84000001)
+#define PSCI_CPU_SUSPEND_AARCH64	U(0xc4000001)
+#define PSCI_CPU_OFF			U(0x84000002)
+#define PSCI_CPU_ON_AARCH32		U(0x84000003)
+#define PSCI_CPU_ON_AARCH64		U(0xc4000003)
+#define PSCI_AFFINITY_INFO_AARCH32	U(0x84000004)
+#define PSCI_AFFINITY_INFO_AARCH64	U(0xc4000004)
+#define PSCI_MIG_AARCH32		U(0x84000005)
+#define PSCI_MIG_AARCH64		U(0xc4000005)
+#define PSCI_MIG_INFO_TYPE		U(0x84000006)
+#define PSCI_MIG_INFO_UP_CPU_AARCH32	U(0x84000007)
+#define PSCI_MIG_INFO_UP_CPU_AARCH64	U(0xc4000007)
+#define PSCI_SYSTEM_OFF			U(0x84000008)
+#define PSCI_SYSTEM_RESET		U(0x84000009)
+#define PSCI_FEATURES			U(0x8400000A)
+#define PSCI_NODE_HW_STATE_AARCH32	U(0x8400000d)
+#define PSCI_NODE_HW_STATE_AARCH64	U(0xc400000d)
+#define PSCI_SYSTEM_SUSPEND_AARCH32	U(0x8400000E)
+#define PSCI_SYSTEM_SUSPEND_AARCH64	U(0xc400000E)
+#define PSCI_STAT_RESIDENCY_AARCH32	U(0x84000010)
+#define PSCI_STAT_RESIDENCY_AARCH64	U(0xc4000010)
+#define PSCI_STAT_COUNT_AARCH32		U(0x84000011)
+#define PSCI_STAT_COUNT_AARCH64		U(0xc4000011)
+#define PSCI_SYSTEM_RESET2_AARCH32	U(0x84000012)
+#define PSCI_SYSTEM_RESET2_AARCH64	U(0xc4000012)
+#define PSCI_MEM_PROTECT		U(0x84000013)
+#define PSCI_MEM_CHK_RANGE_AARCH32	U(0x84000014)
+#define PSCI_MEM_CHK_RANGE_AARCH64	U(0xc4000014)
+
+/* Macro to help build the psci capabilities bitfield */
+#define define_psci_cap(x)		(U(1) << (x & U(0x1f)))
+
+/*
+ * Number of PSCI calls (above) implemented
+ */
+#if ENABLE_PSCI_STAT
+#define PSCI_NUM_CALLS			U(22)
+#else
+#define PSCI_NUM_CALLS			U(18)
+#endif
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK			U(0xffe0)
+#define PSCI_FID_VALUE			U(0)
+#define is_psci_fid(_fid) \
+	(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+/*******************************************************************************
+ * PSCI Migrate and friends
+ ******************************************************************************/
+#define PSCI_TOS_UP_MIG_CAP	U(0)
+#define PSCI_TOS_NOT_UP_MIG_CAP	U(1)
+#define PSCI_TOS_NOT_PRESENT_MP	U(2)
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND 'power_state' parameter specific defines
+ ******************************************************************************/
+#define PSTATE_ID_SHIFT		U(0)
+
+#if PSCI_EXTENDED_STATE_ID
+#define PSTATE_VALID_MASK	U(0xB0000000)
+#define PSTATE_TYPE_SHIFT	U(30)
+#define PSTATE_ID_MASK		U(0xfffffff)
+#else
+#define PSTATE_VALID_MASK	U(0xFCFE0000)
+#define PSTATE_TYPE_SHIFT	U(16)
+#define PSTATE_PWR_LVL_SHIFT	U(24)
+#define PSTATE_ID_MASK		U(0xffff)
+#define PSTATE_PWR_LVL_MASK	U(0x3)
+
+#define psci_get_pstate_pwrlvl(pstate)	(((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
+					PSTATE_PWR_LVL_MASK)
+#define psci_make_powerstate(state_id, type, pwrlvl) \
+			(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
+			(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
+			(((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+#define PSTATE_TYPE_STANDBY	U(0x0)
+#define PSTATE_TYPE_POWERDOWN	U(0x1)
+#define PSTATE_TYPE_MASK	U(0x1)
+
+#define psci_get_pstate_id(pstate)	(((pstate) >> PSTATE_ID_SHIFT) & \
+					PSTATE_ID_MASK)
+#define psci_get_pstate_type(pstate)	(((pstate) >> PSTATE_TYPE_SHIFT) & \
+					PSTATE_TYPE_MASK)
+#define psci_check_power_state(pstate)	((pstate) & PSTATE_VALID_MASK)
+
+/*******************************************************************************
+ * PSCI CPU_FEATURES feature flag specific defines
+ ******************************************************************************/
+/* Feature flags for CPU SUSPEND power state parameter format. Bits [1:1] */
+#define FF_PSTATE_SHIFT		U(1)
+#define FF_PSTATE_ORIG		U(0)
+#define FF_PSTATE_EXTENDED	U(1)
+#if PSCI_EXTENDED_STATE_ID
+#define FF_PSTATE		FF_PSTATE_EXTENDED
+#else
+#define FF_PSTATE		FF_PSTATE_ORIG
+#endif
+
+/* Feature flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
+#define FF_MODE_SUPPORT_SHIFT		U(0)
+#define FF_SUPPORTS_OS_INIT_MODE	U(1)
+
+/*******************************************************************************
+ * PSCI version
+ ******************************************************************************/
+#define PSCI_MAJOR_VER		(U(1) << 16)
+#define PSCI_MINOR_VER		U(0x1)
+
+/*******************************************************************************
+ * PSCI error codes
+ ******************************************************************************/
+#define PSCI_E_SUCCESS		0
+#define PSCI_E_NOT_SUPPORTED	-1
+#define PSCI_E_INVALID_PARAMS	-2
+#define PSCI_E_DENIED		-3
+#define PSCI_E_ALREADY_ON	-4
+#define PSCI_E_ON_PENDING	-5
+#define PSCI_E_INTERN_FAIL	-6
+#define PSCI_E_NOT_PRESENT	-7
+#define PSCI_E_DISABLED		-8
+#define PSCI_E_INVALID_ADDRESS	-9
+
+#define PSCI_INVALID_MPIDR	~((u_register_t)0)
+
+/*
+ * SYSTEM_RESET2 macros
+ */
+#define PSCI_RESET2_TYPE_VENDOR_SHIFT	31
+#define PSCI_RESET2_TYPE_VENDOR		(1U << PSCI_RESET2_TYPE_VENDOR_SHIFT)
+#define PSCI_RESET2_TYPE_ARCH		(0U << PSCI_RESET2_TYPE_VENDOR_SHIFT)
+#define PSCI_RESET2_SYSTEM_WARM_RESET	(PSCI_RESET2_TYPE_ARCH | 0)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+#include <types.h>
+
+/*
+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
+ * CPU. The definitions of these states can be found in Section 5.7.1 of the
+ * PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+	AFF_STATE_ON = U(0),
+	AFF_STATE_OFF = U(1),
+	AFF_STATE_ON_PENDING = U(2)
+} aff_info_state_t;
+
+/*
+ * These are the power states reported by PSCI_NODE_HW_STATE API for the
+ * specified CPU. The definitions of these states can be found in Section 5.15.3
+ * of the PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+	HW_ON = U(0),
+	HW_OFF = U(1),
+	HW_STANDBY = U(2)
+} node_hw_state_t;
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_PWR_LVL	(PLAT_MAX_PWR_LVL + U(1))
+
+/*
+ * Type for representing the local power state at a particular level.
+ */
+typedef uint8_t plat_local_state_t;
+
+/* The local state macro used to represent RUN state. */
+#define PSCI_LOCAL_STATE_RUN  	U(0)
+
+/*
+ * Macro to test whether the plat_local_state is RUN state
+ */
+#define is_local_state_run(plat_local_state) \
+			((plat_local_state) == PSCI_LOCAL_STATE_RUN)
+
+/*
+ * Macro to test whether the plat_local_state is RETENTION state
+ */
+#define is_local_state_retn(plat_local_state) \
+			(((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \
+			((plat_local_state) <= PLAT_MAX_RET_STATE))
+
+/*
+ * Macro to test whether the plat_local_state is OFF state
+ */
+#define is_local_state_off(plat_local_state) \
+			(((plat_local_state) > PLAT_MAX_RET_STATE) && \
+			((plat_local_state) <= PLAT_MAX_OFF_STATE))
+
+/*****************************************************************************
+ * This data structure defines the representation of the power state parameter
+ * for its exchange between the generic PSCI code and the platform port. For
+ * example, it is used by the platform port to specify the requested power
+ * states during a power management operation. It is used by the generic code to
+ * inform the platform about the target power states that each level should
+ * enter.
+ ****************************************************************************/
+typedef struct psci_power_state {
+	/*
+	 * The pwr_domain_state[] stores the local power state at each level
+	 * for the CPU.
+	 */
+	plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + U(1)];
+} psci_power_state_t;
+
+/*******************************************************************************
+ * Structure used to store per-cpu information relevant to the PSCI service.
+ * It is populated in the per-cpu data array. In return we get a guarantee that
+ * this information will not reside on a cache line shared with another cpu.
+ ******************************************************************************/
+typedef struct psci_cpu_data {
+	/* State as seen by PSCI Affinity Info API */
+	aff_info_state_t aff_info_state;
+
+	/*
+	 * Highest power level which takes part in a power management
+	 * operation.
+	 */
+	unsigned char target_pwrlvl;
+
+	/* The local power state of this CPU */
+	plat_local_state_t local_state;
+} psci_cpu_data_t;
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level power management functions
+ ******************************************************************************/
+typedef struct plat_psci_ops {
+	void (*cpu_standby)(plat_local_state_t cpu_state);
+	int (*pwr_domain_on)(u_register_t mpidr);
+	void (*pwr_domain_off)(const psci_power_state_t *target_state);
+	void (*pwr_domain_suspend_pwrdown_early)(
+				const psci_power_state_t *target_state);
+	void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+	void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+	void (*pwr_domain_suspend_finish)(
+				const psci_power_state_t *target_state);
+	void (*pwr_domain_pwr_down_wfi)(
+				const psci_power_state_t *target_state) __dead2;
+	void (*system_off)(void) __dead2;
+	void (*system_reset)(void) __dead2;
+	int (*validate_power_state)(unsigned int power_state,
+				    psci_power_state_t *req_state);
+	int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
+	void (*get_sys_suspend_power_state)(
+				    psci_power_state_t *req_state);
+	int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
+				    int pwrlvl);
+	int (*translate_power_state_by_mpidr)(u_register_t mpidr,
+				    unsigned int power_state,
+				    psci_power_state_t *output_state);
+	int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
+	int (*mem_protect_chk)(uintptr_t base, u_register_t length);
+	int (*read_mem_protect)(int *val);
+	int (*write_mem_protect)(int val);
+	int (*system_reset2)(int is_vendor,
+				int reset_type, u_register_t cookie);
+} plat_psci_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+unsigned int psci_version(void);
+int psci_cpu_on(u_register_t target_cpu,
+		uintptr_t entrypoint,
+		u_register_t context_id);
+int psci_cpu_suspend(unsigned int power_state,
+		     uintptr_t entrypoint,
+		     u_register_t context_id);
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
+int psci_cpu_off(void);
+int psci_affinity_info(u_register_t target_affinity,
+		       unsigned int lowest_affinity_level);
+int psci_migrate(u_register_t target_cpu);
+int psci_migrate_info_type(void);
+long psci_migrate_info_up_cpu(void);
+int psci_node_hw_state(u_register_t target_cpu,
+		       unsigned int power_level);
+int psci_features(unsigned int psci_fid);
+void __dead2 psci_power_down_wfi(void);
+void psci_arch_setup(void);
+
+/*
+ * The below API is deprecated. This is now replaced by bl31_warmboot_entry in
+ * AArch64.
+ */
+void psci_entrypoint(void) __deprecated;
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PSCI_H__ */
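
With the original (non-extended) power_state format, a caller composes the CPU_SUSPEND argument from a state ID, a suspend type and a target power level via psci_make_powerstate(). A hedged sketch; the state ID is made up, and the example is only meaningful when PSCI_EXTENDED_STATE_ID is 0, since the macro is not defined otherwise.

#include <psci.h>

/* Hypothetical platform-defined state ID, for illustration only. */
#define EXAMPLE_STATE_ID	U(0x22)

static int example_cluster_powerdown_suspend(uintptr_t entrypoint,
					     u_register_t context_id)
{
	/* Power-down suspend affecting power levels up to level 1. */
	unsigned int power_state = psci_make_powerstate(EXAMPLE_STATE_ID,
					PSTATE_TYPE_POWERDOWN, U(1));

	return psci_cpu_suspend(power_state, entrypoint, context_id);
}
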
diff --git a/include/lib/psci/psci_compat.h b/include/lib/psci/psci_compat.h
new file mode 100644
index 0000000..65ac15f
--- /dev/null
+++ b/include/lib/psci/psci_compat.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PSCI_COMPAT_H__
+#define __PSCI_COMPAT_H__
+
+#include <arch.h>
+#include <platform_def.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * The below declarations are to enable compatibility for the platform ports
+ * using the old platform interface and psci helpers.
+ */
+#define PLAT_MAX_PWR_LVL	PLATFORM_MAX_AFFLVL
+#define PLAT_NUM_PWR_DOMAINS	PLATFORM_NUM_AFFS
+
+/*******************************************************************************
+ * PSCI affinity related constants. An affinity instance could
+ * be present or absent physically to cater for asymmetric topologies.
+ ******************************************************************************/
+#define PSCI_AFF_ABSENT		0x0
+#define PSCI_AFF_PRESENT	0x1
+
+#define PSCI_STATE_ON		0x0
+#define PSCI_STATE_OFF		0x1
+#define PSCI_STATE_ON_PENDING	0x2
+#define PSCI_STATE_SUSPEND	0x3
+
+/*
+ * Using the compatibility platform interfaces means that the local states
+ * used in psci_power_state_t need only convey whether it is a power-down
+ * or standby state. The onus is on the platform port to do the right thing
+ * including the state coordination in case multiple power down states are
+ * involved. Hence, if we assume three generic states, viz. run, standby and
+ * power down, we can assign 1 and 2 to standby and power down respectively.
+ */
+#define PLAT_MAX_RET_STATE	1
+#define PLAT_MAX_OFF_STATE	2
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_DATA -1
+
+#define psci_get_pstate_afflvl(pstate)		psci_get_pstate_pwrlvl(pstate)
+
+/*
+ * This array stores the 'power_state' requests of each CPU during
+ * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
+ * compatibility layer when appropriate platform hooks are invoked.
+ */
+extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level pm functions
+ ******************************************************************************/
+typedef struct plat_pm_ops {
+	void (*affinst_standby)(unsigned int power_state);
+	int (*affinst_on)(unsigned long mpidr,
+			  unsigned long sec_entrypoint,
+			  unsigned int afflvl,
+			  unsigned int state);
+	void (*affinst_off)(unsigned int afflvl, unsigned int state);
+	void (*affinst_suspend)(unsigned long sec_entrypoint,
+			       unsigned int afflvl,
+			       unsigned int state);
+	void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
+	void (*affinst_suspend_finish)(unsigned int afflvl,
+				      unsigned int state);
+	void (*system_off)(void) __dead2;
+	void (*system_reset)(void) __dead2;
+	int (*validate_power_state)(unsigned int power_state);
+	int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
+	unsigned int (*get_sys_suspend_power_state)(void);
+} plat_pm_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes to enable compatibility for older platform ports
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long);
+int psci_get_suspend_stateid(void);
+int psci_get_suspend_powerstate(void);
+unsigned int psci_get_max_phys_off_afflvl(void);
+int psci_get_suspend_afflvl(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __PSCI_COMPAT_H__ */
diff --git a/include/lib/psci/psci_lib.h b/include/lib/psci/psci_lib.h
new file mode 100644
index 0000000..4697f17
--- /dev/null
+++ b/include/lib/psci/psci_lib.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PSCI_LIB_H__
+#define __PSCI_LIB_H__
+
+#include <ep_info.h>
+
+#ifndef __ASSEMBLY__
+#include <types.h>
+
+/*******************************************************************************
+ * Optional structure populated by the Secure Payload Dispatcher to be given a
+ * chance to perform any bookkeeping before PSCI executes a power management
+ * operation. It also allows PSCI to determine certain properties of the SP e.g.
+ * migrate capability etc.
+ ******************************************************************************/
+typedef struct spd_pm_ops {
+	void (*svc_on)(u_register_t target_cpu);
+	int32_t (*svc_off)(u_register_t __unused);
+	void (*svc_suspend)(u_register_t max_off_pwrlvl);
+	void (*svc_on_finish)(u_register_t __unused);
+	void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+	int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
+	int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
+	void (*svc_system_off)(void);
+	void (*svc_system_reset)(void);
+} spd_pm_ops_t;
+
+/*
+ * Function prototype for the warmboot entrypoint function which will be
+ * programmed in the mailbox by the platform.
+ */
+typedef void (*mailbox_entrypoint_t)(void);
+
+/******************************************************************************
+ * Structure to pass PSCI Library arguments.
+ *****************************************************************************/
+typedef struct psci_lib_args {
+	/* The version information of PSCI Library Interface */
+	param_header_t		h;
+	/* The warm boot entrypoint function */
+	mailbox_entrypoint_t	mailbox_ep;
+} psci_lib_args_t;
+
+/* Helper macro to set the psci_lib_args_t structure at runtime */
+#define SET_PSCI_LIB_ARGS_V1(_p, _entry)	do {			\
+	SET_PARAM_HEAD(_p, PARAM_PSCI_LIB_ARGS, VERSION_1, 0);		\
+	(_p)->mailbox_ep = (_entry);					\
+	} while (0)
+
+/* Helper macro to define the psci_lib_args_t statically */
+#define DEFINE_STATIC_PSCI_LIB_ARGS_V1(_name, _entry)		\
+	static const psci_lib_args_t (_name) = {		\
+		.h.type = (uint8_t)PARAM_PSCI_LIB_ARGS,		\
+		.h.version = (uint8_t)VERSION_1,		\
+		.h.size = (uint16_t)sizeof(_name),		\
+		.h.attr = 0,					\
+		.mailbox_ep = (_entry)				\
+	}
+
+/* Helper macro to verify the pointer to psci_lib_args_t structure */
+#define VERIFY_PSCI_LIB_ARGS_V1(_p)	((_p)			\
+		&& ((_p)->h.type == PARAM_PSCI_LIB_ARGS)	\
+		&& ((_p)->h.version == VERSION_1)		\
+		&& ((_p)->h.size == sizeof(*(_p)))		\
+		&& ((_p)->h.attr == 0)				\
+		&& ((_p)->mailbox_ep))
+
+/******************************************************************************
+ * PSCI Library Interfaces
+ *****************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
+			  u_register_t x1,
+			  u_register_t x2,
+			  u_register_t x3,
+			  u_register_t x4,
+			  void *cookie,
+			  void *handle,
+			  u_register_t flags);
+int psci_setup(const psci_lib_args_t *lib_args);
+int psci_secondaries_brought_up(void);
+void psci_warmboot_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+void psci_prepare_next_non_secure_ctx(
+			  entry_point_info_t *next_image_info);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __PSCI_LIB_H__ */
+
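
DEFINE_STATIC_PSCI_LIB_ARGS_V1() is the compile-time way to build the argument block that psci_setup() consumes; the only payload is the warm-boot entry point that PSCI will program into the platform mailbox. A minimal sketch; the entrypoint symbol and the error handling are placeholders.

#include <psci_lib.h>

/* Assumed warm-boot entry point of the image; the name is illustrative. */
void example_warmboot_entrypoint(void);

DEFINE_STATIC_PSCI_LIB_ARGS_V1(example_psci_args, example_warmboot_entrypoint);

static void example_init_psci(void)
{
	if (psci_setup(&example_psci_args) != 0) {
		/* Platform-specific error handling would go here. */
	}
}
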
diff --git a/include/lib/runtime_instr.h b/include/lib/runtime_instr.h
new file mode 100644
index 0000000..b2f1a69
--- /dev/null
+++ b/include/lib/runtime_instr.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RUNTIME_INSTR_H__
+#define __RUNTIME_INSTR_H__
+
+#define RT_INSTR_ENTER_PSCI		0
+#define RT_INSTR_EXIT_PSCI		1
+#define RT_INSTR_ENTER_HW_LOW_PWR	2
+#define RT_INSTR_EXIT_HW_LOW_PWR	3
+#define RT_INSTR_ENTER_CFLUSH		4
+#define RT_INSTR_EXIT_CFLUSH		5
+#define RT_INSTR_TOTAL_IDS		6
+
+#ifndef __ASSEMBLY__
+PMF_DECLARE_CAPTURE_TIMESTAMP(rt_instr_svc)
+PMF_DECLARE_GET_TIMESTAMP(rt_instr_svc)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __RUNTIME_INSTR_H__ */
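
The RT_INSTR_* IDs index the rt_instr_svc PMF service declared just above, so call sites stamp them with the generic PMF capture macro. A short sketch; it assumes a build in which PMF and the runtime-instrumentation service are actually enabled and registered.

#include <pmf.h>
#include <runtime_instr.h>

static void example_mark_psci_entry(void)
{
	/* Record the point at which a PSCI call entered the firmware. */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc, RT_INSTR_ENTER_PSCI,
			PMF_NO_CACHE_MAINT);
}
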
diff --git a/include/lib/semihosting.h b/include/lib/semihosting.h
index b4eecc5..eb7c4c3 100644
--- a/include/lib/semihosting.h
+++ b/include/lib/semihosting.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __SEMIHOSTING_H__
diff --git a/include/lib/smcc.h b/include/lib/smcc.h
new file mode 100644
index 0000000..13b1e7a
--- /dev/null
+++ b/include/lib/smcc.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMCC_H__
+#define __SMCC_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT		U(31)
+#define FUNCID_CC_SHIFT			U(30)
+#define FUNCID_OEN_SHIFT		U(24)
+#define FUNCID_NUM_SHIFT		U(0)
+
+#define FUNCID_TYPE_MASK		U(0x1)
+#define FUNCID_CC_MASK			U(0x1)
+#define FUNCID_OEN_MASK			U(0x3f)
+#define FUNCID_NUM_MASK			U(0xffff)
+
+#define FUNCID_TYPE_WIDTH		U(1)
+#define FUNCID_CC_WIDTH			U(1)
+#define FUNCID_OEN_WIDTH		U(6)
+#define FUNCID_NUM_WIDTH		U(16)
+
+#define GET_SMC_CC(id)			((id >> FUNCID_CC_SHIFT) & \
+					 FUNCID_CC_MASK)
+#define GET_SMC_TYPE(id)		((id >> FUNCID_TYPE_SHIFT) & \
+					 FUNCID_TYPE_MASK)
+
+#define SMC_64				U(1)
+#define SMC_32				U(0)
+#define SMC_OK				U(0)
+#define SMC_UNK				U(0xffffffff)
+#define SMC_TYPE_FAST			ULL(1)
+#if !ERROR_DEPRECATED
+#define SMC_TYPE_STD			ULL(0)
+#endif
+#define SMC_TYPE_YIELD			U(0)
+#define SMC_PREEMPTED		U(0xfffffffe)
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START			U(0)
+#define OEN_ARM_END			U(0)
+#define OEN_CPU_START			U(1)
+#define OEN_CPU_END			U(1)
+#define OEN_SIP_START			U(2)
+#define OEN_SIP_END			U(2)
+#define OEN_OEM_START			U(3)
+#define OEN_OEM_END			U(3)
+#define OEN_STD_START			U(4)	/* Standard Service Calls */
+#define OEN_STD_END			U(4)
+#define OEN_TAP_START			U(48)	/* Trusted Applications */
+#define OEN_TAP_END			U(49)
+#define OEN_TOS_START			U(50)	/* Trusted OS */
+#define OEN_TOS_END			U(63)
+#define OEN_LIMIT			U(64)
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/* Various flags passed to SMC handlers */
+#define SMC_FROM_SECURE		(U(0) << 0)
+#define SMC_FROM_NON_SECURE	(U(1) << 0)
+
+#define is_caller_non_secure(_f)	(!!(_f & SMC_FROM_NON_SECURE))
+#define is_caller_secure(_f)		(!(is_caller_non_secure(_f)))
+
+/* The macro below is used to identify a Standard Service SMC call */
+#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
+					   FUNCID_OEN_MASK) == OEN_STD_START)
+
+/* The macro below is used to identify a valid Fast SMC call */
+#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & U(0xff))) && \
+					   (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
+		_n0, _n1, _n2, _n3, _n4, _n5) \
+	CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
+	static const uuid_t _name = { \
+		_tl, _tm, _th, _cl, _ch, \
+		{ _n0, _n1, _n2, _n3, _n4, _n5 } \
+	}
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_H__ */
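
The field macros above are enough to classify an incoming function ID the way a dispatcher would. A hedged sketch using only definitions from this header; the function name and return strings are illustrative.

#include <smcc.h>

static const char *example_classify_smc(uint32_t smc_fid)
{
	if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
		if (is_std_svc_call(smc_fid))
			return "fast Standard Service call";
		return (GET_SMC_CC(smc_fid) == SMC_64) ? "fast SMC64 call"
						       : "fast SMC32 call";
	}

	return "yielding call";
}
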
diff --git a/include/lib/spinlock.h b/include/lib/spinlock.h
index cb0bc3e..a7b0d39 100644
--- a/include/lib/spinlock.h
+++ b/include/lib/spinlock.h
@@ -1,41 +1,29 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __SPINLOCK_H__
 #define __SPINLOCK_H__
 
+#ifndef __ASSEMBLY__
+
+#include <types.h>
+
 typedef struct spinlock {
-	volatile unsigned int lock;
+	volatile uint32_t lock;
 } spinlock_t;
 
 void spin_lock(spinlock_t *lock);
 void spin_unlock(spinlock_t *lock);
 
+#else
+
+/* Spin lock definitions for use in assembly */
+#define SPINLOCK_ASM_ALIGN	2
+#define SPINLOCK_ASM_SIZE	4
+
+#endif
+
 #endif /* __SPINLOCK_H__ */
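
The C interface is unchanged apart from the fixed-width lock word; the new part is the assembly-side size and alignment constants. For completeness, a small sketch of the C usage; the protected counter is illustrative.

#include <spinlock.h>

static spinlock_t example_lock;
static unsigned int example_shared_counter;

static void example_increment_shared(void)
{
	spin_lock(&example_lock);
	example_shared_counter++;	/* protected by example_lock */
	spin_unlock(&example_lock);
}
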
diff --git a/include/lib/stdlib/assert.h b/include/lib/stdlib/assert.h
new file mode 100644
index 0000000..db567db
--- /dev/null
+++ b/include/lib/stdlib/assert.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)assert.h	8.2 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _ASSERT_H_
+#define _ASSERT_H_
+
+#include <debug.h>
+#include <platform_def.h>
+#include <sys/cdefs.h>
+
+#ifndef PLAT_LOG_LEVEL_ASSERT
+#define PLAT_LOG_LEVEL_ASSERT	LOG_LEVEL
+#endif
+
+#if ENABLE_ASSERTIONS
+#define	_assert(e)	assert(e)
+# if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_VERBOSE
+#  define	assert(e)	((e) ? (void)0 : __assert(__FILE__, __LINE__, #e))
+# elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
+#  define	assert(e)	((e) ? (void)0 : __assert(__FILE__, __LINE__))
+# else
+#  define	assert(e)	((e) ? (void)0 : __assert())
+# endif
+#else
+#define	assert(e)	((void)0)
+#define	_assert(e)	((void)0)
+#endif /* ENABLE_ASSERTIONS */
+
+__BEGIN_DECLS
+#if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_VERBOSE
+void __assert(const char *, unsigned int, const char *) __dead2;
+#elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
+void __assert(const char *, unsigned int) __dead2;
+#else
+void __assert(void) __dead2;
+#endif
+__END_DECLS
+
+#endif /* !_ASSERT_H_ */
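
Which __assert() prototype the build gets now depends on PLAT_LOG_LEVEL_ASSERT, and assert() disappears entirely unless ENABLE_ASSERTIONS is set. A brief sketch; the checked condition is illustrative.

#include <assert.h>

static void example_validate_index(unsigned int idx, unsigned int limit)
{
	/* Compiles away unless the image is built with ENABLE_ASSERTIONS. */
	assert(idx < limit);
}
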
diff --git a/include/stdlib/inttypes.h b/include/lib/stdlib/inttypes.h
similarity index 100%
rename from include/stdlib/inttypes.h
rename to include/lib/stdlib/inttypes.h
diff --git a/include/lib/stdlib/machine/_inttypes.h b/include/lib/stdlib/machine/_inttypes.h
new file mode 100644
index 0000000..7dfe9c3
--- /dev/null
+++ b/include/lib/stdlib/machine/_inttypes.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _MACHINE_INTTYPES_H_
+#define _MACHINE_INTTYPES_H_
+
+/*
+ * Trusted Firmware does not depend on any definitions in this file. Content
+ * will be added as needed.
+ */
+
+#endif /* !_MACHINE_INTTYPES_H_ */
diff --git a/include/lib/stdlib/machine/_limits.h b/include/lib/stdlib/machine/_limits.h
new file mode 100644
index 0000000..3bdc66f
--- /dev/null
+++ b/include/lib/stdlib/machine/_limits.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 1988, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)limits.h	8.3 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef	_MACHINE__LIMITS_H_
+#define	_MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives.  Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions.  The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define	__CHAR_BIT	8		/* number of bits in a char */
+
+#define	__SCHAR_MAX	0x7f		/* max value for a signed char */
+#define	__SCHAR_MIN	(-0x7f-1)	/* min value for a signed char */
+
+#define	__UCHAR_MAX	0xff		/* max value for an unsigned char */
+
+#define	__USHRT_MAX	0xffff		/* max value for an unsigned short */
+#define	__SHRT_MAX	0x7fff		/* max value for a short */
+#define	__SHRT_MIN	(-0x7fff-1)	/* min value for a short */
+
+#define	__UINT_MAX	0xffffffffU	/* max value for an unsigned int */
+#define	__INT_MAX	0x7fffffff	/* max value for an int */
+#define	__INT_MIN	(-0x7fffffff-1)	/* min value for an int */
+
+#ifdef AARCH32
+#define	__ULONG_MAX	0xffffffffUL	/* max for an unsigned long */
+#define	__LONG_MAX	0x7fffffffL	/* max for a long */
+#define	__LONG_MIN	(-0x7fffffffL-1) /* min for a long */
+#else
+#define	__ULONG_MAX	0xffffffffffffffffUL	/* max for an unsigned long */
+#define	__LONG_MAX	0x7fffffffffffffffL	/* max for a long */
+#define	__LONG_MIN	(-0x7fffffffffffffffL-1) /* min for a long */
+#endif
+
+#define	__ULLONG_MAX	0xffffffffffffffffULL	/* max for an unsigned long long */
+#define	__LLONG_MAX	0x7fffffffffffffffLL	/* max for a long long */
+#define	__LLONG_MIN	(-0x7fffffffffffffffLL-1) /* min for a long long */
+
+#define	__SSIZE_MAX	__LONG_MAX	/* max value for a ssize_t */
+
+#define	__SIZE_T_MAX	__ULONG_MAX	/* max value for a size_t */
+
+#define	__OFF_MAX	__LONG_MAX	/* max value for an off_t */
+#define	__OFF_MIN	__LONG_MIN	/* min value for an off_t */
+
+#ifdef AARCH32
+/* Quads and long longs are the same size.  Ensure they stay in sync. */
+#define	__UQUAD_MAX	(__ULLONG_MAX)	/* max value for a uquad_t */
+#define	__QUAD_MAX	(__LLONG_MAX)	/* max value for a quad_t */
+#define	__QUAD_MIN	(__LLONG_MIN)	/* min value for a quad_t */
+#else
+/* Quads and longs are the same size.  Ensure they stay in sync. */
+#define	__UQUAD_MAX	(__ULONG_MAX)	/* max value for a uquad_t */
+#define	__QUAD_MAX	(__LONG_MAX)	/* max value for a quad_t */
+#define	__QUAD_MIN	(__LONG_MIN)	/* min value for a quad_t */
+#endif
+
+#ifdef AARCH32
+#define	__LONG_BIT	32
+#else
+#define	__LONG_BIT	64
+#endif
+#define	__WORD_BIT	32
+
+/* Minimum signal stack size. */
+#define	__MINSIGSTKSZ	(1024 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
diff --git a/include/lib/stdlib/machine/_stdint.h b/include/lib/stdlib/machine/_stdint.h
new file mode 100644
index 0000000..ee5372d
--- /dev/null
+++ b/include/lib/stdlib/machine/_stdint.h
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2016-2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef	_MACHINE__STDINT_H_
+#define	_MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+#define	INT8_C(c)		(c)
+#define	INT16_C(c)		(c)
+#define	INT32_C(c)		(c)
+#define	INT64_C(c)		(c ## LL)
+
+#define	UINT8_C(c)		(c)
+#define	UINT16_C(c)		(c)
+#define	UINT32_C(c)		(c ## U)
+#define	UINT64_C(c)		(c ## ULL)
+
+#define	INTMAX_C(c)		INT64_C(c)
+#define	UINTMAX_C(c)		UINT64_C(c)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+#define	INT8_MIN	(-0x7f-1)
+#define	INT16_MIN	(-0x7fff-1)
+#define	INT32_MIN	(-0x7fffffff-1)
+#define	INT64_MIN	(-0x7fffffffffffffffLL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define	INT8_MAX	0x7f
+#define	INT16_MAX	0x7fff
+#define	INT32_MAX	0x7fffffff
+#define	INT64_MAX	0x7fffffffffffffffLL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define	UINT8_MAX	0xff
+#define	UINT16_MAX	0xffff
+#define	UINT32_MAX	0xffffffffU
+#define	UINT64_MAX	0xffffffffffffffffULL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2  Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define	INT_LEAST8_MIN	INT8_MIN
+#define	INT_LEAST16_MIN	INT16_MIN
+#define	INT_LEAST32_MIN	INT32_MIN
+#define	INT_LEAST64_MIN	INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define	INT_LEAST8_MAX	INT8_MAX
+#define	INT_LEAST16_MAX	INT16_MAX
+#define	INT_LEAST32_MAX	INT32_MAX
+#define	INT_LEAST64_MAX	INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define	UINT_LEAST8_MAX	 UINT8_MAX
+#define	UINT_LEAST16_MAX UINT16_MAX
+#define	UINT_LEAST32_MAX UINT32_MAX
+#define	UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3  Limits of fastest minimum-width integer types
+ */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define	INT_FAST8_MIN	INT32_MIN
+#define	INT_FAST16_MIN	INT32_MIN
+#define	INT_FAST32_MIN	INT32_MIN
+#define	INT_FAST64_MIN	INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define	INT_FAST8_MAX	INT32_MAX
+#define	INT_FAST16_MAX	INT32_MAX
+#define	INT_FAST32_MAX	INT32_MAX
+#define	INT_FAST64_MAX	INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define	UINT_FAST8_MAX	UINT32_MAX
+#define	UINT_FAST16_MAX	UINT32_MAX
+#define	UINT_FAST32_MAX	UINT32_MAX
+#define	UINT_FAST64_MAX	UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4  Limits of integer types capable of holding object pointers
+ */
+#ifdef AARCH32
+#define	INTPTR_MIN	INT32_MIN
+#define	INTPTR_MAX	INT32_MAX
+#define	UINTPTR_MAX	UINT32_MAX
+#else
+#define	INTPTR_MIN	INT64_MIN
+#define	INTPTR_MAX	INT64_MAX
+#define	UINTPTR_MAX	UINT64_MAX
+#endif
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5  Limits of greatest-width integer types
+ */
+#define	INTMAX_MIN	INT64_MIN
+#define	INTMAX_MAX	INT64_MAX
+#define	UINTMAX_MAX	UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3  Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#ifdef AARCH32
+#define	PTRDIFF_MIN	INT32_MIN
+#define	PTRDIFF_MAX	INT32_MAX
+#else
+#define	PTRDIFF_MIN	INT64_MIN
+#define	PTRDIFF_MAX	INT64_MAX
+#endif
+
+/* Limits of sig_atomic_t. */
+#define	SIG_ATOMIC_MIN	INT32_MIN
+#define	SIG_ATOMIC_MAX	INT32_MAX
+
+/* Limit of size_t. */
+#ifdef AARCH32
+#define	SIZE_MAX	UINT32_MAX
+#else
+#define	SIZE_MAX	UINT64_MAX
+#endif
+
+#ifndef WCHAR_MIN /* Also possibly defined in <wchar.h> */
+/* Limits of wchar_t. */
+#define	WCHAR_MIN	INT32_MIN
+#define	WCHAR_MAX	INT32_MAX
+#endif
+
+/* Limits of wint_t. */
+#define	WINT_MIN	INT32_MIN
+#define	WINT_MAX	INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */
diff --git a/include/lib/stdlib/machine/_types.h b/include/lib/stdlib/machine/_types.h
new file mode 100644
index 0000000..fb1083b
--- /dev/null
+++ b/include/lib/stdlib/machine/_types.h
@@ -0,0 +1,140 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	From: @(#)ansi.h	8.2 (Berkeley) 1/4/94
+ *	From: @(#)types.h	8.3 (Berkeley) 1/5/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _MACHINE__TYPES_H_
+#define	_MACHINE__TYPES_H_
+
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * Basic types upon which most other types are built.
+ */
+typedef	__signed char		__int8_t;
+typedef	unsigned char		__uint8_t;
+typedef	short			__int16_t;
+typedef	unsigned short		__uint16_t;
+typedef	int			__int32_t;
+typedef	unsigned int		__uint32_t;
+
+
+/*
+ * Standard type definitions which are different in AArch64 and AArch32
+ */
+#ifdef	AARCH32
+typedef	long long		__int64_t;
+typedef	unsigned long long	__uint64_t;
+typedef	__int32_t	__critical_t;
+typedef	__int32_t	__intfptr_t;
+typedef	__int32_t	__intptr_t;
+typedef	__int32_t	__ptrdiff_t;		/* ptr1 - ptr2 */
+typedef	__int32_t	__register_t;
+typedef	__int32_t	__segsz_t;		/* segment size (in pages) */
+typedef	__uint32_t	__size_t;		/* sizeof() */
+typedef	__int32_t	__ssize_t;		/* byte count or error */
+typedef	__uint32_t	__uintfptr_t;
+typedef	__uint32_t	__uintptr_t;
+typedef	__uint32_t	__u_register_t;
+typedef	__uint32_t	__vm_offset_t;
+typedef	__uint32_t	__vm_paddr_t;
+typedef	__uint32_t	__vm_size_t;
+#elif defined AARCH64
+typedef	long			__int64_t;
+typedef	unsigned long		__uint64_t;
+typedef	__int64_t	__critical_t;
+typedef	__int64_t	__intfptr_t;
+typedef	__int64_t	__intptr_t;
+typedef	__int64_t	__ptrdiff_t;		/* ptr1 - ptr2 */
+typedef	__int64_t	__register_t;
+typedef	__int64_t	__segsz_t;		/* segment size (in pages) */
+typedef	__uint64_t	__size_t;		/* sizeof() */
+typedef	__int64_t	__ssize_t;		/* byte count or error */
+typedef	__uint64_t	__uintfptr_t;
+typedef	__uint64_t	__uintptr_t;
+typedef	__uint64_t	__u_register_t;
+typedef	__uint64_t	__vm_offset_t;
+typedef	__uint64_t	__vm_paddr_t;
+typedef	__uint64_t	__vm_size_t;
+#else
+#error "Only AArch32 or AArch64 supported"
+#endif /* AARCH32 */
+
+/*
+ * Standard type definitions.
+ */
+typedef	__int32_t	__clock_t;		/* clock()... */
+typedef	double		__double_t;
+typedef	float		__float_t;
+typedef	__int64_t	__intmax_t;
+typedef	__int32_t	__int_fast8_t;
+typedef	__int32_t	__int_fast16_t;
+typedef	__int32_t	__int_fast32_t;
+typedef	__int64_t	__int_fast64_t;
+typedef	__int8_t	__int_least8_t;
+typedef	__int16_t	__int_least16_t;
+typedef	__int32_t	__int_least32_t;
+typedef	__int64_t	__int_least64_t;
+typedef	__int64_t	__time_t;		/* time()... */
+typedef	__uint64_t	__uintmax_t;
+typedef	__uint32_t	__uint_fast8_t;
+typedef	__uint32_t	__uint_fast16_t;
+typedef	__uint32_t	__uint_fast32_t;
+typedef	__uint64_t	__uint_fast64_t;
+typedef	__uint8_t	__uint_least8_t;
+typedef	__uint16_t	__uint_least16_t;
+typedef	__uint32_t	__uint_least32_t;
+typedef	__uint64_t	__uint_least64_t;
+typedef	__int64_t	__vm_ooffset_t;
+typedef	__uint64_t	__vm_pindex_t;
+
+/*
+ * Unusual type definitions.
+ */
+#ifdef __GNUCLIKE_BUILTIN_VARARGS
+typedef __builtin_va_list	__va_list;	/* internally known to gcc */
+#else
+typedef	char *			__va_list;
+#endif /* __GNUCLIKE_BUILTIN_VARARGS */
+#if defined(__GNUCLIKE_BUILTIN_VAALIST) && !defined(__GNUC_VA_LIST) \
+    && !defined(__NO_GNUC_VA_LIST)
+#define __GNUC_VA_LIST
+typedef __va_list		__gnuc_va_list;	/* compatibility w/GNU headers*/
+#endif
+
+#endif /* !_MACHINE__TYPES_H_ */
diff --git a/include/lib/stdlib/machine/endian.h b/include/lib/stdlib/machine/endian.h
new file mode 100644
index 0000000..57e33b1
--- /dev/null
+++ b/include/lib/stdlib/machine/endian.h
@@ -0,0 +1,168 @@
+/*-
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)endian.h	8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define	_MACHINE_ENDIAN_H_
+
+#include <sys/_types.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define	_LITTLE_ENDIAN  1234    /* LSB first: i386, vax */
+#define	_BIG_ENDIAN     4321    /* MSB first: 68000, ibm, net */
+#define	_PDP_ENDIAN     3412    /* LSB first in word, MSW first in long */
+
+#define	_BYTE_ORDER	_LITTLE_ENDIAN
+
+#if __BSD_VISIBLE
+#define	LITTLE_ENDIAN   _LITTLE_ENDIAN
+#define	BIG_ENDIAN      _BIG_ENDIAN
+#define	PDP_ENDIAN      _PDP_ENDIAN
+#define	BYTE_ORDER      _BYTE_ORDER
+#endif
+
+#define	_QUAD_HIGHWORD  1
+#define	_QUAD_LOWWORD 0
+#define	__ntohl(x)        (__bswap32(x))
+#define	__ntohs(x)        (__bswap16(x))
+#define	__htonl(x)        (__bswap32(x))
+#define	__htons(x)        (__bswap16(x))
+
+#ifdef AARCH32
+static __inline __uint64_t
+__bswap64(__uint64_t _x)
+{
+
+	return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+	    ((_x >> 8) & 0xff000000) | ((_x << 8) & ((__uint64_t)0xff << 32)) |
+	    ((_x << 24) & ((__uint64_t)0xff << 40)) |
+	    ((_x << 40) & ((__uint64_t)0xff << 48)) | ((_x << 56)));
+}
+
+static __inline __uint32_t
+__bswap32_var(__uint32_t v)
+{
+	__uint32_t t1;
+
+	__asm __volatile("eor %1, %0, %0, ror #16\n"
+			"bic %1, %1, #0x00ff0000\n"
+			"mov %0, %0, ror #8\n"
+			"eor %0, %0, %1, lsr #8\n"
+			 : "+r" (v), "=r" (t1));
+
+	return (v);
+}
+
+static __inline __uint16_t
+__bswap16_var(__uint16_t v)
+{
+	__uint32_t ret = v & 0xffff;
+
+	__asm __volatile(
+	    "mov    %0, %0, ror #8\n"
+	    "orr    %0, %0, %0, lsr #16\n"
+	    "bic    %0, %0, %0, lsl #16"
+	    : "+r" (ret));
+
+	return ((__uint16_t)ret);
+}
+#elif defined AARCH64
+static __inline __uint64_t
+__bswap64(__uint64_t x)
+{
+	__uint64_t ret;
+
+	__asm __volatile("rev %0, %1\n"
+			 : "=&r" (ret), "+r" (x));
+
+	return (ret);
+}
+
+static __inline __uint32_t
+__bswap32_var(__uint32_t v)
+{
+	__uint32_t ret;
+
+	__asm __volatile("rev32 %x0, %x1\n"
+			 : "=&r" (ret), "+r" (v));
+
+	return (ret);
+}
+
+static __inline __uint16_t
+__bswap16_var(__uint16_t v)
+{
+	__uint32_t ret;
+
+	__asm __volatile("rev16 %w0, %w1\n"
+			 : "=&r" (ret), "+r" (v));
+
+	return ((__uint16_t)ret);
+}
+#else
+#error "Only AArch32 or AArch64 supported"
+#endif /* AARCH32 */
+
+#ifdef __OPTIMIZE__
+
+#define	__bswap32_constant(x)	\
+    ((((x) & 0xff000000U) >> 24) |	\
+     (((x) & 0x00ff0000U) >>  8) |	\
+     (((x) & 0x0000ff00U) <<  8) |	\
+     (((x) & 0x000000ffU) << 24))
+
+#define	__bswap16_constant(x)	\
+    ((((x) & 0xff00) >> 8) |		\
+     (((x) & 0x00ff) << 8))
+
+#define	__bswap16(x)	\
+    ((__uint16_t)(__builtin_constant_p(x) ?	\
+     __bswap16_constant(x) :			\
+     __bswap16_var(x)))
+
+#define	__bswap32(x)	\
+    ((__uint32_t)(__builtin_constant_p(x) ? 	\
+     __bswap32_constant(x) :			\
+     __bswap32_var(x)))
+
+#else
+#define	__bswap16(x)	__bswap16_var(x)
+#define	__bswap32(x)	__bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* !_MACHINE_ENDIAN_H_ */
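The __bswap16()/__bswap32() wrappers above select the shift-and-mask constant form when __builtin_constant_p() holds (and __OPTIMIZE__ is defined) and fall back to the inline-assembly *_var() routines otherwise. A minimal illustrative sketch, not part of the patch, assuming the build defines AARCH64 (as the TF makefiles do) and that the header is reachable as <machine/endian.h>:

#include <machine/endian.h>

/*
 * Non-constant argument: expands to __bswap32_var(), i.e. the "rev32"
 * inline assembly above (or the eor/bic/ror sequence on AArch32).
 */
static __uint32_t to_network_u32(__uint32_t host)
{
	return (__htonl(host));
}

/*
 * Constant argument: __bswap32_constant() is selected, so the swap is
 * folded away at compile time when building with optimisation.
 */
static __uint32_t magic_in_network_order(void)
{
	return (__htonl(0x11223344U));
}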
diff --git a/include/lib/stdlib/stdbool.h b/include/lib/stdlib/stdbool.h
new file mode 100644
index 0000000..48070c1
--- /dev/null
+++ b/include/lib/stdlib/stdbool.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __bool_true_false_are_defined
+#define	__bool_true_false_are_defined	1
+
+#ifndef __cplusplus
+
+#define	false	0
+#define	true	1
+
+#define	bool	_Bool
+#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
+typedef	int	_Bool;
+#endif
+
+#endif /* !__cplusplus */
+#endif /* __bool_true_false_are_defined */
diff --git a/include/stdlib/stddef.h b/include/lib/stdlib/stddef.h
similarity index 100%
rename from include/stdlib/stddef.h
rename to include/lib/stdlib/stddef.h
diff --git a/include/lib/stdlib/stdio.h b/include/lib/stdlib/stdio.h
new file mode 100644
index 0000000..57e5c7f
--- /dev/null
+++ b/include/lib/stdlib/stdio.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)stdio.h	8.5 (Berkeley) 4/29/95
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+
+#ifndef	_STDIO_H_
+#define	_STDIO_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef	__size_t	size_t;
+#define	_SIZE_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+#define	_SSIZE_T_DECLARED
+typedef	__ssize_t	ssize_t;
+#endif
+
+#define	EOF	(-1)
+
+int	 printf(const char * __restrict, ...) __printflike(1, 2);
+int	 putchar(int);
+int	 puts(const char *);
+int	 sprintf(char * __restrict, const char * __restrict, ...)
+	__printflike(2, 3);
+int	 vsprintf(char * __restrict, const char * __restrict,
+	   __va_list) __printflike(2, 0);
+
+int	 sscanf(const char *__restrict, char const *__restrict, ...);
+
+#if __ISO_C_VISIBLE >= 1999
+int	 snprintf(char * __restrict, size_t, const char * __restrict,
+	   ...) __printflike(3, 4);
+int	 vsnprintf(char * __restrict, size_t, const char * __restrict,
+	   __va_list) __printflike(3, 0);
+#endif
+
+#endif /* !_STDIO_H_ */
diff --git a/include/stdlib/stdlib.h b/include/lib/stdlib/stdlib.h
similarity index 100%
rename from include/stdlib/stdlib.h
rename to include/lib/stdlib/stdlib.h
diff --git a/include/lib/stdlib/string.h b/include/lib/stdlib/string.h
new file mode 100644
index 0000000..56677b2
--- /dev/null
+++ b/include/lib/stdlib/string.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)string.h	8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2013-2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _STRING_H_
+#define	_STRING_H_
+
+#include <sys/cdefs.h>
+#include <sys/_null.h>
+#include <sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef	__size_t	size_t;
+#define	_SIZE_T_DECLARED
+#endif
+
+__BEGIN_DECLS
+
+void	*memchr(const void *, int, size_t) __pure;
+int	 memcmp(const void *, const void *, size_t) __pure;
+void	*memcpy(void * __restrict, const void * __restrict, size_t);
+void	*memcpy16(void * __restrict, const void * __restrict, size_t);
+void	*memmove(void *, const void *, size_t);
+void	*memset(void *, int, size_t);
+
+char	*strchr(const char *, int) __pure;
+int	 strcmp(const char *, const char *) __pure;
+size_t	 strlen(const char *) __pure;
+int	 strncmp(const char *, const char *, size_t) __pure;
+size_t	 strnlen(const char *, size_t) __pure;
+int	 strcasecmp(const char *, const char *);
+int	 timingsafe_bcmp(const void *, const void *, size_t);
+
+__END_DECLS
+
+#endif /* _STRING_H_ */
diff --git a/include/stdlib/strings.h b/include/lib/stdlib/strings.h
similarity index 100%
rename from include/stdlib/strings.h
rename to include/lib/stdlib/strings.h
diff --git a/include/stdlib/sys/_null.h b/include/lib/stdlib/sys/_null.h
similarity index 100%
rename from include/stdlib/sys/_null.h
rename to include/lib/stdlib/sys/_null.h
diff --git a/include/stdlib/sys/_stdint.h b/include/lib/stdlib/sys/_stdint.h
similarity index 100%
rename from include/stdlib/sys/_stdint.h
rename to include/lib/stdlib/sys/_stdint.h
diff --git a/include/stdlib/sys/_timespec.h b/include/lib/stdlib/sys/_timespec.h
similarity index 100%
rename from include/stdlib/sys/_timespec.h
rename to include/lib/stdlib/sys/_timespec.h
diff --git a/include/stdlib/sys/_types.h b/include/lib/stdlib/sys/_types.h
similarity index 100%
rename from include/stdlib/sys/_types.h
rename to include/lib/stdlib/sys/_types.h
diff --git a/include/lib/stdlib/sys/cdefs.h b/include/lib/stdlib/sys/cdefs.h
new file mode 100644
index 0000000..70c09fd
--- /dev/null
+++ b/include/lib/stdlib/sys/cdefs.h
@@ -0,0 +1,688 @@
+/*-
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Berkeley Software Design, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)cdefs.h	8.8 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef	_SYS_CDEFS_H_
+#define	_SYS_CDEFS_H_
+
+#if defined(__cplusplus)
+#define	__BEGIN_DECLS	extern "C" {
+#define	__END_DECLS	}
+#else
+#define	__BEGIN_DECLS
+#define	__END_DECLS
+#endif
+
+/*
+ * This code has been put in place to help reduce the addition of
+ * compiler-specific defines in FreeBSD code and to keep the source
+ * tree compiler-agnostic.
+ */
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+#if __GNUC__ >= 3 || defined(__INTEL_COMPILER)
+#define __GNUCLIKE_ASM 3
+#define __GNUCLIKE_MATH_BUILTIN_CONSTANTS
+#else
+#define __GNUCLIKE_ASM 2
+#endif
+#define __GNUCLIKE___TYPEOF 1
+#define __GNUCLIKE___OFFSETOF 1
+#define __GNUCLIKE___SECTION 1
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_CTOR_SECTION_HANDLING 1
+#endif
+
+#define __GNUCLIKE_BUILTIN_CONSTANT_P 1
+# if defined(__INTEL_COMPILER) && defined(__cplusplus) \
+    && __INTEL_COMPILER < 800
+#  undef __GNUCLIKE_BUILTIN_CONSTANT_P
+# endif
+
+#if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3) && !defined(__INTEL_COMPILER)
+# define __GNUCLIKE_BUILTIN_VARARGS 1
+# define __GNUCLIKE_BUILTIN_STDARG 1
+# define __GNUCLIKE_BUILTIN_VAALIST 1
+#endif
+
+#if defined(__GNUC__)
+# define __GNUC_VA_LIST_COMPATIBILITY 1
+#endif
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_BUILTIN_NEXT_ARG 1
+# define __GNUCLIKE_MATH_BUILTIN_RELOPS
+#endif
+
+#define __GNUCLIKE_BUILTIN_MEMCPY 1
+
+/* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */
+#define __CC_SUPPORTS_INLINE 1
+#define __CC_SUPPORTS___INLINE 1
+#define __CC_SUPPORTS___INLINE__ 1
+
+#define __CC_SUPPORTS___FUNC__ 1
+#define __CC_SUPPORTS_WARNING 1
+
+#define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */
+
+#define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1
+
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+/*
+ * Macro to test if we're using a specific version of gcc or later.
+ */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define	__GNUC_PREREQ__(ma, mi)	\
+	(__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define	__GNUC_PREREQ__(ma, mi)	0
+#endif
+
+/*
+ * The __CONCAT macro is used to concatenate parts of symbol names, e.g.
+ * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo.
+ * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI
+ * mode -- there must be no spaces between its arguments, and for nested
+ * __CONCAT's, all the __CONCAT's must be at the left.  __CONCAT can also
+ * concatenate double-quoted strings produced by the __STRING macro, but
+ * this only works with ANSI C.
+ *
+ * __XSTRING is like __STRING, but it expands any macros in its argument
+ * first.  It is only available with ANSI C.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define	__P(protos)	protos		/* full-blown ANSI C */
+#define	__CONCAT1(x,y)	x ## y
+#define	__CONCAT(x,y)	__CONCAT1(x,y)
+#define	__STRING(x)	#x		/* stringify without expanding x */
+#define	__XSTRING(x)	__STRING(x)	/* expand x, then stringify */
+
+#define	__const		const		/* define reserved names to standard */
+#define	__signed	signed
+#define	__volatile	volatile
+#if defined(__cplusplus)
+#define	__inline	inline		/* convert to C++ keyword */
+#else
+#if !(defined(__CC_SUPPORTS___INLINE))
+#define	__inline			/* delete GCC keyword */
+#endif /* ! __CC_SUPPORTS___INLINE */
+#endif /* !__cplusplus */
+
+#else	/* !(__STDC__ || __cplusplus) */
+#define	__P(protos)	()		/* traditional C preprocessor */
+#define	__CONCAT(x,y)	x/**/y
+#define	__STRING(x)	"x"
+
+#if !defined(__CC_SUPPORTS___INLINE)
+#define	__const				/* delete pseudo-ANSI C keywords */
+#define	__inline
+#define	__signed
+#define	__volatile
+/*
+ * In non-ANSI C environments, new programs will want ANSI-only C keywords
+ * deleted from the program and old programs will want them left alone.
+ * When using a compiler other than gcc, programs using the ANSI C keywords
+ * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS.
+ * When using "gcc -traditional", we assume that this is the intent; if
+ * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone.
+ */
+#ifndef	NO_ANSI_KEYWORDS
+#define	const				/* delete ANSI C keywords */
+#define	inline
+#define	signed
+#define	volatile
+#endif	/* !NO_ANSI_KEYWORDS */
+#endif	/* !__CC_SUPPORTS___INLINE */
+#endif	/* !(__STDC__ || __cplusplus) */
+
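To make the __CONCAT()/__STRING()/__XSTRING() distinction above concrete (stringify without expansion versus expand-then-stringify), a small hypothetical sketch:

#include <sys/cdefs.h>

#define	FW_MAJOR	1
#define	FW_MINOR	4

/* Token pasting: HANDLER(1) expands to the identifier handler_1. */
#define	HANDLER(n)	__CONCAT(handler_, n)

/*
 * __STRING(FW_MAJOR) yields "FW_MAJOR" (the macro is not expanded),
 * while __XSTRING(FW_MAJOR) expands it first and yields "1".
 */
static const char fw_version[] = __XSTRING(FW_MAJOR) "." __XSTRING(FW_MINOR);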
+/*
+ * Compiler-dependent macros to help declare dead (non-returning) and
+ * pure (no side effects) functions, and unused variables.  They are
+ * null except for versions of gcc that are known to support the features
+ * properly (old versions of gcc-2 supported the dead and pure features
+ * in a different (wrong) way).  If we do not provide an implementation
+ * for a given compiler, let the compile fail if it is told to use
+ * a feature that we cannot live without.
+ */
+#ifdef lint
+#define	__dead2
+#define	__pure2
+#define	__unused
+#define	__packed
+#define	__aligned(x)
+#define	__section(x)
+#else
+#if !__GNUC_PREREQ__(2, 5) && !defined(__INTEL_COMPILER)
+#define	__dead2
+#define	__pure2
+#define	__unused
+#endif
+#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7 && !defined(__INTEL_COMPILER)
+#define	__dead2		__attribute__((__noreturn__))
+#define	__pure2		__attribute__((__const__))
+#define	__unused
+/* XXX Find out what to do for __packed, __aligned and __section */
+#endif
+#if __GNUC_PREREQ__(2, 7)
+#define	__dead2		__attribute__((__noreturn__))
+#define	__pure2		__attribute__((__const__))
+#define	__unused	__attribute__((__unused__))
+#define	__used		__attribute__((__used__))
+#define	__packed	__attribute__((__packed__))
+#define	__aligned(x)	__attribute__((__aligned__(x)))
+#define	__section(x)	__attribute__((__section__(x)))
+#endif
+#if defined(__INTEL_COMPILER)
+#define __dead2		__attribute__((__noreturn__))
+#define __pure2		__attribute__((__const__))
+#define __unused	__attribute__((__unused__))
+#define __used		__attribute__((__used__))
+#define __packed	__attribute__((__packed__))
+#define __aligned(x)	__attribute__((__aligned__(x)))
+#define __section(x)	__attribute__((__section__(x)))
+#endif
+#endif
+
+#if !__GNUC_PREREQ__(2, 95)
+#define	__alignof(x)	__offsetof(struct { char __a; x __b; }, __b)
+#endif
+
+/*
+ * Keywords added in C11.
+ */
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define	_Alignas(e)		alignas(e)
+#define	_Alignof(e)		alignof(e)
+#define	_Noreturn		[[noreturn]]
+#define	_Static_assert(e, s)	static_assert(e, s)
+/* FIXME: change this to thread_local when clang in base supports it */
+#define	_Thread_local		__thread
+#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* Do nothing.  They are language keywords. */
+#else
+/* Not supported.  Implement them using our versions. */
+#define	_Alignas(x)		__aligned(x)
+#define	_Alignof(x)		__alignof(x)
+#define	_Noreturn		__dead2
+#define	_Thread_local		__thread
+#ifdef __COUNTER__
+#define	_Static_assert(x, y)	__Static_assert(x, __COUNTER__)
+#define	__Static_assert(x, y)	___Static_assert(x, y)
+#define	___Static_assert(x, y)	typedef char __assert_ ## y[(x) ? 1 : -1]
+#else
+#define	_Static_assert(x, y)	struct __hack
+#endif
+#endif
+
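On pre-C11 compilers the _Static_assert() fallback above relies on a negatively-sized array typedef to trigger the diagnostic; the message argument is only reported by real C11/C++11 compilers. An illustrative use (the condition is hypothetical):

#include <sys/cdefs.h>

/* Passes: expands (pre-C11, with __COUNTER__) to typedef char __assert_<n>[1]; */
_Static_assert(sizeof(void *) == 8 || sizeof(void *) == 4,
	       "unexpected pointer size");

/*
 * A false condition would instead expand to typedef char __assert_<n>[-1]
 * and stop the build.
 */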
+/*
+ * Emulation of C11 _Generic().  Unlike the previously defined C11
+ * keywords, it is not possible to implement this using exactly the same
+ * syntax.  Therefore implement something similar under the name
+ * __generic().  Unlike _Generic(), this macro can only distinguish
+ * between a single type, so it requires nested invocations to
+ * distinguish multiple cases.
+ */
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#define	__generic(expr, t, yes, no)					\
+	_Generic(expr, t: yes, default: no)
+#elif __GNUC_PREREQ__(3, 1) && !defined(__cplusplus)
+#define	__generic(expr, t, yes, no)					\
+	__builtin_choose_expr(						\
+	    __builtin_types_compatible_p(__typeof(expr), t), yes, no)
+#endif
+
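Since __generic() above distinguishes only one type per call, selecting among several types means nesting it in the 'no' branch, for example (the format strings are illustrative only):

#include <sys/cdefs.h>

/*
 * Picks a printf format for x: "%u" for unsigned int, "%lu" for
 * unsigned long and "%d" for anything else.  Each __generic() tests a
 * single type, so the second invocation nests inside the first.
 */
#define	FMT_OF(x)						\
	__generic(x, unsigned int, "%u",			\
	    __generic(x, unsigned long, "%lu", "%d"))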
+#if __GNUC_PREREQ__(2, 96)
+#define	__malloc_like	__attribute__((__malloc__))
+#define	__pure		__attribute__((__pure__))
+#else
+#define	__malloc_like
+#define	__pure
+#endif
+
+#if __GNUC_PREREQ__(3, 1) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800)
+#define	__always_inline	__attribute__((__always_inline__))
+#else
+#define	__always_inline
+#endif
+
+#if __GNUC_PREREQ__(3, 1)
+#define	__noinline	__attribute__ ((__noinline__))
+#define	__deprecated	__attribute__ ((__deprecated__))
+#else
+#define	__noinline
+#define	__deprecated
+#endif
+
+#if __GNUC_PREREQ__(3, 3)
+#define __nonnull(x)	__attribute__((__nonnull__(x)))
+#else
+#define __nonnull(x)
+#endif
+
+#if __GNUC_PREREQ__(3, 4)
+#define	__fastcall	__attribute__((__fastcall__))
+#else
+#define	__fastcall
+#endif
+
+#if __GNUC_PREREQ__(4, 1)
+#define	__returns_twice	__attribute__((__returns_twice__))
+#else
+#define	__returns_twice
+#endif
+
+/* XXX: should use `#if __STDC_VERSION__ < 199901'. */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define	__func__	NULL
+#endif
+
+#if (defined(__INTEL_COMPILER) || (defined(__GNUC__) && __GNUC__ >= 2)) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901
+#define	__LONG_LONG_SUPPORTED
+#endif
+
+/* C++11 exposes a load of C99 stuff */
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define	__LONG_LONG_SUPPORTED
+#ifndef	__STDC_LIMIT_MACROS
+#define	__STDC_LIMIT_MACROS
+#endif
+#ifndef	__STDC_CONSTANT_MACROS
+#define	__STDC_CONSTANT_MACROS
+#endif
+#endif
+
+/*
+ * GCC 2.95 provides `__restrict' as an extension to C90 to support the
+ * C99-specific `restrict' type qualifier.  We happen to use `__restrict' as
+ * a way to define the `restrict' type qualifier without disturbing older
+ * software that is unaware of C99 keywords.
+ */
+#if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95)
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901 || defined(lint)
+#define	__restrict
+#else
+#define	__restrict	restrict
+#endif
+#endif
+
+/*
+ * GNU C version 2.96 adds explicit branch prediction so that
+ * the CPU back-end can hint the processor and also so that
+ * code blocks can be reordered such that the predicted path
+ * sees a more linear flow, thus improving cache behavior, etc.
+ *
+ * The following two macros provide us with a way to utilize this
+ * compiler feature.  Use __predict_true() if you expect the expression
+ * to evaluate to true, and __predict_false() if you expect the
+ * expression to evaluate to false.
+ *
+ * A few notes about usage:
+ *
+ *	* Generally, use __predict_false() for error condition checks
+ *	  (unless you have a _strong_ reason to do otherwise, in which
+ *	  case document it), and/or __predict_true() for `no-error'
+ *	  checks, assuming you want to optimize for the no-error case.
+ *
+ *	* Other than that, if you don't know the likelihood of a test
+ *	  succeeding from empirical or other `hard' evidence, don't
+ *	  make predictions.
+ *
+ *	* These are meant to be used in places that are run `a lot'.
+ *	  It is wasteful to make predictions in code that is run
+ *	  seldom (e.g. at subsystem initialization time) as the
+ *	  basic block reordering that this affects can often generate
+ *	  larger code.
+ */
+#if __GNUC_PREREQ__(2, 96)
+#define __predict_true(exp)     __builtin_expect((exp), 1)
+#define __predict_false(exp)    __builtin_expect((exp), 0)
+#else
+#define __predict_true(exp)     (exp)
+#define __predict_false(exp)    (exp)
+#endif
+
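Following the usage notes above, a hedged sketch of a hot-path helper (process(), validate() and err_count are invented for the example):

#include <sys/cdefs.h>

static unsigned long err_count;

static int process(const void *arg, int (*validate)(const void *))
{
	/* Error paths are expected to be rare, so hint them as unlikely. */
	if (__predict_false(arg == NULL))
		return (-1);

	if (__predict_false(validate(arg) == 0)) {
		err_count++;
		return (-1);
	}

	/* Predicted, no-error path. */
	return (0);
}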
+#if __GNUC_PREREQ__(4, 2)
+#define	__hidden	__attribute__((__visibility__("hidden")))
+#define	__exported	__attribute__((__visibility__("default")))
+#else
+#define	__hidden
+#define	__exported
+#endif
+
+/*
+ * We define this here since <stddef.h>, <sys/queue.h>, and <sys/types.h>
+ * require it.
+ */
+#if __GNUC_PREREQ__(4, 1)
+#define __offsetof(type, field)	 __builtin_offsetof(type, field)
+#else
+#ifndef __cplusplus
+#define	__offsetof(type, field) \
+	((__size_t)(__uintptr_t)((const volatile void *)&((type *)0)->field))
+#else
+#define __offsetof(type, field)					\
+  (__offsetof__ (reinterpret_cast <__size_t>			\
+                 (&reinterpret_cast <const volatile char &>	\
+                  (static_cast<type *> (0)->field))))
+#endif
+#endif
+#define	__rangeof(type, start, end) \
+	(__offsetof(type, end) - __offsetof(type, start))
+
+/*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure.  When using GCC, we first
+ * assign pointer x to a local variable, to check that its type is
+ * compatible with member m.
+ */
+#if __GNUC_PREREQ__(3, 1)
+#define	__containerof(x, s, m) ({					\
+	const volatile __typeof(((s *)0)->m) *__x = (x);		\
+	__DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\
+})
+#else
+#define	__containerof(x, s, m)						\
+	__DEQUALIFY(s *, (const volatile char *)(x) - __offsetof(s, m))
+#endif
+
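A minimal sketch of __containerof() recovering the enclosing structure from a pointer to an embedded member (struct request and struct list_node are made up for the illustration):

#include <sys/cdefs.h>

struct list_node {
	struct list_node *next;
};

struct request {
	int		 id;
	struct list_node node;
};

static int request_id(struct list_node *n)
{
	/* With GCC >= 3.1 the type of the member pointer is also checked. */
	struct request *req = __containerof(n, struct request, node);

	return (req->id);
}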
+/*
+ * Compiler-dependent macros to declare that functions take printf-like
+ * or scanf-like arguments.  They are null except for versions of gcc
+ * that are known to support the features properly (old versions of gcc-2
+ * didn't permit keeping the keywords out of the application namespace).
+ */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define	__printflike(fmtarg, firstvararg)
+#define	__scanflike(fmtarg, firstvararg)
+#define	__format_arg(fmtarg)
+#define	__strfmonlike(fmtarg, firstvararg)
+#define	__strftimelike(fmtarg, firstvararg)
+#else
+#define	__printflike(fmtarg, firstvararg) \
+	    __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+#define	__scanflike(fmtarg, firstvararg) \
+	    __attribute__((__format__ (__scanf__, fmtarg, firstvararg)))
+#define	__format_arg(fmtarg)	__attribute__((__format_arg__ (fmtarg)))
+#define	__strfmonlike(fmtarg, firstvararg) \
+	    __attribute__((__format__ (__strfmon__, fmtarg, firstvararg)))
+#define	__strftimelike(fmtarg, firstvararg) \
+	    __attribute__((__format__ (__strftime__, fmtarg, firstvararg)))
+#endif
+
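The stdio.h added by this patch applies __printflike() to printf() and friends; a caller can annotate its own variadic wrappers the same way so format/argument mismatches are diagnosed at build time. A hypothetical example:

#include <sys/cdefs.h>

/*
 * Format string is parameter 2, variadic arguments start at parameter 3,
 * so e.g. log_msg(1, "%s", 42) is flagged by the compiler.
 */
void log_msg(int level, const char *fmt, ...) __printflike(2, 3);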
+/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
+#if __FreeBSD_cc_version >= 300001 && defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define	__printf0like(fmtarg, firstvararg) \
+	    __attribute__((__format__ (__printf0__, fmtarg, firstvararg)))
+#else
+#define	__printf0like(fmtarg, firstvararg)
+#endif
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#ifndef __INTEL_COMPILER
+#define	__strong_reference(sym,aliassym)	\
+	extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym)))
+#endif
+#ifdef __STDC__
+#define	__weak_reference(sym,alias)	\
+	__asm__(".weak " #alias);	\
+	__asm__(".equ "  #alias ", " #sym)
+#define	__warn_references(sym,msg)	\
+	__asm__(".section .gnu.warning." #sym);	\
+	__asm__(".asciz \"" msg "\"");	\
+	__asm__(".previous")
+#define	__sym_compat(sym,impl,verid)	\
+	__asm__(".symver " #impl ", " #sym "@" #verid)
+#define	__sym_default(sym,impl,verid)	\
+	__asm__(".symver " #impl ", " #sym "@@" #verid)
+#else
+#define	__weak_reference(sym,alias)	\
+	__asm__(".weak alias");		\
+	__asm__(".equ alias, sym")
+#define	__warn_references(sym,msg)	\
+	__asm__(".section .gnu.warning.sym"); \
+	__asm__(".asciz \"msg\"");	\
+	__asm__(".previous")
+#define	__sym_compat(sym,impl,verid)	\
+	__asm__(".symver impl, sym@verid")
+#define	__sym_default(impl,sym,verid)	\
+	__asm__(".symver impl, sym@@verid")
+#endif	/* __STDC__ */
+#endif	/* __GNUC__ || __INTEL_COMPILER */
+
+#define	__GLOBL1(sym)	__asm__(".globl " #sym)
+#define	__GLOBL(sym)	__GLOBL1(sym)
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define	__IDSTRING(name,string)	__asm__(".ident\t\"" string "\"")
+#else
+/*
+ * The following definition might not work well if used in header files,
+ * but it should be better than nothing.  If you want a "do nothing"
+ * version, then it should generate some harmless declaration, such as:
+ *    #define __IDSTRING(name,string)	struct __hack
+ */
+#define	__IDSTRING(name,string)	static const char name[] __unused = string
+#endif
+
+/*
+ * Embed the rcs id of a source file in the resulting library.  Note that in
+ * more recent ELF binutils, we use .ident allowing the ID to be stripped.
+ * Usage:
+ *	__FBSDID("$FreeBSD$");
+ */
+#ifndef	__FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define	__FBSDID(s)	__IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define	__FBSDID(s)	struct __hack
+#endif
+#endif
+
+#ifndef	__RCSID
+#ifndef	NO__RCSID
+#define	__RCSID(s)	__IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define	__RCSID(s)	struct __hack
+#endif
+#endif
+
+#ifndef	__RCSID_SOURCE
+#ifndef	NO__RCSID_SOURCE
+#define	__RCSID_SOURCE(s)	__IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
+#else
+#define	__RCSID_SOURCE(s)	struct __hack
+#endif
+#endif
+
+#ifndef	__SCCSID
+#ifndef	NO__SCCSID
+#define	__SCCSID(s)	__IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
+#else
+#define	__SCCSID(s)	struct __hack
+#endif
+#endif
+
+#ifndef	__COPYRIGHT
+#ifndef	NO__COPYRIGHT
+#define	__COPYRIGHT(s)	__IDSTRING(__CONCAT(__copyright_,__LINE__),s)
+#else
+#define	__COPYRIGHT(s)	struct __hack
+#endif
+#endif
+
+#ifndef	__DECONST
+#define	__DECONST(type, var)	((type)(__uintptr_t)(const void *)(var))
+#endif
+
+#ifndef	__DEVOLATILE
+#define	__DEVOLATILE(type, var)	((type)(__uintptr_t)(volatile void *)(var))
+#endif
+
+#ifndef	__DEQUALIFY
+#define	__DEQUALIFY(type, var)	((type)(__uintptr_t)(const volatile void *)(var))
+#endif
+
+/*-
+ * The following definitions are an extension of the behavior originally
+ * implemented in <sys/_posix.h>, but with a different level of granularity.
+ * POSIX.1 requires that the macros we test be defined before any standard
+ * header file is included.
+ *
+ * Here's a quick run-down of the versions:
+ *  defined(_POSIX_SOURCE)		1003.1-1988
+ *  _POSIX_C_SOURCE == 1		1003.1-1990
+ *  _POSIX_C_SOURCE == 2		1003.2-1992 C Language Binding Option
+ *  _POSIX_C_SOURCE == 199309		1003.1b-1993
+ *  _POSIX_C_SOURCE == 199506		1003.1c-1995, 1003.1i-1995,
+ *					and the omnibus ISO/IEC 9945-1: 1996
+ *  _POSIX_C_SOURCE == 200112		1003.1-2001
+ *  _POSIX_C_SOURCE == 200809		1003.1-2008
+ *
+ * In addition, the X/Open Portability Guide, which is now the Single UNIX
+ * Specification, defines a feature-test macro which indicates the version of
+ * that specification, and which subsumes _POSIX_C_SOURCE.
+ *
+ * Our macros begin with two underscores to avoid namespace screwage.
+ */
+
+/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
+#undef _POSIX_C_SOURCE		/* Probably illegal, but beyond caring now. */
+#define	_POSIX_C_SOURCE		199009
+#endif
+
+/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
+#undef _POSIX_C_SOURCE
+#define	_POSIX_C_SOURCE		199209
+#endif
+
+/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
+#ifdef _XOPEN_SOURCE
+#if _XOPEN_SOURCE - 0 >= 700
+#define	__XSI_VISIBLE		700
+#undef _POSIX_C_SOURCE
+#define	_POSIX_C_SOURCE		200809
+#elif _XOPEN_SOURCE - 0 >= 600
+#define	__XSI_VISIBLE		600
+#undef _POSIX_C_SOURCE
+#define	_POSIX_C_SOURCE		200112
+#elif _XOPEN_SOURCE - 0 >= 500
+#define	__XSI_VISIBLE		500
+#undef _POSIX_C_SOURCE
+#define	_POSIX_C_SOURCE		199506
+#endif
+#endif
+
+/*
+ * Deal with all versions of POSIX.  The ordering relative to the tests above is
+ * important.
+ */
+#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
+#define	_POSIX_C_SOURCE		198808
+#endif
+#ifdef _POSIX_C_SOURCE
+#if _POSIX_C_SOURCE >= 200809
+#define	__POSIX_VISIBLE		200809
+#define	__ISO_C_VISIBLE		1999
+#elif _POSIX_C_SOURCE >= 200112
+#define	__POSIX_VISIBLE		200112
+#define	__ISO_C_VISIBLE		1999
+#elif _POSIX_C_SOURCE >= 199506
+#define	__POSIX_VISIBLE		199506
+#define	__ISO_C_VISIBLE		1990
+#elif _POSIX_C_SOURCE >= 199309
+#define	__POSIX_VISIBLE		199309
+#define	__ISO_C_VISIBLE		1990
+#elif _POSIX_C_SOURCE >= 199209
+#define	__POSIX_VISIBLE		199209
+#define	__ISO_C_VISIBLE		1990
+#elif _POSIX_C_SOURCE >= 199009
+#define	__POSIX_VISIBLE		199009
+#define	__ISO_C_VISIBLE		1990
+#else
+#define	__POSIX_VISIBLE		198808
+#define	__ISO_C_VISIBLE		0
+#endif /* _POSIX_C_SOURCE */
+#else
+/*-
+ * Deal with _ANSI_SOURCE:
+ * If it is defined, and no other compilation environment is explicitly
+ * requested, then define our internal feature-test macros to zero.  This
+ * makes no difference to the preprocessor (undefined symbols in preprocessing
+ * expressions are defined to have value zero), but makes it more convenient for
+ * a test program to print out the values.
+ *
+ * If a program mistakenly defines _ANSI_SOURCE and some other macro such as
+ * _POSIX_C_SOURCE, we will assume that it wants the broader compilation
+ * environment (and in fact we will never get here).
+ */
+#if defined(_ANSI_SOURCE)	/* Hide almost everything. */
+#define	__POSIX_VISIBLE		0
+#define	__XSI_VISIBLE		0
+#define	__BSD_VISIBLE		0
+#define	__ISO_C_VISIBLE		1990
+#elif defined(_C99_SOURCE)	/* Localism to specify strict C99 env. */
+#define	__POSIX_VISIBLE		0
+#define	__XSI_VISIBLE		0
+#define	__BSD_VISIBLE		0
+#define	__ISO_C_VISIBLE		1999
+#else				/* Default environment: show everything. */
+#define	__POSIX_VISIBLE		200809
+#define	__XSI_VISIBLE		700
+#define	__BSD_VISIBLE		1
+#define	__ISO_C_VISIBLE		1999
+#endif
+#endif
+
+#ifndef	__has_feature
+#define	__has_feature(x) 0
+#endif
+#ifndef	__has_include
+#define	__has_include(x) 0
+#endif
+#ifndef	__has_builtin
+#define	__has_builtin(x) 0
+#endif
+
+#if defined(__mips) || defined(__powerpc64__) || defined(__arm__)
+#define __NO_TLS 1
+#endif
+
+#endif /* !_SYS_CDEFS_H_ */
diff --git a/include/stdlib/sys/ctype.h b/include/lib/stdlib/sys/ctype.h
similarity index 100%
rename from include/stdlib/sys/ctype.h
rename to include/lib/stdlib/sys/ctype.h
diff --git a/include/lib/stdlib/sys/endian.h b/include/lib/stdlib/sys/endian.h
new file mode 100644
index 0000000..d50110c
--- /dev/null
+++ b/include/lib/stdlib/sys/endian.h
@@ -0,0 +1,205 @@
+/*-
+ * Copyright (c) 2002 Thomas Moestl <tmm@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ENDIAN_H_
+#define _SYS_ENDIAN_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+#include <machine/endian.h>
+
+#ifndef _UINT8_T_DECLARED
+typedef	__uint8_t	uint8_t;
+#define	_UINT8_T_DECLARED
+#endif
+
+#ifndef _UINT16_T_DECLARED
+typedef	__uint16_t	uint16_t;
+#define	_UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef	__uint32_t	uint32_t;
+#define	_UINT32_T_DECLARED
+#endif
+
+#ifndef _UINT64_T_DECLARED
+typedef	__uint64_t	uint64_t;
+#define	_UINT64_T_DECLARED
+#endif
+
+/*
+ * General byte order swapping functions.
+ */
+#define	bswap16(x)	__bswap16(x)
+#define	bswap32(x)	__bswap32(x)
+#define	bswap64(x)	__bswap64(x)
+
+/*
+ * Host to big endian, host to little endian, big endian to host, and little
+ * endian to host byte order functions as detailed in byteorder(9).
+ */
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define	htobe16(x)	bswap16((x))
+#define	htobe32(x)	bswap32((x))
+#define	htobe64(x)	bswap64((x))
+#define	htole16(x)	((uint16_t)(x))
+#define	htole32(x)	((uint32_t)(x))
+#define	htole64(x)	((uint64_t)(x))
+
+#define	be16toh(x)	bswap16((x))
+#define	be32toh(x)	bswap32((x))
+#define	be64toh(x)	bswap64((x))
+#define	le16toh(x)	((uint16_t)(x))
+#define	le32toh(x)	((uint32_t)(x))
+#define	le64toh(x)	((uint64_t)(x))
+#else /* _BYTE_ORDER != _LITTLE_ENDIAN */
+#define	htobe16(x)	((uint16_t)(x))
+#define	htobe32(x)	((uint32_t)(x))
+#define	htobe64(x)	((uint64_t)(x))
+#define	htole16(x)	bswap16((x))
+#define	htole32(x)	bswap32((x))
+#define	htole64(x)	bswap64((x))
+
+#define	be16toh(x)	((uint16_t)(x))
+#define	be32toh(x)	((uint32_t)(x))
+#define	be64toh(x)	((uint64_t)(x))
+#define	le16toh(x)	bswap16((x))
+#define	le32toh(x)	bswap32((x))
+#define	le64toh(x)	bswap64((x))
+#endif /* _BYTE_ORDER == _LITTLE_ENDIAN */
+
+/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
+
+static __inline uint16_t
+be16dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return ((p[0] << 8) | p[1]);
+}
+
+static __inline uint32_t
+be32dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return (((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
+}
+
+static __inline uint64_t
+be64dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
+}
+
+static __inline uint16_t
+le16dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return ((p[1] << 8) | p[0]);
+}
+
+static __inline uint32_t
+le32dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return (((unsigned)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+}
+
+static __inline uint64_t
+le64dec(const void *pp)
+{
+	uint8_t const *p = (uint8_t const *)pp;
+
+	return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
+}
+
+static __inline void
+be16enc(void *pp, uint16_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	p[0] = (u >> 8) & 0xff;
+	p[1] = u & 0xff;
+}
+
+static __inline void
+be32enc(void *pp, uint32_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	p[0] = (u >> 24) & 0xff;
+	p[1] = (u >> 16) & 0xff;
+	p[2] = (u >> 8) & 0xff;
+	p[3] = u & 0xff;
+}
+
+static __inline void
+be64enc(void *pp, uint64_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	be32enc(p, (uint32_t)(u >> 32));
+	be32enc(p + 4, (uint32_t)(u & 0xffffffffU));
+}
+
+static __inline void
+le16enc(void *pp, uint16_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	p[0] = u & 0xff;
+	p[1] = (u >> 8) & 0xff;
+}
+
+static __inline void
+le32enc(void *pp, uint32_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	p[0] = u & 0xff;
+	p[1] = (u >> 8) & 0xff;
+	p[2] = (u >> 16) & 0xff;
+	p[3] = (u >> 24) & 0xff;
+}
+
+static __inline void
+le64enc(void *pp, uint64_t u)
+{
+	uint8_t *p = (uint8_t *)pp;
+
+	le32enc(p, (uint32_t)(u & 0xffffffffU));
+	le32enc(p + 4, (uint32_t)(u >> 32));
+}
+
+#endif	/* _SYS_ENDIAN_H_ */
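The encode/decode helpers above are alignment-agnostic, which makes them the safe way to build or parse packed on-wire or firmware-image fields byte by byte. An illustrative little-endian round trip (the 6-byte header layout is made up):

#include <sys/endian.h>

/* Serialise a 32-bit length and a 16-bit flags field, LSB first. */
static void pack_header(uint8_t *buf, uint32_t len, uint16_t flags)
{
	le32enc(buf, len);		/* bytes 0..3 */
	le16enc(buf + 4, flags);	/* bytes 4..5 */
}

static uint32_t unpack_len(const uint8_t *buf)
{
	/* Correct for any buffer alignment, unlike a direct uint32_t load. */
	return (le32dec(buf));
}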
diff --git a/include/stdlib/sys/errno.h b/include/lib/stdlib/sys/errno.h
similarity index 100%
rename from include/stdlib/sys/errno.h
rename to include/lib/stdlib/sys/errno.h
diff --git a/include/stdlib/sys/limits.h b/include/lib/stdlib/sys/limits.h
similarity index 100%
rename from include/stdlib/sys/limits.h
rename to include/lib/stdlib/sys/limits.h
diff --git a/include/stdlib/sys/stdarg.h b/include/lib/stdlib/sys/stdarg.h
similarity index 100%
rename from include/stdlib/sys/stdarg.h
rename to include/lib/stdlib/sys/stdarg.h
diff --git a/include/stdlib/sys/stdint.h b/include/lib/stdlib/sys/stdint.h
similarity index 100%
rename from include/stdlib/sys/stdint.h
rename to include/lib/stdlib/sys/stdint.h
diff --git a/include/stdlib/sys/timespec.h b/include/lib/stdlib/sys/timespec.h
similarity index 100%
rename from include/stdlib/sys/timespec.h
rename to include/lib/stdlib/sys/timespec.h
diff --git a/include/stdlib/sys/types.h b/include/lib/stdlib/sys/types.h
similarity index 100%
rename from include/stdlib/sys/types.h
rename to include/lib/stdlib/sys/types.h
diff --git a/include/stdlib/time.h b/include/lib/stdlib/time.h
similarity index 100%
rename from include/stdlib/time.h
rename to include/lib/stdlib/time.h
diff --git a/include/stdlib/xlocale/_strings.h b/include/lib/stdlib/xlocale/_strings.h
similarity index 100%
rename from include/stdlib/xlocale/_strings.h
rename to include/lib/stdlib/xlocale/_strings.h
diff --git a/include/stdlib/xlocale/_time.h b/include/lib/stdlib/xlocale/_time.h
similarity index 100%
rename from include/stdlib/xlocale/_time.h
rename to include/lib/stdlib/xlocale/_time.h
diff --git a/include/lib/utils.h b/include/lib/utils.h
index b6bc9af..cfc8302 100644
--- a/include/lib/utils.h
+++ b/include/lib/utils.h
@@ -1,83 +1,64 @@
 /*
  * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __UTILS_H__
 #define __UTILS_H__
 
-/* Compute the number of elements in the given array */
-#define ARRAY_SIZE(a)				\
-	(sizeof(a) / sizeof((a)[0]))
-
-#define IS_POWER_OF_TWO(x)			\
-	(((x) & ((x) - 1)) == 0)
-
-#define SIZE_FROM_LOG2_WORDS(n)		(4 << (n))
-
-#define BIT(nr)				(1UL << (nr))
-
-/*
- * The round_up() macro rounds up a value to the given boundary in a
- * type-agnostic yet type-safe manner. The boundary must be a power of two.
- * In other words, it computes the smallest multiple of boundary which is
- * greater than or equal to value.
- *
- * round_down() is similar but rounds the value down instead.
- */
-#define round_boundary(value, boundary)		\
-	((__typeof__(value))((boundary) - 1))
-
-#define round_up(value, boundary)		\
-	((((value) - 1) | round_boundary(value, boundary)) + 1)
-
-#define round_down(value, boundary)		\
-	((value) & ~round_boundary(value, boundary))
-
-/*
- * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
- * Both arguments must be unsigned pointer values (i.e. uintptr_t).
- */
-#define check_uptr_overflow(ptr, inc)		\
-	(((ptr) > UINTPTR_MAX - (inc)) ? 1 : 0)
-
-/*
- * For those constants to be shared between C and other sources, apply a 'ull'
- * suffix to the argument only in C, to avoid undefined or unintended behaviour.
- *
- * The GNU assembler and linker do not support the 'ull' suffix (it causes the
- * build process to fail) therefore the suffix is omitted when used in linker
- * scripts and assembler files.
-*/
-#if defined(__LINKER__) || defined(__ASSEMBLY__)
-# define ULL(_x)	(_x)
-#else
-# define ULL(_x)	(_x##ull)
+#if !ERROR_DEPRECATED
+#include <utils_def.h>
 #endif
 
+/*
+ * C code should be put in this part of the header to avoid breaking assembler
+ * files or linker scripts that include it.
+ */
+#if !(defined(__LINKER__) || defined(__ASSEMBLY__))
+
+#include <types.h>
+
+typedef struct mem_region_t {
+	uintptr_t base;
+	size_t nbytes;
+} mem_region_t;
+
+/*
+ * zero_normalmem all the regions defined in tbl.
+ */
+void clear_mem_regions(mem_region_t *tbl, size_t nregions);
+
+
+/*
+ * Checks that the memory region [addr, addr + nbytes - 1] is totally covered
+ * by one of the regions defined in tbl. The caller must ensure that
+ * (addr + nbytes - 1) does not overflow.
+ */
+int mem_region_in_array_chk(mem_region_t *tbl, size_t nregions,
+			    uintptr_t addr, size_t nbytes);
+
+/*
+ * Fill a region of normal memory of size "length" in bytes with zero bytes.
+ *
+ * WARNING: This function can only operate on normal memory. This means that
+ *          the MMU must be enabled when using this function. Otherwise, use
+ *          zeromem.
+ */
+void zero_normalmem(void *mem, u_register_t length);
+
+/*
+ * Fill a region of memory of size "length" in bytes with null bytes.
+ *
+ * Unlike zero_normalmem, this function has no restriction on the type of
+ * memory targeted and can be used for any device memory as well as normal
+ * memory. This function must be used instead of zero_normalmem when MMU is
+ * disabled.
+ *
+ * NOTE: When data cache and MMU are enabled, prefer zero_normalmem for faster
+ *       zeroing.
+ */
+void zeromem(void *mem, u_register_t length);
+#endif /* !(defined(__LINKER__) || defined(__ASSEMBLY__)) */
+
 #endif /* __UTILS_H__ */
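A hedged sketch of how a platform might use the new utils.h interface to scrub secrets while the MMU is still enabled. The region table, addresses and the assumption that mem_region_in_array_chk() returns 0 when the region is fully covered are all illustrative, not taken from this patch:

#include <utils.h>
#include <utils_def.h>	/* ARRAY_SIZE() */

/* Hypothetical secure regions to be wiped before handover. */
static mem_region_t scrub_tbl[] = {
	{ .base = 0x0e000000, .nbytes = 0x10000 },
	{ .base = 0x0e100000, .nbytes = 0x4000 },
};

static void scrub_buffer(uintptr_t addr, size_t nbytes)
{
	/* Only wipe buffers that sit entirely inside a known region. */
	if (mem_region_in_array_chk(scrub_tbl, ARRAY_SIZE(scrub_tbl),
				    addr, nbytes) == 0)
		zero_normalmem((void *)addr, nbytes);
}

static void scrub_all(void)
{
	/* With the MMU disabled, call zeromem() on each region instead. */
	clear_mem_regions(scrub_tbl, ARRAY_SIZE(scrub_tbl));
}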
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
new file mode 100644
index 0000000..185a1c1
--- /dev/null
+++ b/include/lib/utils_def.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UTILS_DEF_H__
+#define __UTILS_DEF_H__
+
+/* Compute the number of elements in the given array */
+#define ARRAY_SIZE(a)				\
+	(sizeof(a) / sizeof((a)[0]))
+
+#define IS_POWER_OF_TWO(x)			\
+	(((x) & ((x) - 1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n)		(4 << (n))
+
+#define BIT(nr)				(1ULL << (nr))
+
+/*
+ * This variant of div_round_up can be used in macro definition but should not
+ * be used in C code as the `div` parameter is evaluated twice.
+ */
+#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + (d) - 1) / (d))
+
+#define MIN(x, y) __extension__ ({	\
+	__typeof__(x) _x = (x);		\
+	__typeof__(y) _y = (y);		\
+	(void)(&_x == &_y);		\
+	_x < _y ? _x : _y;		\
+})
+
+#define MAX(x, y) __extension__ ({	\
+	__typeof__(x) _x = (x);		\
+	__typeof__(y) _y = (y);		\
+	(void)(&_x == &_y);		\
+	_x > _y ? _x : _y;		\
+})
+
+/*
+ * The round_up() macro rounds up a value to the given boundary in a
+ * type-agnostic yet type-safe manner. The boundary must be a power of two.
+ * In other words, it computes the smallest multiple of boundary which is
+ * greater than or equal to value.
+ *
+ * round_down() is similar but rounds the value down instead.
+ */
+#define round_boundary(value, boundary)		\
+	((__typeof__(value))((boundary) - 1))
+
+#define round_up(value, boundary)		\
+	((((value) - 1) | round_boundary(value, boundary)) + 1)
+
+#define round_down(value, boundary)		\
+	((value) & ~round_boundary(value, boundary))
+
+#define div_round_up(val, div) __extension__ ({	\
+	__typeof__(div) _div = (div);		\
+	round_up((val), _div)/_div;		\
+})
+
+/*
+ * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
+ * Both arguments must be unsigned pointer values (i.e. uintptr_t).
+ */
+#define check_uptr_overflow(ptr, inc)		\
+	(((ptr) > UINTPTR_MAX - (inc)) ? 1 : 0)
+
+/*
+ * For those constants to be shared between C and other sources, apply a 'u'
+ * or 'ull' suffix to the argument only in C, to avoid undefined or unintended
+ * behaviour.
+ *
+ * The GNU assembler and linker do not support the 'u' and 'ull' suffix (it
+ * causes the build process to fail) therefore the suffix is omitted when used
+ * in linker scripts and assembler files.
+*/
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+# define  U(_x)		(_x)
+# define ULL(_x)	(_x)
+#else
+# define  U(_x)		(_x##u)
+# define ULL(_x)	(_x##ull)
+#endif
+
+/*
+ * Test for the current architecture version to be at least the version
+ * expected.
+ */
+#define ARM_ARCH_AT_LEAST(_maj, _min) \
+	((ARM_ARCH_MAJOR > _maj) || \
+	 ((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))
+
+#endif /* __UTILS_DEF_H__ */
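Worked examples for the helpers above. Note that round_up()/round_down() require a power-of-two boundary, and div_round_up() inherits that requirement because it is built on round_up(); DIV_ROUND_UP_2EVAL() accepts any divisor but evaluates it twice:

#include <utils_def.h>

/*
 * round_up(5000, 4096)      == 8192
 * round_down(5000, 4096)    == 4096
 * div_round_up(5000, 512)   == 10   (5000 = 9 * 512 + 392)
 * DIV_ROUND_UP_2EVAL(10, 3) == 4
 */
static unsigned long pages_needed(unsigned long nbytes)
{
	/* Number of 4 KB pages needed to hold nbytes bytes. */
	return div_round_up(nbytes, U(4096));
}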
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
new file mode 100644
index 0000000..a418d2d
--- /dev/null
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_AARCH32_H__
+#define __XLAT_TABLES_AARCH32_H__
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE != (4 * 1024)
+#error "Invalid granule size. AArch32 supports 4KB pages only."
+#endif
+
+#define MIN_LVL_BLOCK_DESC	U(1)
+
+#define XLAT_TABLE_LEVEL_MIN	U(1)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch32
+ * state.
+ *
+ * TTBCR.TxSZ is calculated as 32 minus the width of said address space. The
+ * value of TTBCR.TxSZ must be in the range 0 to 7 [1], which means that the
+ * virtual address space width must be in the range 32 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (32 - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (32 - TTBCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 1 supports virtual address spaces of widths 32 to 31 bits;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as the initial lookup level with 4 KB granularity.
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ *
+ * For example, for a 31-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size)			\
+	(((virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
+
+#endif /* __XLAT_TABLES_AARCH32_H__ */
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
new file mode 100644
index 0000000..7381bc8
--- /dev/null
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_AARCH64_H__
+#define __XLAT_TABLES_AARCH64_H__
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE == (4 * 1024)
+# define MIN_LVL_BLOCK_DESC	U(1)
+#elif PAGE_SIZE == (16 * 1024) || PAGE_SIZE == (64 * 1024)
+# define MIN_LVL_BLOCK_DESC	U(2)
+#endif
+
+#define XLAT_TABLE_LEVEL_MIN	U(0)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch64
+ * state.
+ *
+ * TCR.TxSZ is calculated as 64 minus the width of said address space.
+ * The value of TCR.TxSZ must be in the range 16 to 39 [1], which means that
+ * the virtual address space width must be in the range 48 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * Page 1730: 'Input address size', 'For all translation stages'.
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (64 - TCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (64 - TCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 0 supports virtual address spaces of widths 48 to 40 bits;
+ * - level 1 from 39 to 31;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as initial lookup level with 4 KB granularity. See section
+ * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ *
+ * For example, for a 35-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid. Therefore, the caller is expected to check it is the case using the
+ * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size)				\
+	(((virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))		\
+	? 0									\
+	 : (((virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT))	\
+	 ? 1 : 2))
+
+#endif /* __XLAT_TABLES_AARCH64_H__ */
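To make the worked example in the comment above concrete, here is a small standalone sketch that reproduces the TCR.TxSZ and initial lookup level calculation for a 35-bit virtual address space; the shift values are the 4KB-granule ones from xlat_tables_defs.h (L0 = 39, L1 = 30).

#include <stdio.h>

#define L0_XLAT_ADDRESS_SHIFT	39U
#define L1_XLAT_ADDRESS_SHIFT	30U

/* Mirrors GET_XLAT_TABLE_LEVEL_BASE() for AArch64 with a 4KB granule. */
static unsigned int base_level(unsigned long long va_space_size)
{
	if (va_space_size > (1ULL << L0_XLAT_ADDRESS_SHIFT))
		return 0U;
	return (va_space_size > (1ULL << L1_XLAT_ADDRESS_SHIFT)) ? 1U : 2U;
}

int main(void)
{
	/* 35-bit VA space, as in the comment above: TCR.TxSZ = 64 - 35 = 29. */
	printf("TxSZ = %d, initial level = %u\n", 64 - 35,
	       base_level(1ULL << 35));
	return 0;
}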
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 0000000..fd3efc3
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_MMU_HELPERS_H__
+#define __XLAT_MMU_HELPERS_H__
+
+#ifdef AARCH32
+/* AArch32 specific translation table API */
+void enable_mmu_secure(uint32_t flags);
+#else
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+#endif /* AARCH32 */
+
+#endif /* __XLAT_MMU_HELPERS_H__ */
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
new file mode 100644
index 0000000..91f2f05
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_H__
+#define __XLAT_TABLES_H__
+
+#include <xlat_tables_defs.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/* Helper macro to define entries for mmap_region_t. It creates
+ * identity mappings for each region.
+ */
+#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
+
+/* Helper macro to define entries for mmap_region_t. It allows a region to be
+ * mapped at a virtual address 'va' different from its physical address 'pa'.
+ */
+#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
+
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+#define MT_TYPE_MASK	U(0x7)
+#define MT_TYPE(_attr)	((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT	U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT	U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT	U(5)
+
+/*
+ * Memory mapping attributes
+ */
+typedef enum  {
+	/*
+	 * Memory types supported.
+	 * These are organised so that, going down the list, the memory types
+	 * are getting weaker; conversely going up the list the memory types are
+	 * getting stronger.
+	 */
+	MT_DEVICE,
+	MT_NON_CACHEABLE,
+	MT_MEMORY,
+	/* Values up to 7 are reserved to add new memory types in the future */
+
+	MT_RO		= U(0) << MT_PERM_SHIFT,
+	MT_RW		= U(1) << MT_PERM_SHIFT,
+
+	MT_SECURE	= U(0) << MT_SEC_SHIFT,
+	MT_NS		= U(1) << MT_SEC_SHIFT,
+
+	/*
+	 * Access permissions for instruction execution are only relevant for
+	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
+	 * (and potentially overridden) otherwise:
+	 *  - Device memory is always marked as execute-never.
+	 *  - Read-write normal memory is always marked as execute-never.
+	 */
+	MT_EXECUTE		= U(0) << MT_EXECUTE_SHIFT,
+	MT_EXECUTE_NEVER	= U(1) << MT_EXECUTE_SHIFT,
+} mmap_attr_t;
+
+#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+	unsigned long long	base_pa;
+	uintptr_t		base_va;
+	size_t			size;
+	mmap_attr_t		attr;
+} mmap_region_t;
+
+/* Generic translation table APIs */
+void init_xlat_tables(void);
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, mmap_attr_t attr);
+void mmap_add(const mmap_region_t *mm);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __XLAT_TABLES_H__ */
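For reference, a hypothetical platform routine driving this API might look as follows. It assumes an AArch64 BL31 build; the function name, device base address and size are invented for illustration.

#include <xlat_tables.h>

#define PLAT_DEVICE_BASE	0x1c000000ULL	/* illustrative values */
#define PLAT_DEVICE_SIZE	0x03000000U

static const mmap_region_t plat_mmap[] = {
	/* Identity-map the device region as secure, read-write. */
	MAP_REGION_FLAT(PLAT_DEVICE_BASE, PLAT_DEVICE_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	{0} /* List terminator (size == 0). */
};

void plat_configure_mmu_el3(void)
{
	mmap_add(plat_mmap);	/* Register the static regions. */
	init_xlat_tables();	/* Build the translation tables. */
	enable_mmu_el3(0);	/* Turn the MMU on at EL3 with default flags. */
}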
diff --git a/include/lib/xlat_tables/xlat_tables_arch.h b/include/lib/xlat_tables/xlat_tables_arch.h
new file mode 100644
index 0000000..165b161
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_arch.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_H__
+#define __XLAT_TABLES_ARCH_H__
+
+#ifdef AARCH32
+#include "aarch32/xlat_tables_aarch32.h"
+#else
+#include "aarch64/xlat_tables_aarch64.h"
+#endif
+
+/*
+ * Evaluates to 1 if the given virtual address space size is valid, or 0 if it's
+ * not.
+ *
+ * A valid size is one that is a power of 2 and is within the architectural
+ * limits. Note that these limits are different for AArch32 and AArch64.
+ */
+#define CHECK_VIRT_ADDR_SPACE_SIZE(size)			\
+	(((size) >= MIN_VIRT_ADDR_SPACE_SIZE) &&		\
+	((size) <= MAX_VIRT_ADDR_SPACE_SIZE) &&			\
+	IS_POWER_OF_TWO(size))
+
+/*
+ * Evaluates to 1 if the given physical address space size is a power of 2,
+ * or 0 if it's not.
+ */
+#define CHECK_PHY_ADDR_SPACE_SIZE(size)				\
+	(IS_POWER_OF_TWO(size))
+
+/*
+ * Compute the number of entries required at the initial lookup level to address
+ * the whole virtual address space.
+ */
+#define GET_NUM_BASE_LEVEL_ENTRIES(addr_space_size)			\
+	((addr_space_size) >>						\
+		XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
+
+#endif /* __XLAT_TABLES_ARCH_H__ */
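As a short aside, the GET_NUM_BASE_LEVEL_ENTRIES() arithmetic for a 4KB granule on AArch64 boils down to dividing the address space size by the block size covered by one entry of the initial lookup level (1GB at level 1, 512GB at level 0); a standalone sketch of the two common cases:

#include <stdio.h>

int main(void)
{
	/* A 4GB (32-bit) space starts at level 1: 4GB / 1GB = 4 entries. */
	printf("%llu\n", (1ULL << 32) >> 30);
	/* A 48-bit space starts at level 0: 2^48 / 2^39 = 512 entries. */
	printf("%llu\n", (1ULL << 48) >> 39);
	return 0;
}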
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 0000000..3a7f245
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_DEFS_H__
+#define __XLAT_TABLES_DEFS_H__
+
+#include <arch.h>
+#include <utils_def.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB		(U(1) << 9)
+#define NUM_4K_IN_2MB		(U(1) << 9)
+#define NUM_GB_IN_4GB		(U(1) << 2)
+
+#define TWO_MB_SHIFT		U(21)
+#define ONE_GB_SHIFT		U(30)
+#define FOUR_KB_SHIFT		U(12)
+
+#define ONE_GB_INDEX(x)		((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)
+
+#define INVALID_DESC		U(0x0)
+/*
+ * A block descriptor points to a region of memory bigger than the granule size
+ * (e.g. a 2MB region when the granule size is 4KB).
+ */
+#define BLOCK_DESC		U(0x1) /* Table levels 0-2 */
+/* A table descriptor points to the next level of translation table. */
+#define TABLE_DESC		U(0x3) /* Table levels 0-2 */
+/*
+ * A page descriptor points to a page, i.e. a memory region whose size is the
+ * translation granule size (e.g. 4KB).
+ */
+#define PAGE_DESC		U(0x3) /* Table level 3 */
+
+#define DESC_MASK		U(0x3)
+
+#define FIRST_LEVEL_DESC_N	ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N	TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N	FOUR_KB_SHIFT
+
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
+#define XN			(ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN			(ULL(1) << 2)
+#define PXN			(ULL(1) << 1)
+#define CONT_HINT		(ULL(1) << 0)
+#define UPPER_ATTRS(x)		(((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL		(U(1) << 9)
+#define ACCESS_FLAG		(U(1) << 8)
+#define NSH			(U(0x0) << 6)
+#define OSH			(U(0x2) << 6)
+#define ISH			(U(0x3) << 6)
+
+#define TABLE_ADDR_MASK		ULL(0x0000FFFFFFFFF000)
+
+/*
+ * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
+ * 64KB. However, TF only supports the 4KB case at the moment.
+ */
+#define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
+#define PAGE_SIZE		(U(1) << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
+#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
+
+#define XLAT_ENTRY_SIZE_SHIFT	U(3) /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE		(U(1) << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE		(U(1) << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_LEVEL_MAX	U(3)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES	(U(1) << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
+		  ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level)	((u_register_t)1 << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - 1)
+/* Mask to get the address bits common to a block of a certain table level*/
+#define XLAT_ADDR_MASK(level)	(~XLAT_BLOCK_MASK(level))
+/*
+ * Extract from the given virtual address the index into the given lookup level.
+ * This macro assumes the system is using the 4KB translation granule.
+ */
+#define XLAT_TABLE_IDX(virtual_addr, level)	\
+	(((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
+
+/*
+ * The ARMv8 translation table descriptor format defines AP[2:1] as the Access
+ * Permissions bits, and does not define an AP[0] bit.
+ *
+ * AP[1] is valid only for a stage 1 translation that supports two VA ranges
+ * (i.e. in the ARMv8.0 architecture, that is the S-EL1&0 regime).
+ *
+ * AP[1] is RES0 for stage 1 translations that support only one VA range
+ * (e.g. EL3).
+ */
+#define AP2_SHIFT			U(0x7)
+#define AP2_RO				U(0x1)
+#define AP2_RW				U(0x0)
+
+#define AP1_SHIFT			U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED		U(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED	U(0x0)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO				(AP2_RO << 5)
+#define AP_RW				(AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED		(AP1_ACCESS_UNPRIVILEGED    << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED	(AP1_NO_ACCESS_UNPRIVILEGED << 4)
+#define NS				(U(0x1) << 3)
+#define ATTR_NON_CACHEABLE_INDEX	U(0x2)
+#define ATTR_DEVICE_INDEX		U(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX	U(0x0)
+#define LOWER_ATTRS(x)			(((x) & U(0xfff)) << 2)
+
+/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE		MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_NC, MAIR_NORM_NC)
+/* Device-nGnRE */
+#define ATTR_DEVICE			MAIR_DEV_nGnRE
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR		MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+#define MAIR_ATTR_SET(attr, index)	((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK			U(0x3)
+#define ATTR_INDEX_GET(attr)		(((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Shift values for the attributes fields in a block or page descriptor.
+ * See section D4.3.3 in the ARMv8-A ARM (issue B.a).
+ */
+
+/* Memory attributes index field, AttrIndx[2:0]. */
+#define ATTR_INDEX_SHIFT		2
+/* Non-secure bit, NS. */
+#define NS_SHIFT			5
+/* Shareability field, SH[1:0] */
+#define SHAREABILITY_SHIFT		8
+/* The Access Flag, AF. */
+#define ACCESS_FLAG_SHIFT		10
+/* The not global bit, nG. */
+#define NOT_GLOBAL_SHIFT		11
+/* Contiguous hint bit. */
+#define CONT_HINT_SHIFT			52
+/* Execute-never bits, XN. */
+#define PXN_SHIFT			53
+#define XN_SHIFT			54
+#define UXN_SHIFT			XN_SHIFT
+
+/*
+ * Flags to override default values used to program system registers while
+ * enabling the MMU.
+ */
+#define DISABLE_DCACHE			(U(1) << 0)
+
+/*
+ * This flag marks the translation tables as non-cacheable for MMU accesses.
+ * If the flag is not specified, by default the tables are cacheable.
+ */
+#define XLAT_TABLE_NC			(U(1) << 1)
+
+#endif /* __XLAT_TABLES_DEFS_H__ */
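The index-extraction macros can be illustrated with a standalone sketch that mirrors XLAT_ADDR_SHIFT() and XLAT_TABLE_IDX() for the 4KB granule and prints the 9-bit table index used at each lookup level for an arbitrary virtual address.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the 4KB-granule definitions, for illustration only. */
#define PAGE_SIZE_SHIFT		12U
#define XLAT_TABLE_ENTRIES_SHIFT 9U
#define XLAT_TABLE_LEVEL_MAX	3U
#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
		((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
#define XLAT_TABLE_IDX(va, level) \
	(((va) >> XLAT_ADDR_SHIFT(level)) & 0x1FFULL)

int main(void)
{
	uint64_t va = 0x0000004080203000ULL;	/* arbitrary sample address */

	/* Print the 9-bit index used at each lookup level for this VA. */
	for (unsigned int level = 0U; level <= XLAT_TABLE_LEVEL_MAX; level++)
		printf("L%u index: %llu\n", level,
		       (unsigned long long)XLAT_TABLE_IDX(va, level));
	return 0;
}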
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 0000000..73a9c53
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_V2_H__
+#define __XLAT_TABLES_V2_H__
+
+#include <xlat_tables_defs.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+#include <xlat_tables_v2_helpers.h>
+
+/*
+ * Default granularity size for an mmap_region_t.
+ * Useful when no specific granularity is required.
+ *
+ * By default, choose the biggest possible block size allowed by the
+ * architectural state and granule size in order to minimize the number of page
+ * tables required for the mapping.
+ */
+#define REGION_DEFAULT_GRANULARITY	XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)
+
+/* Helper macro to define an mmap_region_t. */
+#define MAP_REGION(_pa, _va, _sz, _attr)	\
+	_MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+
+/* Helper macro to define an mmap_region_t with an identity mapping. */
+#define MAP_REGION_FLAT(_adr, _sz, _attr)			\
+	MAP_REGION(_adr, _adr, _sz, _attr)
+
+/*
+ * Helper macro to define an mmap_region_t to map with the desired granularity
+ * of translation tables.
+ *
+ * The granularity value passed to this macro must be a valid block or page
+ * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
+ * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
+ * is free to choose the granularity for this region. In this case, it is
+ * equivalent to the MAP_REGION() macro.
+ */
+#define MAP_REGION2(_pa, _va, _sz, _attr, _gr)			\
+	_MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+#define MT_TYPE_MASK		U(0x7)
+#define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT		U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT		U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT	U(5)
+/*
+ * In the EL1&0 translation regime, mark the region as User (EL0) or
+ * Privileged (EL1). In the EL3 translation regime this has no effect.
+ */
+#define MT_USER_SHIFT		U(6)
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+typedef enum  {
+	/*
+	 * Memory types supported.
+	 * These are organised so that, going down the list, the memory types
+	 * are getting weaker; conversely going up the list the memory types are
+	 * getting stronger.
+	 */
+	MT_DEVICE,
+	MT_NON_CACHEABLE,
+	MT_MEMORY,
+	/* Values up to 7 are reserved to add new memory types in the future */
+
+	MT_RO		= U(0) << MT_PERM_SHIFT,
+	MT_RW		= U(1) << MT_PERM_SHIFT,
+
+	MT_SECURE	= U(0) << MT_SEC_SHIFT,
+	MT_NS		= U(1) << MT_SEC_SHIFT,
+
+	/*
+	 * Access permissions for instruction execution are only relevant for
+	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
+	 * (and potentially overridden) otherwise:
+	 *  - Device memory is always marked as execute-never.
+	 *  - Read-write normal memory is always marked as execute-never.
+	 */
+	MT_EXECUTE		= U(0) << MT_EXECUTE_SHIFT,
+	MT_EXECUTE_NEVER	= U(1) << MT_EXECUTE_SHIFT,
+
+	/*
+	 * When mapping a region at EL0 or EL1, this attribute will be used to
+	 * determine if a User mapping (EL0) will be created or a Privileged
+	 * mapping (EL1).
+	 */
+	MT_USER				= U(1) << MT_USER_SHIFT,
+	MT_PRIVILEGED			= U(0) << MT_USER_SHIFT,
+} mmap_attr_t;
+
+/* Compound attributes for most common usages */
+#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA	(MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+	unsigned long long	base_pa;
+	uintptr_t		base_va;
+	size_t			size;
+	mmap_attr_t		attr;
+	/* Desired granularity. See the MAP_REGION2() macro for more details. */
+	size_t			granularity;
+} mmap_region_t;
+
+/*
+ * Translation regimes supported by this library.
+ */
+typedef enum xlat_regime {
+	EL1_EL0_REGIME,
+	EL3_REGIME,
+} xlat_regime_t;
+
+/*
+ * Declare the translation context type.
+ * Its definition is private.
+ */
+typedef struct xlat_ctx xlat_ctx_t;
+
+/*
+ * Statically allocate a translation context and associated structures. Also
+ * initialize them.
+ *
+ * _ctx_name:
+ *   Prefix for the translation context variable.
+ *   E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
+ *   Useful to distinguish multiple contexts from one another.
+ *
+ * _mmap_count:
+ *   Number of mmap_region_t to allocate.
+ *   Would typically be MAX_MMAP_REGIONS for the translation context describing
+ *   the BL image currently executing.
+ *
+ * _xlat_tables_count:
+ *   Number of sub-translation tables to allocate.
+ *   Would typically be MAX_XLAT_TABLES for the translation context describing
+ *   the BL image currently executing.
+ *   Note that this is only for sub-tables; at the initial lookup level, there
+ *   is always a single table.
+ *
+ * _virt_addr_space_size, _phy_addr_space_size:
+ *   Size (in bytes) of the virtual (resp. physical) address space.
+ *   Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
+ *   (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
+ *   BL image currently executing.
+ */
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count,	\
+			_virt_addr_space_size, _phy_addr_space_size)		\
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
+					 _xlat_tables_count,		\
+					 _virt_addr_space_size,		\
+					 _phy_addr_space_size,		\
+					 IMAGE_XLAT_DEFAULT_REGIME)
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameter _xlat_regime to
+ * specify the translation regime managed by this xlat_ctx_t instance. The
+ * allowed values are those of the xlat_regime_t enumeration.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count,	\
+			_virt_addr_space_size, _phy_addr_space_size,		\
+			_xlat_regime)					\
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
+					 _xlat_tables_count,		\
+					 _virt_addr_space_size,		\
+					 _phy_addr_space_size,		\
+					 _xlat_regime)
+
+/******************************************************************************
+ * Generic translation table APIs.
+ * Each API comes in 2 variants:
+ * - one that acts on the current translation context for this BL image
+ * - another that acts on the given translation context instead. This variant
+ *   is named after the 1st version, with an additional '_ctx' suffix.
+ *****************************************************************************/
+
+/*
+ * Initialize translation tables from the current list of mmap regions. Calling
+ * this function marks the transition point after which static regions can no
+ * longer be added.
+ */
+void init_xlat_tables(void);
+void init_xlat_tables_ctx(xlat_ctx_t *ctx);
+
+/*
+ * Add a static region with defined base PA and base VA. This function can only
+ * be used before initializing the translation tables. The region cannot be
+ * removed afterwards.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, mmap_attr_t attr);
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This
+ * function can only be used before initializing the translation tables. The
+ * regions cannot be removed afterwards.
+ */
+void mmap_add(const mmap_region_t *mm);
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Add a dynamic region with defined base PA and base VA. This type of region
+ * can be added and removed even after the translation tables are initialized.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: Memory limits were surpassed.
+ *   ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ *    EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+				size_t size, mmap_attr_t attr);
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the translation tables are
+ * initialized.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: The specified region wasn't found.
+ *    EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
+				uintptr_t base_va,
+				size_t size);
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Change the memory attributes of the memory region starting from a given
+ * virtual address in a set of translation tables.
+ *
+ * This function can only be used after the translation tables have been
+ * initialized.
+ *
+ * The base address of the memory region must be aligned on a page boundary.
+ * The size of this memory region must be a multiple of a page size.
+ * The memory region must be already mapped by the given translation tables
+ * and it must be mapped at the granularity of a page.
+ *
+ * Return 0 on success, a negative value on error.
+ *
+ * In case of error, the memory attributes remain unchanged and this function
+ * has no effect.
+ *
+ * ctx
+ *   Translation context to work on.
+ * base_va:
+ *   Virtual address of the 1st page to change the attributes of.
+ * size:
+ *   Size in bytes of the memory region.
+ * attr:
+ *   New attributes of the page tables. The attributes that can be changed are
+ *   data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
+ *   and user/privileged access (MT_USER/MT_PRIVILEGED) in the case of contexts
+ *   that are used in the EL1&0 translation regime. Also, note that this
+ *   function does not allow remapping a region as both RW and executable, nor
+ *   remapping device memory as executable.
+ *
+ * NOTE: The caller of this function must be able to write to the translation
+ * tables, i.e. the memory where they are stored must be mapped with read-write
+ * access permissions. This function assumes it is the case. If this is not
+ * the case then this function might trigger a data abort exception.
+ *
+ * NOTE2: The caller is responsible for making sure that the targeted
+ * translation tables are not modified by any other code while this function is
+ * executing.
+ */
+int change_mem_attributes(xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
+			mmap_attr_t attr);
+
+/*
+ * Query the memory attributes of a memory page in a set of translation tables.
+ *
+ * Return 0 on success, a negative error code on error.
+ * On success, the attributes are stored into *attributes.
+ *
+ * ctx
+ *   Translation context to work on.
+ * base_va
+ *   Virtual address of the page to get the attributes of.
+ *   There are no alignment restrictions on this address. The attributes of the
+ *   memory page it lies within are returned.
+ * attributes
+ *   Output parameter where to store the attributes of the targeted memory page.
+ */
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+		mmap_attr_t *attributes);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __XLAT_TABLES_V2_H__ */
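As an illustration of the dynamic mapping API, a hypothetical routine that temporarily maps a non-secure buffer might look like the sketch below. It is only meaningful in a build with PLAT_XLAT_TABLES_DYNAMIC=1; the addresses, size and function name are invented.

#include <xlat_tables_v2.h>

#define NS_BUF_PA	0x88000000ULL
#define NS_BUF_VA	((uintptr_t)0x88000000U)
#define NS_BUF_SIZE	((size_t)(2U * PAGE_SIZE))

int map_ns_buffer(void)
{
	int ret;

	ret = mmap_add_dynamic_region(NS_BUF_PA, NS_BUF_VA, NS_BUF_SIZE,
				      MT_MEMORY | MT_RW | MT_NS);
	if (ret != 0)
		return ret;	/* e.g. out of mmap entries or xlat tables */

	/* ... access the buffer through NS_BUF_VA here ... */

	return mmap_remove_dynamic_region(NS_BUF_VA, NS_BUF_SIZE);
}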
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
new file mode 100644
index 0000000..96dee1a
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains internal definitions that are not supposed to be
+ * used outside of this library code.
+ */
+
+#ifndef __XLAT_TABLES_V2_HELPERS_H__
+#define __XLAT_TABLES_V2_HELPERS_H__
+
+#ifndef __XLAT_TABLES_V2_H__
+#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables_defs.h>
+
+/* Forward declaration */
+struct mmap_region;
+
+/*
+ * Helper macro to define an mmap_region_t. This macro allows specifying all
+ * the fields of the structure, but its parameter list is not guaranteed to
+ * remain stable as we add members to mmap_region_t.
+ */
+#define _MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)	\
+	{							\
+		.base_pa = (_pa),				\
+		.base_va = (_va),				\
+		.size = (_sz),					\
+		.attr = (_attr),				\
+		.granularity = (_gr),				\
+	}
+
+/* Struct that holds all information about the translation tables. */
+struct xlat_ctx {
+	/*
+	 * Max allowed Virtual and Physical Addresses.
+	 */
+	unsigned long long pa_max_address;
+	uintptr_t va_max_address;
+
+	/*
+	 * Array of all memory regions stored in order of ascending end address
+	 * and ascending size to simplify the code that allows overlapping
+	 * regions. The list is terminated by the first entry with size == 0.
+	 * The max size of the list is stored in `mmap_num`. `mmap` points to an
+	 * array of mmap_num + 1 elements, so that there is space for the final
+	 * null entry.
+	 */
+	struct mmap_region *mmap;
+	unsigned int mmap_num;
+
+	/*
+	 * Array of finer-grain translation tables.
+	 * For example, if the initial lookup level is 1 then this array would
+	 * contain both level-2 and level-3 entries.
+	 */
+	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+	unsigned int tables_num;
+	/*
+	 * Keep track of how many regions are mapped in each table. The base
+	 * table can't be unmapped, so there is no need to track it.
+	 */
+#if PLAT_XLAT_TABLES_DYNAMIC
+	int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+	unsigned int next_table;
+
+	/*
+	 * Base translation table. It doesn't need to have the same number of
+	 * entries as the tables used for the other levels.
+	 */
+	uint64_t *base_table;
+	unsigned int base_table_entries;
+
+	/*
+	 * Max Physical and Virtual addresses currently in use by the
+	 * translation tables. These might get updated as we map/unmap memory
+	 * regions but they will never go beyond pa/va_max_address.
+	 */
+	unsigned long long max_pa;
+	uintptr_t max_va;
+
+	/* Level of the base translation table. */
+	unsigned int base_level;
+
+	/* Set to 1 when the translation tables are initialized. */
+	unsigned int initialized;
+
+	/*
+	 * Translation regime managed by this xlat_ctx_t. It takes the values of
+	 * the enumeration xlat_regime_t. The type is "int" to avoid a circular
+	 * dependency on xlat_tables_v2.h, but this member must be treated as
+	 * xlat_regime_t.
+	 */
+	int xlat_regime;
+};
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+	static int _ctx_name##_mapped_regions[_xlat_tables_count];
+
+#define _REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+	.tables_mapped_regions = _ctx_name##_mapped_regions,
+#else
+#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+	/* do nothing */
+
+#define _REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+	/* do nothing */
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#define _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, _xlat_tables_count,	\
+			_virt_addr_space_size, _phy_addr_space_size,		\
+			_xlat_regime)					\
+	CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size),		\
+		assert_invalid_virtual_addr_space_size_for_##_ctx_name);	\
+										\
+	CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size),		\
+		assert_invalid_physical_addr_space_size_for_##_ctx_name);	\
+										\
+	static mmap_region_t _ctx_name##_mmap[_mmap_count + 1];			\
+										\
+	static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count]		\
+		[XLAT_TABLE_ENTRIES]						\
+		__aligned(XLAT_TABLE_SIZE) __section("xlat_table");		\
+										\
+	static uint64_t _ctx_name##_base_xlat_table				\
+		[GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)]		\
+		__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)	\
+			* sizeof(uint64_t));					\
+										\
+	_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)			\
+										\
+	static xlat_ctx_t _ctx_name##_xlat_ctx = {				\
+		.va_max_address = (_virt_addr_space_size) - 1,			\
+		.pa_max_address = (_phy_addr_space_size) - 1,			\
+		.mmap = _ctx_name##_mmap,					\
+		.mmap_num = _mmap_count,					\
+		.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),	\
+		.base_table = _ctx_name##_base_xlat_table,			\
+		.base_table_entries =						\
+			GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),	\
+		.tables = _ctx_name##_xlat_tables,				\
+		.tables_num = _xlat_tables_count,				\
+		 _REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+		.xlat_regime = (_xlat_regime),					\
+		.max_pa = 0,							\
+		.max_va = 0,							\
+		.next_table = 0,						\
+		.initialized = 0,						\
+	}
+
+#if AARCH64
+
+/*
+ * This IMAGE_EL macro must not be used outside the library, and it is only
+ * used in AArch64.
+ */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
+# define IMAGE_EL	3
+# define IMAGE_XLAT_DEFAULT_REGIME EL3_REGIME
+#else
+# define IMAGE_EL	1
+# define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
+#endif
+
+#else /* if AARCH32 */
+
+/*
+ * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime in
+ * AArch64 except for the XN bits, but we set and unset them at the same time,
+ * so there's no difference in practice.
+ */
+#define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
+
+#endif /* AARCH64 */
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
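A hypothetical use of these helpers, registering a second translation context (for example for an S-EL1&0 payload), is sketched below. The context name foo, the entry counts and the mapped region are illustrative, and the snippet only compiles inside a Trusted Firmware platform build.

#include <xlat_tables_v2.h>

/* Defines foo_xlat_ctx plus its mmap array, base table and sub-tables. */
REGISTER_XLAT_CONTEXT2(foo, 8, 4,
		       (1ULL << 32),	/* 4GB virtual address space  */
		       (1ULL << 32),	/* 4GB physical address space */
		       EL1_EL0_REGIME);

void foo_setup(void)
{
	mmap_region_t dram = MAP_REGION_FLAT(0x80000000ULL, 0x10000000U,
					     MT_MEMORY | MT_RW | MT_NS);

	mmap_add_region_ctx(&foo_xlat_ctx, &dram);
	init_xlat_tables_ctx(&foo_xlat_ctx);
}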
diff --git a/include/plat/arm/board/common/board_arm_def.h b/include/plat/arm/board/common/board_arm_def.h
new file mode 100644
index 0000000..7a4594c
--- /dev/null
+++ b/include/plat/arm/board/common/board_arm_def.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __BOARD_ARM_DEF_H__
+#define __BOARD_ARM_DEF_H__
+
+#include <v2m_def.h>
+
+
+/*
+ * Required platform porting definitions common to all ARM
+ * development platforms
+ */
+
+/* Size of cacheable stacks */
+#if defined(IMAGE_BL1)
+#if TRUSTED_BOARD_BOOT
+# define PLATFORM_STACK_SIZE 0x1000
+#else
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+#elif defined(IMAGE_BL2)
+# if TRUSTED_BOARD_BOOT
+#  define PLATFORM_STACK_SIZE 0x1000
+# else
+#  define PLATFORM_STACK_SIZE 0x400
+# endif
+#elif defined(IMAGE_BL2U)
+# define PLATFORM_STACK_SIZE 0x200
+#elif defined(IMAGE_BL31)
+# define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL32)
+# define PLATFORM_STACK_SIZE 0x440
+#endif
+
+/*
+ * The constants below are not optimised for memory usage. Platforms that wish
+ * to optimise these constants should set `ARM_BOARD_OPTIMISE_MEM` to 1 and
+ * provide their own values.
+ */
+#if !ARM_BOARD_OPTIMISE_MEM
+/*
+ * PLAT_ARM_MMAP_ENTRIES depends on the number of entries in the
+ * plat_arm_mmap array defined for each BL stage.
+ *
+ * Provide relatively optimised values for the runtime images (BL31 and BL32).
+ * Optimisation is less important for the other, transient boot images so a
+ * common, maximum value is used across these images.
+ *
+ * They are also used for the dynamically mapped regions in the images that
+ * enable dynamic memory mapping.
+ */
+#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
+# define PLAT_ARM_MMAP_ENTRIES		7
+# define MAX_XLAT_TABLES		5
+#else
+# define PLAT_ARM_MMAP_ENTRIES		11
+# define MAX_XLAT_TABLES		5
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_ARM_MAX_BL1_RW_SIZE	0xB000
+
+/*
+ * PLAT_ARM_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#if TRUSTED_BOARD_BOOT
+# define PLAT_ARM_MAX_BL2_SIZE		0x1E000
+#else
+# define PLAT_ARM_MAX_BL2_SIZE		0xF000
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
+ * little space for growth.
+ */
+#define PLAT_ARM_MAX_BL31_SIZE		0x1D000
+
+#endif /* ARM_BOARD_OPTIMISE_MEM */
+
+#define MAX_IO_DEVICES			3
+#define MAX_IO_HANDLES			4
+
+#define PLAT_ARM_TRUSTED_SRAM_SIZE	0x00040000	/* 256 KB */
+
+/* Reserve the last block of flash for PSCI MEM PROTECT flag */
+#define PLAT_ARM_FIP_BASE		V2M_FLASH0_BASE
+#define PLAT_ARM_FIP_MAX_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
+
+#define PLAT_ARM_NVM_BASE		V2M_FLASH0_BASE
+#define PLAT_ARM_NVM_SIZE		(V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
+
+/* PSCI memory protect definitions:
+ * This variable is stored in a non-secure flash because some ARM reference
+ * platforms do not have secure NVRAM. Real systems that provide MEM_PROTECT
+ * support must use a secure NVRAM to store the PSCI MEM_PROTECT definitions.
+ */
+#define PLAT_ARM_MEM_PROT_ADDR		(V2M_FLASH0_BASE + \
+					 V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE)
+
+/*
+ * Map mem_protect flash region with read and write permissions
+ */
+#define ARM_V2M_MAP_MEM_PROTECT		MAP_REGION_FLAT(PLAT_ARM_MEM_PROT_ADDR,	\
+						V2M_FLASH_BLOCK_SIZE,		\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+#endif /* __BOARD_ARM_DEF_H__ */
diff --git a/include/plat/arm/board/common/board_css_def.h b/include/plat/arm/board/common/board_css_def.h
new file mode 100644
index 0000000..b0a6baf
--- /dev/null
+++ b/include/plat/arm/board/common/board_css_def.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __BOARD_CSS_DEF_H__
+#define __BOARD_CSS_DEF_H__
+
+#include <common_def.h>
+#include <soc_css_def.h>
+#include <utils_def.h>
+#include <v2m_def.h>
+
+/*
+ * Definitions common to all ARM CSS-based development platforms
+ */
+
+/* Platform ID address */
+#define BOARD_CSS_PLAT_ID_REG_ADDR		0x7ffe00e0
+
+/* Platform ID related accessors */
+#define BOARD_CSS_PLAT_ID_REG_ID_MASK		0x0f
+#define BOARD_CSS_PLAT_ID_REG_ID_SHIFT		0x0
+#define BOARD_CSS_PLAT_ID_REG_VERSION_MASK	0xf00
+#define BOARD_CSS_PLAT_ID_REG_VERSION_SHIFT	0x8
+#define BOARD_CSS_PLAT_TYPE_RTL			0x00
+#define BOARD_CSS_PLAT_TYPE_FPGA		0x01
+#define BOARD_CSS_PLAT_TYPE_EMULATOR		0x02
+#define BOARD_CSS_PLAT_TYPE_FVP			0x03
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+#define BOARD_CSS_GET_PLAT_TYPE(addr)					\
+	((mmio_read_32(addr) & BOARD_CSS_PLAT_ID_REG_ID_MASK)		\
+	>> BOARD_CSS_PLAT_ID_REG_ID_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+
+/*
+ * Required platform porting definitions common to all ARM CSS-based
+ * development platforms
+ */
+
+#define PLAT_ARM_DRAM2_SIZE			ULL(0x180000000)
+
+/* UART related constants */
+#define PLAT_ARM_BOOT_UART_BASE			SOC_CSS_UART0_BASE
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ		SOC_CSS_UART0_CLK_IN_HZ
+
+#define PLAT_ARM_BL31_RUN_UART_BASE		SOC_CSS_UART1_BASE
+#define PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ	SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_SP_MIN_RUN_UART_BASE		SOC_CSS_UART1_BASE
+#define PLAT_ARM_SP_MIN_RUN_UART_CLK_IN_HZ	SOC_CSS_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_CRASH_UART_BASE		PLAT_ARM_BL31_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ		PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ
+
+#define PLAT_ARM_TSP_UART_BASE			V2M_IOFPGA_UART0_BASE
+#define PLAT_ARM_TSP_UART_CLK_IN_HZ		V2M_IOFPGA_UART0_CLK_IN_HZ
+
+
+#endif /* __BOARD_CSS_DEF_H__ */
+
diff --git a/include/plat/arm/board/common/drivers/norflash.h b/include/plat/arm/board/common/drivers/norflash.h
new file mode 100644
index 0000000..5763b36
--- /dev/null
+++ b/include/plat/arm/board/common/drivers/norflash.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __NORFLASH_H_
+#define __NORFLASH_H_
+
+#include <stdint.h>
+
+/* First bus cycle */
+#define NOR_CMD_READ_ARRAY		0xFF
+#define NOR_CMD_READ_ID_CODE		0x90
+#define NOR_CMD_READ_QUERY		0x98
+#define NOR_CMD_READ_STATUS_REG		0x70
+#define NOR_CMD_CLEAR_STATUS_REG	0x50
+#define NOR_CMD_WRITE_TO_BUFFER		0xE8
+#define NOR_CMD_WORD_PROGRAM		0x40
+#define NOR_CMD_BLOCK_ERASE		0x20
+#define NOR_CMD_LOCK_UNLOCK		0x60
+#define NOR_CMD_BLOCK_ERASE_ACK		0xD0
+
+/* Second bus cycle */
+#define NOR_LOCK_BLOCK			0x01
+#define NOR_UNLOCK_BLOCK		0xD0
+
+/* Status register bits */
+#define NOR_DWS				(1 << 7)
+#define NOR_ESS				(1 << 6)
+#define NOR_ES				(1 << 5)
+#define NOR_PS				(1 << 4)
+#define NOR_VPPS			(1 << 3)
+#define NOR_PSS				(1 << 2)
+#define NOR_BLS				(1 << 1)
+#define NOR_BWS				(1 << 0)
+
+/* Public API */
+void nor_send_cmd(uintptr_t base_addr, unsigned long cmd);
+int nor_word_program(uintptr_t base_addr, unsigned long data);
+int nor_lock(uintptr_t base_addr);
+int nor_unlock(uintptr_t base_addr);
+int nor_erase(uintptr_t base_addr);
+
+#endif /* __NORFLASH_H_ */
+
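A plausible word-programming sequence with this API, shown only as a sketch (the function name is invented and error handling is minimal): unlock the block containing the target address, program the word, then re-lock.

#include <norflash.h>

int write_nor_word(uintptr_t addr, unsigned long value)
{
	int ret;

	ret = nor_unlock(addr);			/* Unlock the containing block. */
	if (ret != 0)
		return ret;

	ret = nor_word_program(addr, value);	/* Issue the word program. */

	(void)nor_lock(addr);			/* Re-lock the block regardless. */
	return ret;
}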
diff --git a/include/plat/arm/board/common/v2m_def.h b/include/plat/arm/board/common/v2m_def.h
new file mode 100644
index 0000000..364b780
--- /dev/null
+++ b/include/plat/arm/board/common/v2m_def.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __V2M_DEF_H__
+#define __V2M_DEF_H__
+
+#include <arm_xlat_tables.h>
+
+
+/* V2M motherboard system registers & offsets */
+#define V2M_SYSREGS_BASE		0x1c010000
+#define V2M_SYS_ID			0x0
+#define V2M_SYS_SWITCH			0x4
+#define V2M_SYS_LED			0x8
+#define V2M_SYS_NVFLAGS			0x38
+#define V2M_SYS_NVFLAGSSET		0x38
+#define V2M_SYS_NVFLAGSCLR		0x3c
+#define V2M_SYS_CFGDATA			0xa0
+#define V2M_SYS_CFGCTRL			0xa4
+#define V2M_SYS_CFGSTATUS		0xa8
+
+#define V2M_CFGCTRL_START		(1 << 31)
+#define V2M_CFGCTRL_RW			(1 << 30)
+#define V2M_CFGCTRL_FUNC_SHIFT		20
+#define V2M_CFGCTRL_FUNC(fn)		(fn << V2M_CFGCTRL_FUNC_SHIFT)
+#define V2M_FUNC_CLK_GEN		0x01
+#define V2M_FUNC_TEMP			0x04
+#define V2M_FUNC_DB_RESET		0x05
+#define V2M_FUNC_SCC_CFG		0x06
+#define V2M_FUNC_SHUTDOWN		0x08
+#define V2M_FUNC_REBOOT			0x09
+
+/*
+ * V2M sysled bit definitions. The values written to this
+ * register are defined in arch.h & runtime_svc.h. Only
+ * used by the primary cpu to diagnose any cold boot issues.
+ *
+ * SYS_LED[0]   - Security state (S=0/NS=1)
+ * SYS_LED[2:1] - Exception Level (EL3-EL0)
+ * SYS_LED[7:3] - Exception Class (Sync/Async & origin)
+ *
+ */
+#define V2M_SYS_LED_SS_SHIFT		0x0
+#define V2M_SYS_LED_EL_SHIFT		0x1
+#define V2M_SYS_LED_EC_SHIFT		0x3
+
+#define V2M_SYS_LED_SS_MASK		0x1
+#define V2M_SYS_LED_EL_MASK		0x3
+#define V2M_SYS_LED_EC_MASK		0x1f
+
+/* V2M sysid register bits */
+#define V2M_SYS_ID_REV_SHIFT		28
+#define V2M_SYS_ID_HBI_SHIFT		16
+#define V2M_SYS_ID_BLD_SHIFT		12
+#define V2M_SYS_ID_ARCH_SHIFT		8
+#define V2M_SYS_ID_FPGA_SHIFT		0
+
+#define V2M_SYS_ID_REV_MASK		0xf
+#define V2M_SYS_ID_HBI_MASK		0xfff
+#define V2M_SYS_ID_BLD_MASK		0xf
+#define V2M_SYS_ID_ARCH_MASK		0xf
+#define V2M_SYS_ID_FPGA_MASK		0xff
+
+#define V2M_SYS_ID_BLD_LENGTH		4
+
+
+/* NOR Flash */
+#define V2M_FLASH0_BASE			0x08000000
+#define V2M_FLASH0_SIZE			0x04000000
+#define V2M_FLASH_BLOCK_SIZE		0x00040000	/* 256 KB */
+
+#define V2M_IOFPGA_BASE			0x1c000000
+#define V2M_IOFPGA_SIZE			0x03000000
+
+/* PL011 UART related constants */
+#define V2M_IOFPGA_UART0_BASE		0x1c090000
+#define V2M_IOFPGA_UART1_BASE		0x1c0a0000
+#define V2M_IOFPGA_UART2_BASE		0x1c0b0000
+#define V2M_IOFPGA_UART3_BASE		0x1c0c0000
+
+#define V2M_IOFPGA_UART0_CLK_IN_HZ	24000000
+#define V2M_IOFPGA_UART1_CLK_IN_HZ	24000000
+#define V2M_IOFPGA_UART2_CLK_IN_HZ	24000000
+#define V2M_IOFPGA_UART3_CLK_IN_HZ	24000000
+
+/* SP804 timer related constants */
+#define V2M_SP804_TIMER0_BASE		0x1C110000
+#define V2M_SP804_TIMER1_BASE		0x1C120000
+
+/* SP810 controller */
+#define V2M_SP810_BASE			0x1c020000
+#define V2M_SP810_CTRL_TIM0_SEL		(1 << 15)
+#define V2M_SP810_CTRL_TIM1_SEL		(1 << 17)
+#define V2M_SP810_CTRL_TIM2_SEL		(1 << 19)
+#define V2M_SP810_CTRL_TIM3_SEL		(1 << 21)
+
+/*
+ * The flash can be mapped either as read-only or read-write.
+ *
+ * If it is read-write then it should also be mapped as device memory because
+ * NOR flash programming involves sending a fixed, ordered sequence of commands.
+ *
+ * If it is read-only then it should also be mapped as:
+ * - Normal memory, because reading from NOR flash is transparent: it is like
+ *   reading from RAM.
+ * - Non-executable by default. If some parts of the flash need to be executable
+ *   then platform code is responsible for re-mapping the appropriate portion
+ *   of it as executable.
+ */
+#define V2M_MAP_FLASH0_RW		MAP_REGION_FLAT(V2M_FLASH0_BASE,\
+						V2M_FLASH0_SIZE,	\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+#define V2M_MAP_FLASH0_RO		MAP_REGION_FLAT(V2M_FLASH0_BASE,\
+						V2M_FLASH0_SIZE,	\
+						MT_RO_DATA | MT_SECURE)
+
+#define V2M_MAP_IOFPGA			MAP_REGION_FLAT(V2M_IOFPGA_BASE,\
+						V2M_IOFPGA_SIZE,		\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+
+
+#endif /* __V2M_DEF_H__ */
diff --git a/include/plat/arm/common/aarch64/arm_macros.S b/include/plat/arm/common/aarch64/arm_macros.S
new file mode 100644
index 0000000..12bf734
--- /dev/null
+++ b/include/plat/arm/common/aarch64/arm_macros.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ARM_MACROS_S__
+#define __ARM_MACROS_S__
+
+#include <gic_common.h>
+#include <gicv2.h>
+#include <gicv3.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+
+/* Applicable only to GICv3 with SRE enabled */
+icc_regs:
+	.asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
+
+/* Registers common to both GICv2 and GICv3 */
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
+		" Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+	/* ---------------------------------------------
+	 * The below utility macro prints out relevant GIC
+	 * registers whenever an unhandled exception is
+	 * taken in BL31 on ARM standard platforms.
+	 * Expects: GICD base in x16, GICC base in x17
+	 * Clobbers: x0 - x10, sp
+	 * ---------------------------------------------
+	 */
+	.macro arm_print_gic_regs
+	/* Check for GICv3 system register access */
+	mrs	x7, id_aa64pfr0_el1
+	ubfx	x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
+	cmp	x7, #1
+	b.ne	print_gicv2
+
+	/* Check for SRE enable */
+	mrs	x8, ICC_SRE_EL3
+	tst	x8, #ICC_SRE_SRE_BIT
+	b.eq	print_gicv2
+
+	/* Load the icc reg list to x6 */
+	adr	x6, icc_regs
+	/* Load the icc regs to gp regs used by str_in_crash_buf_print */
+	mrs	x8, ICC_HPPIR0_EL1
+	mrs	x9, ICC_HPPIR1_EL1
+	mrs	x10, ICC_CTLR_EL3
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+	b	print_gic_common
+
+print_gicv2:
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+print_gic_common:
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+gicd_ispendr_loop:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	exit_print_gic_regs
+	bl	asm_print_hex
+
+	adr	x4, spacer
+	bl	asm_print_str
+
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+
+	adr	x4, newline
+	bl	asm_print_str
+	b	gicd_ispendr_loop
+exit_print_gic_regs:
+	.endm
+
+#endif /* __ARM_MACROS_S__ */
diff --git a/include/plat/arm/common/aarch64/cci_macros.S b/include/plat/arm/common/aarch64/cci_macros.S
new file mode 100644
index 0000000..52e060d
--- /dev/null
+++ b/include/plat/arm/common/aarch64/cci_macros.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __CCI_MACROS_S__
+#define __CCI_MACROS_S__
+
+#include <cci.h>
+#include <platform_def.h>
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+	/* ------------------------------------------------
+	 * The below required platform porting macro prints
+	 * out relevant interconnect registers whenever an
+	 * unhandled exception is taken in BL31.
+	 * Clobbers: x0 - x9, sp
+	 * ------------------------------------------------
+	 */
+	.macro print_cci_regs
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (PLAT_ARM_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (PLAT_ARM_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+	.endm
+
+#endif /* __CCI_MACROS_S__ */
diff --git a/include/plat/arm/common/arm_common.ld.S b/include/plat/arm/common/arm_common.ld.S
new file mode 100644
index 0000000..478b08c
--- /dev/null
+++ b/include/plat/arm/common/arm_common.ld.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ARM_COMMON_LD_S__
+#define __ARM_COMMON_LD_S__
+
+MEMORY {
+    EL3_SEC_DRAM (rw): ORIGIN = ARM_EL3_TZC_DRAM1_BASE, LENGTH = ARM_EL3_TZC_DRAM1_SIZE
+}
+
+SECTIONS
+{
+	. = ARM_EL3_TZC_DRAM1_BASE;
+	ASSERT(. == ALIGN(4096),
+	"ARM_EL3_TZC_DRAM_BASE address is not aligned on a page boundary.")
+	el3_tzc_dram (NOLOAD) : ALIGN(4096) {
+	__EL3_SEC_DRAM_START__ = .;
+	*(arm_el3_tzc_dram)
+	__EL3_SEC_DRAM_UNALIGNED_END__ = .;
+
+	. = NEXT(4096);
+	__EL3_SEC_DRAM_END__ = .;
+	} >EL3_SEC_DRAM
+}
+
+#endif /* __ARM_COMMON_LD_S__ */
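Code that wants data placed in this region tags the object with the arm_el3_tzc_dram input section that the script collects. A minimal hypothetical example is shown below (buffer name and size invented); since the output section is NOLOAD, such objects must not rely on being zero-initialised.

#include <stdint.h>

/* Placed in EL3 TZC DRAM by the arm_common.ld.S fragment above. */
static uint8_t el3_scratch_buf[4096]
	__attribute__((used, section("arm_el3_tzc_dram")));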
diff --git a/include/plat/arm/common/arm_config.h b/include/plat/arm/common/arm_config.h
new file mode 100644
index 0000000..02e04fd
--- /dev/null
+++ b/include/plat/arm/common/arm_config.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ARM_CONFIG_H__
+#define __ARM_CONFIG_H__
+
+#include <stdint.h>
+#include <utils_def.h>
+
+enum arm_config_flags {
+	/* Whether Base memory map is in use */
+	ARM_CONFIG_BASE_MMAP		= BIT(1),
+	/* Whether TZC should be configured */
+	ARM_CONFIG_HAS_TZC		= BIT(2),
+	/* FVP model has shifted affinity */
+	ARM_CONFIG_FVP_SHIFTED_AFF	= BIT(3),
+	/* FVP model has SMMUv3 affinity */
+	ARM_CONFIG_FVP_HAS_SMMUV3	= BIT(4),
+	/* FVP model has CCI (400 or 500/550) devices */
+	ARM_CONFIG_FVP_HAS_CCI400	= BIT(5),
+	ARM_CONFIG_FVP_HAS_CCI5XX	= BIT(6),
+};
+
+typedef struct arm_config {
+	unsigned long flags;
+} arm_config_t;
+
+
+/* If used, arm_config must be defined and populated in the platform port */
+extern arm_config_t arm_config;
+
+static inline const arm_config_t *get_arm_config(void)
+{
+	return &arm_config;
+}
+
+
+#endif /* __ARM_CONFIG_H__ */
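A minimal hypothetical consumer of this structure, checking whether a TrustZone controller needs to be programmed (the helper name is invented):

#include <arm_config.h>

int plat_has_tzc(void)
{
	/* Only program the TZC when the model actually provides one. */
	return (get_arm_config()->flags & ARM_CONFIG_HAS_TZC) != 0UL;
}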
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
new file mode 100644
index 0000000..6cab91f
--- /dev/null
+++ b/include/plat/arm/common/arm_def.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ARM_DEF_H__
+#define __ARM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <platform_def.h>
+#include <tbbr_img_def.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+
+/******************************************************************************
+ * Definitions common to all ARM standard platforms
+ *****************************************************************************/
+
+/* Special value used to verify platform parameters from BL2 to BL31 */
+#define ARM_BL31_PLAT_PARAM_VAL		0x0f1e2d3c4b5a6978ULL
+
+#define ARM_SYSTEM_COUNT		1
+
+#define ARM_CACHE_WRITEBACK_SHIFT	6
+
+/*
+ * Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
+ * power levels have a 1:1 mapping with the MPIDR affinity levels.
+ */
+#define ARM_PWR_LVL0		MPIDR_AFFLVL0
+#define ARM_PWR_LVL1		MPIDR_AFFLVL1
+#define ARM_PWR_LVL2		MPIDR_AFFLVL2
+
+/*
+ *  Macros for local power states in ARM platforms encoded by State-ID field
+ *  within the power-state parameter.
+ */
+/* Local power state for power domains in Run state. */
+#define ARM_LOCAL_STATE_RUN	0
+/* Local power state for retention. Valid only for CPU power domains */
+#define ARM_LOCAL_STATE_RET	1
+/* Local power state for OFF/power-down. Valid for CPU and cluster power
+   domains */
+#define ARM_LOCAL_STATE_OFF	2
+
+/* Memory location options for TSP */
+#define ARM_TRUSTED_SRAM_ID		0
+#define ARM_TRUSTED_DRAM_ID		1
+#define ARM_DRAM_ID			2
+
+/* The first 4KB of Trusted SRAM are used as shared memory */
+#define ARM_TRUSTED_SRAM_BASE		0x04000000
+#define ARM_SHARED_RAM_BASE		ARM_TRUSTED_SRAM_BASE
+#define ARM_SHARED_RAM_SIZE		0x00001000	/* 4 KB */
+
+/* The remaining Trusted SRAM is used to load the BL images */
+#define ARM_BL_RAM_BASE			(ARM_SHARED_RAM_BASE +	\
+					 ARM_SHARED_RAM_SIZE)
+#define ARM_BL_RAM_SIZE			(PLAT_ARM_TRUSTED_SRAM_SIZE -	\
+					 ARM_SHARED_RAM_SIZE)
+
+/*
+ * The top 16MB of DRAM1 is configured as secure access only using the TZC
+ *   - SCP TZC DRAM: If present, DRAM reserved for SCP use
+ *   - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use
+ */
+#define ARM_TZC_DRAM1_SIZE		ULL(0x01000000)
+
+#define ARM_SCP_TZC_DRAM1_BASE		(ARM_DRAM1_BASE +		\
+					 ARM_DRAM1_SIZE -		\
+					 ARM_SCP_TZC_DRAM1_SIZE)
+#define ARM_SCP_TZC_DRAM1_SIZE		PLAT_ARM_SCP_TZC_DRAM1_SIZE
+#define ARM_SCP_TZC_DRAM1_END		(ARM_SCP_TZC_DRAM1_BASE +	\
+					 ARM_SCP_TZC_DRAM1_SIZE - 1)
+
+/*
+ * Define a 2MB region within the TZC secured DRAM for use by EL3 runtime
+ * firmware. This region is meant to be NOLOAD and will not be zero
+ * initialized. Data sections with the attribute `arm_el3_tzc_dram` will be
+ * placed here.
+ */
+#define ARM_EL3_TZC_DRAM1_BASE		(ARM_SCP_TZC_DRAM1_BASE - ARM_EL3_TZC_DRAM1_SIZE)
+#define ARM_EL3_TZC_DRAM1_SIZE		ULL(0x00200000) /* 2 MB */
+#define ARM_EL3_TZC_DRAM1_END		(ARM_EL3_TZC_DRAM1_BASE +	\
+					ARM_EL3_TZC_DRAM1_SIZE - 1)
+
+#define ARM_AP_TZC_DRAM1_BASE		(ARM_DRAM1_BASE +		\
+					 ARM_DRAM1_SIZE -		\
+					 ARM_TZC_DRAM1_SIZE)
+#define ARM_AP_TZC_DRAM1_SIZE		(ARM_TZC_DRAM1_SIZE -		\
+					 (ARM_SCP_TZC_DRAM1_SIZE +	\
+					 ARM_EL3_TZC_DRAM1_SIZE))
+#define ARM_AP_TZC_DRAM1_END		(ARM_AP_TZC_DRAM1_BASE +	\
+					 ARM_AP_TZC_DRAM1_SIZE - 1)
+
+/* Define the Access permissions for Secure peripherals to NS_DRAM */
+#if ARM_CRYPTOCELL_INTEG
+/*
+ * Allow Secure peripheral to read NS DRAM when integrated with CryptoCell.
+ * This is required by CryptoCell to authenticate BL33 which is loaded
+ * into the Non Secure DDR.
+ */
+#define ARM_TZC_NS_DRAM_S_ACCESS	TZC_REGION_S_RD
+#else
+#define ARM_TZC_NS_DRAM_S_ACCESS	TZC_REGION_S_NONE
+#endif
+
+#ifdef SPD_opteed
+/*
+ * BL2 needs to map 4MB at the end of TZC_DRAM1 in order to
+ * load/authenticate the trusted OS extra image. The first 512KB of
+ * TZC_DRAM1 are reserved for the trusted OS (OP-TEE). The extra image
+ * loaded for OP-TEE is the paged image, which contains only the paging
+ * part accessed through virtual memory, without the "init" data. OP-TEE
+ * copies the "init" data (from the pager image) to the first 512KB of
+ * TZC_DRAM and then places the extra image behind the "init" data.
+ */
+#define ARM_OPTEE_PAGEABLE_LOAD_BASE	(ARM_AP_TZC_DRAM1_BASE + \
+					 ARM_AP_TZC_DRAM1_SIZE - \
+					 ARM_OPTEE_PAGEABLE_LOAD_SIZE)
+#define ARM_OPTEE_PAGEABLE_LOAD_SIZE	0x400000
+#define ARM_OPTEE_PAGEABLE_LOAD_MEM	MAP_REGION_FLAT(		\
+					ARM_OPTEE_PAGEABLE_LOAD_BASE,	\
+					ARM_OPTEE_PAGEABLE_LOAD_SIZE,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
+
+/*
+ * Map the memory for the OP-TEE core (also known as OP-TEE pager when paging
+ * support is enabled).
+ */
+#define ARM_MAP_OPTEE_CORE_MEM		MAP_REGION_FLAT(		\
+						BL32_BASE,		\
+						BL32_LIMIT - BL32_BASE,	\
+						MT_MEMORY | MT_RW | MT_SECURE)
+#endif /* SPD_opteed */
+
+#define ARM_NS_DRAM1_BASE		ARM_DRAM1_BASE
+#define ARM_NS_DRAM1_SIZE		(ARM_DRAM1_SIZE -		\
+					 ARM_TZC_DRAM1_SIZE)
+#define ARM_NS_DRAM1_END		(ARM_NS_DRAM1_BASE +		\
+					 ARM_NS_DRAM1_SIZE - 1)
+
+#define ARM_DRAM1_BASE			ULL(0x80000000)
+#define ARM_DRAM1_SIZE			ULL(0x80000000)
+#define ARM_DRAM1_END			(ARM_DRAM1_BASE +		\
+					 ARM_DRAM1_SIZE - 1)
+
+#define ARM_DRAM2_BASE			ULL(0x880000000)
+#define ARM_DRAM2_SIZE			PLAT_ARM_DRAM2_SIZE
+#define ARM_DRAM2_END			(ARM_DRAM2_BASE +		\
+					 ARM_DRAM2_SIZE - 1)
+
+#define ARM_IRQ_SEC_PHY_TIMER		29
+
+#define ARM_IRQ_SEC_SGI_0		8
+#define ARM_IRQ_SEC_SGI_1		9
+#define ARM_IRQ_SEC_SGI_2		10
+#define ARM_IRQ_SEC_SGI_3		11
+#define ARM_IRQ_SEC_SGI_4		12
+#define ARM_IRQ_SEC_SGI_5		13
+#define ARM_IRQ_SEC_SGI_6		14
+#define ARM_IRQ_SEC_SGI_7		15
+
+/*
+ * The following lists of secure interrupts are deprecated; they are retained
+ * only to support legacy configurations.
+ */
+#define ARM_G1S_IRQS			ARM_IRQ_SEC_PHY_TIMER,		\
+					ARM_IRQ_SEC_SGI_1,		\
+					ARM_IRQ_SEC_SGI_2,		\
+					ARM_IRQ_SEC_SGI_3,		\
+					ARM_IRQ_SEC_SGI_4,		\
+					ARM_IRQ_SEC_SGI_5,		\
+					ARM_IRQ_SEC_SGI_7
+
+#define ARM_G0_IRQS			ARM_IRQ_SEC_SGI_0,		\
+					ARM_IRQ_SEC_SGI_6
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupt properties as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define ARM_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(ARM_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+#define ARM_G0_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
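+
+/*
+ * Illustrative usage (not part of this patch): a platform port would
+ * typically expand these lists into the interrupt_prop_t array handed to
+ * the GIC driver, e.g.:
+ *
+ *   static const interrupt_prop_t plat_interrupt_props[] = {
+ *       ARM_G1S_IRQ_PROPS(INTR_GROUP1S),
+ *       ARM_G0_IRQ_PROPS(INTR_GROUP0)
+ *   };
+ *
+ * where INTR_GROUP1S/INTR_GROUP0 are the group identifiers defined by the
+ * GIC driver headers.
+ */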
+
+#define ARM_MAP_SHARED_RAM		MAP_REGION_FLAT(		\
+						ARM_SHARED_RAM_BASE,	\
+						ARM_SHARED_RAM_SIZE,	\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+#define ARM_MAP_NS_DRAM1		MAP_REGION_FLAT(		\
+						ARM_NS_DRAM1_BASE,	\
+						ARM_NS_DRAM1_SIZE,	\
+						MT_MEMORY | MT_RW | MT_NS)
+
+#define ARM_MAP_DRAM2			MAP_REGION_FLAT(		\
+						ARM_DRAM2_BASE,		\
+						ARM_DRAM2_SIZE,		\
+						MT_MEMORY | MT_RW | MT_NS)
+#ifdef SPD_tspd
+
+#define ARM_MAP_TSP_SEC_MEM		MAP_REGION_FLAT(		\
+						TSP_SEC_MEM_BASE,	\
+						TSP_SEC_MEM_SIZE,	\
+						MT_MEMORY | MT_RW | MT_SECURE)
+#endif
+
+#if ARM_BL31_IN_DRAM
+#define ARM_MAP_BL31_SEC_DRAM		MAP_REGION_FLAT(		\
+						BL31_BASE,		\
+						PLAT_ARM_MAX_BL31_SIZE,	\
+						MT_MEMORY | MT_RW | MT_SECURE)
+#endif
+
+#define ARM_MAP_EL3_TZC_DRAM		MAP_REGION_FLAT(			\
+						ARM_EL3_TZC_DRAM1_BASE,	\
+						ARM_EL3_TZC_DRAM1_SIZE,	\
+						MT_MEMORY | MT_RW | MT_SECURE)
+
+/*
+ * The number of regions (such as RO/code, coherent memory and data) required
+ * by the different BL stages, which need to be mapped in the MMU.
+ */
+#if USE_COHERENT_MEM
+#define ARM_BL_REGIONS			3
+#else
+#define ARM_BL_REGIONS			2
+#endif
+
+#define MAX_MMAP_REGIONS		(PLAT_ARM_MMAP_ENTRIES +	\
+					 ARM_BL_REGIONS)
+
+/* Memory mapped Generic timer interfaces  */
+#define ARM_SYS_CNTCTL_BASE		0x2a430000
+#define ARM_SYS_CNTREAD_BASE		0x2a800000
+#define ARM_SYS_TIMCTL_BASE		0x2a810000
+
+#define ARM_CONSOLE_BAUDRATE		115200
+
+/* Trusted Watchdog constants */
+#define ARM_SP805_TWDG_BASE		0x2a490000
+#define ARM_SP805_TWDG_CLK_HZ		32768
+/* The TBBR document specifies a watchdog timeout of 256 seconds. SP805
+ * asserts reset after two consecutive countdowns (2 x 128 = 256 sec) */
+#define ARM_TWDG_TIMEOUT_SEC		128
+#define ARM_TWDG_LOAD_VAL		(ARM_SP805_TWDG_CLK_HZ * 	\
+					 ARM_TWDG_TIMEOUT_SEC)
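+
+/*
+ * For reference (illustrative arithmetic, not part of this patch): with the
+ * values above, ARM_TWDG_LOAD_VAL = 32768 Hz * 128 s = 4194304 ticks, and
+ * the SP805 asserts reset only after two such countdowns, i.e. after the
+ * 256 seconds mandated by the TBBR document.
+ */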
+
+/******************************************************************************
+ * Required platform porting definitions common to all ARM standard platforms
+ *****************************************************************************/
+
+/*
+ * We need to access DRAM2 from BL2 for PSCI_MEM_PROTECT for
+ * AArch64 builds
+ */
+#ifdef AARCH64
+#define PLAT_PHY_ADDR_SPACE_SIZE			(1ull << 36)
+#define PLAT_VIRT_ADDR_SPACE_SIZE			(1ull << 36)
+#else
+#define PLAT_PHY_ADDR_SPACE_SIZE			(1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE			(1ull << 32)
+#endif
+
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * ID represents an invalid or a power-down state.
+ */
+#define PLAT_MAX_RET_STATE		ARM_LOCAL_STATE_RET
+
+/*
+ * This macro defines the deepest power-down state possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE		ARM_LOCAL_STATE_OFF
+
+/*
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ */
+#define CACHE_WRITEBACK_GRANULE		(1 << ARM_CACHE_WRITEBACK_SHIFT)
+
+
+/*******************************************************************************
+ * BL1 specific defines.
+ * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
+ * addresses.
+ ******************************************************************************/
+#define BL1_RO_BASE			PLAT_ARM_TRUSTED_ROM_BASE
+#define BL1_RO_LIMIT			(PLAT_ARM_TRUSTED_ROM_BASE	\
+					 + PLAT_ARM_TRUSTED_ROM_SIZE)
+/*
+ * Put BL1 RW at the top of the Trusted SRAM.
+ */
+#define BL1_RW_BASE			(ARM_BL_RAM_BASE +		\
+						ARM_BL_RAM_SIZE -	\
+						PLAT_ARM_MAX_BL1_RW_SIZE)
+#define BL1_RW_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
+
+/*******************************************************************************
+ * BL2 specific defines.
+ ******************************************************************************/
+#if ARM_BL31_IN_DRAM || (defined(AARCH32) && !defined(JUNO_AARCH32_EL3_RUNTIME))
+/*
+ * For AArch32, BL31 is not applicable.
+ * For AArch64, BL31 is loaded in DRAM.
+ * Put BL2 just below BL1.
+ */
+#define BL2_BASE			(BL1_RW_BASE - PLAT_ARM_MAX_BL2_SIZE)
+#define BL2_LIMIT			BL1_RW_BASE
+#else
+/*
+ * Put BL2 just below BL31.
+ */
+#define BL2_BASE			(BL31_BASE - PLAT_ARM_MAX_BL2_SIZE)
+#define BL2_LIMIT			BL31_BASE
+#endif
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#if ARM_BL31_IN_DRAM
+/*
+ * Put BL31 at the bottom of TZC secured DRAM
+ */
+#define BL31_BASE			ARM_AP_TZC_DRAM1_BASE
+#define BL31_LIMIT			(ARM_AP_TZC_DRAM1_BASE +	\
+						PLAT_ARM_MAX_BL31_SIZE)
+#elif (RESET_TO_BL31)
+/*
+ * Put BL31_BASE in the middle of the Trusted SRAM.
+ */
+#define BL31_BASE			(ARM_TRUSTED_SRAM_BASE + \
+						(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1))
+#define BL31_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
+#else
+/*
+ * Put BL31 at the top of the Trusted SRAM.
+ */
+#define BL31_BASE			(ARM_BL_RAM_BASE +		\
+						ARM_BL_RAM_SIZE -	\
+						PLAT_ARM_MAX_BL31_SIZE)
+#define BL31_PROGBITS_LIMIT		BL1_RW_BASE
+#define BL31_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
+#endif
+
+/*******************************************************************************
+ * BL32 specific defines.
+ ******************************************************************************/
+/*
+ * On ARM standard platforms, the TSP can execute from Trusted SRAM,
+ * Trusted DRAM (if available) or the DRAM region secured by the TrustZone
+ * controller.
+ */
+#if ARM_BL31_IN_DRAM
+# define TSP_SEC_MEM_BASE		(ARM_AP_TZC_DRAM1_BASE +	\
+						PLAT_ARM_MAX_BL31_SIZE)
+# define TSP_SEC_MEM_SIZE		(ARM_AP_TZC_DRAM1_SIZE -	\
+						PLAT_ARM_MAX_BL31_SIZE)
+# define BL32_BASE			(ARM_AP_TZC_DRAM1_BASE +	\
+						PLAT_ARM_MAX_BL31_SIZE)
+# define BL32_LIMIT			(ARM_AP_TZC_DRAM1_BASE +	\
+						ARM_AP_TZC_DRAM1_SIZE)
+#elif ARM_TSP_RAM_LOCATION_ID == ARM_TRUSTED_SRAM_ID
+# define TSP_SEC_MEM_BASE		ARM_BL_RAM_BASE
+# define TSP_SEC_MEM_SIZE		ARM_BL_RAM_SIZE
+# define TSP_PROGBITS_LIMIT		BL2_BASE
+# define BL32_BASE			ARM_BL_RAM_BASE
+# define BL32_LIMIT			BL31_BASE
+#elif ARM_TSP_RAM_LOCATION_ID == ARM_TRUSTED_DRAM_ID
+# define TSP_SEC_MEM_BASE		PLAT_ARM_TRUSTED_DRAM_BASE
+# define TSP_SEC_MEM_SIZE		PLAT_ARM_TRUSTED_DRAM_SIZE
+# define BL32_BASE			PLAT_ARM_TRUSTED_DRAM_BASE
+# define BL32_LIMIT			(PLAT_ARM_TRUSTED_DRAM_BASE	\
+						+ (1 << 21))
+#elif ARM_TSP_RAM_LOCATION_ID == ARM_DRAM_ID
+# define TSP_SEC_MEM_BASE		ARM_AP_TZC_DRAM1_BASE
+# define TSP_SEC_MEM_SIZE		ARM_AP_TZC_DRAM1_SIZE
+# define BL32_BASE			ARM_AP_TZC_DRAM1_BASE
+# define BL32_LIMIT			(ARM_AP_TZC_DRAM1_BASE +	\
+						ARM_AP_TZC_DRAM1_SIZE)
+#else
+# error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
+#endif
+
+/* BL32 is mandatory in AArch32 */
+#ifndef AARCH32
+#ifdef SPD_none
+#undef BL32_BASE
+#endif /* SPD_none */
+#endif
+
+/*******************************************************************************
+ * FWU Images: NS_BL1U, BL2U & NS_BL2U defines.
+ ******************************************************************************/
+#define BL2U_BASE			BL2_BASE
+#if ARM_BL31_IN_DRAM || (defined(AARCH32) && !defined(JUNO_AARCH32_EL3_RUNTIME))
+/*
+ * For AArch32, BL31 is not applicable.
+ * For AArch64, BL31 is loaded in DRAM.
+ * BL2U extends up to BL1.
+ */
+#define BL2U_LIMIT			BL1_RW_BASE
+#else
+/* BL2U extends up to BL31. */
+#define BL2U_LIMIT			BL31_BASE
+#endif
+#define NS_BL2U_BASE			ARM_NS_DRAM1_BASE
+#define NS_BL1U_BASE			(PLAT_ARM_NVM_BASE + 0x03EB8000)
+
+/*
+ * ID of the secure physical generic timer interrupt used by the TSP.
+ */
+#define TSP_IRQ_SEC_PHY_TIMER		ARM_IRQ_SEC_PHY_TIMER
+
+
+/*
+ * One cache line needed for bakery locks on ARM platforms
+ */
+#define PLAT_PERCPU_BAKERY_LOCK_SIZE		(1 * CACHE_WRITEBACK_GRANULE)
+
+
+#endif /* __ARM_DEF_H__ */
diff --git a/include/plat/arm/common/arm_sip_svc.h b/include/plat/arm/common/arm_sip_svc.h
new file mode 100644
index 0000000..68375af
--- /dev/null
+++ b/include/plat/arm/common/arm_sip_svc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_SIP_SVC_H__
+#define __ARM_SIP_SVC_H__
+
+/* SMC function IDs for SiP Service queries */
+
+#define ARM_SIP_SVC_CALL_COUNT		0x8200ff00
+#define ARM_SIP_SVC_UID			0x8200ff01
+/*					0x8200ff02 is reserved */
+#define ARM_SIP_SVC_VERSION		0x8200ff03
+
+/* Function ID for requesting state switch of lower EL */
+#define ARM_SIP_SVC_EXE_STATE_SWITCH	0x82000020
+
+/* ARM SiP Service Calls version numbers */
+#define ARM_SIP_SVC_VERSION_MAJOR		0x0
+#define ARM_SIP_SVC_VERSION_MINOR		0x2
+
+#endif /* __ARM_SIP_SVC_H__ */
diff --git a/include/plat/arm/common/arm_xlat_tables.h b/include/plat/arm/common/arm_xlat_tables.h
new file mode 100644
index 0000000..0923ad8
--- /dev/null
+++ b/include/plat/arm/common/arm_xlat_tables.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if ARM_XLAT_TABLES_LIB_V1
+#include <xlat_tables.h>
+#else
+#include <xlat_tables_v2.h>
+#endif /* ARM_XLAT_TABLES_LIB_V1 */
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
new file mode 100644
index 0000000..33d951c
--- /dev/null
+++ b/include/plat/arm/common/plat_arm.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_ARM_H__
+#define __PLAT_ARM_H__
+
+#include <arm_xlat_tables.h>
+#include <bakery_lock.h>
+#include <cassert.h>
+#include <cpu_data.h>
+#include <stdint.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct bl31_params;
+struct meminfo;
+struct image_info;
+
+#define ARM_CASSERT_MMAP						\
+	CASSERT((ARRAY_SIZE(plat_arm_mmap) + ARM_BL_REGIONS)		\
+		<= MAX_MMAP_REGIONS,					\
+		assert_max_mmap_regions);
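+
+/*
+ * Illustrative usage (not part of this patch): a platform port defines its
+ * plat_arm_mmap table and then instantiates the assertion, e.g.:
+ *
+ *   const mmap_region_t plat_arm_mmap[] = {
+ *       ARM_MAP_SHARED_RAM,
+ *       ARM_MAP_NS_DRAM1,
+ *       {0}
+ *   };
+ *   ARM_CASSERT_MMAP
+ *
+ * The CASSERT fails at build time if the table plus the ARM_BL_REGIONS
+ * entries would exceed MAX_MMAP_REGIONS.
+ */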
+
+/*
+ * Utility functions common to ARM standard platforms
+ */
+void arm_setup_page_tables(uintptr_t total_base,
+			size_t total_size,
+			uintptr_t code_start,
+			uintptr_t code_limit,
+			uintptr_t rodata_start,
+			uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+			, uintptr_t coh_start,
+			uintptr_t coh_limit
+#endif
+);
+
+#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+/*
+ * Use this macro to instantiate the lock before it is used by the
+ * arm_lock_xxx() macros below.
+ */
+#define ARM_INSTANTIATE_LOCK	DEFINE_BAKERY_LOCK(arm_lock)
+#define ARM_LOCK_GET_INSTANCE	(&arm_lock)
+/*
+ * These are wrapper macros to the Coherent Memory Bakery Lock API.
+ */
+#define arm_lock_init()		bakery_lock_init(&arm_lock)
+#define arm_lock_get()		bakery_lock_get(&arm_lock)
+#define arm_lock_release()	bakery_lock_release(&arm_lock)
+
+#else
+
+/*
+ * Empty macros for all BL stages other than BL31 and BL32
+ */
+#define ARM_INSTANTIATE_LOCK	static int arm_lock __unused
+#define ARM_LOCK_GET_INSTANCE	0
+#define arm_lock_init()
+#define arm_lock_get()
+#define arm_lock_release()
+
+#endif /* defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32)) */
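+
+/*
+ * Illustrative usage (not part of this patch): a file that needs the lock
+ * instantiates it once at file scope and brackets its critical section with
+ * the wrappers, e.g.:
+ *
+ *   ARM_INSTANTIATE_LOCK;
+ *
+ *   void update_shared_state(void)
+ *   {
+ *       arm_lock_get();
+ *       ... access data shared between CPUs ...
+ *       arm_lock_release();
+ *   }
+ *
+ * update_shared_state() is a hypothetical function; on BL stages other than
+ * BL31/BL32 the wrappers compile away to nothing.
+ */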
+
+#if ARM_RECOM_STATE_ID_ENC
+/*
+ * Macros used to parse state information from State-ID if it is using the
+ * recommended encoding for State-ID.
+ */
+#define ARM_LOCAL_PSTATE_WIDTH		4
+#define ARM_LOCAL_PSTATE_MASK		((1 << ARM_LOCAL_PSTATE_WIDTH) - 1)
+
+/* Macros to construct the composite power state */
+
+/* Make the composite power state parameter up to power level 0 */
+#if PSCI_EXTENDED_STATE_ID
+
+#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
+#else
+#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | \
+		((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
+		((type) << PSTATE_TYPE_SHIFT))
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+/* Make the composite power state parameter up to power level 1 */
+#define arm_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
+		(((lvl1_state) << ARM_LOCAL_PSTATE_WIDTH) | \
+		arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
+
+/* Make the composite power state parameter up to power level 2 */
+#define arm_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, pwr_lvl, type) \
+		(((lvl2_state) << (ARM_LOCAL_PSTATE_WIDTH * 2)) | \
+		arm_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type))
+
+#endif /* ARM_RECOM_STATE_ID_ENC */
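+
+/*
+ * Illustrative usage (not part of this patch): platforms using the
+ * recommended State-ID encoding typically build their idle-state table from
+ * these helpers, e.g.:
+ *
+ *   const unsigned int arm_pm_idle_states[] = {
+ *       arm_make_pwrstate_lvl0(ARM_LOCAL_STATE_RET, ARM_PWR_LVL0,
+ *                              PSTATE_TYPE_STANDBY),
+ *       arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
+ *                              ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
+ *       0,
+ *   };
+ *
+ * PSTATE_TYPE_STANDBY and PSTATE_TYPE_POWERDOWN come from psci.h.
+ */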
+
+/* ARM State switch error codes */
+#define STATE_SW_E_PARAM		(-2)
+#define STATE_SW_E_DENIED		(-3)
+
+/* IO storage utility functions */
+void arm_io_setup(void);
+
+/* Security utility functions */
+void arm_tzc400_setup(void);
+struct tzc_dmc500_driver_data;
+void arm_tzc_dmc500_setup(struct tzc_dmc500_driver_data *plat_driver_data);
+
+/* Systimer utility function */
+void arm_configure_sys_timer(void);
+
+/* PM utility functions */
+int arm_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state);
+int arm_validate_ns_entrypoint(uintptr_t entrypoint);
+void arm_system_pwr_domain_save(void);
+void arm_system_pwr_domain_resume(void);
+void arm_program_trusted_mailbox(uintptr_t address);
+int arm_psci_read_mem_protect(int *val);
+int arm_nor_psci_write_mem_protect(int val);
+void arm_nor_psci_do_mem_protect(void);
+int arm_psci_mem_protect_chk(uintptr_t base, u_register_t length);
+
+/* Topology utility function */
+int arm_check_mpidr(u_register_t mpidr);
+
+/* BL1 utility functions */
+void arm_bl1_early_platform_setup(void);
+void arm_bl1_platform_setup(void);
+void arm_bl1_plat_arch_setup(void);
+
+/* BL2 utility functions */
+void arm_bl2_early_platform_setup(struct meminfo *mem_layout);
+void arm_bl2_platform_setup(void);
+void arm_bl2_plat_arch_setup(void);
+uint32_t arm_get_spsr_for_bl32_entry(void);
+uint32_t arm_get_spsr_for_bl33_entry(void);
+int arm_bl2_handle_post_image_load(unsigned int image_id);
+
+/* BL2U utility functions */
+void arm_bl2u_early_platform_setup(struct meminfo *mem_layout,
+				void *plat_info);
+void arm_bl2u_platform_setup(void);
+void arm_bl2u_plat_arch_setup(void);
+
+/* BL31 utility functions */
+#if LOAD_IMAGE_V2
+void arm_bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);
+#else
+void arm_bl31_early_platform_setup(struct bl31_params *from_bl2,
+				void *plat_params_from_bl2);
+#endif /* LOAD_IMAGE_V2 */
+void arm_bl31_platform_setup(void);
+void arm_bl31_plat_runtime_setup(void);
+void arm_bl31_plat_arch_setup(void);
+
+/* TSP utility functions */
+void arm_tsp_early_platform_setup(void);
+
+/* SP_MIN utility functions */
+void arm_sp_min_early_platform_setup(void *from_bl2,
+		void *plat_params_from_bl2);
+void arm_sp_min_plat_runtime_setup(void);
+
+/* FIP TOC validity check */
+int arm_io_is_toc_valid(void);
+
+/*
+ * Mandatory functions required in ARM standard platforms
+ */
+unsigned int plat_arm_get_cluster_core_count(u_register_t mpidr);
+void plat_arm_gic_driver_init(void);
+void plat_arm_gic_init(void);
+void plat_arm_gic_cpuif_enable(void);
+void plat_arm_gic_cpuif_disable(void);
+void plat_arm_gic_redistif_on(void);
+void plat_arm_gic_redistif_off(void);
+void plat_arm_gic_pcpu_init(void);
+void plat_arm_gic_save(void);
+void plat_arm_gic_resume(void);
+void plat_arm_security_setup(void);
+void plat_arm_pwrc_setup(void);
+void plat_arm_interconnect_init(void);
+void plat_arm_interconnect_enter_coherency(void);
+void plat_arm_interconnect_exit_coherency(void);
+
+#if ARM_PLAT_MT
+unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr);
+#endif
+
+#if LOAD_IMAGE_V2
+/*
+ * This function is called after loading the SCP_BL2 image and is used to perform
+ * any platform-specific actions required to handle the SCP firmware.
+ */
+int plat_arm_bl2_handle_scp_bl2(struct image_info *scp_bl2_image_info);
+#endif
+
+/*
+ * Optional functions required in ARM standard platforms
+ */
+void plat_arm_io_setup(void);
+int plat_arm_get_alt_image_source(
+	unsigned int image_id,
+	uintptr_t *dev_handle,
+	uintptr_t *image_spec);
+unsigned int plat_arm_calc_core_pos(u_register_t mpidr);
+const mmap_region_t *plat_arm_get_mmap(void);
+
+/* Allow platform to override psci_pm_ops during runtime */
+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops);
+
+/* Execution state switch in ARM platforms */
+int arm_execution_state_switch(unsigned int smc_fid,
+		uint32_t pc_hi,
+		uint32_t pc_lo,
+		uint32_t cookie_hi,
+		uint32_t cookie_lo,
+		void *handle);
+
+/* Disable Statistical Profiling Extensions helper */
+void arm_disable_spe(void);
+
+#endif /* __PLAT_ARM_H__ */
diff --git a/include/plat/arm/css/common/aarch64/css_macros.S b/include/plat/arm/css/common/aarch64/css_macros.S
new file mode 100644
index 0000000..b669ef6
--- /dev/null
+++ b/include/plat/arm/css/common/aarch64/css_macros.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __CSS_MACROS_S__
+#define __CSS_MACROS_S__
+
+#include <arm_macros.S>
+#include <platform_def.h>
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant GIC registers whenever an
+	 * unhandled exception is taken in BL31.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro css_print_gic_regs
+	mov_imm	x16, PLAT_ARM_GICD_BASE
+	mov_imm	x17, PLAT_ARM_GICC_BASE
+	arm_print_gic_regs
+	.endm
+
+
+#endif /* __CSS_MACROS_S__ */
diff --git a/include/plat/arm/css/common/css_def.h b/include/plat/arm/css/common/css_def.h
new file mode 100644
index 0000000..a2c0b4e
--- /dev/null
+++ b/include/plat/arm/css/common/css_def.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_DEF_H__
+#define __CSS_DEF_H__
+
+#include <arm_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <tzc400.h>
+
+/*************************************************************************
+ * Definitions common to all ARM Compute SubSystems (CSS)
+ *************************************************************************/
+#define NSROM_BASE			0x1f000000
+#define NSROM_SIZE			0x00001000
+
+/* Following covers CSS Peripherals excluding NSROM and NSRAM  */
+#define CSS_DEVICE_BASE			0x20000000
+#define CSS_DEVICE_SIZE			0x0e000000
+
+#define NSRAM_BASE			0x2e000000
+#define NSRAM_SIZE			0x00008000
+
+/* System Security Control Registers */
+#define SSC_REG_BASE			0x2a420000
+#define SSC_GPRETN			(SSC_REG_BASE + 0x030)
+
+/* The slave_bootsecure controls access to GPU, DMC and CS. */
+#define CSS_NIC400_SLAVE_BOOTSECURE	8
+
+/* Interrupt handling constants */
+#define CSS_IRQ_MHU			69
+#define CSS_IRQ_GPU_SMMU_0		71
+#define CSS_IRQ_TZC			80
+#define CSS_IRQ_TZ_WDOG			86
+#define CSS_IRQ_SEC_SYS_TIMER		91
+
+/* MHU register offsets */
+#define MHU_CPU_INTR_S_SET_OFFSET	0x308
+
+/*
+ * Define a list of Group 1 Secure interrupt properties as per GICv3
+ * terminology. On a GICv2 system or mode, the interrupts will be treated as
+ * Group 0 interrupts.
+ */
+#define CSS_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(CSS_IRQ_MHU, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(CSS_IRQ_GPU_SMMU_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(CSS_IRQ_TZC, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(CSS_IRQ_TZ_WDOG, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(CSS_IRQ_SEC_SYS_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+
+#if CSS_USE_SCMI_SDS_DRIVER
+/* Memory region for shared data storage */
+#define PLAT_ARM_SDS_MEM_BASE		ARM_SHARED_RAM_BASE
+#define PLAT_ARM_SDS_MEM_SIZE_MAX	0xDC0 /* 3520 bytes */
+/*
+ * The SCMI Channel is placed right after the SDS region
+ */
+#define CSS_SCMI_PAYLOAD_BASE		(PLAT_ARM_SDS_MEM_BASE + PLAT_ARM_SDS_MEM_SIZE_MAX)
+#define CSS_SCMI_MHU_DB_REG_OFF		MHU_CPU_INTR_S_SET_OFFSET
+
+/* Trusted mailbox base address common to all CSS */
+/* If SDS is present, then mailbox is at top of SRAM */
+#define PLAT_ARM_TRUSTED_MAILBOX_BASE	(ARM_SHARED_RAM_BASE + ARM_SHARED_RAM_SIZE - 0x8)
+
+/* Number of retries for SCP_RAM_READY flag */
+#define CSS_SCP_READY_10US_RETRIES		1000000 /* Effective timeout of 10000 ms */
+
+#else
+/*
+ * SCP <=> AP boot configuration
+ *
+ * The SCP/AP boot configuration is a 32-bit word located at a known offset from
+ * the start of the Trusted SRAM.
+ *
+ * Note that the value stored at this address is only valid at boot time, before
+ * the SCP_BL2 image is transferred to SCP.
+ */
+#define SCP_BOOT_CFG_ADDR		PLAT_CSS_SCP_COM_SHARED_MEM_BASE
+
+/* Trusted mailbox base address common to all CSS */
+/* If SDS is not present, then the mailbox is at the bottom of SRAM */
+#define PLAT_ARM_TRUSTED_MAILBOX_BASE	ARM_TRUSTED_SRAM_BASE
+
+#endif /* CSS_USE_SCMI_SDS_DRIVER */
+
+#define CSS_MAP_DEVICE			MAP_REGION_FLAT(		\
+						CSS_DEVICE_BASE,	\
+						CSS_DEVICE_SIZE,	\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+#define CSS_MAP_NSRAM			MAP_REGION_FLAT(		\
+						NSRAM_BASE,	\
+						NSRAM_SIZE,	\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+/* Platform ID address */
+#define SSC_VERSION_OFFSET			0x040
+
+#define SSC_VERSION_CONFIG_SHIFT		28
+#define SSC_VERSION_MAJOR_REV_SHIFT		24
+#define SSC_VERSION_MINOR_REV_SHIFT		20
+#define SSC_VERSION_DESIGNER_ID_SHIFT		12
+#define SSC_VERSION_PART_NUM_SHIFT		0x0
+#define SSC_VERSION_CONFIG_MASK			0xf
+#define SSC_VERSION_MAJOR_REV_MASK		0xf
+#define SSC_VERSION_MINOR_REV_MASK		0xf
+#define SSC_VERSION_DESIGNER_ID_MASK		0xff
+#define SSC_VERSION_PART_NUM_MASK		0xfff
+
+/* SSC debug configuration registers */
+#define SSC_DBGCFG_SET		0x14
+#define SSC_DBGCFG_CLR		0x18
+
+#define SPIDEN_INT_CLR_SHIFT	6
+#define SPIDEN_SEL_SET_SHIFT	7
+
+#ifndef __ASSEMBLY__
+
+/* SSC_VERSION related accessors */
+
+/* Returns the part number of the platform */
+#define GET_SSC_VERSION_PART_NUM(val)				\
+		(((val) >> SSC_VERSION_PART_NUM_SHIFT) &	\
+		SSC_VERSION_PART_NUM_MASK)
+
+/* Returns the configuration number of the platform */
+#define GET_SSC_VERSION_CONFIG(val)				\
+		(((val) >> SSC_VERSION_CONFIG_SHIFT) &		\
+		SSC_VERSION_CONFIG_MASK)
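+
+/*
+ * Illustrative usage (not part of this patch): the platform can read the
+ * SSC_VERSION register and decode it with the accessors above, e.g.:
+ *
+ *   unsigned int ver = mmio_read_32(SSC_REG_BASE + SSC_VERSION_OFFSET);
+ *   unsigned int part_num = GET_SSC_VERSION_PART_NUM(ver);
+ *   unsigned int config = GET_SSC_VERSION_CONFIG(ver);
+ *
+ * mmio_read_32() is the common MMIO accessor from mmio.h.
+ */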
+
+#endif /* __ASSEMBLY__ */
+
+/*************************************************************************
+ * Required platform porting definitions common to all
+ * ARM Compute SubSystems (CSS)
+ ************************************************************************/
+
+/*
+ * The loading of SCP images (SCP_BL2 or SCP_BL2U) is done if their
+ * respective base addresses are defined (i.e. SCP_BL2_BASE, SCP_BL2U_BASE).
+ * Hence, `CSS_LOAD_SCP_IMAGES` needs to be set to 1 if BL2 needs to load
+ * an SCP_BL2/SCP_BL2U image.
+ */
+#if CSS_LOAD_SCP_IMAGES
+
+#if ARM_BL31_IN_DRAM
+#error "SCP_BL2 is not expected to be loaded by BL2 for ARM_BL31_IN_DRAM config"
+#endif
+
+/*
+ * Load address of SCP_BL2 in CSS platform ports.
+ * SCP_BL2 is loaded to the same place as BL31, but it must not overwrite BL1
+ * RW data. Once SCP_BL2 is transferred to the SCP, it is discarded and BL31
+ * is loaded over the top.
+ */
+#define SCP_BL2_BASE			(BL1_RW_BASE - PLAT_CSS_MAX_SCP_BL2_SIZE)
+#define SCP_BL2_LIMIT			BL1_RW_BASE
+
+#define SCP_BL2U_BASE			(BL1_RW_BASE - PLAT_CSS_MAX_SCP_BL2U_SIZE)
+#define SCP_BL2U_LIMIT			BL1_RW_BASE
+#endif /* CSS_LOAD_SCP_IMAGES */
+
+/* Load address of Non-Secure Image for CSS platform ports */
+#define PLAT_ARM_NS_IMAGE_OFFSET	0xE0000000
+
+/* TZC related constants */
+#define PLAT_ARM_TZC_FILTERS		TZC_400_REGION_ATTR_FILTER_BIT_ALL
+
+/*
+ * Parsing of CPU and Cluster states, as returned by 'Get CSS Power State' SCP
+ * command
+ */
+#define CSS_CLUSTER_PWR_STATE_ON	0
+#define CSS_CLUSTER_PWR_STATE_OFF	3
+
+#define CSS_CPU_PWR_STATE_ON		1
+#define CSS_CPU_PWR_STATE_OFF		0
+#define CSS_CPU_PWR_STATE(state, n)	(((state) >> (n)) & 1)
+
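+/*
+ * Illustrative usage (not part of this patch): given the power-state word
+ * returned by the SCP for a cluster, the state of CPU 'n' in that cluster
+ * can be checked as:
+ *
+ *   if (CSS_CPU_PWR_STATE(state, n) == CSS_CPU_PWR_STATE_ON)
+ *       ... CPU n is powered on ...
+ */
+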
+#endif /* __CSS_DEF_H__ */
diff --git a/include/plat/arm/css/common/css_pm.h b/include/plat/arm/css/common/css_pm.h
new file mode 100644
index 0000000..3842875
--- /dev/null
+++ b/include/plat/arm/css/common/css_pm.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_PM_H__
+#define __CSS_PM_H__
+
+#include <cdefs.h>
+#include <psci.h>
+#include <types.h>
+
+/* System power domain at level 2, as currently implemented by CSS platforms */
+#define CSS_SYSTEM_PWR_DMN_LVL		ARM_PWR_LVL2
+
+/* Macros to read the CSS power domain state */
+#define CSS_CORE_PWR_STATE(state)	(state)->pwr_domain_state[ARM_PWR_LVL0]
+#define CSS_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[ARM_PWR_LVL1]
+#define CSS_SYSTEM_PWR_STATE(state)	\
+			((PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) ?\
+			(state)->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] : 0)
+
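+/*
+ * Illustrative usage (not part of this patch): a PSCI handler typically
+ * inspects the composite target state with the macros above, e.g.:
+ *
+ *   if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+ *       ... prepare for a system-level power down ...
+ */
+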
+int css_pwr_domain_on(u_register_t mpidr);
+void css_pwr_domain_on_finish(const psci_power_state_t *target_state);
+void css_pwr_domain_off(const psci_power_state_t *target_state);
+void css_pwr_domain_suspend(const psci_power_state_t *target_state);
+void css_pwr_domain_suspend_finish(
+				const psci_power_state_t *target_state);
+void __dead2 css_system_off(void);
+void __dead2 css_system_reset(void);
+void css_cpu_standby(plat_local_state_t cpu_state);
+void css_get_sys_suspend_power_state(psci_power_state_t *req_state);
+int css_node_hw_state(u_register_t mpidr, unsigned int power_level);
+
+#endif /* __CSS_PM_H__ */
diff --git a/include/plat/arm/soc/common/soc_css.h b/include/plat/arm/soc/common/soc_css.h
new file mode 100644
index 0000000..897bf2e
--- /dev/null
+++ b/include/plat/arm/soc/common/soc_css.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_CSS_H__
+#define __SOC_CSS_H__
+
+/*
+ * Utility functions for ARM CSS SoCs
+ */
+void soc_css_init_nic400(void);
+void soc_css_init_pcie(void);
+
+static inline void soc_css_security_setup(void)
+{
+	soc_css_init_nic400();
+	soc_css_init_pcie();
+}
+
+
+#endif /* __SOC_CSS_H__ */
diff --git a/include/plat/arm/soc/common/soc_css_def.h b/include/plat/arm/soc/common/soc_css_def.h
new file mode 100644
index 0000000..3206f4e
--- /dev/null
+++ b/include/plat/arm/soc/common/soc_css_def.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_CSS_DEF_H__
+#define __SOC_CSS_DEF_H__
+
+#include <common_def.h>
+#include <utils_def.h>
+
+
+/*
+ * Definitions common to all ARM CSS SoCs
+ */
+
+/* Following covers ARM CSS SoC Peripherals and PCIe expansion area */
+#define SOC_CSS_DEVICE_BASE		0x40000000
+#define SOC_CSS_DEVICE_SIZE		0x40000000
+#define SOC_CSS_PCIE_CONTROL_BASE	0x7ff20000
+
+/* PL011 UART related constants */
+#define SOC_CSS_UART0_BASE		0x7ff80000
+#define SOC_CSS_UART1_BASE		0x7ff70000
+
+#define SOC_CSS_UART0_CLK_IN_HZ		7372800
+#define SOC_CSS_UART1_CLK_IN_HZ		7372800
+
+/* SoC NIC-400 Global Programmers View (GPV) */
+#define SOC_CSS_NIC400_BASE		0x7fd00000
+
+#define SOC_CSS_NIC400_USB_EHCI		0
+#define SOC_CSS_NIC400_TLX_MASTER	1
+#define SOC_CSS_NIC400_USB_OHCI		2
+#define SOC_CSS_NIC400_PL354_SMC	3
+/*
+ * The apb4_bridge controls access to:
+ *   - the PCIe configuration registers
+ *   - the MMU units for USB, HDLCD and DMA
+ */
+#define SOC_CSS_NIC400_APB4_BRIDGE	4
+
+/* Non-volatile counters */
+#define SOC_TRUSTED_NVCTR_BASE		0x7fe70000
+#define TFW_NVCTR_BASE			(SOC_TRUSTED_NVCTR_BASE + 0x0000)
+#define TFW_NVCTR_SIZE			4
+#define NTFW_CTR_BASE			(SOC_TRUSTED_NVCTR_BASE + 0x0004)
+#define NTFW_CTR_SIZE			4
+
+/* Keys */
+#define SOC_KEYS_BASE			0x7fe80000
+#define TZ_PUB_KEY_HASH_BASE		(SOC_KEYS_BASE + 0x0000)
+#define TZ_PUB_KEY_HASH_SIZE		32
+#define HU_KEY_BASE			(SOC_KEYS_BASE + 0x0020)
+#define HU_KEY_SIZE			16
+#define END_KEY_BASE			(SOC_KEYS_BASE + 0x0044)
+#define END_KEY_SIZE			32
+
+#define SOC_CSS_MAP_DEVICE		MAP_REGION_FLAT(		\
+						SOC_CSS_DEVICE_BASE,	\
+						SOC_CSS_DEVICE_SIZE,	\
+						MT_DEVICE | MT_RW | MT_SECURE)
+
+
+/*
+ * The bootsec_bridge controls access to a number of peripherals, e.g. the UARTs.
+ */
+#define SOC_CSS_NIC400_BOOTSEC_BRIDGE	5
+#define SOC_CSS_NIC400_BOOTSEC_BRIDGE_UART1	(1 << 12)
+
+/*
+ * Required platform porting definitions common to all ARM CSS SoCs
+ */
+#if JUNO_AARCH32_EL3_RUNTIME
+/*
+ * The following value is required when initializing the TZC so that it
+ * grants access to the HI_VECTOR (0xFFFF0000) location needed for Juno
+ * AArch32 support.
+ */
+#define PLAT_ARM_SCP_TZC_DRAM1_SIZE	ULL(0x8000)
+#else
+/* 2MB used for SCP DDR retraining */
+#define PLAT_ARM_SCP_TZC_DRAM1_SIZE	ULL(0x00200000)
+#endif
+
+#endif /* __SOC_CSS_DEF_H__ */
diff --git a/include/plat/common/common_def.h b/include/plat/common/common_def.h
new file mode 100644
index 0000000..a841c3d
--- /dev/null
+++ b/include/plat/common/common_def.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __COMMON_DEF_H__
+#define __COMMON_DEF_H__
+
+#include <bl_common.h>
+#include <platform_def.h>
+
+/******************************************************************************
+ * Required platform porting definitions that are expected to be common to
+ * all platforms
+ *****************************************************************************/
+
+/*
+ * Platform binary types for linking
+ */
+#ifdef AARCH32
+#define PLATFORM_LINKER_FORMAT          "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH            arm
+#else
+#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH            aarch64
+#endif /* AARCH32 */
+
+/*
+ * Generic platform constants
+ */
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#if LOAD_IMAGE_V2
+#define BL2_IMAGE_DESC {				\
+	.image_id = BL2_IMAGE_ID,			\
+	SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,	\
+		VERSION_2, image_info_t, 0),		\
+	.image_info.image_base = BL2_BASE,		\
+	.image_info.image_max_size = BL2_LIMIT - BL2_BASE,\
+	SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,	\
+		VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\
+	.ep_info.pc = BL2_BASE,				\
+}
+#else /* LOAD_IMAGE_V2 */
+#define BL2_IMAGE_DESC {				\
+	.image_id = BL2_IMAGE_ID,			\
+	SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,	\
+		VERSION_1, image_info_t, 0),		\
+	.image_info.image_base = BL2_BASE,		\
+	SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,	\
+		VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),\
+	.ep_info.pc = BL2_BASE,				\
+}
+#endif /* LOAD_IMAGE_V2 */
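+
+/*
+ * Illustrative usage (not part of this patch): BL1 platform code typically
+ * instantiates the descriptor as a static object, e.g.:
+ *
+ *   static image_desc_t bl2_img_desc = BL2_IMAGE_DESC;
+ *
+ * and returns its address when bl1_plat_get_image_desc() is queried for
+ * BL2_IMAGE_ID.
+ */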
+
+/*
+ * The following constants identify the extents of the code & read-only data
+ * regions. These addresses are used by the MMU setup code and therefore they
+ * must be page-aligned.
+ *
+ * When the code and read-only data are mapped as a single atomic section
+ * (i.e. when SEPARATE_CODE_AND_RODATA=0) then we treat the whole section as
+ * code by specifying the read-only data section as empty.
+ *
+ * BL1 is different from the other images in that its read-write data
+ * originally lives in Trusted ROM and needs to be relocated to Trusted SRAM at
+ * run-time. Therefore, the read-write data in ROM can be mapped with the same
+ * memory attributes as the read-only data region. For this reason, BL1 uses
+ * different macros.
+ *
+ * Note that BL1_ROM_END is not necessarily aligned on a page boundary as it
+ * just points to the end of BL1's actual content in Trusted ROM. Therefore it
+ * needs to be rounded up to the next page size in order to map the whole last
+ * page of it with the right memory attributes.
+ */
+#if SEPARATE_CODE_AND_RODATA
+#define BL_CODE_BASE		(unsigned long)(&__TEXT_START__)
+#define BL_CODE_END		(unsigned long)(&__TEXT_END__)
+#define BL_RO_DATA_BASE		(unsigned long)(&__RODATA_START__)
+#define BL_RO_DATA_END		(unsigned long)(&__RODATA_END__)
+
+#define BL1_CODE_END		BL_CODE_END
+#define BL1_RO_DATA_BASE	(unsigned long)(&__RODATA_START__)
+#define BL1_RO_DATA_END		round_up(BL1_ROM_END, PAGE_SIZE)
+#else
+#define BL_CODE_BASE		(unsigned long)(&__RO_START__)
+#define BL_CODE_END		(unsigned long)(&__RO_END__)
+#define BL_RO_DATA_BASE		0
+#define BL_RO_DATA_END		0
+
+#define BL1_CODE_END		round_up(BL1_ROM_END, PAGE_SIZE)
+#define BL1_RO_DATA_BASE	0
+#define BL1_RO_DATA_END		0
+#endif /* SEPARATE_CODE_AND_RODATA */
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
+#define BL_COHERENT_RAM_END	(unsigned long)(&__COHERENT_RAM_END__)
+
+#endif /* __COMMON_DEF_H__ */
diff --git a/include/plat/common/plat_config.h b/include/plat/common/plat_config.h
deleted file mode 100644
index 20d3c03..0000000
--- a/include/plat/common/plat_config.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PLAT_CONFIG_H__
-#define __PLAT_CONFIG_H__
-
-#define CONFIG_GICC_BASE_OFFSET		0x4
-
-
-#ifndef __ASSEMBLY__
-
-#include <cassert.h>
-
-
-enum plat_config_flags {
-	/* Whether Base FVP memory map is in use */
-	CONFIG_BASE_MMAP		= 0x1,
-	/* Whether CCI should be enabled */
-	CONFIG_HAS_CCI			= 0x2,
-	/* Whether TZC should be configured */
-	CONFIG_HAS_TZC			= 0x4
-};
-
-typedef struct plat_config {
-	unsigned int gicd_base;
-	unsigned int gicc_base;
-	unsigned int gich_base;
-	unsigned int gicv_base;
-	unsigned int max_aff0;
-	unsigned int max_aff1;
-	unsigned long flags;
-} plat_config_t;
-
-inline const plat_config_t *get_plat_config();
-
-
-CASSERT(CONFIG_GICC_BASE_OFFSET == __builtin_offsetof(
-	plat_config_t, gicc_base),
-	assert_gicc_base_offset_mismatch);
-
-/* If used, plat_config must be defined and populated in the platform port*/
-extern plat_config_t plat_config;
-
-inline const plat_config_t *get_plat_config()
-{
-	return &plat_config;
-}
-
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __PLAT_CONFIG_H__ */
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 66d7bc9..e2bfa50 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -1,47 +1,36 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __PLATFORM_H__
 #define __PLATFORM_H__
 
+#include <psci.h>
 #include <stdint.h>
+#include <types.h>
 
 
 /*******************************************************************************
  * Forward declarations
  ******************************************************************************/
-struct plat_pm_ops;
+struct auth_img_desc_s;
 struct meminfo;
 struct image_info;
 struct entry_point_info;
 struct bl31_params;
+struct image_desc;
+struct bl_load_info;
+struct bl_params;
+
+/*******************************************************************************
+ * plat_get_rotpk_info() flags
+ ******************************************************************************/
+#define ROTPK_IS_HASH			(1 << 0)
+/* Flag used to skip verification of the certificate ROTPK while the platform
+   ROTPK is not deployed */
+#define ROTPK_NOT_DEPLOYED		(1 << 1)
 
 /*******************************************************************************
  * Function declarations
@@ -49,11 +38,25 @@
 /*******************************************************************************
  * Mandatory common functions
  ******************************************************************************/
-uint64_t plat_get_syscnt_freq(void);
-int plat_get_image_source(const char *image_name,
+unsigned long long plat_get_syscnt_freq(void) __deprecated;
+unsigned int plat_get_syscnt_freq2(void);
+
+int plat_get_image_source(unsigned int image_id,
 			uintptr_t *dev_handle,
 			uintptr_t *image_spec);
-unsigned long plat_get_ns_image_entrypoint(void);
+uintptr_t plat_get_ns_image_entrypoint(void);
+unsigned int plat_my_core_pos(void);
+int plat_core_pos_by_mpidr(u_register_t mpidr);
+
+#if STACK_PROTECTOR_ENABLED
+/*
+ * Return a new value to be used for the stack protection's canary.
+ *
+ * Ideally, this value is a random number that is impossible to predict by an
+ * attacker.
+ */
+u_register_t plat_get_stack_protector_canary(void);
+#endif /* STACK_PROTECTOR_ENABLED */
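+
+/*
+ * Illustrative sketch (not part of this patch): lacking a hardware entropy
+ * source, a platform might derive a weak canary from the generic timer,
+ * e.g.:
+ *
+ *   u_register_t plat_get_stack_protector_canary(void)
+ *   {
+ *       return SOME_CANARY_SEED ^ read_cntpct_el0();
+ *   }
+ *
+ * SOME_CANARY_SEED is a hypothetical build-time constant; a production
+ * platform should use a true random number source instead.
+ */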
 
 /*******************************************************************************
  * Mandatory interrupt management functions
@@ -67,13 +70,38 @@
 				     uint32_t security_state);
 
 /*******************************************************************************
+ * Optional interrupt management functions, depending on chosen EL3 components.
+ ******************************************************************************/
+unsigned int plat_ic_get_running_priority(void);
+int plat_ic_is_spi(unsigned int id);
+int plat_ic_is_ppi(unsigned int id);
+int plat_ic_is_sgi(unsigned int id);
+unsigned int plat_ic_get_interrupt_active(unsigned int id);
+void plat_ic_disable_interrupt(unsigned int id);
+void plat_ic_enable_interrupt(unsigned int id);
+int plat_ic_has_interrupt_type(unsigned int type);
+void plat_ic_set_interrupt_type(unsigned int id, unsigned int type);
+void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority);
+void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target);
+void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
+		u_register_t mpidr);
+void plat_ic_set_interrupt_pending(unsigned int id);
+void plat_ic_clear_interrupt_pending(unsigned int id);
+unsigned int plat_ic_set_priority_mask(unsigned int mask);
+
+/*******************************************************************************
  * Optional common functions (may be overridden)
  ******************************************************************************/
-unsigned int platform_get_core_pos(unsigned long mpidr);
-unsigned long platform_get_stack(unsigned long mpidr);
-void plat_report_exception(unsigned long);
+uintptr_t plat_get_my_stack(void);
+void plat_report_exception(unsigned int exception_type);
 int plat_crash_console_init(void);
 int plat_crash_console_putc(int c);
+int plat_crash_console_flush(void);
+void plat_error_handler(int err) __dead2;
+void plat_panic_handler(void) __dead2;
+const char *plat_log_get_prefix(unsigned int log_level);
+void bl2_plat_preload_setup(void);
+int plat_try_next_boot_source(void);
 
 /*******************************************************************************
  * Mandatory BL1 functions
@@ -84,11 +112,11 @@
 struct meminfo *bl1_plat_sec_mem_layout(void);
 
 /*
- * This function allows the platform to change the entrypoint information for
- * BL2, after BL1 has loaded BL2 into memory but before BL2 is executed.
+ * The following function is mandatory when the
+ * firmware update feature is used.
  */
-void bl1_plat_set_bl2_ep_info(struct image_info *image,
-			      struct entry_point_info *ep);
+int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
+		unsigned int flags);
 
 /*******************************************************************************
  * Optional BL1 functions (may be overridden)
@@ -96,6 +124,25 @@
 void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout,
 			     struct meminfo *bl2_mem_layout);
 
+/*
+ * The following functions are used for image loading process in BL1.
+ */
+void bl1_plat_set_ep_info(unsigned int image_id,
+		struct entry_point_info *ep_info);
+/*
+ * The following functions are mandatory when firmware update
+ * feature is used and optional otherwise.
+ */
+unsigned int bl1_plat_get_next_image_id(void);
+struct image_desc *bl1_plat_get_image_desc(unsigned int image_id);
+
+/*
+ * The following functions are used by firmware update
+ * feature and may optionally be overridden.
+ */
+__dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved);
+
+
 /*******************************************************************************
  * Mandatory BL2 functions
  ******************************************************************************/
@@ -104,9 +151,18 @@
 void bl2_platform_setup(void);
 struct meminfo *bl2_plat_sec_mem_layout(void);
 
+#if LOAD_IMAGE_V2
+/*
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ */
+int bl2_plat_handle_post_image_load(unsigned int image_id);
+
+#else /* LOAD_IMAGE_V2 */
+
 /*
  * This function returns a pointer to the shared memory that the platform has
- * kept aside to pass trusted firmware related information that BL3-1
+ * kept aside to pass trusted firmware related information that BL31
  * could need
  */
 struct bl31_params *bl2_plat_get_bl31_params(void);
@@ -119,14 +175,14 @@
 
 /*
  * This function flushes to main memory all the params that are
- * passed to BL3-1
+ * passed to BL31
  */
 void bl2_plat_flush_bl31_params(void);
 
 /*
  * The next 2 functions allow the platform to change the entrypoint information
- * for the mandatory 3rd level BL images, BL3-1 and BL3-3. This is done after
- * BL2 has loaded those images into memory but before BL3-1 is executed.
+ * for the mandatory 3rd level BL images, BL31 and BL33. This is done after
+ * BL2 has loaded those images into memory but before BL31 is executed.
  */
 void bl2_plat_set_bl31_ep_info(struct image_info *image,
 			       struct entry_point_info *ep);
@@ -134,67 +190,161 @@
 void bl2_plat_set_bl33_ep_info(struct image_info *image,
 			       struct entry_point_info *ep);
 
-/* Gets the memory layout for BL3-3 */
+/* Gets the memory layout for BL33 */
 void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info);
 
 /*******************************************************************************
- * Conditionally mandatory BL2 functions: must be implemented if BL3-0 image
+ * Conditionally mandatory BL2 functions: must be implemented if SCP_BL2 image
  * is supported
  ******************************************************************************/
-/* Gets the memory layout for BL3-0 */
-void bl2_plat_get_bl30_meminfo(struct meminfo *mem_info);
+/* Gets the memory layout for SCP_BL2 */
+void bl2_plat_get_scp_bl2_meminfo(struct meminfo *mem_info);
 
 /*
- * This function is called after loading BL3-0 image and it is used to perform
+ * This function is called after loading the SCP_BL2 image and is used to perform
  * any platform-specific actions required to handle the SCP firmware.
  */
-int bl2_plat_handle_bl30(struct image_info *bl30_image_info);
+int bl2_plat_handle_scp_bl2(struct image_info *scp_bl2_image_info);
 
 /*******************************************************************************
- * Conditionally mandatory BL2 functions: must be implemented if BL3-2 image
+ * Conditionally mandatory BL2 functions: must be implemented if BL32 image
  * is supported
  ******************************************************************************/
 void bl2_plat_set_bl32_ep_info(struct image_info *image,
 			       struct entry_point_info *ep);
 
-/* Gets the memory layout for BL3-2 */
+/* Gets the memory layout for BL32 */
 void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info);
 
+#endif /* LOAD_IMAGE_V2 */
+
 /*******************************************************************************
  * Optional BL2 functions (may be overridden)
  ******************************************************************************/
 
 /*******************************************************************************
- * Mandatory BL3-1 functions
+ * Mandatory BL2U functions.
  ******************************************************************************/
+void bl2u_early_platform_setup(struct meminfo *mem_layout,
+		void *plat_info);
+void bl2u_plat_arch_setup(void);
+void bl2u_platform_setup(void);
+
+/*******************************************************************************
+ * Conditionally mandatory BL2U functions for CSS platforms.
+ ******************************************************************************/
+/*
+ * This function is used to perform any platform-specific actions required to
+ * handle the SCP_BL2U firmware.
+ */
+int bl2u_plat_handle_scp_bl2u(void);
+
+/*******************************************************************************
+ * Mandatory BL31 functions
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);
+#else
 void bl31_early_platform_setup(struct bl31_params *from_bl2,
 				void *plat_params_from_bl2);
+#endif
 void bl31_plat_arch_setup(void);
 void bl31_platform_setup(void);
+void bl31_plat_runtime_setup(void);
 struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type);
-struct image_info *bl31_plat_get_next_image_image_info(uint32_t type);
 
 /*******************************************************************************
- * Mandatory PSCI functions (BL3-1)
+ * Mandatory PSCI functions (BL31)
  ******************************************************************************/
-int platform_setup_pm(const struct plat_pm_ops **);
-int plat_get_max_afflvl(void);
-unsigned int plat_get_aff_count(unsigned int, unsigned long);
-unsigned int plat_get_aff_state(unsigned int, unsigned long);
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const struct plat_psci_ops **);
+const unsigned char *plat_get_power_domain_tree_desc(void);
 
 /*******************************************************************************
- * Optional BL3-1 functions (may be overridden)
+ * Optional PSCI functions (BL31).
+ ******************************************************************************/
+void plat_psci_stat_accounting_start(const psci_power_state_t *state_info);
+void plat_psci_stat_accounting_stop(const psci_power_state_t *state_info);
+u_register_t plat_psci_stat_get_residency(unsigned int lvl,
+			const psci_power_state_t *state_info,
+			int last_cpu_index);
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+			const plat_local_state_t *states,
+			unsigned int ncpu);
+
+/*******************************************************************************
+ * Optional BL31 functions (may be overridden)
  ******************************************************************************/
 void bl31_plat_enable_mmu(uint32_t flags);
 
 /*******************************************************************************
- * Optional BL3-2 functions (may be overridden)
+ * Optional BL32 functions (may be overridden)
  ******************************************************************************/
 void bl32_plat_enable_mmu(uint32_t flags);
 
 /*******************************************************************************
- * Trusted Boot functions
+ * Trusted Board Boot functions
  ******************************************************************************/
-int plat_match_rotpk(const unsigned char *, unsigned int);
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags);
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr);
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr);
+int plat_set_nv_ctr2(void *cookie, const struct auth_img_desc_s *img_desc,
+		unsigned int nv_ctr);
+
+#if LOAD_IMAGE_V2
+/*******************************************************************************
+ * Mandatory BL image load functions(may be overridden).
+ ******************************************************************************/
+/*
+ * This function returns pointer to the list of images that the
+ * platform has populated to load.
+ */
+struct bl_load_info *plat_get_bl_image_load_info(void);
+
+/*
+ * This function returns a pointer to the shared memory that the
+ * platform has kept aside to pass trusted firmware related
+ * information that next BL image could need.
+ */
+struct bl_params *plat_get_next_bl_params(void);
+
+/*
+ * This function flushes to main memory all the params that are
+ * passed to next image.
+ */
+void plat_flush_next_bl_params(void);
+
+#endif /* LOAD_IMAGE_V2 */
+
+#if ENABLE_PLAT_COMPAT
+/*
+ * The below declarations are to enable compatibility for the platform ports
+ * using the old platform interface.
+ */
+
+/*******************************************************************************
+ * Optional common functions (may be overridden)
+ ******************************************************************************/
+unsigned int platform_get_core_pos(unsigned long mpidr);
+
+/*******************************************************************************
+ * Mandatory PSCI Compatibility functions (BL31)
+ ******************************************************************************/
+int platform_setup_pm(const plat_pm_ops_t **);
+
+unsigned int plat_get_aff_count(unsigned int, unsigned long);
+unsigned int plat_get_aff_state(unsigned int, unsigned long);
+#else /* ENABLE_PLAT_COMPAT */
+/*
+ * The function below enables Trusted Firmware components, such as SPDs that
+ * have not migrated to the new platform API, to compile on platforms which
+ * have the compatibility layer disabled.
+ */
+unsigned int platform_get_core_pos(unsigned long mpidr) __deprecated;
+
+#endif /* ENABLE_PLAT_COMPAT */
 
 #endif /* __PLATFORM_H__ */
+
diff --git a/include/services/std_svc.h b/include/services/std_svc.h
new file mode 100644
index 0000000..9fe70cc
--- /dev/null
+++ b/include/services/std_svc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT		0x8400ff00
+#define ARM_STD_SVC_UID			0x8400ff01
+/*					0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION		0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR		0x0
+#define STD_SVC_VERSION_MINOR		0x1
+
+/*
+ * Get the ARM Standard Service argument from the EL3 Runtime.
+ * This function must be implemented by the EL3 Runtime.
+ * `svc_mask` is a bit mask identifying the range of SMC
+ * function IDs available to the service.
+ */
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask);
+
+#endif /* __STD_SVC_H__ */
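As a rough illustration of how the query IDs above are typically answered, a hypothetical handler is sketched below. The wrapper function, the call count, and the way the version is packed into the return value are illustrative only; the real dispatcher and return convention live in the runtime services framework.

#include <stdint.h>
#include <std_svc.h>

/* Illustrative query handler; only the macros come from std_svc.h. */
static uint64_t handle_std_svc_query(uint32_t smc_fid)
{
	switch (smc_fid) {
	case ARM_STD_SVC_CALL_COUNT:
		return 4;		/* assumed number of implemented calls */
	case ARM_STD_SVC_UID:
		return 0;		/* in practice the UID spans x0-x3 */
	case ARM_STD_SVC_VERSION:
		return ((uint64_t)STD_SVC_VERSION_MAJOR << 32) |
			STD_SVC_VERSION_MINOR;	/* packing is illustrative */
	default:
		return (uint64_t)-1;	/* unknown function ID */
	}
}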
diff --git a/include/stdlib/assert.h b/include/stdlib/assert.h
deleted file mode 100644
index 5621f8c..0000000
--- a/include/stdlib/assert.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*-
- * Copyright (c) 1992, 1993
- *	The Regents of the University of California.  All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)assert.h	8.2 (Berkeley) 1/21/94
- * $FreeBSD$
- */
-
-#include <sys/cdefs.h>
-
-/*
- * Unlike other ANSI header files, <assert.h> may usefully be included
- * multiple times, with and without NDEBUG defined.
- */
-
-#undef assert
-#undef _assert
-
-#ifdef NDEBUG
-#define	assert(e)	((void)0)
-#define	_assert(e)	((void)0)
-#else
-#define	_assert(e)	assert(e)
-
-#define	assert(e)	((e) ? (void)0 : __assert(__func__, __FILE__, \
-			    __LINE__, #e))
-#endif /* NDEBUG */
-
-#ifndef _ASSERT_H_
-#define _ASSERT_H_
-__BEGIN_DECLS
-void __assert(const char *, const char *, int, const char *) __dead2;
-__END_DECLS
-#endif /* !_ASSERT_H_ */
diff --git a/include/stdlib/machine/_inttypes.h b/include/stdlib/machine/_inttypes.h
deleted file mode 100644
index 8dd07d6..0000000
--- a/include/stdlib/machine/_inttypes.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_INTTYPES_H_
-#define _MACHINE_INTTYPES_H_
-
-/*
- * Trusted Firmware does not depend on any definitions in this file. Content
- * will be added as needed.
- */
-
-#endif /* !_MACHINE_INTTYPES_H_ */
diff --git a/include/stdlib/machine/_limits.h b/include/stdlib/machine/_limits.h
deleted file mode 100644
index 49a768b..0000000
--- a/include/stdlib/machine/_limits.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*-
- * Copyright (c) 1988, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)limits.h	8.3 (Berkeley) 1/4/94
- * $FreeBSD$
- */
-
-#ifndef	_MACHINE__LIMITS_H_
-#define	_MACHINE__LIMITS_H_
-
-/*
- * According to ANSI (section 2.2.4.2), the values below must be usable by
- * #if preprocessing directives.  Additionally, the expression must have the
- * same type as would an expression that is an object of the corresponding
- * type converted according to the integral promotions.  The subtraction for
- * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
- * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
- */
-
-#define	__CHAR_BIT	8		/* number of bits in a char */
-
-#define	__SCHAR_MAX	0x7f		/* max value for a signed char */
-#define	__SCHAR_MIN	(-0x7f-1)	/* min value for a signed char */
-
-#define	__UCHAR_MAX	0xff		/* max value for an unsigned char */
-
-#define	__USHRT_MAX	0xffff		/* max value for an unsigned short */
-#define	__SHRT_MAX	0x7fff		/* max value for a short */
-#define	__SHRT_MIN	(-0x7fff-1)	/* min value for a short */
-
-#define	__UINT_MAX	0xffffffff	/* max value for an unsigned int */
-#define	__INT_MAX	0x7fffffff	/* max value for an int */
-#define	__INT_MIN	(-0x7fffffff-1)	/* min value for an int */
-
-#define	__ULONG_MAX	0xffffffffffffffff	/* max for an unsigned long */
-#define	__LONG_MAX	0x7fffffffffffffff	/* max for a long */
-#define	__LONG_MIN	(-0x7fffffffffffffff-1) /* min for a long */
-
-/* Long longs have the same size but not the same type as longs. */
-					/* max for an unsigned long long */
-#define	__ULLONG_MAX	0xffffffffffffffffULL
-#define	__LLONG_MAX	0x7fffffffffffffffLL	/* max for a long long */
-#define	__LLONG_MIN	(-0x7fffffffffffffffLL-1) /* min for a long long */
-
-#define	__SSIZE_MAX	__LONG_MAX	/* max value for a ssize_t */
-
-#define	__SIZE_T_MAX	__ULONG_MAX	/* max value for a size_t */
-
-#define	__OFF_MAX	__LONG_MAX	/* max value for an off_t */
-#define	__OFF_MIN	__LONG_MIN	/* min value for an off_t */
-
-/* Quads and longs are the same size.  Ensure they stay in sync. */
-#define	__UQUAD_MAX	(__ULONG_MAX)	/* max value for a uquad_t */
-#define	__QUAD_MAX	(__LONG_MAX)	/* max value for a quad_t */
-#define	__QUAD_MIN	(__LONG_MIN)	/* min value for a quad_t */
-
-#define	__LONG_BIT	64
-#define	__WORD_BIT	32
-
-/* Minimum signal stack size. */
-#define	__MINSIGSTKSZ	(1024 * 4)
-
-#endif /* !_MACHINE__LIMITS_H_ */
diff --git a/include/stdlib/machine/_stdint.h b/include/stdlib/machine/_stdint.h
deleted file mode 100644
index e36c659..0000000
--- a/include/stdlib/machine/_stdint.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*-
- * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
- * Copyright (c) 2001 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Klaus Klein.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef	_MACHINE__STDINT_H_
-#define	_MACHINE__STDINT_H_
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
-
-#define	INT8_C(c)		(c)
-#define	INT16_C(c)		(c)
-#define	INT32_C(c)		(c)
-#define	INT64_C(c)		(c ## L)
-
-#define	UINT8_C(c)		(c)
-#define	UINT16_C(c)		(c)
-#define	UINT32_C(c)		(c ## U)
-#define	UINT64_C(c)		(c ## UL)
-
-#define	INTMAX_C(c)		INT64_C(c)
-#define	UINTMAX_C(c)		UINT64_C(c)
-
-#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.2.1 Limits of exact-width integer types
- */
-/* Minimum values of exact-width signed integer types. */
-#define	INT8_MIN	(-0x7f-1)
-#define	INT16_MIN	(-0x7fff-1)
-#define	INT32_MIN	(-0x7fffffff-1)
-#define	INT64_MIN	(-0x7fffffffffffffffL-1)
-
-/* Maximum values of exact-width signed integer types. */
-#define	INT8_MAX	0x7f
-#define	INT16_MAX	0x7fff
-#define	INT32_MAX	0x7fffffff
-#define	INT64_MAX	0x7fffffffffffffffL
-
-/* Maximum values of exact-width unsigned integer types. */
-#define	UINT8_MAX	0xff
-#define	UINT16_MAX	0xffff
-#define	UINT32_MAX	0xffffffffU
-#define	UINT64_MAX	0xffffffffffffffffUL
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.2.2  Limits of minimum-width integer types
- */
-/* Minimum values of minimum-width signed integer types. */
-#define	INT_LEAST8_MIN	INT8_MIN
-#define	INT_LEAST16_MIN	INT16_MIN
-#define	INT_LEAST32_MIN	INT32_MIN
-#define	INT_LEAST64_MIN	INT64_MIN
-
-/* Maximum values of minimum-width signed integer types. */
-#define	INT_LEAST8_MAX	INT8_MAX
-#define	INT_LEAST16_MAX	INT16_MAX
-#define	INT_LEAST32_MAX	INT32_MAX
-#define	INT_LEAST64_MAX	INT64_MAX
-
-/* Maximum values of minimum-width unsigned integer types. */
-#define	UINT_LEAST8_MAX	 UINT8_MAX
-#define	UINT_LEAST16_MAX UINT16_MAX
-#define	UINT_LEAST32_MAX UINT32_MAX
-#define	UINT_LEAST64_MAX UINT64_MAX
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.2.3  Limits of fastest minimum-width integer types
- */
-/* Minimum values of fastest minimum-width signed integer types. */
-#define	INT_FAST8_MIN	INT32_MIN
-#define	INT_FAST16_MIN	INT32_MIN
-#define	INT_FAST32_MIN	INT32_MIN
-#define	INT_FAST64_MIN	INT64_MIN
-
-/* Maximum values of fastest minimum-width signed integer types. */
-#define	INT_FAST8_MAX	INT32_MAX
-#define	INT_FAST16_MAX	INT32_MAX
-#define	INT_FAST32_MAX	INT32_MAX
-#define	INT_FAST64_MAX	INT64_MAX
-
-/* Maximum values of fastest minimum-width unsigned integer types. */
-#define	UINT_FAST8_MAX	UINT32_MAX
-#define	UINT_FAST16_MAX	UINT32_MAX
-#define	UINT_FAST32_MAX	UINT32_MAX
-#define	UINT_FAST64_MAX	UINT64_MAX
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.2.4  Limits of integer types capable of holding object pointers
- */
-#define	INTPTR_MIN	INT64_MIN
-#define	INTPTR_MAX	INT64_MAX
-#define	UINTPTR_MAX	UINT64_MAX
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.2.5  Limits of greatest-width integer types
- */
-#define	INTMAX_MIN	INT64_MIN
-#define	INTMAX_MAX	INT64_MAX
-#define	UINTMAX_MAX	UINT64_MAX
-
-/*
- * ISO/IEC 9899:1999
- * 7.18.3  Limits of other integer types
- */
-/* Limits of ptrdiff_t. */
-#define	PTRDIFF_MIN	INT64_MIN	
-#define	PTRDIFF_MAX	INT64_MAX
-
-/* Limits of sig_atomic_t. */
-#define	SIG_ATOMIC_MIN	INT32_MIN
-#define	SIG_ATOMIC_MAX	INT32_MAX
-
-/* Limit of size_t. */
-#define	SIZE_MAX	UINT64_MAX
-
-#ifndef WCHAR_MIN /* Also possibly defined in <wchar.h> */
-/* Limits of wchar_t. */
-#define	WCHAR_MIN	INT32_MIN
-#define	WCHAR_MAX	INT32_MAX
-#endif
-
-/* Limits of wint_t. */
-#define	WINT_MIN	INT32_MIN
-#define	WINT_MAX	INT32_MAX
-
-#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
-
-#endif /* !_MACHINE__STDINT_H_ */
diff --git a/include/stdlib/machine/_types.h b/include/stdlib/machine/_types.h
deleted file mode 100644
index 7e993c4..0000000
--- a/include/stdlib/machine/_types.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*-
- * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
- * Copyright (c) 1990, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	From: @(#)ansi.h	8.2 (Berkeley) 1/4/94
- *	From: @(#)types.h	8.3 (Berkeley) 1/5/94
- * $FreeBSD$
- */
-
-#ifndef _MACHINE__TYPES_H_
-#define	_MACHINE__TYPES_H_
-
-#ifndef _SYS_CDEFS_H_
-#error this file needs sys/cdefs.h as a prerequisite
-#endif
-
-/*
- * Basic types upon which most other types are built.
- */
-typedef	__signed char		__int8_t;
-typedef	unsigned char		__uint8_t;
-typedef	short			__int16_t;
-typedef	unsigned short		__uint16_t;
-typedef	int			__int32_t;
-typedef	unsigned int		__uint32_t;
-typedef	long			__int64_t;
-typedef	unsigned long		__uint64_t;
-
-/*
- * Standard type definitions.
- */
-typedef	__int32_t	__clock_t;		/* clock()... */
-typedef	__int64_t	__critical_t;
-typedef	double		__double_t;
-typedef	float		__float_t;
-typedef	__int64_t	__intfptr_t;
-typedef	__int64_t	__intmax_t;
-typedef	__int64_t	__intptr_t;
-typedef	__int32_t	__int_fast8_t;
-typedef	__int32_t	__int_fast16_t;
-typedef	__int32_t	__int_fast32_t;
-typedef	__int64_t	__int_fast64_t;
-typedef	__int8_t	__int_least8_t;
-typedef	__int16_t	__int_least16_t;
-typedef	__int32_t	__int_least32_t;
-typedef	__int64_t	__int_least64_t;
-typedef	__int64_t	__ptrdiff_t;		/* ptr1 - ptr2 */
-typedef	__int64_t	__register_t;
-typedef	__int64_t	__segsz_t;		/* segment size (in pages) */
-typedef	__uint64_t	__size_t;		/* sizeof() */
-typedef	__int64_t	__ssize_t;		/* byte count or error */
-typedef	__int64_t	__time_t;		/* time()... */
-typedef	__uint64_t	__uintfptr_t;
-typedef	__uint64_t	__uintmax_t;
-typedef	__uint64_t	__uintptr_t;
-typedef	__uint32_t	__uint_fast8_t;
-typedef	__uint32_t	__uint_fast16_t;
-typedef	__uint32_t	__uint_fast32_t;
-typedef	__uint64_t	__uint_fast64_t;
-typedef	__uint8_t	__uint_least8_t;
-typedef	__uint16_t	__uint_least16_t;
-typedef	__uint32_t	__uint_least32_t;
-typedef	__uint64_t	__uint_least64_t;
-typedef	__uint64_t	__u_register_t;
-typedef	__uint64_t	__vm_offset_t;
-typedef	__int64_t	__vm_ooffset_t;
-typedef	__uint64_t	__vm_paddr_t;
-typedef	__uint64_t	__vm_pindex_t;
-typedef	__uint64_t	__vm_size_t;
-
-/*
- * Unusual type definitions.
- */
-#ifdef __GNUCLIKE_BUILTIN_VARARGS
-typedef __builtin_va_list	__va_list;	/* internally known to gcc */
-#else
-typedef	char *			__va_list;
-#endif /* __GNUCLIKE_BUILTIN_VARARGS */
-#if defined(__GNUCLIKE_BUILTIN_VAALIST) && !defined(__GNUC_VA_LIST) \
-    && !defined(__NO_GNUC_VA_LIST)
-#define __GNUC_VA_LIST
-typedef __va_list		__gnuc_va_list;	/* compatibility w/GNU headers*/
-#endif
-
-#endif /* !_MACHINE__TYPES_H_ */
diff --git a/include/stdlib/stdio.h b/include/stdlib/stdio.h
deleted file mode 100644
index 60e081b..0000000
--- a/include/stdlib/stdio.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*-
- * Copyright (c) 1990, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)stdio.h	8.5 (Berkeley) 4/29/95
- * $FreeBSD$
- */
-
-/*
- * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
- * All rights reserved.
- */
-
-
-#ifndef	_STDIO_H_
-#define	_STDIO_H_
-
-#include <sys/cdefs.h>
-#include <sys/_null.h>
-#include <sys/_types.h>
-
-#ifndef _SIZE_T_DECLARED
-typedef	__size_t	size_t;
-#define	_SIZE_T_DECLARED
-#endif
-
-#ifndef _SSIZE_T_DECLARED
-#define	_SSIZE_T_DECLARED
-typedef	__ssize_t	ssize_t;
-#endif
-
-#define	EOF	(-1)
-
-int	 printf(const char * __restrict, ...);
-int	 putchar(int);
-int	 puts(const char *);
-int	 sprintf(char * __restrict, const char * __restrict, ...);
-int	 vsprintf(char * __restrict, const char * __restrict,
-	   __va_list);
-
-int	 sscanf(const char *__restrict, char const *__restrict, ...);
-
-#if __ISO_C_VISIBLE >= 1999
-int	 snprintf(char * __restrict, size_t, const char * __restrict,
-	   ...) __printflike(3, 4);
-int	 vsnprintf(char * __restrict, size_t, const char * __restrict,
-	   __va_list) __printflike(3, 0);
-#endif
-
-#endif /* !_STDIO_H_ */
diff --git a/include/stdlib/string.h b/include/stdlib/string.h
deleted file mode 100644
index 61e8102..0000000
--- a/include/stdlib/string.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*-
- * Copyright (c) 1990, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)string.h	8.1 (Berkeley) 6/2/93
- * $FreeBSD$
- */
-
-/*
- * Portions copyright (c) 2013-2014, ARM Limited and Contributors.
- * All rights reserved.
- */
-
-#ifndef _STRING_H_
-#define	_STRING_H_
-
-#include <sys/cdefs.h>
-#include <sys/_null.h>
-#include <sys/_types.h>
-
-#ifndef _SIZE_T_DECLARED
-typedef	__size_t	size_t;
-#define	_SIZE_T_DECLARED
-#endif
-
-__BEGIN_DECLS
-
-void	*memchr(const void *, int, size_t) __pure;
-int	 memcmp(const void *, const void *, size_t) __pure;
-void	*memcpy(void * __restrict, const void * __restrict, size_t);
-void	*memmove(void *, const void *, size_t);
-void	*memset(void *, int, size_t);
-
-char	*strchr(const char *, int) __pure;
-int	 strcmp(const char *, const char *) __pure;
-size_t	 strlen(const char *) __pure;
-int	 strncmp(const char *, const char *, size_t) __pure;
-int	 strcasecmp(const char *, const char *);
-
-__END_DECLS
-
-#endif /* _STRING_H_ */
diff --git a/include/stdlib/sys/cdefs.h b/include/stdlib/sys/cdefs.h
deleted file mode 100644
index 16fb151..0000000
--- a/include/stdlib/sys/cdefs.h
+++ /dev/null
@@ -1,686 +0,0 @@
-/*-
- * Copyright (c) 1991, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Berkeley Software Design, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *	@(#)cdefs.h	8.8 (Berkeley) 1/9/95
- * $FreeBSD$
- */
-
-#ifndef	_SYS_CDEFS_H_
-#define	_SYS_CDEFS_H_
-
-#if defined(__cplusplus)
-#define	__BEGIN_DECLS	extern "C" {
-#define	__END_DECLS	}
-#else
-#define	__BEGIN_DECLS
-#define	__END_DECLS
-#endif
-
-/*
- * This code has been put in place to help reduce the addition of
- * compiler specific defines in FreeBSD code.  It helps to aid in
- * having a compiler-agnostic source tree.
- */
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-
-#if __GNUC__ >= 3 || defined(__INTEL_COMPILER)
-#define __GNUCLIKE_ASM 3
-#define __GNUCLIKE_MATH_BUILTIN_CONSTANTS
-#else
-#define __GNUCLIKE_ASM 2
-#endif
-#define __GNUCLIKE___TYPEOF 1
-#define __GNUCLIKE___OFFSETOF 1
-#define __GNUCLIKE___SECTION 1
-
-#ifndef __INTEL_COMPILER
-# define __GNUCLIKE_CTOR_SECTION_HANDLING 1
-#endif
-
-#define __GNUCLIKE_BUILTIN_CONSTANT_P 1
-# if defined(__INTEL_COMPILER) && defined(__cplusplus) \
-    && __INTEL_COMPILER < 800
-#  undef __GNUCLIKE_BUILTIN_CONSTANT_P
-# endif
-
-#if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3) && !defined(__INTEL_COMPILER)
-# define __GNUCLIKE_BUILTIN_VARARGS 1
-# define __GNUCLIKE_BUILTIN_STDARG 1
-# define __GNUCLIKE_BUILTIN_VAALIST 1
-#endif
-
-#if defined(__GNUC__)
-# define __GNUC_VA_LIST_COMPATIBILITY 1
-#endif
-
-#ifndef __INTEL_COMPILER
-# define __GNUCLIKE_BUILTIN_NEXT_ARG 1
-# define __GNUCLIKE_MATH_BUILTIN_RELOPS
-#endif
-
-#define __GNUCLIKE_BUILTIN_MEMCPY 1
-
-/* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */
-#define __CC_SUPPORTS_INLINE 1
-#define __CC_SUPPORTS___INLINE 1
-#define __CC_SUPPORTS___INLINE__ 1
-
-#define __CC_SUPPORTS___FUNC__ 1
-#define __CC_SUPPORTS_WARNING 1
-
-#define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */
-
-#define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1
-
-#endif /* __GNUC__ || __INTEL_COMPILER */
-
-/*
- * Macro to test if we're using a specific version of gcc or later.
- */
-#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
-#define	__GNUC_PREREQ__(ma, mi)	\
-	(__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
-#else
-#define	__GNUC_PREREQ__(ma, mi)	0
-#endif
-
-/*
- * The __CONCAT macro is used to concatenate parts of symbol names, e.g.
- * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo.
- * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI
- * mode -- there must be no spaces between its arguments, and for nested
- * __CONCAT's, all the __CONCAT's must be at the left.  __CONCAT can also
- * concatenate double-quoted strings produced by the __STRING macro, but
- * this only works with ANSI C.
- *
- * __XSTRING is like __STRING, but it expands any macros in its argument
- * first.  It is only available with ANSI C.
- */
-#if defined(__STDC__) || defined(__cplusplus)
-#define	__P(protos)	protos		/* full-blown ANSI C */
-#define	__CONCAT1(x,y)	x ## y
-#define	__CONCAT(x,y)	__CONCAT1(x,y)
-#define	__STRING(x)	#x		/* stringify without expanding x */
-#define	__XSTRING(x)	__STRING(x)	/* expand x, then stringify */
-
-#define	__const		const		/* define reserved names to standard */
-#define	__signed	signed
-#define	__volatile	volatile
-#if defined(__cplusplus)
-#define	__inline	inline		/* convert to C++ keyword */
-#else
-#if !(defined(__CC_SUPPORTS___INLINE))
-#define	__inline			/* delete GCC keyword */
-#endif /* ! __CC_SUPPORTS___INLINE */
-#endif /* !__cplusplus */
-
-#else	/* !(__STDC__ || __cplusplus) */
-#define	__P(protos)	()		/* traditional C preprocessor */
-#define	__CONCAT(x,y)	x/**/y
-#define	__STRING(x)	"x"
-
-#if !defined(__CC_SUPPORTS___INLINE)
-#define	__const				/* delete pseudo-ANSI C keywords */
-#define	__inline
-#define	__signed
-#define	__volatile
-/*
- * In non-ANSI C environments, new programs will want ANSI-only C keywords
- * deleted from the program and old programs will want them left alone.
- * When using a compiler other than gcc, programs using the ANSI C keywords
- * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS.
- * When using "gcc -traditional", we assume that this is the intent; if
- * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone.
- */
-#ifndef	NO_ANSI_KEYWORDS
-#define	const				/* delete ANSI C keywords */
-#define	inline
-#define	signed
-#define	volatile
-#endif	/* !NO_ANSI_KEYWORDS */
-#endif	/* !__CC_SUPPORTS___INLINE */
-#endif	/* !(__STDC__ || __cplusplus) */
-
-/*
- * Compiler-dependent macros to help declare dead (non-returning) and
- * pure (no side effects) functions, and unused variables.  They are
- * null except for versions of gcc that are known to support the features
- * properly (old versions of gcc-2 supported the dead and pure features
- * in a different (wrong) way).  If we do not provide an implementation
- * for a given compiler, let the compile fail if it is told to use
- * a feature that we cannot live without.
- */
-#ifdef lint
-#define	__dead2
-#define	__pure2
-#define	__unused
-#define	__packed
-#define	__aligned(x)
-#define	__section(x)
-#else
-#if !__GNUC_PREREQ__(2, 5) && !defined(__INTEL_COMPILER)
-#define	__dead2
-#define	__pure2
-#define	__unused
-#endif
-#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7 && !defined(__INTEL_COMPILER)
-#define	__dead2		__attribute__((__noreturn__))
-#define	__pure2		__attribute__((__const__))
-#define	__unused
-/* XXX Find out what to do for __packed, __aligned and __section */
-#endif
-#if __GNUC_PREREQ__(2, 7)
-#define	__dead2		__attribute__((__noreturn__))
-#define	__pure2		__attribute__((__const__))
-#define	__unused	__attribute__((__unused__))
-#define	__used		__attribute__((__used__))
-#define	__packed	__attribute__((__packed__))
-#define	__aligned(x)	__attribute__((__aligned__(x)))
-#define	__section(x)	__attribute__((__section__(x)))
-#endif
-#if defined(__INTEL_COMPILER)
-#define __dead2		__attribute__((__noreturn__))
-#define __pure2		__attribute__((__const__))
-#define __unused	__attribute__((__unused__))
-#define __used		__attribute__((__used__))
-#define __packed	__attribute__((__packed__))
-#define __aligned(x)	__attribute__((__aligned__(x)))
-#define __section(x)	__attribute__((__section__(x)))
-#endif
-#endif
-
-#if !__GNUC_PREREQ__(2, 95)
-#define	__alignof(x)	__offsetof(struct { char __a; x __b; }, __b)
-#endif
-
-/*
- * Keywords added in C11.
- */
-#if defined(__cplusplus) && __cplusplus >= 201103L
-#define	_Alignas(e)		alignas(e)
-#define	_Alignof(e)		alignof(e)
-#define	_Noreturn		[[noreturn]]
-#define	_Static_assert(e, s)	static_assert(e, s)
-/* FIXME: change this to thread_local when clang in base supports it */
-#define	_Thread_local		__thread
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
-/* Do nothing.  They are language keywords. */
-#else
-/* Not supported.  Implement them using our versions. */
-#define	_Alignas(x)		__aligned(x)
-#define	_Alignof(x)		__alignof(x)
-#define	_Noreturn		__dead2
-#define	_Thread_local		__thread
-#ifdef __COUNTER__
-#define	_Static_assert(x, y)	__Static_assert(x, __COUNTER__)
-#define	__Static_assert(x, y)	___Static_assert(x, y)
-#define	___Static_assert(x, y)	typedef char __assert_ ## y[(x) ? 1 : -1]
-#else
-#define	_Static_assert(x, y)	struct __hack
-#endif
-#endif
-
-/*
- * Emulation of C11 _Generic().  Unlike the previously defined C11
- * keywords, it is not possible to implement this using exactly the same
- * syntax.  Therefore implement something similar under the name
- * __generic().  Unlike _Generic(), this macro can only distinguish
- * between a single type, so it requires nested invocations to
- * distinguish multiple cases.
- */
-
-#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
-#define	__generic(expr, t, yes, no)					\
-	_Generic(expr, t: yes, default: no)
-#elif __GNUC_PREREQ__(3, 1) && !defined(__cplusplus)
-#define	__generic(expr, t, yes, no)					\
-	__builtin_choose_expr(						\
-	    __builtin_types_compatible_p(__typeof(expr), t), yes, no)
-#endif
-
-#if __GNUC_PREREQ__(2, 96)
-#define	__malloc_like	__attribute__((__malloc__))
-#define	__pure		__attribute__((__pure__))
-#else
-#define	__malloc_like
-#define	__pure
-#endif
-
-#if __GNUC_PREREQ__(3, 1) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800)
-#define	__always_inline	__attribute__((__always_inline__))
-#else
-#define	__always_inline
-#endif
-
-#if __GNUC_PREREQ__(3, 1)
-#define	__noinline	__attribute__ ((__noinline__))
-#else
-#define	__noinline
-#endif
-
-#if __GNUC_PREREQ__(3, 3)
-#define __nonnull(x)	__attribute__((__nonnull__(x)))
-#else
-#define __nonnull(x)
-#endif
-
-#if __GNUC_PREREQ__(3, 4)
-#define	__fastcall	__attribute__((__fastcall__))
-#else
-#define	__fastcall
-#endif
-
-#if __GNUC_PREREQ__(4, 1)
-#define	__returns_twice	__attribute__((__returns_twice__))
-#else
-#define	__returns_twice
-#endif
-
-/* XXX: should use `#if __STDC_VERSION__ < 199901'. */
-#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
-#define	__func__	NULL
-#endif
-
-#if (defined(__INTEL_COMPILER) || (defined(__GNUC__) && __GNUC__ >= 2)) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901
-#define	__LONG_LONG_SUPPORTED
-#endif
-
-/* C++11 exposes a load of C99 stuff */
-#if defined(__cplusplus) && __cplusplus >= 201103L
-#define	__LONG_LONG_SUPPORTED
-#ifndef	__STDC_LIMIT_MACROS
-#define	__STDC_LIMIT_MACROS
-#endif
-#ifndef	__STDC_CONSTANT_MACROS
-#define	__STDC_CONSTANT_MACROS
-#endif
-#endif
-
-/*
- * GCC 2.95 provides `__restrict' as an extension to C90 to support the
- * C99-specific `restrict' type qualifier.  We happen to use `__restrict' as
- * a way to define the `restrict' type qualifier without disturbing older
- * software that is unaware of C99 keywords.
- */
-#if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95)
-#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901 || defined(lint)
-#define	__restrict
-#else
-#define	__restrict	restrict
-#endif
-#endif
-
-/*
- * GNU C version 2.96 adds explicit branch prediction so that
- * the CPU back-end can hint the processor and also so that
- * code blocks can be reordered such that the predicted path
- * sees a more linear flow, thus improving cache behavior, etc.
- *
- * The following two macros provide us with a way to utilize this
- * compiler feature.  Use __predict_true() if you expect the expression
- * to evaluate to true, and __predict_false() if you expect the
- * expression to evaluate to false.
- *
- * A few notes about usage:
- *
- *	* Generally, __predict_false() error condition checks (unless
- *	  you have some _strong_ reason to do otherwise, in which case
- *	  document it), and/or __predict_true() `no-error' condition
- *	  checks, assuming you want to optimize for the no-error case.
- *
- *	* Other than that, if you don't know the likelihood of a test
- *	  succeeding from empirical or other `hard' evidence, don't
- *	  make predictions.
- *
- *	* These are meant to be used in places that are run `a lot'.
- *	  It is wasteful to make predictions in code that is run
- *	  seldomly (e.g. at subsystem initialization time) as the
- *	  basic block reordering that this affects can often generate
- *	  larger code.
- */
-#if __GNUC_PREREQ__(2, 96)
-#define __predict_true(exp)     __builtin_expect((exp), 1)
-#define __predict_false(exp)    __builtin_expect((exp), 0)
-#else
-#define __predict_true(exp)     (exp)
-#define __predict_false(exp)    (exp)
-#endif
-
-#if __GNUC_PREREQ__(4, 2)
-#define	__hidden	__attribute__((__visibility__("hidden")))
-#define	__exported	__attribute__((__visibility__("default")))
-#else
-#define	__hidden
-#define	__exported
-#endif
-
-/*
- * We define this here since <stddef.h>, <sys/queue.h>, and <sys/types.h>
- * require it.
- */
-#if __GNUC_PREREQ__(4, 1)
-#define __offsetof(type, field)	 __builtin_offsetof(type, field)
-#else
-#ifndef __cplusplus
-#define	__offsetof(type, field) \
-	((__size_t)(__uintptr_t)((const volatile void *)&((type *)0)->field))
-#else
-#define __offsetof(type, field)					\
-  (__offsetof__ (reinterpret_cast <__size_t>			\
-                 (&reinterpret_cast <const volatile char &>	\
-                  (static_cast<type *> (0)->field))))
-#endif
-#endif
-#define	__rangeof(type, start, end) \
-	(__offsetof(type, end) - __offsetof(type, start))
-
-/*
- * Given the pointer x to the member m of the struct s, return
- * a pointer to the containing structure.  When using GCC, we first
- * assign pointer x to a local variable, to check that its type is
- * compatible with member m.
- */
-#if __GNUC_PREREQ__(3, 1)
-#define	__containerof(x, s, m) ({					\
-	const volatile __typeof(((s *)0)->m) *__x = (x);		\
-	__DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\
-})
-#else
-#define	__containerof(x, s, m)						\
-	__DEQUALIFY(s *, (const volatile char *)(x) - __offsetof(s, m))
-#endif
-
-/*
- * Compiler-dependent macros to declare that functions take printf-like
- * or scanf-like arguments.  They are null except for versions of gcc
- * that are known to support the features properly (old versions of gcc-2
- * didn't permit keeping the keywords out of the application namespace).
- */
-#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
-#define	__printflike(fmtarg, firstvararg)
-#define	__scanflike(fmtarg, firstvararg)
-#define	__format_arg(fmtarg)
-#define	__strfmonlike(fmtarg, firstvararg)
-#define	__strftimelike(fmtarg, firstvararg)
-#else
-#define	__printflike(fmtarg, firstvararg) \
-	    __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
-#define	__scanflike(fmtarg, firstvararg) \
-	    __attribute__((__format__ (__scanf__, fmtarg, firstvararg)))
-#define	__format_arg(fmtarg)	__attribute__((__format_arg__ (fmtarg)))
-#define	__strfmonlike(fmtarg, firstvararg) \
-	    __attribute__((__format__ (__strfmon__, fmtarg, firstvararg)))
-#define	__strftimelike(fmtarg, firstvararg) \
-	    __attribute__((__format__ (__strftime__, fmtarg, firstvararg)))
-#endif
-
-/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
-#if __FreeBSD_cc_version >= 300001 && defined(__GNUC__) && !defined(__INTEL_COMPILER)
-#define	__printf0like(fmtarg, firstvararg) \
-	    __attribute__((__format__ (__printf0__, fmtarg, firstvararg)))
-#else
-#define	__printf0like(fmtarg, firstvararg)
-#endif
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-#ifndef __INTEL_COMPILER
-#define	__strong_reference(sym,aliassym)	\
-	extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym)))
-#endif
-#ifdef __STDC__
-#define	__weak_reference(sym,alias)	\
-	__asm__(".weak " #alias);	\
-	__asm__(".equ "  #alias ", " #sym)
-#define	__warn_references(sym,msg)	\
-	__asm__(".section .gnu.warning." #sym);	\
-	__asm__(".asciz \"" msg "\"");	\
-	__asm__(".previous")
-#define	__sym_compat(sym,impl,verid)	\
-	__asm__(".symver " #impl ", " #sym "@" #verid)
-#define	__sym_default(sym,impl,verid)	\
-	__asm__(".symver " #impl ", " #sym "@@" #verid)
-#else
-#define	__weak_reference(sym,alias)	\
-	__asm__(".weak alias");		\
-	__asm__(".equ alias, sym")
-#define	__warn_references(sym,msg)	\
-	__asm__(".section .gnu.warning.sym"); \
-	__asm__(".asciz \"msg\"");	\
-	__asm__(".previous")
-#define	__sym_compat(sym,impl,verid)	\
-	__asm__(".symver impl, sym@verid")
-#define	__sym_default(impl,sym,verid)	\
-	__asm__(".symver impl, sym@@verid")
-#endif	/* __STDC__ */
-#endif	/* __GNUC__ || __INTEL_COMPILER */
-
-#define	__GLOBL1(sym)	__asm__(".globl " #sym)
-#define	__GLOBL(sym)	__GLOBL1(sym)
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-#define	__IDSTRING(name,string)	__asm__(".ident\t\"" string "\"")
-#else
-/*
- * The following definition might not work well if used in header files,
- * but it should be better than nothing.  If you want a "do nothing"
- * version, then it should generate some harmless declaration, such as:
- *    #define __IDSTRING(name,string)	struct __hack
- */
-#define	__IDSTRING(name,string)	static const char name[] __unused = string
-#endif
-
-/*
- * Embed the rcs id of a source file in the resulting library.  Note that in
- * more recent ELF binutils, we use .ident allowing the ID to be stripped.
- * Usage:
- *	__FBSDID("$FreeBSD$");
- */
-#ifndef	__FBSDID
-#if !defined(lint) && !defined(STRIP_FBSDID)
-#define	__FBSDID(s)	__IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-#else
-#define	__FBSDID(s)	struct __hack
-#endif
-#endif
-
-#ifndef	__RCSID
-#ifndef	NO__RCSID
-#define	__RCSID(s)	__IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-#else
-#define	__RCSID(s)	struct __hack
-#endif
-#endif
-
-#ifndef	__RCSID_SOURCE
-#ifndef	NO__RCSID_SOURCE
-#define	__RCSID_SOURCE(s)	__IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
-#else
-#define	__RCSID_SOURCE(s)	struct __hack
-#endif
-#endif
-
-#ifndef	__SCCSID
-#ifndef	NO__SCCSID
-#define	__SCCSID(s)	__IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
-#else
-#define	__SCCSID(s)	struct __hack
-#endif
-#endif
-
-#ifndef	__COPYRIGHT
-#ifndef	NO__COPYRIGHT
-#define	__COPYRIGHT(s)	__IDSTRING(__CONCAT(__copyright_,__LINE__),s)
-#else
-#define	__COPYRIGHT(s)	struct __hack
-#endif
-#endif
-
-#ifndef	__DECONST
-#define	__DECONST(type, var)	((type)(__uintptr_t)(const void *)(var))
-#endif
-
-#ifndef	__DEVOLATILE
-#define	__DEVOLATILE(type, var)	((type)(__uintptr_t)(volatile void *)(var))
-#endif
-
-#ifndef	__DEQUALIFY
-#define	__DEQUALIFY(type, var)	((type)(__uintptr_t)(const volatile void *)(var))
-#endif
-
-/*-
- * The following definitions are an extension of the behavior originally
- * implemented in <sys/_posix.h>, but with a different level of granularity.
- * POSIX.1 requires that the macros we test be defined before any standard
- * header file is included.
- *
- * Here's a quick run-down of the versions:
- *  defined(_POSIX_SOURCE)		1003.1-1988
- *  _POSIX_C_SOURCE == 1		1003.1-1990
- *  _POSIX_C_SOURCE == 2		1003.2-1992 C Language Binding Option
- *  _POSIX_C_SOURCE == 199309		1003.1b-1993
- *  _POSIX_C_SOURCE == 199506		1003.1c-1995, 1003.1i-1995,
- *					and the omnibus ISO/IEC 9945-1: 1996
- *  _POSIX_C_SOURCE == 200112		1003.1-2001
- *  _POSIX_C_SOURCE == 200809		1003.1-2008
- *
- * In addition, the X/Open Portability Guide, which is now the Single UNIX
- * Specification, defines a feature-test macro which indicates the version of
- * that specification, and which subsumes _POSIX_C_SOURCE.
- *
- * Our macros begin with two underscores to avoid namespace screwage.
- */
-
-/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
-#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
-#undef _POSIX_C_SOURCE		/* Probably illegal, but beyond caring now. */
-#define	_POSIX_C_SOURCE		199009
-#endif
-
-/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
-#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
-#undef _POSIX_C_SOURCE
-#define	_POSIX_C_SOURCE		199209
-#endif
-
-/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
-#ifdef _XOPEN_SOURCE
-#if _XOPEN_SOURCE - 0 >= 700
-#define	__XSI_VISIBLE		700
-#undef _POSIX_C_SOURCE
-#define	_POSIX_C_SOURCE		200809
-#elif _XOPEN_SOURCE - 0 >= 600
-#define	__XSI_VISIBLE		600
-#undef _POSIX_C_SOURCE
-#define	_POSIX_C_SOURCE		200112
-#elif _XOPEN_SOURCE - 0 >= 500
-#define	__XSI_VISIBLE		500
-#undef _POSIX_C_SOURCE
-#define	_POSIX_C_SOURCE		199506
-#endif
-#endif
-
-/*
- * Deal with all versions of POSIX.  The ordering relative to the tests above is
- * important.
- */
-#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
-#define	_POSIX_C_SOURCE		198808
-#endif
-#ifdef _POSIX_C_SOURCE
-#if _POSIX_C_SOURCE >= 200809
-#define	__POSIX_VISIBLE		200809
-#define	__ISO_C_VISIBLE		1999
-#elif _POSIX_C_SOURCE >= 200112
-#define	__POSIX_VISIBLE		200112
-#define	__ISO_C_VISIBLE		1999
-#elif _POSIX_C_SOURCE >= 199506
-#define	__POSIX_VISIBLE		199506
-#define	__ISO_C_VISIBLE		1990
-#elif _POSIX_C_SOURCE >= 199309
-#define	__POSIX_VISIBLE		199309
-#define	__ISO_C_VISIBLE		1990
-#elif _POSIX_C_SOURCE >= 199209
-#define	__POSIX_VISIBLE		199209
-#define	__ISO_C_VISIBLE		1990
-#elif _POSIX_C_SOURCE >= 199009
-#define	__POSIX_VISIBLE		199009
-#define	__ISO_C_VISIBLE		1990
-#else
-#define	__POSIX_VISIBLE		198808
-#define	__ISO_C_VISIBLE		0
-#endif /* _POSIX_C_SOURCE */
-#else
-/*-
- * Deal with _ANSI_SOURCE:
- * If it is defined, and no other compilation environment is explicitly
- * requested, then define our internal feature-test macros to zero.  This
- * makes no difference to the preprocessor (undefined symbols in preprocessing
- * expressions are defined to have value zero), but makes it more convenient for
- * a test program to print out the values.
- *
- * If a program mistakenly defines _ANSI_SOURCE and some other macro such as
- * _POSIX_C_SOURCE, we will assume that it wants the broader compilation
- * environment (and in fact we will never get here).
- */
-#if defined(_ANSI_SOURCE)	/* Hide almost everything. */
-#define	__POSIX_VISIBLE		0
-#define	__XSI_VISIBLE		0
-#define	__BSD_VISIBLE		0
-#define	__ISO_C_VISIBLE		1990
-#elif defined(_C99_SOURCE)	/* Localism to specify strict C99 env. */
-#define	__POSIX_VISIBLE		0
-#define	__XSI_VISIBLE		0
-#define	__BSD_VISIBLE		0
-#define	__ISO_C_VISIBLE		1999
-#else				/* Default environment: show everything. */
-#define	__POSIX_VISIBLE		200809
-#define	__XSI_VISIBLE		700
-#define	__BSD_VISIBLE		1
-#define	__ISO_C_VISIBLE		1999
-#endif
-#endif
-
-#ifndef	__has_feature
-#define	__has_feature(x) 0
-#endif
-#ifndef	__has_include
-#define	__has_include(x) 0
-#endif
-#ifndef	__has_builtin
-#define	__has_builtin(x) 0
-#endif
-
-#if defined(__mips) || defined(__powerpc64__) || defined(__arm__)
-#define __NO_TLS 1
-#endif
-
-#endif /* !_SYS_CDEFS_H_ */
diff --git a/include/stdlib/sys/uuid.h b/include/stdlib/sys/uuid.h
deleted file mode 100644
index 5c4767b..0000000
--- a/include/stdlib/sys/uuid.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*-
- * Copyright (c) 2002 Marcel Moolenaar
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-/*
- * Portions copyright (c) 2014, ARM Limited and Contributors.
- * All rights reserved.
- */
-
-#ifndef _SYS_UUID_H_
-#define _SYS_UUID_H_
-
-#include <sys/cdefs.h>
-
-/* Length of a node address (an IEEE 802 address). */
-#define	_UUID_NODE_LEN		6
-
-/*
- * See also:
- *      http://www.opengroup.org/dce/info/draft-leach-uuids-guids-01.txt
- *      http://www.opengroup.org/onlinepubs/009629399/apdxa.htm
- *
- * A DCE 1.1 compatible source representation of UUIDs.
- */
-struct uuid {
-	uint32_t	time_low;
-	uint16_t	time_mid;
-	uint16_t	time_hi_and_version;
-	uint8_t		clock_seq_hi_and_reserved;
-	uint8_t		clock_seq_low;
-	uint8_t		node[_UUID_NODE_LEN];
-};
-
-/* XXX namespace pollution? */
-typedef struct uuid uuid_t;
-
-#endif /* _SYS_UUID_H_ */
diff --git a/include/tools_share/firmware_image_package.h b/include/tools_share/firmware_image_package.h
new file mode 100644
index 0000000..c39e6f0
--- /dev/null
+++ b/include/tools_share/firmware_image_package.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FIRMWARE_IMAGE_PACKAGE_H__
+#define __FIRMWARE_IMAGE_PACKAGE_H__
+
+#include <stdint.h>
+#include <uuid.h>
+
+/* This is used as a signature to validate the blob header */
+#define TOC_HEADER_NAME	0xAA640001
+
+
+/* ToC Entry UUIDs */
+#define UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U \
+	{0x03279265, 0x742f, 0x44e6, 0x8d, 0xff, {0x57, 0x9a, 0xc1, 0xff, 0x06, 0x10} }
+#define UUID_TRUSTED_UPDATE_FIRMWARE_BL2U \
+	{0x37ebb360, 0xe5c1, 0x41ea, 0x9d, 0xf3, {0x19, 0xed, 0xa1, 0x1f, 0x68, 0x01} }
+#define UUID_TRUSTED_UPDATE_FIRMWARE_NS_BL2U \
+	{0x111d514f, 0xe52b, 0x494e, 0xb4, 0xc5, {0x83, 0xc2, 0xf7, 0x15, 0x84, 0x0a} }
+#define UUID_TRUSTED_FWU_CERT \
+	{0xb28a4071, 0xd618, 0x4c87, 0x8b, 0x2e, {0xc6, 0xdc, 0xcd, 0x50, 0xf0, 0x96} }
+#define UUID_TRUSTED_BOOT_FIRMWARE_BL2 \
+	{0x0becf95f, 0x224d, 0x4d3e, 0xa5, 0x44, {0xc3, 0x9d, 0x81, 0xc7, 0x3f, 0x0a} }
+#define UUID_SCP_FIRMWARE_SCP_BL2 \
+	{0x3dfd6697, 0xbe89, 0x49e8, 0xae, 0x5d, {0x78, 0xa1, 0x40, 0x60, 0x82, 0x13} }
+#define UUID_EL3_RUNTIME_FIRMWARE_BL31 \
+	{0x6d08d447, 0xfe4c, 0x4698, 0x9b, 0x95, {0x29, 0x50, 0xcb, 0xbd, 0x5a, 0x00} }
+#define UUID_SECURE_PAYLOAD_BL32 \
+	{0x89e1d005, 0xdc53, 0x4713, 0x8d, 0x2b, {0x50, 0x0a, 0x4b, 0x7a, 0x3e, 0x38} }
+#define UUID_SECURE_PAYLOAD_BL32_EXTRA1 \
+	{0x9bc2700b, 0x5a2a, 0x4078, 0x9f, 0x65, {0x0a, 0x56, 0x82, 0x73, 0x82, 0x88} }
+#define UUID_SECURE_PAYLOAD_BL32_EXTRA2 \
+	{0xb17ba88e, 0xa2cf, 0x4d3f, 0x85, 0xfd, {0xe7, 0xbb, 0xa5, 0x02, 0x20, 0xd9} }
+#define UUID_NON_TRUSTED_FIRMWARE_BL33 \
+	{0xa7eed0d6, 0xeafc, 0x4bd5, 0x97, 0x82, {0x99, 0x34, 0xf2, 0x34, 0xb6, 0xe4} }
+/* Key certificates */
+#define UUID_ROT_KEY_CERT \
+	{0x721d2d86, 0x60f8, 0x11e4, 0x92, 0x0b, {0x8b, 0xe7, 0x62, 0x16, 0x0f, 0x24} }
+#define UUID_TRUSTED_KEY_CERT \
+	{0x90e87e82, 0x60f8, 0x11e4, 0xa1, 0xb4, {0x77, 0x7a, 0x21, 0xb4, 0xf9, 0x4c} }
+#define UUID_NON_TRUSTED_WORLD_KEY_CERT \
+	{0x3d87671c, 0x635f, 0x11e4, 0x97, 0x8d, {0x27, 0xc0, 0xc7, 0x14, 0x8a, 0xbd} }
+#define UUID_SCP_FW_KEY_CERT \
+	{0xa1214202, 0x60f8, 0x11e4, 0x8d, 0x9b, {0xf3, 0x3c, 0x0e, 0x15, 0xa0, 0x14} }
+#define UUID_SOC_FW_KEY_CERT \
+	{0xccbeb88a, 0x60f9, 0x11e4, 0x9a, 0xd0, {0xeb, 0x48, 0x22, 0xd8, 0xdc, 0xf8} }
+#define UUID_TRUSTED_OS_FW_KEY_CERT \
+	{0x03d67794, 0x60fb, 0x11e4, 0x85, 0xdd, {0xb7, 0x10, 0x5b, 0x8c, 0xee, 0x04} }
+#define UUID_NON_TRUSTED_FW_KEY_CERT \
+	{0x2a83d58a, 0x60fb, 0x11e4, 0x8a, 0xaf, {0xdf, 0x30, 0xbb, 0xc4, 0x98, 0x59} }
+/* Content certificates */
+#define UUID_TRUSTED_BOOT_FW_CERT \
+	{0xea69e2d6, 0x635d, 0x11e4, 0x8d, 0x8c, {0x9f, 0xba, 0xbe, 0x99, 0x56, 0xa5} }
+#define UUID_SCP_FW_CONTENT_CERT \
+	{0x046fbe44, 0x635e, 0x11e4, 0xb2, 0x8b, {0x73, 0xd8, 0xea, 0xae, 0x96, 0x56} }
+#define UUID_SOC_FW_CONTENT_CERT \
+	{0x200cb2e2, 0x635e, 0x11e4, 0x9c, 0xe8, {0xab, 0xcc, 0xf9, 0x2b, 0xb6, 0x66} }
+#define UUID_TRUSTED_OS_FW_CONTENT_CERT \
+	{0x11449fa4, 0x635e, 0x11e4, 0x87, 0x28, {0x3f, 0x05, 0x72, 0x2a, 0xf3, 0x3d} }
+#define UUID_NON_TRUSTED_FW_CONTENT_CERT \
+	{0xf3c1c48e, 0x635d, 0x11e4, 0xa7, 0xa9, {0x87, 0xee, 0x40, 0xb2, 0x3f, 0xa7} }
+
+typedef struct fip_toc_header {
+	uint32_t	name;
+	uint32_t	serial_number;
+	uint64_t	flags;
+} fip_toc_header_t;
+
+typedef struct fip_toc_entry {
+	uuid_t		uuid;
+	uint64_t	offset_address;
+	uint64_t	size;
+	uint64_t	flags;
+} fip_toc_entry_t;
+
+#endif /* __FIRMWARE_IMAGE_PACKAGE_H__ */
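To make the ToC layout above concrete, here is a sketch of scanning a FIP image in memory. It assumes the usual convention of a header followed by a packed array of entries terminated by an all-zero UUID; the `fip_find` helper and that terminator assumption are illustrative and not part of this header.

#include <string.h>
#include <firmware_image_package.h>

static const fip_toc_entry_t *fip_find(const void *fip_base,
				       const uuid_t *wanted)
{
	const fip_toc_header_t *hdr = fip_base;
	const fip_toc_entry_t *entry = (const fip_toc_entry_t *)(hdr + 1);
	const uuid_t null_uuid = { 0 };

	if (hdr->name != TOC_HEADER_NAME)
		return NULL;		/* not a valid FIP */

	while (memcmp(&entry->uuid, &null_uuid, sizeof(uuid_t)) != 0) {
		if (memcmp(&entry->uuid, wanted, sizeof(uuid_t)) == 0)
			return entry;	/* offset_address/size locate the payload */
		entry++;
	}
	return NULL;
}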
diff --git a/include/tools_share/tbbr_oid.h b/include/tools_share/tbbr_oid.h
new file mode 100644
index 0000000..e57790c
--- /dev/null
+++ b/include/tools_share/tbbr_oid.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TBBR_OID_H__
+#define __TBBR_OID_H__
+
+/*
+ * The following is a list of OID values defined and reserved by ARM, which
+ * are used to define the extension fields of the certificate structure, as
+ * defined in the Trusted Board Boot Requirements (TBBR) specification,
+ * ARM DEN0006C-1.
+ */
+
+
+/* TrustedFirmwareNVCounter - Non-volatile counter extension */
+#define TRUSTED_FW_NVCOUNTER_OID		"1.3.6.1.4.1.4128.2100.1"
+/* NonTrustedFirmwareNVCounter - Non-volatile counter extension */
+#define NON_TRUSTED_FW_NVCOUNTER_OID		"1.3.6.1.4.1.4128.2100.2"
+
+
+/*
+ * Non-Trusted Firmware Updater Certificate
+ */
+
+/* APFirmwareUpdaterConfigHash - BL2U */
+#define AP_FWU_CFG_HASH_OID			"1.3.6.1.4.1.4128.2100.101"
+/* SCPFirmwareUpdaterConfigHash - SCP_BL2U */
+#define SCP_FWU_CFG_HASH_OID			"1.3.6.1.4.1.4128.2100.102"
+/* FirmwareUpdaterHash - NS_BL2U */
+#define FWU_HASH_OID				"1.3.6.1.4.1.4128.2100.103"
+/* TrustedWatchdogRefreshTime */
+#define TRUSTED_WATCHDOG_TIME_OID		"1.3.6.1.4.1.4128.2100.104"
+
+
+/*
+ * Trusted Boot Firmware Certificate
+ */
+
+/* TrustedBootFirmwareHash - BL2 */
+#define TRUSTED_BOOT_FW_HASH_OID		"1.3.6.1.4.1.4128.2100.201"
+
+
+/*
+ * Trusted Key Certificate
+ */
+
+/* PrimaryDebugCertificatePK */
+#define PRIMARY_DEBUG_PK_OID			"1.3.6.1.4.1.4128.2100.301"
+/* TrustedWorldPK */
+#define TRUSTED_WORLD_PK_OID			"1.3.6.1.4.1.4128.2100.302"
+/* NonTrustedWorldPK */
+#define NON_TRUSTED_WORLD_PK_OID		"1.3.6.1.4.1.4128.2100.303"
+
+
+/*
+ * Trusted Debug Certificate
+ */
+
+/* DebugScenario */
+#define TRUSTED_DEBUG_SCENARIO_OID		"1.3.6.1.4.1.4128.2100.401"
+/* SoC Specific */
+#define TRUSTED_DEBUG_SOC_SPEC_OID		"1.3.6.1.4.1.4128.2100.402"
+/* SecondaryDebugCertPK */
+#define SECONDARY_DEBUG_PK_OID			"1.3.6.1.4.1.4128.2100.403"
+
+
+/*
+ * SoC Firmware Key Certificate
+ */
+
+/* SoCFirmwareContentCertPK */
+#define SOC_FW_CONTENT_CERT_PK_OID		"1.3.6.1.4.1.4128.2100.501"
+
+
+/*
+ * SoC Firmware Content Certificate
+ */
+
+/* APRomPatchHash - BL1_PATCH */
+#define APROM_PATCH_HASH_OID			"1.3.6.1.4.1.4128.2100.601"
+/* SoCConfigHash */
+#define SOC_CONFIG_HASH_OID			"1.3.6.1.4.1.4128.2100.602"
+/* SoCAPFirmwareHash - BL31 */
+#define SOC_AP_FW_HASH_OID			"1.3.6.1.4.1.4128.2100.603"
+
+
+/*
+ * SCP Firmware Key Certificate
+ */
+
+/* SCPFirmwareContentCertPK */
+#define SCP_FW_CONTENT_CERT_PK_OID		"1.3.6.1.4.1.4128.2100.701"
+
+
+/*
+ * SCP Firmware Content Certificate
+ */
+
+/* SCPFirmwareHash - SCP_BL2 */
+#define SCP_FW_HASH_OID				"1.3.6.1.4.1.4128.2100.801"
+/* SCPRomPatchHash - SCP_BL1_PATCH */
+#define SCP_ROM_PATCH_HASH_OID			"1.3.6.1.4.1.4128.2100.802"
+
+
+/*
+ * Trusted OS Firmware Key Certificate
+ */
+
+/* TrustedOSFirmwareContentCertPK */
+#define TRUSTED_OS_FW_CONTENT_CERT_PK_OID	"1.3.6.1.4.1.4128.2100.901"
+
+
+/*
+ * Trusted OS Firmware Content Certificate
+ */
+
+/* TrustedOSFirmwareHash - BL32 */
+#define TRUSTED_OS_FW_HASH_OID			"1.3.6.1.4.1.4128.2100.1001"
+/* TrustedOSExtra1FirmwareHash - BL32 Extra1 */
+#define TRUSTED_OS_FW_EXTRA1_HASH_OID		"1.3.6.1.4.1.4128.2100.1002"
+/* TrustedOSExtra2FirmwareHash - BL32 Extra2 */
+#define TRUSTED_OS_FW_EXTRA2_HASH_OID		"1.3.6.1.4.1.4128.2100.1003"
+
+
+/*
+ * Non-Trusted Firmware Key Certificate
+ */
+
+/* NonTrustedFirmwareContentCertPK */
+#define NON_TRUSTED_FW_CONTENT_CERT_PK_OID	"1.3.6.1.4.1.4128.2100.1101"
+
+
+/*
+ * Non-Trusted Firmware Content Certificate
+ */
+
+/* NonTrustedWorldBootloaderHash - BL33 */
+#define NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID	"1.3.6.1.4.1.4128.2100.1201"
+
+#endif /* __TBBR_OID_H__ */
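These OIDs only identify certificate extensions; tools that dump a certificate chain usually pair them with readable names. A small illustrative lookup, not part of the TBBR interface, might look like this:

#include <stddef.h>
#include <string.h>
/* assumes tbbr_oid.h (above) is included */

static const struct {
	const char *oid;
	const char *name;
} tbbr_oid_names[] = {
	{ TRUSTED_BOOT_FW_HASH_OID, "TrustedBootFirmwareHash (BL2)" },
	{ SOC_AP_FW_HASH_OID, "SoCAPFirmwareHash (BL31)" },
	{ TRUSTED_OS_FW_HASH_OID, "TrustedOSFirmwareHash (BL32)" },
	{ NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID,
	  "NonTrustedWorldBootloaderHash (BL33)" },
};

static const char *tbbr_oid_to_name(const char *oid)
{
	for (size_t i = 0; i < sizeof(tbbr_oid_names) / sizeof(tbbr_oid_names[0]); i++)
		if (strcmp(oid, tbbr_oid_names[i].oid) == 0)
			return tbbr_oid_names[i].name;
	return "unknown TBBR OID";
}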
diff --git a/include/tools_share/uuid.h b/include/tools_share/uuid.h
new file mode 100644
index 0000000..6d935bd
--- /dev/null
+++ b/include/tools_share/uuid.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2014, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_UUID_H_
+#define _SYS_UUID_H_
+
+/* Length of a node address (an IEEE 802 address). */
+#define	_UUID_NODE_LEN		6
+
+/* Length of UUID string including dashes. */
+#define _UUID_STR_LEN		36
+
+/*
+ * See also:
+ *      http://www.opengroup.org/dce/info/draft-leach-uuids-guids-01.txt
+ *      http://www.opengroup.org/onlinepubs/009629399/apdxa.htm
+ *
+ * A DCE 1.1 compatible source representation of UUIDs.
+ */
+struct uuid {
+	uint32_t	time_low;
+	uint16_t	time_mid;
+	uint16_t	time_hi_and_version;
+	uint8_t		clock_seq_hi_and_reserved;
+	uint8_t		clock_seq_low;
+	uint8_t		node[_UUID_NODE_LEN];
+};
+
+/* XXX namespace pollution? */
+typedef struct uuid uuid_t;
+
+#endif /* _SYS_UUID_H_ */
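As a usage sketch (not part of the header), the canonical 8-4-4-4-12 string form that _UUID_STR_LEN refers to can be produced from a struct uuid like so:

#include <stdio.h>
/* assumes uuid.h (above) is included */

static void uuid_to_str(char out[_UUID_STR_LEN + 1], const struct uuid *u)
{
	/* 36 characters plus the terminating NUL */
	snprintf(out, _UUID_STR_LEN + 1,
		 "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		 u->time_low, u->time_mid, u->time_hi_and_version,
		 u->clock_seq_hi_and_reserved, u->clock_seq_low,
		 u->node[0], u->node[1], u->node[2],
		 u->node[3], u->node[4], u->node[5]);
}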
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
new file mode 100644
index 0000000..810af0f
--- /dev/null
+++ b/lib/aarch32/cache_helpers.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	flush_dcache_range
+	.globl	clean_dcache_range
+	.globl	inv_dcache_range
+	.globl	dcsw_op_louis
+	.globl	dcsw_op_all
+	.globl	dcsw_op_level1
+	.globl	dcsw_op_level2
+	.globl	dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
+	/* Exit early if size is zero */
+	cmp	r1, #0
+	beq	exit_loop_\op
+	dcache_line_size r2, r3
+	add	r1, r0, r1
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+loop_\op:
+	stcopr	r0, \coproc, \opc1, \CRn, \CRm, \opc2
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	loop_\op
+	dsb	sy
+exit_loop_\op:
+	bx	lr
+.endm
+
+	/* ------------------------------------------
+	 * Clean+Invalidate from base address till
+	 * size. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func flush_dcache_range
+	do_dcache_maintenance_by_mva cimvac, DCCIMVAC
+endfunc flush_dcache_range
+
+	/* ------------------------------------------
+	 * Clean from base address till size.
+	 * 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func clean_dcache_range
+	do_dcache_maintenance_by_mva cmvac, DCCMVAC
+endfunc clean_dcache_range
+
+	/* ------------------------------------------
+	 * Invalidate from base address till
+	 * size. 'r0' = addr, 'r1' = size
+	 * ------------------------------------------
+	 */
+func inv_dcache_range
+	do_dcache_maintenance_by_mva imvac, DCIMVAC
+endfunc inv_dcache_range
+
+	/* ----------------------------------------------------------------
+	 * Data cache operations by set/way to the level specified
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * r1: The cache level to begin operation from
+	 * r2: clidr_el1
+	 * r3: The last cache level to operate on
+	 * and will carry out the operation on each data cache from level 0
+	 * to the level in r3 in sequence
+	 *
+	 * The dcsw_op macro sets up the r2 and r3 parameters based on
+	 * clidr_el1 cache information before invoking the main function
+	 * ----------------------------------------------------------------
+	 */
+
+	.macro	dcsw_op shift, fw, ls
+	ldcopr	r2, CLIDR
+	ubfx	r3, r2, \shift, \fw
+	lsl	r3, r3, \ls
+	mov	r1, #0
+	b	do_dcsw_op
+	.endm
+
+func do_dcsw_op
+	push	{r4-r12,lr}
+	adr	r11, dcsw_loop_table	// compute cache op based on the operation type
+	add	r6, r11, r0, lsl #3	// cache op is 2x32-bit instructions
+loop1:
+	add	r10, r1, r1, LSR #1	// Work out 3x current cache level
+	mov	r12, r2, LSR r10	// extract cache type bits from clidr
+	and	r12, r12, #7   		// mask the bits for current cache only
+	cmp	r12, #2			// see what cache we have at this level
+	blo	level_done      	// no cache or only instruction cache at this level
+
+	stcopr	r1, CSSELR		// select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+	ldcopr	r12, CCSIDR		// read the new ccsidr
+	and	r10, r12, #7   		// extract the length of the cache lines
+	add	r10, r10, #4        	// add 4 (r10 = line length offset)
+	ubfx	r4, r12, #3, #10	// r4 = maximum way number (right aligned)
+	clz	r5, r4            	// r5 = the bit position of the way size increment
+	mov	r9, r4			// r9 working copy of the aligned max way number
+
+loop2:
+	ubfx	r7, r12, #13, #15	// r7 = max set number (right aligned)
+
+loop3:
+	orr	r0, r1, r9, LSL r5	// factor in the way number and cache level into r0
+	orr	r0, r0, r7, LSL r10	// factor in the set number
+
+	blx	r6
+	subs	r7, r7, #1              // decrement the set number
+	bhs	loop3
+	subs	r9, r9, #1              // decrement the way number
+	bhs	loop2
+level_done:
+	add	r1, r1, #2		// increment the cache number
+	cmp	r3, r1
+	dsb	sy			// ensure completion of previous cache maintenance instruction
+	bhi	loop1
+
+	mov	r6, #0
+	stcopr	r6, CSSELR		// select cache level 0 in csselr
+	dsb	sy
+	isb
+	pop	{r4-r12,pc}
+
+dcsw_loop_table:
+	stcopr	r0, DCISW
+	bx	lr
+	stcopr	r0, DCCISW
+	bx	lr
+	stcopr	r0, DCCSW
+	bx	lr
+
+endfunc do_dcsw_op
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoU.
+	 *
+	 * The function requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_louis
+	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc	dcsw_op_louis
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way till PoC.
+	 *
+	 * The function requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_all
+	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc	dcsw_op_all
+
+
+	/* ---------------------------------------------------------------
+	 *  Helper macro for data cache operations by set/way for the
+	 *  level specified
+	 * ---------------------------------------------------------------
+	 */
+	.macro	dcsw_op_level level
+	ldcopr	r2, CLIDR
+	mov	r3, \level
+	sub	r1, r3, #2
+	b	do_dcsw_op
+	.endm
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 1 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level1
+	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 2 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level2
+	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+	/* ---------------------------------------------------------------
+	 * Data cache operations by set/way for level 3 cache
+	 *
+	 * The main function, do_dcsw_op requires:
+	 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
+	 * as defined in arch.h
+	 * ---------------------------------------------------------------
+	 */
+func dcsw_op_level3
+	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
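To make the set/way walk in do_dcsw_op easier to follow, here is a rough C rendering of the same iteration. It takes the CLIDR value and per-level CCSIDR reads as inputs instead of reading CP15 directly, and the callback stands in for the DC ISW/CSW/CISW instruction selected via dcsw_loop_table; this is a sketch of the loop structure only, not code the firmware uses.

#include <stdint.h>

typedef uint32_t (*read_ccsidr_fn)(uint32_t level);	/* CCSIDR for a level */
typedef void (*dc_setway_fn)(uint32_t setway);		/* DC ISW/CSW/CISW */

static void dcsw_op_sketch(uint32_t clidr, uint32_t last_level,
			   read_ccsidr_fn read_ccsidr, dc_setway_fn dc_op)
{
	for (uint32_t level = 0; level < last_level; level++) {
		uint32_t ctype = (clidr >> (3 * level)) & 0x7;
		if (ctype < 2)		/* no cache, or I-cache only */
			continue;

		uint32_t ccsidr = read_ccsidr(level);
		uint32_t line_shift = (ccsidr & 0x7) + 4;	/* log2(line bytes) */
		uint32_t max_way = (ccsidr >> 3) & 0x3ff;
		uint32_t max_set = (ccsidr >> 13) & 0x7fff;
		/* align the way number to the top bits, as clz does in the asm */
		uint32_t way_shift = max_way ? (uint32_t)__builtin_clz(max_way) : 0;

		for (int32_t way = (int32_t)max_way; way >= 0; way--)
			for (int32_t set = (int32_t)max_set; set >= 0; set--)
				dc_op(((uint32_t)way << way_shift) |
				      ((uint32_t)set << line_shift) |
				      (level << 1));
	}
}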
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
new file mode 100644
index 0000000..77cf6cd
--- /dev/null
+++ b/lib/aarch32/misc_helpers.S
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	smc
+	.globl	zeromem
+	.globl	zero_normalmem
+	.globl	memcpy4
+	.globl	disable_mmu_icache_secure
+	.globl	disable_mmu_secure
+
+func smc
+	/*
+	 * For AArch32, only r0-r3 are passed in registers; the remaining
+	 * arguments (r4-r6) are pushed onto the stack by the caller, so load
+	 * them from the stack into registers r4-r6 explicitly here.
+	 * Clobbers: r4-r6
+	 */
+	ldm	sp, {r4, r5, r6}
+	smc	#0
+endfunc smc
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length)
+ *
+ * Initialise a region in normal memory to 0. This function complies with the
+ * AAPCS and can be called from C code.
+ *
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+	/*
+	 * Readable names for registers
+	 *
+	 * Registers r0, r1 and r2 are also set by zeromem which
+	 * branches into the fallback path directly, so cursor, length and
+	 * stop_address should not be retargeted to other registers.
+	 */
+	cursor       .req r0 /* Start address and then current address */
+	length       .req r1 /* Length in bytes of the region to zero out */
+	/*
+	 * The r1 register is reused here because length is only needed at the
+	 * beginning of the function.
+	 */
+	stop_address .req r1  /* Address past the last zeroed byte */
+	zeroreg1     .req r2  /* Source register filled with 0 */
+	zeroreg2     .req r3  /* Source register filled with 0 */
+	tmp	     .req r12 /* Temporary scratch register */
+
+	mov	zeroreg1, #0
+
+	/* stop_address is the address past the last byte to zero */
+	add	stop_address, cursor, length
+
+	/*
+	 * Length cannot be used anymore as it shares the same register with
+	 * stop_address.
+	 */
+	.unreq	length
+
+	/*
+	 * If the start address is already aligned to 8 bytes, skip this loop.
+	 */
+	tst	cursor, #(8-1)
+	beq	.Lzeromem_8bytes_aligned
+
+	/* Calculate the next address aligned to 8 bytes */
+	orr	tmp, cursor, #(8-1)
+	adds	tmp, tmp, #1
+	/* If it overflows, fallback to byte per byte zeroing */
+	beq	.Lzeromem_1byte_aligned
+	/* If the next aligned address is after the stop address, fall back */
+	cmp	tmp, stop_address
+	bhs	.Lzeromem_1byte_aligned
+
+	/* zero byte per byte */
+1:
+	strb	zeroreg1, [cursor], #1
+	cmp	cursor, tmp
+	bne	1b
+
+	/* zero 8 bytes at a time */
+.Lzeromem_8bytes_aligned:
+
+	/* Calculate the last 8-byte-aligned address. */
+	bic	tmp, stop_address, #(8-1)
+
+	cmp	cursor, tmp
+	bhs	2f
+
+	mov	zeroreg2, #0
+1:
+	stmia	cursor!, {zeroreg1, zeroreg2}
+	cmp	cursor, tmp
+	blo	1b
+2:
+
+	/* zero byte per byte */
+.Lzeromem_1byte_aligned:
+	cmp	cursor, stop_address
+	beq	2f
+1:
+	strb	zeroreg1, [cursor], #1
+	cmp	cursor, stop_address
+	bne	1b
+2:
+	bx	lr
+
+	.unreq	cursor
+	/*
+	 * length is already unreq'ed to reuse the register for another
+	 * variable.
+	 */
+	.unreq	stop_address
+	.unreq	zeroreg1
+	.unreq	zeroreg2
+	.unreq	tmp
+endfunc zeromem
+
+/*
+ * Unlike AArch64, AArch32 has no dedicated instruction (such as DC ZVA) for
+ * zeroing normal memory, so zero_normalmem is simply aliased to zeromem.
+ */
+.equ	zero_normalmem, zeromem
+
+/* --------------------------------------------------------------------------
+ * void memcpy4(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 4-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy4
+#if ENABLE_ASSERTIONS
+	orr	r3, r0, r1
+	tst	r3, #0x3
+	ASM_ASSERT(eq)
+#endif
+/* copy 4 bytes at a time */
+m_loop4:
+	cmp	r2, #4
+	blo	m_loop1
+	ldr	r3, [r1], #4
+	str	r3, [r0], #4
+	sub	r2, r2, #4
+	b	m_loop4
+/* copy byte per byte */
+m_loop1:
+	cmp	r2,#0
+	beq	m_end
+	ldrb	r3, [r1], #1
+	strb	r3, [r0], #1
+	subs	r2, r2, #1
+	bne	m_loop1
+m_end:
+	bx	lr
+endfunc memcpy4
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU in Secure State
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_secure
+	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+	ldcopr	r0, SCTLR
+	bic	r0, r0, r1
+	stcopr	r0, SCTLR
+	isb				// ensure MMU is off
+	dsb	sy
+	bx	lr
+endfunc disable_mmu_secure
+
+
+func disable_mmu_icache_secure
+	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+	b	do_disable_mmu
+endfunc disable_mmu_icache_secure
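For reference only, memcpy4 above behaves like the following C (word copies while at least four bytes remain, then a byte tail); the assembly additionally asserts 4-byte alignment of both pointers when ENABLE_ASSERTIONS is set.

#include <stdint.h>

/* reference model of memcpy4: non-overlapping, 4-byte-aligned src/dest */
static void memcpy4_ref(void *dest, const void *src, unsigned int length)
{
	uint32_t *d = dest;
	const uint32_t *s = src;

	while (length >= 4) {		/* copy 4 bytes at a time */
		*d++ = *s++;
		length -= 4;
	}

	uint8_t *db = (uint8_t *)d;	/* copy the remainder byte per byte */
	const uint8_t *sb = (const uint8_t *)s;
	while (length--)
		*db++ = *sb++;
}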
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index dc60102..9c40b9d 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,37 +1,14 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
 
 	.globl	flush_dcache_range
+	.globl	clean_dcache_range
 	.globl	inv_dcache_range
 	.globl	dcsw_op_louis
 	.globl	dcsw_op_all
@@ -39,24 +16,42 @@
 	.globl	dcsw_op_level2
 	.globl	dcsw_op_level3
 
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op
+	/* Exit early if size is zero */
+	cbz	x1, exit_loop_\op
+	dcache_line_size x2, x3
+	add	x1, x0, x1
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+loop_\op:
+	dc	\op, x0
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo    loop_\op
+	dsb	sy
+exit_loop_\op:
+	ret
+.endm
 	/* ------------------------------------------
 	 * Clean+Invalidate from base address till
 	 * size. 'x0' = addr, 'x1' = size
 	 * ------------------------------------------
 	 */
 func flush_dcache_range
-	dcache_line_size x2, x3
-	add	x1, x0, x1
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-flush_loop:
-	dc	civac, x0
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo    flush_loop
-	dsb	sy
-	ret
+	do_dcache_maintenance_by_mva civac
+endfunc flush_dcache_range
 
+	/* ------------------------------------------
+	 * Clean from base address till size.
+	 * 'x0' = addr, 'x1' = size
+	 * ------------------------------------------
+	 */
+func clean_dcache_range
+	do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
 
 	/* ------------------------------------------
 	 * Invalidate from base address till
@@ -64,17 +59,8 @@
 	 * ------------------------------------------
 	 */
 func inv_dcache_range
-	dcache_line_size x2, x3
-	add	x1, x0, x1
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-inv_loop:
-	dc	ivac, x0
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo    inv_loop
-	dsb	sy
-	ret
+	do_dcache_maintenance_by_mva ivac
+endfunc inv_dcache_range
 
 
 	/* ---------------------------------------------------------------
@@ -112,7 +98,7 @@
 	lsr	x1, x0, x2		// extract cache type bits from clidr
 	and	x1, x1, #7		// mask the bits for current cache only
 	cmp	x1, #2			// see what cache we have at this level
-	b.lt	level_done		// nothing to do if no cache or icache
+	b.lo	level_done		// nothing to do if no cache or icache
 
 	msr	csselr_el1, x10		// select current cache level in csselr
 	isb				// isb to sych the new cssr&csidr
@@ -137,10 +123,10 @@
 	orr	w11, w9, w7		// combine cache, way and set number
 	dc	\_op, x11
 	subs	w7, w7, w17		// decrement set number
-	b.ge	loop3_\_op
+	b.hs	loop3_\_op
 
 	subs	x9, x9, x16		// decrement way number
-	b.ge	loop2_\_op
+	b.hs	loop2_\_op
 
 	b	level_done
 	.endm
@@ -148,12 +134,13 @@
 level_done:
 	add	x10, x10, #2		// increment cache number
 	cmp	x3, x10
-	b.gt    loop1
+	b.hi    loop1
 	msr	csselr_el1, xzr		// select cache level 0 in csselr
 	dsb	sy			// barrier to complete final cache operation
 	isb
 exit:
 	ret
+endfunc do_dcsw_op
 
 dcsw_loop_table:
 	dcsw_loop isw
@@ -163,10 +150,12 @@
 
 func dcsw_op_louis
 	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
 
 
 func dcsw_op_all
 	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
 
 	/* ---------------------------------------------------------------
 	 *  Helper macro for data cache operations by set/way for the
@@ -189,6 +178,7 @@
 	 */
 func dcsw_op_level1
 	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 2 cache
@@ -199,6 +189,7 @@
 	 */
 func dcsw_op_level2
 	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 3 cache
@@ -209,3 +200,4 @@
 	 */
 func dcsw_op_level3
 	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
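The new do_dcache_maintenance_by_mva macro factors the clean/invalidate loops into one body; conceptually it does the following (a C sketch, with the callback standing in for DC CIVAC/CVAC/IVAC and line_size obtained as dcache_line_size does from CTR_EL0):

#include <stddef.h>
#include <stdint.h>

static void dcache_op_by_mva(uintptr_t addr, size_t size, size_t line_size,
			     void (*cache_op)(uintptr_t line))
{
	if (size == 0)			/* exit early if size is zero */
		return;

	uintptr_t end = addr + size;
	addr &= ~((uintptr_t)line_size - 1);	/* align base down to a line */

	while (addr < end) {
		cache_op(addr);		/* DC <op>, addr */
		addr += line_size;
	}
	/* the assembly then issues DSB SY before returning */
}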
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index f605bf4..9dfe46a 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -37,10 +13,14 @@
 	.globl	eret
 	.globl	smc
 
+	.globl	zero_normalmem
+	.globl	zeromem
 	.globl	zeromem16
 	.globl	memcpy16
 
+	.globl	disable_mmu_el1
 	.globl	disable_mmu_el3
+	.globl	disable_mmu_icache_el1
 	.globl	disable_mmu_icache_el3
 
 #if SUPPORT_VFP
@@ -53,6 +33,7 @@
 	mov	x1, #MPIDR_AFFLVL_SHIFT
 	lsl	x0, x0, x1
 	ret
+endfunc get_afflvl_shift
 
 func mpidr_mask_lower_afflvls
 	cmp	x1, #3
@@ -62,43 +43,375 @@
 	lsr	x0, x0, x2
 	lsl	x0, x0, x2
 	ret
+endfunc mpidr_mask_lower_afflvls
 
 
 func eret
 	eret
+endfunc eret
 
 
 func smc
 	smc	#0
+endfunc smc
 
 /* -----------------------------------------------------------------------
  * void zeromem16(void *mem, unsigned int length);
  *
  * Initialise a memory region to 0.
  * The memory address must be 16-byte aligned.
+ * NOTE: This function is deprecated and zeromem should be used instead.
  * -----------------------------------------------------------------------
  */
-func zeromem16
-#if ASM_ASSERTION
-	tst	x0, #0xf
-	ASM_ASSERT(eq)
-#endif
-	add	x2, x0, x1
-/* zero 16 bytes at a time */
-z_loop16:
-	sub	x3, x2, x0
-	cmp	x3, #16
-	b.lt	z_loop1
-	stp	xzr, xzr, [x0], #16
-	b	z_loop16
-/* zero byte per byte */
-z_loop1:
-	cmp	x0, x2
-	b.eq	z_end
-	strb	wzr, [x0], #1
-	b	z_loop1
-z_end:	ret
+.equ	zeromem16, zeromem
 
+/* -----------------------------------------------------------------------
+ * void zero_normalmem(void *mem, unsigned int length);
+ *
+ * Initialise a region in normal memory to 0. This function complies with the
+ * AAPCS and can be called from C code.
+ *
+ * NOTE: The MMU must be enabled when using this function, as it can only
+ *       operate on normal memory. It is intended mainly for use from C code,
+ *       where the MMU is usually enabled.
+ * -----------------------------------------------------------------------
+ */
+.equ	zero_normalmem, zeromem_dczva
+
+/* -----------------------------------------------------------------------
+ * void zeromem(void *mem, unsigned int length);
+ *
+ * Initialise a region of device memory to 0. This function complies with the
+ * AAPCS and can be called from C code.
+ *
+ * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
+ *       used instead for faster zeroing.
+ *
+ * -----------------------------------------------------------------------
+ */
+func zeromem
+	/* x2 is the address past the last zeroed address */
+	add	x2, x0, x1
+	/*
+	 * Uses the fallback path that does not use DC ZVA instruction and
+	 * therefore does not need enabled MMU
+	 */
+	b	.Lzeromem_dczva_fallback_entry
+endfunc zeromem
+
+/* -----------------------------------------------------------------------
+ * void zeromem_dczva(void *mem, unsigned int length);
+ *
+ * Fill a region of normal memory of size "length" in bytes with null bytes.
+ * The MMU must be enabled and the memory must be of
+ * Normal type. This is because this function internally uses the DC ZVA
+ * instruction, which generates an Alignment fault if used on any type of
+ * Device memory (see section D3.4.9 of the ARMv8 ARM, issue k). When the MMU
+ * is disabled, all memory behaves like Device-nGnRnE memory (see section
+ * D4.2.8), hence the requirement on the MMU being enabled.
+ * NOTE: The code assumes that the block size as defined in DCZID_EL0
+ *       register is at least 16 bytes.
+ *
+ * -----------------------------------------------------------------------
+ */
+func zeromem_dczva
+
+	/*
+	 * The function consists of a series of loops that zero memory one byte
+	 * at a time, 16 bytes at a time or using the DC ZVA instruction to
+	 * zero aligned blocks of bytes, whose size is assumed to be at least 16.
+	 * In the case where the DC ZVA instruction cannot be used, or if the
+	 * first 16 bytes loop would overflow, there is a fallback path that does
+	 * not use DC ZVA.
+	 * Note: The fallback path is also used by the zeromem function that
+	 *       branches to it directly.
+	 *
+	 *              +---------+   zeromem_dczva
+	 *              |  entry  |
+	 *              +----+----+
+	 *                   |
+	 *                   v
+	 *              +---------+
+	 *              | checks  |>o-------+ (If any check fails, fallback)
+	 *              +----+----+         |
+	 *                   |              |---------------+
+	 *                   v              | Fallback path |
+	 *            +------+------+       |---------------+
+	 *            | 1 byte loop |       |
+	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
+	 *                   |              |
+	 *                   v              |
+	 *           +-------+-------+      |
+	 *           | 16 bytes loop |      |
+	 *           +-------+-------+      |
+	 *                   |              |
+	 *                   v              |
+	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
+	 *            | DC ZVA loop |       |
+	 *            +------+------+       |
+	 *       +--------+  |              |
+	 *       |        |  |              |
+	 *       |        v  v              |
+	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
+	 *       |   | 16 bytes loop |      |
+	 *       |   +-------+-------+      |
+	 *       |           |              |
+	 *       |           v              |
+	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
+	 *       |    | 1 byte loop |       |
+	 *       |    +-------------+       |
+	 *       |           |              |
+	 *       |           v              |
+	 *       |       +---+--+           |
+	 *       |       | exit |           |
+	 *       |       +------+           |
+	 *       |			    |
+	 *       |           +--------------+    +------------------+ zeromem
+	 *       |           |  +----------------| zeromem function |
+	 *       |           |  |                +------------------+
+	 *       |           v  v
+	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
+	 *       |    | 1 byte loop |
+	 *       |    +------+------+
+	 *       |           |
+	 *       +-----------+
+	 */
+
+	/*
+	 * Readable names for registers
+	 *
+	 * Registers x0, x1 and x2 are also set by zeromem which
+	 * branches into the fallback path directly, so cursor, length and
+	 * stop_address should not be retargeted to other registers.
+	 */
+	cursor       .req x0 /* Start address and then current address */
+	length       .req x1 /* Length in bytes of the region to zero out */
+	/* Reusing x1 as length is never used after block_mask is set */
+	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
+	stop_address .req x2 /* Address past the last zeroed byte */
+	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
+	tmp1         .req x4
+	tmp2         .req x5
+
+#if ENABLE_ASSERTIONS
+	/*
+	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
+	 * register value and panic if the MMU is disabled.
+	 */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
+	mrs	tmp1, sctlr_el3
+#else
+	mrs	tmp1, sctlr_el1
+#endif
+
+	tst	tmp1, #SCTLR_M_BIT
+	ASM_ASSERT(ne)
+#endif /* ENABLE_ASSERTIONS */
+
+	/* stop_address is the address past the last byte to zero */
+	add	stop_address, cursor, length
+
+	/*
+	 * Read dczid_el0, whose low bits encode log2(<block size in words>)
+	 * (see the encoding of the dczid_el0 register).
+	 */
+	mrs	block_size, dczid_el0
+
+	/*
+	 * Select the 4 lowest bits and convert the extracted log2(<block size
+	 * in words>) to <block size in bytes>
+	 */
+	ubfx	block_size, block_size, #0, #4
+	mov	tmp2, #(1 << 2)
+	lsl	block_size, tmp2, block_size
+
+#if ENABLE_ASSERTIONS
+	/*
+	 * Assumes block size is at least 16 bytes to avoid manual realignment
+	 * of the cursor at the end of the DCZVA loop.
+	 */
+	cmp	block_size, #16
+	ASM_ASSERT(hs)
+#endif
+	/*
+	 * A region smaller than a block is not worth the setup below, and this
+	 * check also prevents zeroing a whole block when the area to zero is
+	 * smaller than one. In addition, as the block size is assumed to be at
+	 * least 16 bytes, this check protects the initial aligning loops from
+	 * trying to zero 16 bytes when length is less than 16.
+	 */
+	cmp	length, block_size
+	b.lo	.Lzeromem_dczva_fallback_entry
+
+	/*
+	 * Calculate the bitmask of the block alignment. It will never
+	 * underflow as the block size is between 4 bytes and 2kB.
+	 * block_mask = block_size - 1
+	 */
+	sub	block_mask, block_size, #1
+
+	/*
+	 * length alias should not be used after this point unless it is
+	 * defined as a register other than block_mask's.
+	 */
+	 .unreq length
+
+	/*
+	 * If the start address is already aligned to zero block size, go
+	 * straight to the cache zeroing loop. This is safe because at this
+	 * point, the length cannot be smaller than a block size.
+	 */
+	tst	cursor, block_mask
+	b.eq	.Lzeromem_dczva_blocksize_aligned
+
+	/*
+	 * Calculate the first block-size-aligned address. It is assumed that
+	 * the zero block size is at least 16 bytes. This address is the last
+	 * address of this initial loop.
+	 */
+	orr	tmp1, cursor, block_mask
+	add	tmp1, tmp1, #1
+
+	/*
+	 * If the addition overflows, skip the cache zeroing loops. This is
+	 * quite unlikely however.
+	 */
+	cbz	tmp1, .Lzeromem_dczva_fallback_entry
+
+	/*
+	 * If the first block-size-aligned address is past the last address,
+	 * fallback to the simpler code.
+	 */
+	cmp	tmp1, stop_address
+	b.hi	.Lzeromem_dczva_fallback_entry
+
+	/*
+	 * If the start address is already aligned to 16 bytes, skip this loop.
+	 * It is safe to do this because tmp1 (the stop address of the initial
+	 * 16 bytes loop) will never be greater than the final stop address.
+	 */
+	tst	cursor, #0xf
+	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end
+
+	/* Calculate the next address aligned to 16 bytes */
+	orr	tmp2, cursor, #0xf
+	add	tmp2, tmp2, #1
+	/* If it overflows, fallback to the simple path (unlikely) */
+	cbz	tmp2, .Lzeromem_dczva_fallback_entry
+	/*
+	 * Next aligned address cannot be after the stop address because the
+	 * length cannot be smaller than 16 at this point.
+	 */
+
+	/* First loop: zero byte per byte */
+1:
+	strb	wzr, [cursor], #1
+	cmp	cursor, tmp2
+	b.ne	1b
+.Lzeromem_dczva_initial_1byte_aligned_end:
+
+	/*
+	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
+	 * before being able to use the code that deals with block-size-aligned
+	 * addresses.
+	 */
+	cmp	cursor, tmp1
+	b.hs	2f
+1:
+	stp	xzr, xzr, [cursor], #16
+	cmp	cursor, tmp1
+	b.lo	1b
+2:
+
+	/*
+	 * Third loop: zero a block at a time using DC ZVA cache block zeroing
+	 * instruction.
+	 */
+.Lzeromem_dczva_blocksize_aligned:
+	/*
+	 * Calculate the last block-size-aligned address. If the result equals
+	 * the start address, the loop will exit immediately.
+	 */
+	bic	tmp1, stop_address, block_mask
+
+	cmp	cursor, tmp1
+	b.hs	2f
+1:
+	/* Zero the block containing the cursor */
+	dc	zva, cursor
+	/* Increment the cursor by the size of a block */
+	add	cursor, cursor, block_size
+	cmp	cursor, tmp1
+	b.lo	1b
+2:
+
+	/*
+	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
+	 * remaining area
+	 */
+.Lzeromem_dczva_final_16bytes_aligned:
+	/*
+	 * Calculate the last 16-byte-aligned address. It is assumed that the
+	 * block size is never smaller than 16 bytes, so that the current
+	 * cursor is aligned to at least a 16-byte boundary.
+	 */
+	bic	tmp1, stop_address, #15
+
+	cmp	cursor, tmp1
+	b.hs	2f
+1:
+	stp	xzr, xzr, [cursor], #16
+	cmp	cursor, tmp1
+	b.lo	1b
+2:
+
+	/* Fifth and final loop: zero byte per byte */
+.Lzeromem_dczva_final_1byte_aligned:
+	cmp	cursor, stop_address
+	b.eq	2f
+1:
+	strb	wzr, [cursor], #1
+	cmp	cursor, stop_address
+	b.ne	1b
+2:
+	ret
+
+	/* Fallback for unaligned start addresses */
+.Lzeromem_dczva_fallback_entry:
+	/*
+	 * If the start address is already aligned to 16 bytes, skip this loop.
+	 */
+	tst	cursor, #0xf
+	b.eq	.Lzeromem_dczva_final_16bytes_aligned
+
+	/* Calculate the next address aligned to 16 bytes */
+	orr	tmp1, cursor, #15
+	add	tmp1, tmp1, #1
+	/* If it overflows, fallback to byte per byte zeroing */
+	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
+	/* If the next aligned address is after the stop address, fall back */
+	cmp	tmp1, stop_address
+	b.hs	.Lzeromem_dczva_final_1byte_aligned
+
+	/* Fallback entry loop: zero byte per byte */
+1:
+	strb	wzr, [cursor], #1
+	cmp	cursor, tmp1
+	b.ne	1b
+
+	b	.Lzeromem_dczva_final_16bytes_aligned
+
+	.unreq	cursor
+	/*
+	 * length is already unreq'ed to reuse the register for another
+	 * variable.
+	 */
+	.unreq	stop_address
+	.unreq	block_size
+	.unreq	block_mask
+	.unreq	tmp1
+	.unreq	tmp2
+endfunc zeromem_dczva
 
 /* --------------------------------------------------------------------------
  * void memcpy16(void *dest, const void *src, unsigned int length)
@@ -109,7 +422,7 @@
  * --------------------------------------------------------------------------
  */
 func memcpy16
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	orr	x3, x0, x1
 	tst	x3, #0xf
 	ASM_ASSERT(eq)
@@ -117,7 +430,7 @@
 /* copy 16 bytes at a time */
 m_loop16:
 	cmp	x2, #16
-	b.lt	m_loop1
+	b.lo	m_loop1
 	ldp	x3, x4, [x1], #16
 	stp	x3, x4, [x0], #16
 	sub	x2, x2, #16
@@ -129,30 +442,53 @@
 	strb	w3, [x0], #1
 	subs	x2, x2, #1
 	b.ne	m_loop1
-m_end:	ret
+m_end:
+	ret
+endfunc memcpy16
 
 /* ---------------------------------------------------------------------------
  * Disable the MMU at EL3
- * This is implemented in assembler to ensure that the data cache is cleaned
- * and invalidated after the MMU is disabled without any intervening cacheable
- * data accesses
  * ---------------------------------------------------------------------------
  */
 
 func disable_mmu_el3
 	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
-do_disable_mmu:
+do_disable_mmu_el3:
 	mrs	x0, sctlr_el3
 	bic	x0, x0, x1
 	msr	sctlr_el3, x0
-	isb				// ensure MMU is off
-	mov	x0, #DCCISW		// DCache clean and invalidate
-	b	dcsw_op_all
+	isb	/* ensure MMU is off */
+	dsb	sy
+	ret
+endfunc disable_mmu_el3
 
 
 func disable_mmu_icache_el3
 	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
-	b	do_disable_mmu
+	b	do_disable_mmu_el3
+endfunc disable_mmu_icache_el3
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL1
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el1
+	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu_el1:
+	mrs	x0, sctlr_el1
+	bic	x0, x0, x1
+	msr	sctlr_el1, x0
+	isb	/* ensure MMU is off */
+	dsb	sy
+	ret
+endfunc disable_mmu_el1
+
+
+func disable_mmu_icache_el1
+	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+	b	do_disable_mmu_el1
+endfunc disable_mmu_icache_el1
 
 /* ---------------------------------------------------------------------------
  * Enable the use of VFP at EL3
@@ -169,4 +505,5 @@
 	msr	cptr_el3, x0
 	isb
 	ret
+endfunc enable_vfp
 #endif
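At a higher level, the five-loop zeromem_dczva strategy amounts to the following C sketch, under the assumptions the assembly states (MMU on, block size a power of two of at least 16 bytes). dc_zva() stands in for the DC ZVA instruction, and memset stands in for the byte/16-byte alignment loops, which the assembly spells out by hand because it cannot rely on a C library.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void zeromem_dczva_sketch(uint8_t *mem, size_t length, size_t block_size,
				 void (*dc_zva)(uint8_t *block))
{
	uint8_t *stop = mem + length;

	if (length < block_size) {	/* fallback path: not worth DC ZVA */
		memset(mem, 0, length);
		return;
	}

	/* loops 1+2: zero up to the first block-size-aligned address */
	uint8_t *aligned = (uint8_t *)(((uintptr_t)mem + block_size - 1) &
				       ~((uintptr_t)block_size - 1));
	memset(mem, 0, (size_t)(aligned - mem));
	mem = aligned;

	/* loop 3: zero one whole block at a time with DC ZVA */
	while ((size_t)(stop - mem) >= block_size) {
		dc_zva(mem);
		mem += block_size;
	}

	/* loops 4+5: zero the tail that is smaller than a block */
	memset(mem, 0, (size_t)(stop - mem));
}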
diff --git a/lib/aarch64/xlat_helpers.c b/lib/aarch64/xlat_helpers.c
deleted file mode 100644
index d401ffc..0000000
--- a/lib/aarch64/xlat_helpers.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <assert.h>
-
-/*******************************************************************************
- * Helper to create a level 1/2 table descriptor which points to a level 2/3
- * table.
- ******************************************************************************/
-unsigned long create_table_desc(unsigned long *next_table_ptr)
-{
-	unsigned long desc = (unsigned long) next_table_ptr;
-
-	/* Clear the last 12 bits */
-	desc >>= FOUR_KB_SHIFT;
-	desc <<= FOUR_KB_SHIFT;
-
-	desc |= TABLE_DESC;
-
-	return desc;
-}
-
-/*******************************************************************************
- * Helper to create a level 1/2/3 block descriptor which maps the va to addr
- ******************************************************************************/
-unsigned long create_block_desc(unsigned long desc,
-				unsigned long addr,
-				unsigned int level)
-{
-	switch (level) {
-	case LEVEL1:
-		desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC;
-		break;
-	case LEVEL2:
-		desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC;
-		break;
-	case LEVEL3:
-		desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC;
-		break;
-	default:
-		assert(0);
-	}
-
-	return desc;
-}
-
-/*******************************************************************************
- * Helper to create a level 1/2/3 block descriptor which maps the va to output_
- * addr with Device nGnRE attributes.
- ******************************************************************************/
-unsigned long create_device_block(unsigned long output_addr,
-				  unsigned int level,
-				  unsigned int ns)
-{
-	unsigned long upper_attrs, lower_attrs, desc;
-
-	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW);
-	lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX);
-	upper_attrs = UPPER_ATTRS(XN);
-	desc = upper_attrs | lower_attrs;
-
-	return create_block_desc(desc, output_addr, level);
-}
-
-/*******************************************************************************
- * Helper to create a level 1/2/3 block descriptor which maps the va to output_
- * addr with inner-shareable normal wbwa read-only memory attributes.
- ******************************************************************************/
-unsigned long create_romem_block(unsigned long output_addr,
-				 unsigned int level,
-				 unsigned int ns)
-{
-	unsigned long upper_attrs, lower_attrs, desc;
-
-	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO);
-	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
-	upper_attrs = UPPER_ATTRS(0ull);
-	desc = upper_attrs | lower_attrs;
-
-	return create_block_desc(desc, output_addr, level);
-}
-
-/*******************************************************************************
- * Helper to create a level 1/2/3 block descriptor which maps the va to output_
- * addr with inner-shareable normal wbwa read-write memory attributes.
- ******************************************************************************/
-unsigned long create_rwmem_block(unsigned long output_addr,
-				 unsigned int level,
-				 unsigned int ns)
-{
-	unsigned long upper_attrs, lower_attrs, desc;
-
-	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW);
-	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
-	upper_attrs = UPPER_ATTRS(XN);
-	desc = upper_attrs | lower_attrs;
-
-	return create_block_desc(desc, output_addr, level);
-}
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
index ddc9ba8..04cbf3c 100644
--- a/lib/aarch64/xlat_tables.c
+++ b/lib/aarch64/xlat_tables.c
@@ -1,351 +1,14 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cassert.h>
-#include <platform_def.h>
-#include <string.h>
-#include <xlat_tables.h>
-
-
-#ifndef DEBUG_XLAT_TABLE
-#define DEBUG_XLAT_TABLE 0
-#endif
-
-#if DEBUG_XLAT_TABLE
-#define debug_print(...) printf(__VA_ARGS__)
-#else
-#define debug_print(...) ((void)0)
-#endif
-
-CASSERT(ADDR_SPACE_SIZE > 0, assert_valid_addr_space_size);
-
-#define UNSET_DESC	~0ul
-
-#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-
-static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
-__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
-
-static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
-__aligned(XLAT_TABLE_SIZE) __attribute__((section("xlat_table")));
-
-static unsigned next_xlat;
-static unsigned long max_pa;
-static unsigned long max_va;
-static unsigned long tcr_ps_bits;
-
 /*
- * Array of all memory regions stored in order of ascending base address.
- * The list is terminated by the first entry with size == 0.
+ * This file is deprecated and is retained here only for compatibility.
+ * The xlat_tables library can be found in `lib/xlat_tables` directory.
  */
-static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
-
-
-static void print_mmap(void)
-{
-#if DEBUG_XLAT_TABLE
-	debug_print("mmap:\n");
-	mmap_region_t *mm = mmap;
-	while (mm->size) {
-		debug_print(" %010lx %010lx %10lx %x\n", mm->base_va,
-					mm->base_pa, mm->size, mm->attr);
-		++mm;
-	};
-	debug_print("\n");
+#if !ERROR_DEPRECATED
+#include "../xlat_tables/xlat_tables_common.c"
+#include "../xlat_tables/aarch64/xlat_tables.c"
 #endif
-}
-
-void mmap_add_region(unsigned long base_pa, unsigned long base_va,
-			unsigned long size, unsigned attr)
-{
-	mmap_region_t *mm = mmap;
-	mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;
-	unsigned long pa_end = base_pa + size - 1;
-	unsigned long va_end = base_va + size - 1;
-
-	assert(IS_PAGE_ALIGNED(base_pa));
-	assert(IS_PAGE_ALIGNED(base_va));
-	assert(IS_PAGE_ALIGNED(size));
-
-	if (!size)
-		return;
-
-	/* Find correct place in mmap to insert new region */
-	while (mm->base_va < base_va && mm->size)
-		++mm;
-
-	/* Make room for new region by moving other regions up by one place */
-	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
-
-	/* Check we haven't lost the empty sentinal from the end of the array */
-	assert(mm_last->size == 0);
-
-	mm->base_pa = base_pa;
-	mm->base_va = base_va;
-	mm->size = size;
-	mm->attr = attr;
-
-	if (pa_end > max_pa)
-		max_pa = pa_end;
-	if (va_end > max_va)
-		max_va = va_end;
-}
-
-void mmap_add(const mmap_region_t *mm)
-{
-	while (mm->size) {
-		mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
-		++mm;
-	}
-}
-
-static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
-					unsigned level)
-{
-	unsigned long desc = addr_pa;
-
-	desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;
-
-	desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;
-
-	desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
-
-	desc |= LOWER_ATTRS(ACCESS_FLAG);
-
-	if (attr & MT_MEMORY) {
-		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
-		if (attr & MT_RW)
-			desc |= UPPER_ATTRS(XN);
-	} else {
-		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
-		desc |= UPPER_ATTRS(XN);
-	}
-
-	debug_print(attr & MT_MEMORY ? "MEM" : "DEV");
-	debug_print(attr & MT_RW ? "-RW" : "-RO");
-	debug_print(attr & MT_NS ? "-NS" : "-S");
-
-	return desc;
-}
-
-static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
-					unsigned long size)
-{
-	int attr = mm->attr;
-
-	for (;;) {
-		++mm;
-
-		if (!mm->size)
-			return attr; /* Reached end of list */
-
-		if (mm->base_va >= base_va + size)
-			return attr; /* Next region is after area so end */
-
-		if (mm->base_va + mm->size <= base_va)
-			continue; /* Next region has already been overtaken */
-
-		if ((mm->attr & attr) == attr)
-			continue; /* Region doesn't override attribs so skip */
-
-		attr &= mm->attr;
-
-		if (mm->base_va > base_va ||
-			mm->base_va + mm->size < base_va + size)
-			return -1; /* Region doesn't fully cover our area */
-	}
-}
-
-static mmap_region_t *init_xlation_table(mmap_region_t *mm,
-					unsigned long base_va,
-					unsigned long *table, unsigned level)
-{
-	unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
-						XLAT_TABLE_ENTRIES_SHIFT;
-	unsigned level_size = 1 << level_size_shift;
-	unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;
-
-	assert(level <= 3);
-
-	debug_print("New xlat table:\n");
-
-	do  {
-		unsigned long desc = UNSET_DESC;
-
-		if (mm->base_va + mm->size <= base_va) {
-			/* Area now after the region so skip it */
-			++mm;
-			continue;
-		}
-
-		debug_print("      %010lx %8lx " + 6 - 2 * level, base_va,
-				level_size);
-
-		if (mm->base_va >= base_va + level_size) {
-			/* Next region is after area so nothing to map yet */
-			desc = INVALID_DESC;
-		} else if (mm->base_va <= base_va && mm->base_va + mm->size >=
-				base_va + level_size) {
-			/* Next region covers all of area */
-			int attr = mmap_region_attr(mm, base_va, level_size);
-			if (attr >= 0)
-				desc = mmap_desc(attr,
-					base_va - mm->base_va + mm->base_pa,
-					level);
-		}
-		/* else Next region only partially covers area, so need */
-
-		if (desc == UNSET_DESC) {
-			/* Area not covered by a region so need finer table */
-			unsigned long *new_table = xlat_tables[next_xlat++];
-			assert(next_xlat <= MAX_XLAT_TABLES);
-			desc = TABLE_DESC | (unsigned long)new_table;
-
-			/* Recurse to fill in new table */
-			mm = init_xlation_table(mm, base_va,
-						new_table, level+1);
-		}
-
-		debug_print("\n");
-
-		*table++ = desc;
-		base_va += level_size;
-	} while (mm->size && (base_va & level_index_mask));
-
-	return mm;
-}
-
-static unsigned int calc_physical_addr_size_bits(unsigned long max_addr)
-{
-	/* Physical address can't exceed 48 bits */
-	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
-
-	/* 48 bits address */
-	if (max_addr & ADDR_MASK_44_TO_47)
-		return TCR_PS_BITS_256TB;
-
-	/* 44 bits address */
-	if (max_addr & ADDR_MASK_42_TO_43)
-		return TCR_PS_BITS_16TB;
-
-	/* 42 bits address */
-	if (max_addr & ADDR_MASK_40_TO_41)
-		return TCR_PS_BITS_4TB;
-
-	/* 40 bits address */
-	if (max_addr & ADDR_MASK_36_TO_39)
-		return TCR_PS_BITS_1TB;
-
-	/* 36 bits address */
-	if (max_addr & ADDR_MASK_32_TO_35)
-		return TCR_PS_BITS_64GB;
-
-	return TCR_PS_BITS_4GB;
-}
-
-void init_xlat_tables(void)
-{
-	print_mmap();
-	init_xlation_table(mmap, 0, l1_xlation_table, 1);
-	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
-	assert(max_va < ADDR_SPACE_SIZE);
-}
-
-/*******************************************************************************
- * Macro generating the code for the function enabling the MMU in the given
- * exception level, assuming that the pagetables have already been created.
- *
- *   _el:		Exception level at which the function will run
- *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
- *			be OR'ed with the default TCR value.
- *   _tlbi_fct:		Function to invalidate the TLBs at the current
- *			exception level
- ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
-	void enable_mmu_el##_el(uint32_t flags)				\
-	{								\
-		uint64_t mair, tcr, ttbr;				\
-		uint32_t sctlr;						\
-									\
-		assert(IS_IN_EL(_el));					\
-		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
-									\
-		/* Set attributes in the right indices of the MAIR */	\
-		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
-		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
-				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
-		write_mair_el##_el(mair);				\
-									\
-		/* Invalidate TLBs at the current exception level */	\
-		_tlbi_fct();						\
-									\
-		/* Set TCR bits as well. */				\
-		/* Inner & outer WBWA & shareable + T0SZ = 32 */	\
-		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
-			TCR_RGN_INNER_WBA |				\
-			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
-		tcr |= _tcr_extra;					\
-		write_tcr_el##_el(tcr);					\
-									\
-		/* Set TTBR bits as well */				\
-		ttbr = (uint64_t) l1_xlation_table;			\
-		write_ttbr0_el##_el(ttbr);				\
-									\
-		/* Ensure all translation table writes have drained */	\
-		/* into memory, the TLB invalidation is complete, */	\
-		/* and translation register writes are committed */	\
-		/* before enabling the MMU */				\
-		dsb();							\
-		isb();							\
-									\
-		sctlr = read_sctlr_el##_el();				\
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
-									\
-		if (flags & DISABLE_DCACHE)				\
-			sctlr &= ~SCTLR_C_BIT;				\
-		else							\
-			sctlr |= SCTLR_C_BIT;				\
-									\
-		write_sctlr_el##_el(sctlr);				\
-									\
-		/* Ensure the MMU enable takes effect immediately */	\
-		isb();							\
-	}
-
-/* Define EL1 and EL3 variants of the function enabling the MMU */
-DEFINE_ENABLE_MMU_EL(1,
-		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
-		tlbivmalle1)
-DEFINE_ENABLE_MMU_EL(3,
-		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
-		tlbialle3)
diff --git a/lib/compiler-rt/LICENSE.TXT b/lib/compiler-rt/LICENSE.TXT
new file mode 100644
index 0000000..a17dc12
--- /dev/null
+++ b/lib/compiler-rt/LICENSE.TXT
@@ -0,0 +1,91 @@
+==============================================================================
+compiler_rt License
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license.  As a user of this code you may choose
+to use it under either license.  As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+    LLVM Team
+
+    University of Illinois at Urbana-Champaign
+
+    http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimers.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimers in the
+      documentation and/or other materials provided with the distribution.
+
+    * Neither the names of the LLVM Team, University of Illinois at
+      Urbana-Champaign, nor the names of its contributors may be used to
+      endorse or promote products derived from this Software without specific
+      prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties.  Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
diff --git a/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
new file mode 100644
index 0000000..be343b6
--- /dev/null
+++ b/lib/compiler-rt/builtins/arm/aeabi_uldivmod.S
@@ -0,0 +1,46 @@
+//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { uint64_t quot, uint64_t rem}
+//        __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
+//   uint64_t rem, quot;
+//   quot = __udivmoddi4(numerator, denominator, &rem);
+//   return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
+        .syntax unified
+        .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+        push	{r6, lr}
+        sub	sp, sp, #16
+        add	r6, sp, #8
+        str	r6, [sp]
+#if defined(__MINGW32__)
+        movs    r6, r0
+        movs    r0, r2
+        movs    r2, r6
+        movs    r6, r1
+        movs    r1, r3
+        movs    r3, r6
+#endif
+        bl	SYMBOL_NAME(__udivmoddi4)
+        ldr	r2, [sp, #8]
+        ldr	r3, [sp, #12]
+        add	sp, sp, #16
+        pop	{r6, pc}
+END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
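The wrapper above reserves 16 bytes of stack, stores a pointer to the upper 8 bytes at [sp] so that __udivmoddi4 receives it as its stack-passed remainder argument, and returns the quotient in r0:r1 and the remainder in r2:r3, exactly as the C-style comment at the top of the file describes. A minimal standalone C model of that contract (hypothetical harness; uldivmod_result and aeabi_uldivmod_model are illustrative names, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Models the struct { quot, rem } contract from the comment above:
 * quot is what __udivmoddi4 returns, rem is what it stores through
 * its pointer argument. Hypothetical harness, not part of the patch. */
typedef struct { uint64_t quot; uint64_t rem; } uldivmod_result;

static uldivmod_result aeabi_uldivmod_model(uint64_t num, uint64_t den)
{
    uldivmod_result r;
    r.quot = num / den;   /* quotient, returned in r0:r1 by the real routine */
    r.rem  = num % den;   /* remainder, returned in r2:r3 by the real routine */
    return r;
}

int main(void)
{
    uldivmod_result r = aeabi_uldivmod_model(0x123456789abcdefULL, 1000ULL);
    printf("quot=%llu rem=%llu\n",
           (unsigned long long)r.quot, (unsigned long long)r.rem);
    return 0;
}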
diff --git a/lib/compiler-rt/builtins/assembly.h b/lib/compiler-rt/builtins/assembly.h
new file mode 100644
index 0000000..29d9f88
--- /dev/null
+++ b/lib/compiler-rt/builtins/assembly.h
@@ -0,0 +1,169 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN(name) .private_extern name
+#define LOCAL_LABEL(name) L_##name
+// tell linker it can break up file at label boundaries
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
+#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#elif defined(__ELF__)
+
+#define HIDDEN(name) .hidden name
+#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
+#if defined(__arm__)
+#define SYMBOL_IS_FUNC(name) .type name,%function
+#else
+#define SYMBOL_IS_FUNC(name) .type name,@function
+#endif
+#define CONST_SECTION .section .rodata
+
+#if defined(__GNU__) || defined(__ANDROID__) || defined(__FreeBSD__)
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
+
+#else // !__APPLE__ && !__ELF__
+
+#define HIDDEN(name)
+#define LOCAL_LABEL(name) .L ## name
+#define FILE_LEVEL_DIRECTIVE
+#define SYMBOL_IS_FUNC(name)                                                   \
+  .def name SEPARATOR                                                          \
+    .scl 2 SEPARATOR                                                           \
+    .type 32 SEPARATOR                                                         \
+  .endef
+#define CONST_SECTION .section .rdata,"rd"
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#endif
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
+#define ARM_HAS_BX
+#endif
+#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 &&                \
+    (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
+#define __ARM_FEATURE_CLZ
+#endif
+
+#ifdef ARM_HAS_BX
+#define JMP(r) bx r
+#define JMPc(r, c) bx##c r
+#else
+#define JMP(r) mov pc, r
+#define JMPc(r, c) mov##c pc, r
+#endif
+
+// pop {pc} can't switch Thumb mode on ARMv4T
+#if __ARM_ARCH >= 5
+#define POP_PC() pop {pc}
+#else
+#define POP_PC()                                                               \
+  pop {ip};                                                                    \
+  JMP(ip)
+#endif
+
+#if __ARM_ARCH_ISA_THUMB == 2
+#define IT(cond)  it cond
+#define ITT(cond) itt cond
+#else
+#define IT(cond)
+#define ITT(cond)
+#endif
+
+#if __ARM_ARCH_ISA_THUMB == 2
+#define WIDE(op) op.w
+#else
+#define WIDE(op) op
+#endif
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name)                                        \
+  HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name)                                       \
+  FILE_LEVEL_DIRECTIVE SEPARATOR                                               \
+  .globl SYMBOL_NAME(name) SEPARATOR                                           \
+  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR                                  \
+  DECLARE_SYMBOL_VISIBILITY(name)                                              \
+  SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_THUMB_FUNCTION(name)                                 \
+  FILE_LEVEL_DIRECTIVE SEPARATOR                                               \
+  .globl SYMBOL_NAME(name) SEPARATOR                                           \
+  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR                                  \
+  DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR                                    \
+  .thumb_func SEPARATOR                                                        \
+  SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name)                               \
+  FILE_LEVEL_DIRECTIVE SEPARATOR                                               \
+  .globl SYMBOL_NAME(name) SEPARATOR                                           \
+  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR                                  \
+  HIDDEN(SYMBOL_NAME(name)) SEPARATOR                                          \
+  SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name)                     \
+  .globl name SEPARATOR                                                        \
+  SYMBOL_IS_FUNC(name) SEPARATOR                                               \
+  HIDDEN(name) SEPARATOR                                                       \
+  name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target)                         \
+  .globl SYMBOL_NAME(name) SEPARATOR                                           \
+  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR                                  \
+  DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR                       \
+  .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)                          \
+  DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#ifdef __ELF__
+#define END_COMPILERRT_FUNCTION(name)                                          \
+  .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#else
+#define END_COMPILERRT_FUNCTION(name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
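SYMBOL_NAME relies on the classic two-level paste: GLUE2 performs the ## concatenation, while the extra GLUE layer forces its arguments (such as __USER_LABEL_PREFIX__) to be macro-expanded before the paste happens. A minimal sketch of why the indirection matters (PREFIX, my_counter and PREFIXcounter are hypothetical names used only for illustration):

#include <stdio.h>

/* Same two-level token-pasting idiom as GLUE/GLUE2 above: in GLUE the
 * arguments are not operands of ##, so they expand first; in BAD the
 * paste happens before expansion and the literal macro name is glued. */
#define PREFIX my_
#define GLUE2(a, b) a##b
#define GLUE(a, b) GLUE2(a, b)
#define BAD(a, b) a##b

int GLUE(PREFIX, counter) = 42;   /* defines my_counter */
int BAD(PREFIX, counter)  = 7;    /* defines PREFIXcounter */

int main(void)
{
    printf("%d %d\n", my_counter, PREFIXcounter);
    return 0;
}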
diff --git a/lib/compiler-rt/builtins/ctzdi2.c b/lib/compiler-rt/builtins/ctzdi2.c
new file mode 100644
index 0000000..db3c6fd
--- /dev/null
+++ b/lib/compiler-rt/builtins/ctzdi2.c
@@ -0,0 +1,29 @@
+/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __ctzdi2 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Returns: the number of trailing 0-bits  */
+
+/* Precondition: a != 0 */
+
+COMPILER_RT_ABI si_int
+__ctzdi2(di_int a)
+{
+    dwords x;
+    x.all = a;
+    const si_int f = -(x.s.low == 0);
+    return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
+              (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
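The mask f above is all-ones exactly when the low word is zero, so the argument to __builtin_ctz selects the high word (and the final term adds 32) only when every trailing zero comes from an empty low word. A standalone sketch of the same selection, checked against a naive loop (hypothetical harness using <stdint.h> names; requires a GCC/Clang-style __builtin_ctz and a non-zero input, as the precondition states):

#include <assert.h>
#include <stdint.h>

/* Same selection trick as __ctzdi2 above, restated with fixed-width types. */
static int ctz64_masked(uint64_t a)
{
    uint32_t low  = (uint32_t)a;
    uint32_t high = (uint32_t)(a >> 32);
    uint32_t f    = -(uint32_t)(low == 0);           /* all-ones iff low == 0 */
    return __builtin_ctz((high & f) | (low & ~f)) + (int)(f & 32u);
}

static int ctz64_naive(uint64_t a)
{
    int n = 0;
    while ((a & 1u) == 0) { a >>= 1; ++n; }
    return n;
}

int main(void)
{
    for (int i = 0; i < 64; ++i) {
        uint64_t v = (uint64_t)1 << i;
        assert(ctz64_masked(v) == i && ctz64_naive(v) == i);
        assert(ctz64_masked(v | ((uint64_t)1 << 63)) == i);
    }
    return 0;
}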
diff --git a/lib/compiler-rt/builtins/int_endianness.h b/lib/compiler-rt/builtins/int_endianness.h
new file mode 100644
index 0000000..7995ddb
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_endianness.h
@@ -0,0 +1,116 @@
+/* ===-- int_endianness.h - configuration header for compiler-rt ------------===
+ *
+ *		       The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_ENDIANNESS_H
+#define INT_ENDIANNESS_H
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    defined(__ORDER_LITTLE_ENDIAN__)
+
+/* Clang and GCC provide built-in endianness definitions. */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#endif /* __BYTE_ORDER__ */
+
+#else /* Compilers other than Clang or GCC. */
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/byteorder.h>
+
+#if defined(_BIG_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#elif defined(_LITTLE_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#else /* !_LITTLE_ENDIAN */
+#error "unknown endianness"
+#endif /* !_LITTLE_ENDIAN */
+
+#endif /* Solaris and AuroraUX. */
+
+/* .. */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) ||   \
+    defined(__minix)
+#include <sys/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#endif /* _BYTE_ORDER */
+
+#endif /* *BSD */
+
+#if defined(__OpenBSD__) || defined(__Bitrig__)
+#include <machine/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#endif /* _BYTE_ORDER */
+
+#endif /* OpenBSD and Bitrig. */
+
+/* .. */
+
+/* Mac OSX has __BIG_ENDIAN__ or __LITTLE_ENDIAN__ automatically set by the
+ * compiler (at least with GCC) */
+#if defined(__APPLE__) || defined(__ellcc__ )
+
+#ifdef __BIG_ENDIAN__
+#if __BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN    1
+#endif
+#endif /* __BIG_ENDIAN__ */
+
+#ifdef __LITTLE_ENDIAN__
+#if __LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+#endif
+#endif /* __LITTLE_ENDIAN__ */
+
+#endif /* Mac OSX */
+
+/* .. */
+
+#if defined(_WIN32)
+
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN    0
+
+#endif /* Windows */
+
+#endif /* Clang or GCC. */
+
+/* . */
+
+#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
+#error Unable to determine endian
+#endif /* Check we found an endianness correctly. */
+
+#endif /* INT_ENDIANNESS_H */
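When __BYTE_ORDER__ is available the header settles the question at compile time; the remaining branches only exist for toolchains without that predefine. A small standalone check, assuming a GCC/Clang-style compiler, comparing the predefine against a runtime byte inspection (hypothetical harness, not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t v = 0x01020304u;
    unsigned char b[4];
    memcpy(b, &v, sizeof(b));
    int runtime_little = (b[0] == 0x04);    /* least significant byte stored first */

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
    int compile_little = 1;                 /* same predefine the header checks */
#else
    int compile_little = 0;
#endif
    printf("runtime little-endian: %d, compile-time little-endian: %d\n",
           runtime_little, compile_little);
    return 0;
}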
diff --git a/lib/compiler-rt/builtins/int_lib.h b/lib/compiler-rt/builtins/int_lib.h
new file mode 100644
index 0000000..57dfc41
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_lib.h
@@ -0,0 +1,127 @@
+/* ===-- int_lib.h - configuration header for compiler-rt  -----------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is a configuration header for compiler-rt.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INT_LIB_H
+#define INT_LIB_H
+
+/* Assumption: Signed integral is 2's complement. */
+/* Assumption: Right shift of signed negative is arithmetic shift. */
+/* Assumption: Endianness is little or big (not mixed). */
+
+#if defined(__ELF__)
+#define FNALIAS(alias_name, original_name) \
+  void alias_name() __attribute__((alias(#original_name)))
+#else
+#define FNALIAS(alias, name) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#endif
+
+/* ABI macro definitions */
+
+#if __ARM_EABI__
+# define ARM_EABI_FNALIAS(aeabi_name, name)         \
+  void __aeabi_##aeabi_name() __attribute__((alias("__" #name)));
+# ifdef COMPILER_RT_ARMHF_TARGET
+#   define COMPILER_RT_ABI
+# else
+#   define COMPILER_RT_ABI __attribute__((pcs("aapcs")))
+# endif
+#else
+# define ARM_EABI_FNALIAS(aeabi_name, name)
+# define COMPILER_RT_ABI
+#endif
+
+#ifdef _MSC_VER
+#define ALWAYS_INLINE __forceinline
+#define NOINLINE __declspec(noinline)
+#define NORETURN __declspec(noreturn)
+#define UNUSED
+#else
+#define ALWAYS_INLINE __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define UNUSED __attribute__((unused))
+#endif
+
+/*
+ * Kernel and boot environment can't use normal headers,
+ * so use the equivalent system headers.
+ */
+#  include <sys/limits.h>
+#  include <sys/stdint.h>
+#  include <sys/types.h>
+
+/* Include the commonly used internal type definitions. */
+#include "int_types.h"
+
+COMPILER_RT_ABI si_int __paritysi2(si_int a);
+COMPILER_RT_ABI si_int __paritydi2(di_int a);
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b);
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b);
+COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d);
+
+COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int* rem);
+COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int* rem);
+#ifdef CRT_HAS_128BIT
+COMPILER_RT_ABI si_int __clzti2(ti_int a);
+COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
+#endif
+
+/* Definitions for builtins unavailable on MSVC */
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+
+uint32_t __inline __builtin_ctz(uint32_t value) {
+  unsigned long trailing_zero = 0;
+  if (_BitScanForward(&trailing_zero, value))
+    return trailing_zero;
+  return 32;
+}
+
+uint32_t __inline __builtin_clz(uint32_t value) {
+  unsigned long leading_zero = 0;
+  if (_BitScanReverse(&leading_zero, value))
+    return 31 - leading_zero;
+  return 32;
+}
+
+#if defined(_M_ARM) || defined(_M_X64)
+uint32_t __inline __builtin_clzll(uint64_t value) {
+  unsigned long leading_zero = 0;
+  if (_BitScanReverse64(&leading_zero, value))
+    return 63 - leading_zero;
+  return 64;
+}
+#else
+uint32_t __inline __builtin_clzll(uint64_t value) {
+  if (value == 0)
+    return 64;
+  uint32_t msh = (uint32_t)(value >> 32);
+  uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+  if (msh != 0)
+    return __builtin_clz(msh);
+  return 32 + __builtin_clz(lsh);
+}
+#endif
+
+#define __builtin_clzl __builtin_clzll
+#endif /* defined(_MSC_VER) && !defined(__clang__) */
+
+#endif /* INT_LIB_H */
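The generic __builtin_clzll fallback above splits the 64-bit value into two 32-bit halves: if the upper half is non-zero, all leading zeros live there; otherwise the count is 32 plus the leading zeros of the lower half. A standalone restatement with a few spot checks (hypothetical harness; still relies on a GCC/Clang-style __builtin_clz):

#include <assert.h>
#include <stdint.h>

/* Same half-word combination as the non-_M_X64 fallback above. */
static uint32_t clz64_from_halves(uint64_t value)
{
    if (value == 0)
        return 64;
    uint32_t msh = (uint32_t)(value >> 32);
    uint32_t lsh = (uint32_t)(value & 0xFFFFFFFFu);
    if (msh != 0)
        return (uint32_t)__builtin_clz(msh);      /* all leading zeros in the top word */
    return 32u + (uint32_t)__builtin_clz(lsh);    /* top word empty: offset by 32 */
}

int main(void)
{
    assert(clz64_from_halves(1) == 63);
    assert(clz64_from_halves(0x8000000000000000ULL) == 0);
    assert(clz64_from_halves(0x100000000ULL) == 31);
    assert(clz64_from_halves(0) == 64);
    return 0;
}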
diff --git a/lib/compiler-rt/builtins/int_math.h b/lib/compiler-rt/builtins/int_math.h
new file mode 100644
index 0000000..fc81fb7
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_math.h
@@ -0,0 +1,114 @@
+/* ===-- int_math.h - internal math inlines ---------------------------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===-----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines substitutes for the libm functions used in some of the
+ * compiler-rt implementations, defined in such a way that there is not a direct
+ * dependency on libm or math.h. Instead, we use the compiler builtin versions
+ * where available. This reduces our dependencies on the system SDK by foisting
+ * the responsibility onto the compiler.
+ *
+ * ===-----------------------------------------------------------------------===
+ */
+
+#ifndef INT_MATH_H
+#define INT_MATH_H
+
+#ifndef __has_builtin
+#  define  __has_builtin(x) 0
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <math.h>
+#include <stdlib.h>
+#include <ymath.h>
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define CRT_INFINITY INFINITY
+#else
+#define CRT_INFINITY __builtin_huge_valf()
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_isfinite(x) _finite((x))
+#define crt_isinf(x) !_finite((x))
+#define crt_isnan(x) _isnan((x))
+#else
+/* Define crt_isfinite in terms of the builtin if available, otherwise provide
+ * an alternate version in terms of our other functions. This supports some
+ * versions of GCC which didn't have __builtin_isfinite.
+ */
+#if __has_builtin(__builtin_isfinite)
+#  define crt_isfinite(x) __builtin_isfinite((x))
+#elif defined(__GNUC__)
+#  define crt_isfinite(x) \
+  __extension__(({ \
+      __typeof((x)) x_ = (x); \
+      !crt_isinf(x_) && !crt_isnan(x_); \
+    }))
+#else
+#  error "Do not know how to check for infinity"
+#endif /* __has_builtin(__builtin_isfinite) */
+#define crt_isinf(x) __builtin_isinf((x))
+#define crt_isnan(x) __builtin_isnan((x))
+#endif /* _MSC_VER */
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_copysign(x, y) copysign((x), (y))
+#define crt_copysignf(x, y) copysignf((x), (y))
+#define crt_copysignl(x, y) copysignl((x), (y))
+#else
+#define crt_copysign(x, y) __builtin_copysign((x), (y))
+#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
+#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fabs(x) fabs((x))
+#define crt_fabsf(x) fabsf((x))
+#define crt_fabsl(x) fabs((x))
+#else
+#define crt_fabs(x) __builtin_fabs((x))
+#define crt_fabsf(x) __builtin_fabsf((x))
+#define crt_fabsl(x) __builtin_fabsl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fmax(x, y) __max((x), (y))
+#define crt_fmaxf(x, y) __max((x), (y))
+#define crt_fmaxl(x, y) __max((x), (y))
+#else
+#define crt_fmax(x, y) __builtin_fmax((x), (y))
+#define crt_fmaxf(x, y) __builtin_fmaxf((x), (y))
+#define crt_fmaxl(x, y) __builtin_fmaxl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_logb(x) logb((x))
+#define crt_logbf(x) logbf((x))
+#define crt_logbl(x) logbl((x))
+#else
+#define crt_logb(x) __builtin_logb((x))
+#define crt_logbf(x) __builtin_logbf((x))
+#define crt_logbl(x) __builtin_logbl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_scalbn(x, y) scalbn((x), (y))
+#define crt_scalbnf(x, y) scalbnf((x), (y))
+#define crt_scalbnl(x, y) scalbnl((x), (y))
+#else
+#define crt_scalbn(x, y) __builtin_scalbn((x), (y))
+#define crt_scalbnf(x, y) __builtin_scalbnf((x), (y))
+#define crt_scalbnl(x, y) __builtin_scalbnl((x), (y))
+#endif
+
+#endif /* INT_MATH_H */
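The GCC fallback for crt_isfinite simply defines "finite" as neither infinite nor NaN, evaluating its argument once via the statement expression. A standalone restatement in terms of the standard classifiers (hypothetical check, using math.h isinf/isnan rather than the crt_* wrappers):

#include <assert.h>
#include <math.h>

/* Same definition as the fallback above: finite means not inf and not NaN. */
static int finite_from_parts(double x)
{
    return !isinf(x) && !isnan(x);
}

int main(void)
{
    assert(finite_from_parts(1.5));
    assert(!finite_from_parts(INFINITY));
    assert(!finite_from_parts(NAN));
    return 0;
}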
diff --git a/lib/compiler-rt/builtins/int_types.h b/lib/compiler-rt/builtins/int_types.h
new file mode 100644
index 0000000..660385e
--- /dev/null
+++ b/lib/compiler-rt/builtins/int_types.h
@@ -0,0 +1,166 @@
+/* ===-- int_lib.h - configuration header for compiler-rt  -----------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file is not part of the interface of this library.
+ *
+ * This file defines various standard types, most importantly a number of unions
+ * used to access parts of larger types.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef INT_TYPES_H
+#define INT_TYPES_H
+
+#include "int_endianness.h"
+
+/* si_int is defined in Linux sysroot's asm-generic/siginfo.h */
+#ifdef si_int
+#undef si_int
+#endif
+typedef      int si_int;
+typedef unsigned su_int;
+
+typedef          long long di_int;
+typedef unsigned long long du_int;
+
+typedef union
+{
+    di_int all;
+    struct
+    {
+#if _YUGA_LITTLE_ENDIAN
+        su_int low;
+        si_int high;
+#else
+        si_int high;
+        su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+    }s;
+} dwords;
+
+typedef union
+{
+    du_int all;
+    struct
+    {
+#if _YUGA_LITTLE_ENDIAN
+        su_int low;
+        su_int high;
+#else
+        su_int high;
+        su_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+    }s;
+} udwords;
+
+/* MIPS64 issue: PR 20098 */
+#if (defined(__LP64__) || defined(__wasm__)) && \
+    !(defined(__mips__) && defined(__clang__))
+#define CRT_HAS_128BIT
+#endif
+
+#ifdef CRT_HAS_128BIT
+typedef int      ti_int __attribute__ ((mode (TI)));
+typedef unsigned tu_int __attribute__ ((mode (TI)));
+
+typedef union
+{
+    ti_int all;
+    struct
+    {
+#if _YUGA_LITTLE_ENDIAN
+        du_int low;
+        di_int high;
+#else
+        di_int high;
+        du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+    }s;
+} twords;
+
+typedef union
+{
+    tu_int all;
+    struct
+    {
+#if _YUGA_LITTLE_ENDIAN
+        du_int low;
+        du_int high;
+#else
+        du_int high;
+        du_int low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+    }s;
+} utwords;
+
+static __inline ti_int make_ti(di_int h, di_int l) {
+    twords r;
+    r.s.high = h;
+    r.s.low = l;
+    return r.all;
+}
+
+static __inline tu_int make_tu(du_int h, du_int l) {
+    utwords r;
+    r.s.high = h;
+    r.s.low = l;
+    return r.all;
+}
+
+#endif /* CRT_HAS_128BIT */
+
+typedef union
+{
+    su_int u;
+    float f;
+} float_bits;
+
+typedef union
+{
+    udwords u;
+    double  f;
+} double_bits;
+
+typedef struct
+{
+#if _YUGA_LITTLE_ENDIAN
+    udwords low;
+    udwords high;
+#else
+    udwords high;
+    udwords low;
+#endif /* _YUGA_LITTLE_ENDIAN */
+} uqwords;
+
+typedef union
+{
+    uqwords     u;
+    long double f;
+} long_double_bits;
+
+#if __STDC_VERSION__ >= 199901L
+typedef float _Complex Fcomplex;
+typedef double _Complex Dcomplex;
+typedef long double _Complex Lcomplex;
+
+#define COMPLEX_REAL(x) __real__(x)
+#define COMPLEX_IMAGINARY(x) __imag__(x)
+#else
+typedef struct { float real, imaginary; } Fcomplex;
+
+typedef struct { double real, imaginary; } Dcomplex;
+
+typedef struct { long double real, imaginary; } Lcomplex;
+
+#define COMPLEX_REAL(x) (x).real
+#define COMPLEX_IMAGINARY(x) (x).imaginary
+#endif
+#endif /* INT_TYPES_H */
+
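The dwords/udwords unions give word-sized access to the halves of a 64-bit value, with the member order chosen so that s.low is always the less significant word regardless of endianness. A standalone sketch of the little-endian layout (hypothetical udwords_model type; the assertions hold only on a little-endian host, and the header flips the member order for big-endian):

#include <assert.h>
#include <stdint.h>

/* Same low/high access pattern as the udwords union above, restated
 * with <stdint.h> names. Little-endian member order shown. */
typedef union {
    uint64_t all;
    struct {
        uint32_t low;    /* less significant word */
        uint32_t high;   /* more significant word */
    } s;
} udwords_model;

int main(void)
{
    udwords_model n;
    n.all = 0x1122334455667788ULL;
    assert(n.s.low  == 0x55667788u);   /* holds only on a little-endian host */
    assert(n.s.high == 0x11223344u);
    return 0;
}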
diff --git a/lib/compiler-rt/builtins/udivmoddi4.c b/lib/compiler-rt/builtins/udivmoddi4.c
new file mode 100644
index 0000000..0c8b4ff
--- /dev/null
+++ b/lib/compiler-rt/builtins/udivmoddi4.c
@@ -0,0 +1,231 @@
+/* ===-- udivmoddi4.c - Implement __udivmoddi4 -----------------------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __udivmoddi4 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Effects: if rem != 0, *rem = a % b
+ * Returns: a / b
+ */
+
+/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */
+
+COMPILER_RT_ABI du_int
+__udivmoddi4(du_int a, du_int b, du_int* rem)
+{
+    const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
+    const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT;
+    udwords n;
+    n.all = a;
+    udwords d;
+    d.all = b;
+    udwords q;
+    udwords r;
+    unsigned sr;
+    /* special cases, X is unknown, K != 0 */
+    if (n.s.high == 0)
+    {
+        if (d.s.high == 0)
+        {
+            /* 0 X
+             * ---
+             * 0 X
+             */
+            if (rem)
+                *rem = n.s.low % d.s.low;
+            return n.s.low / d.s.low;
+        }
+        /* 0 X
+         * ---
+         * K X
+         */
+        if (rem)
+            *rem = n.s.low;
+        return 0;
+    }
+    /* n.s.high != 0 */
+    if (d.s.low == 0)
+    {
+        if (d.s.high == 0)
+        {
+            /* K X
+             * ---
+             * 0 0
+             */ 
+            if (rem)
+                *rem = n.s.high % d.s.low;
+            return n.s.high / d.s.low;
+        }
+        /* d.s.high != 0 */
+        if (n.s.low == 0)
+        {
+            /* K 0
+             * ---
+             * K 0
+             */
+            if (rem)
+            {
+                r.s.high = n.s.high % d.s.high;
+                r.s.low = 0;
+                *rem = r.all;
+            }
+            return n.s.high / d.s.high;
+        }
+        /* K K
+         * ---
+         * K 0
+         */
+        if ((d.s.high & (d.s.high - 1)) == 0)     /* if d is a power of 2 */
+        {
+            if (rem)
+            {
+                r.s.low = n.s.low;
+                r.s.high = n.s.high & (d.s.high - 1);
+                *rem = r.all;
+            }
+            return n.s.high >> __builtin_ctz(d.s.high);
+        }
+        /* K K
+         * ---
+         * K 0
+         */
+        sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+        /* 0 <= sr <= n_uword_bits - 2 or sr large */
+        if (sr > n_uword_bits - 2)
+        {
+           if (rem)
+                *rem = n.all;
+            return 0;
+        }
+        ++sr;
+        /* 1 <= sr <= n_uword_bits - 1 */
+        /* q.all = n.all << (n_udword_bits - sr); */
+        q.s.low = 0;
+        q.s.high = n.s.low << (n_uword_bits - sr);
+        /* r.all = n.all >> sr; */
+        r.s.high = n.s.high >> sr;
+        r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+    }
+    else  /* d.s.low != 0 */
+    {
+        if (d.s.high == 0)
+        {
+            /* K X
+             * ---
+             * 0 K
+             */
+            if ((d.s.low & (d.s.low - 1)) == 0)     /* if d is a power of 2 */
+            {
+                if (rem)
+                    *rem = n.s.low & (d.s.low - 1);
+                if (d.s.low == 1)
+                    return n.all;
+                sr = __builtin_ctz(d.s.low);
+                q.s.high = n.s.high >> sr;
+                q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+                return q.all;
+            }
+            /* K X
+             * ---
+             * 0 K
+             */
+            sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high);
+            /* 2 <= sr <= n_udword_bits - 1
+             * q.all = n.all << (n_udword_bits - sr);
+             * r.all = n.all >> sr;
+             */
+            if (sr == n_uword_bits)
+            {
+                q.s.low = 0;
+                q.s.high = n.s.low;
+                r.s.high = 0;
+                r.s.low = n.s.high;
+            }
+            else if (sr < n_uword_bits)  // 2 <= sr <= n_uword_bits - 1
+            {
+                q.s.low = 0;
+                q.s.high = n.s.low << (n_uword_bits - sr);
+                r.s.high = n.s.high >> sr;
+                r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+            }
+            else              // n_uword_bits + 1 <= sr <= n_udword_bits - 1
+            {
+                q.s.low = n.s.low << (n_udword_bits - sr);
+                q.s.high = (n.s.high << (n_udword_bits - sr)) |
+                           (n.s.low >> (sr - n_uword_bits));
+                r.s.high = 0;
+                r.s.low = n.s.high >> (sr - n_uword_bits);
+            }
+        }
+        else
+        {
+            /* K X
+             * ---
+             * K K
+             */
+            sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+            /* 0 <= sr <= n_uword_bits - 1 or sr large */
+            if (sr > n_uword_bits - 1)
+            {
+                if (rem)
+                    *rem = n.all;
+                return 0;
+            }
+            ++sr;
+            /* 1 <= sr <= n_uword_bits */
+            /*  q.all = n.all << (n_udword_bits - sr); */
+            q.s.low = 0;
+            if (sr == n_uword_bits)
+            {
+                q.s.high = n.s.low;
+                r.s.high = 0;
+                r.s.low = n.s.high;
+            }
+            else
+            {
+                q.s.high = n.s.low << (n_uword_bits - sr);
+                r.s.high = n.s.high >> sr;
+                r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+            }
+        }
+    }
+    /* Not a special case
+     * q and r are initialized with:
+     * q.all = n.all << (n_udword_bits - sr);
+     * r.all = n.all >> sr;
+     * 1 <= sr <= n_udword_bits - 1
+     */
+    su_int carry = 0;
+    for (; sr > 0; --sr)
+    {
+        /* r:q = ((r:q)  << 1) | carry */
+        r.s.high = (r.s.high << 1) | (r.s.low  >> (n_uword_bits - 1));
+        r.s.low  = (r.s.low  << 1) | (q.s.high >> (n_uword_bits - 1));
+        q.s.high = (q.s.high << 1) | (q.s.low  >> (n_uword_bits - 1));
+        q.s.low  = (q.s.low  << 1) | carry;
+        /* carry = 0;
+         * if (r.all >= d.all)
+         * {
+         *      r.all -= d.all;
+         *      carry = 1;
+         * }
+         */
+        const di_int s = (di_int)(d.all - r.all - 1) >> (n_udword_bits - 1);
+        carry = s & 1;
+        r.all -= d.all & s;
+    }
+    q.all = (q.all << 1) | carry;
+    if (rem)
+        *rem = r.all;
+    return q.all;
+}
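The loop's conditional subtraction is branchless: d.all - r.all - 1 is negative exactly when r >= d, so the arithmetic right shift turns that into an all-ones or all-zero mask that both gates the subtraction and yields the quotient carry. A standalone sketch of that single step (hypothetical helper; relies on the same two's-complement and arithmetic-shift assumptions stated in int_lib.h):

#include <assert.h>
#include <stdint.h>

/* The branchless "subtract d if r >= d" step from the division loop above. */
static uint64_t conditional_subtract(uint64_t *r, uint64_t d)
{
    int64_t s = (int64_t)(d - *r - 1) >> 63;  /* all-ones iff *r >= d, else zero */
    *r -= d & (uint64_t)s;                    /* subtract only when the mask is set */
    return (uint64_t)s & 1u;                  /* carry fed back into the quotient */
}

int main(void)
{
    uint64_t r;

    r = 10; assert(conditional_subtract(&r, 7) == 1 && r == 3);  /* r >= d: subtract */
    r = 5;  assert(conditional_subtract(&r, 7) == 0 && r == 5);  /* r <  d: keep */
    r = 7;  assert(conditional_subtract(&r, 7) == 1 && r == 0);  /* equal counts as >= */
    return 0;
}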
diff --git a/lib/compiler-rt/compiler-rt.mk b/lib/compiler-rt/compiler-rt.mk
new file mode 100644
index 0000000..cb5ab31
--- /dev/null
+++ b/lib/compiler-rt/compiler-rt.mk
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+ifeq (${ARCH},aarch32)
+COMPILER_RT_SRCS	:=	lib/compiler-rt/builtins/arm/aeabi_uldivmod.S	\
+				lib/compiler-rt/builtins/udivmoddi4.c		\
+				lib/compiler-rt/builtins/ctzdi2.c
+endif
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 0000000..5f3d744
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+	/* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 cache to PoU.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+	/* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+	/* ---------------------------------------------
+	 * Flush L1 and L2 caches to PoC.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	b	dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+	aem_generic_core_pwr_dwn, \
+	aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
new file mode 100644
index 0000000..2b6df27
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a32.h>
+#include <cpu_macros.S>
+
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * Clobbers: r0-r1
+	 * ---------------------------------------------
+	 */
+func cortex_a32_disable_smp
+	ldcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
+	bic	r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+	stcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
+	isb
+	dsb	sy
+	bx	lr
+endfunc cortex_a32_disable_smp
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A32.
+	 * Clobbers: r0-r1
+	 * -------------------------------------------------
+	 */
+func cortex_a32_reset_func
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	ldcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
+	orr	r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+	stcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
+	isb
+	bx	lr
+endfunc cortex_a32_reset_func
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Cortex-A32.
+	 * Clobbers: r0-r3
+	 * ----------------------------------------------------
+	 */
+func cortex_a32_core_pwr_dwn
+	/* r12 is pushed to meet the 8 byte stack alignment requirement */
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a32_disable_smp
+endfunc cortex_a32_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Cortex-A32.
+	 * Clobbers: r0-r3
+	 * -------------------------------------------------------
+	 */
+func cortex_a32_cluster_pwr_dwn
+	/* r12 is pushed to meet the 8 byte stack alignment requirement */
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Flush L1 cache.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* ---------------------------------------------
+	 * Flush L2 cache.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a32_disable_smp
+endfunc cortex_a32_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+	cortex_a32_reset_func, \
+	cortex_a32_core_pwr_dwn, \
+	cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
new file mode 100644
index 0000000..74cedc3
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870	1
+#endif
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * ---------------------------------------------
+	 */
+func cortex_a53_disable_smp
+	ldcopr16	r0, r1, CORTEX_A53_ECTLR
+	bic64_imm	r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+	stcopr16	r0, r1, CORTEX_A53_ECTLR
+	isb
+	dsb	sy
+	bx	lr
+endfunc cortex_a53_disable_smp
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A53 Errata #826319.
+	 * This applies only to revision <= r0p2 of Cortex A53.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * --------------------------------------------------
+	 */
+func errata_a53_826319_wa
+	/*
+	 * Compare r0 against revision r0p2
+	 */
+	mov	r2, lr
+	bl	check_errata_826319
+	mov	lr, r2
+	cmp	r0, #ERRATA_NOT_APPLIES
+	beq	1f
+	ldcopr	r0, CORTEX_A53_L2ACTLR
+	bic	r0, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+	orr	r0, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+	stcopr	r0, CORTEX_A53_L2ACTLR
+1:
+	bx	lr
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+	mov	r1, #0x02
+	b	cpu_rev_var_ls
+endfunc check_errata_826319
+
+	/* ---------------------------------------------------------------------
+	 * Disable the cache non-temporal hint.
+	 *
+	 * This ignores the Transient allocation hint in the MAIR and treats
+	 * allocations the same as non-transient allocation types. As a result,
+	 * the LDNP and STNP instructions in AArch64 behave the same as the
+	 * equivalent LDP and STP instructions.
+	 *
+	 * This is relevant only for revisions <= r0p3 of Cortex-A53.
+	 * From r0p4 and onwards, the bit to disable the hint is enabled by
+	 * default at reset.
+	 *
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------------------------
+	 */
+func a53_disable_non_temporal_hint
+	/*
+	 * Compare r0 against revision r0p3
+	 */
+	mov		r2, lr
+	bl		check_errata_disable_non_temporal_hint
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A53_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A53_CPUACTLR_DTAH
+	stcopr16	r0, r1, CORTEX_A53_CPUACTLR
+1:
+	bx		lr
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+	mov	r1, #0x03
+	b	cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A53 Errata #855873.
+	 *
+	 * This applies only to revisions >= r0p3 of Cortex A53.
+	 * Earlier revisions of the core are affected as well, but don't
+	 * have the chicken bit in the CPUACTLR register. It is expected that
+	 * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * --------------------------------------------------
+	 */
+func errata_a53_855873_wa
+	/*
+	 * Compare r0 against revision r0p3 and higher
+	 */
+	mov		r2, lr
+	bl		check_errata_855873
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A53_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A53_CPUACTLR_ENDCCASCI
+	stcopr16	r0, r1, CORTEX_A53_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+	mov	r1, #0x03
+	b	cpu_rev_var_hs
+endfunc check_errata_855873
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A53.
+	 * Shall clobber: r0-r6
+	 * -------------------------------------------------
+	 */
+func cortex_a53_reset_func
+	mov	r5, lr
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+#if ERRATA_A53_826319
+	mov	r0, r4
+	bl	errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+	mov	r0, r4
+	bl	a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+	mov	r0, r4
+	bl	errata_a53_855873_wa
+#endif
+
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	ldcopr16	r0, r1, CORTEX_A53_ECTLR
+	orr64_imm	r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+	stcopr16	r0, r1,	CORTEX_A53_ECTLR
+	isb
+	bx	r5
+endfunc cortex_a53_reset_func
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Cortex-A53.
+	 * ----------------------------------------------------
+	 */
+func cortex_a53_core_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Cortex-A53.
+	 * Clobbers: r0-r3
+	 * -------------------------------------------------------
+	 */
+func cortex_a53_cluster_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* ---------------------------------------------
+	 * Flush L2 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A53. Must follow AAPCS.
+ */
+func cortex_a53_errata_report
+	push	{r12, lr}
+
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each erratum.
+	 */
+	report_errata ERRATA_A53_826319, cortex_a53, 826319
+	report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+	report_errata ERRATA_A53_855873, cortex_a53, 855873
+
+	pop	{r12, lr}
+	bx	lr
+endfunc cortex_a53_errata_report
+#endif
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+	cortex_a53_reset_func, \
+	cortex_a53_core_pwr_dwn, \
+	cortex_a53_cluster_pwr_dwn
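The check_errata_* helpers compare the value returned by cpu_get_rev_var, which packs the variant in bits [7:4] and the revision in bits [3:0], against an immediate such as 0x02 for r0p2; cpu_rev_var_ls reports the erratum as applying to that part and earlier ones, cpu_rev_var_hs to that part and later ones. A hypothetical C restatement of that decision (the ERRATA_APPLIES/ERRATA_NOT_APPLIES values are assumed here, not taken from the TF headers):

#include <assert.h>
#include <stdint.h>

#define ERRATA_APPLIES      1   /* assumed values for this sketch */
#define ERRATA_NOT_APPLIES  0

/* rN pM is encoded as (N << 4) | M, so r1p2 is 0x12. */
static int rev_var_ls(uint8_t cpu_rev_var, uint8_t max_rev_var)
{
    /* Workaround applies to this part and all earlier ones. */
    return (cpu_rev_var <= max_rev_var) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

static int rev_var_hs(uint8_t cpu_rev_var, uint8_t min_rev_var)
{
    /* Workaround applies to this part and all later ones. */
    return (cpu_rev_var >= min_rev_var) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

int main(void)
{
    /* Erratum 826319: "revision <= r0p2", compared against 0x02. */
    assert(rev_var_ls(0x02, 0x02) == ERRATA_APPLIES);      /* r0p2 */
    assert(rev_var_ls(0x03, 0x02) == ERRATA_NOT_APPLIES);  /* r0p3 */
    /* Erratum 855873: "revisions >= r0p3", compared against 0x03. */
    assert(rev_var_hs(0x10, 0x03) == ERRATA_APPLIES);      /* r1p0 */
    assert(rev_var_hs(0x02, 0x03) == ERRATA_NOT_APPLIES);  /* r0p2 */
    return 0;
}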
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
new file mode 100644
index 0000000..b5189e7
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * Clobbers: r0-r1
+	 * ---------------------------------------------
+	 */
+func cortex_a57_disable_smp
+	ldcopr16	r0, r1, CORTEX_A57_ECTLR
+	bic64_imm	r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+	stcopr16	r0, r1, CORTEX_A57_ECTLR
+	bx	lr
+endfunc cortex_a57_disable_smp
+
+	/* ---------------------------------------------
+	 * Disable all types of L2 prefetches.
+	 * Clobbers: r0-r2
+	 * ---------------------------------------------
+	 */
+func cortex_a57_disable_l2_prefetch
+	ldcopr16	r0, r1, CORTEX_A57_ECTLR
+	orr64_imm	r0, r1, CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+	bic64_imm	r0, r1, (CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK | \
+				 CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK)
+	stcopr16	r0, r1, CORTEX_A57_ECTLR
+	isb
+	dsb	ish
+	bx	lr
+endfunc cortex_a57_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable debug interfaces
+	 * ---------------------------------------------
+	 */
+func cortex_a57_disable_ext_debug
+	mov	r0, #1
+	stcopr	r0, DBGOSDLR
+	isb
+	dsb	sy
+	bx	lr
+endfunc cortex_a57_disable_ext_debug
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #806969.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * --------------------------------------------------
+	 */
+func errata_a57_806969_wa
+	/*
+	 * Compare r0 against revision r0p0
+	 */
+	mov		r2, lr
+	bl		check_errata_806969
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx	lr
+endfunc errata_a57_806969_wa
+
+func check_errata_806969
+	mov	r1, #0x00
+	b	cpu_rev_var_ls
+endfunc check_errata_806969
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #813419.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * ---------------------------------------------------
+	 */
+func check_errata_813419
+	/*
+	 * Even though this is only needed for revision r0p0, it
+	 * is always applied due to limitations of the current
+	 * errata framework.
+	 */
+	mov	r0, #ERRATA_APPLIES
+	bx	lr
+endfunc check_errata_813419
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #813420.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_813420_wa
+	/*
+	 * Compare r0 against revision r0p0
+	 */
+	mov		r2, lr
+	bl		check_errata_813420
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DCC_AS_DCCI
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+	mov	r1, #0x00
+	b	cpu_rev_var_ls
+endfunc check_errata_813420
+
+	/* --------------------------------------------------------------------
+	 * Disable the over-read from the LDNP instruction.
+	 *
+	 * This applies to all revisions <= r1p2. The performance degradation
+	 * observed with LDNP/STNP has been fixed on r1p3 and onwards.
+	 *
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------------------------
+	 */
+func a57_disable_ldnp_overread
+	/*
+	 * Compare r0 against revision r1p2
+	 */
+	mov		r2, lr
+	bl		check_errata_disable_ldnp_overread
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_OVERREAD
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+	mov	r1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #826974.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_826974_wa
+	/*
+	 * Compare r0 against revision r1p1
+	 */
+	mov		r2, lr
+	bl		check_errata_826974
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+	mov	r1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_826974
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #826977.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_826977_wa
+	/*
+	 * Compare r0 against revision r1p1
+	 */
+	mov		r2, lr
+	bl		check_errata_826977
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+	mov	r1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_826977
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #828024.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_828024_wa
+	/*
+	 * Compare r0 against revision r1p1
+	 */
+	mov		r2, lr
+	bl		check_errata_828024
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	/*
+	 * Setting the relevant bits in CORTEX_A57_CPUACTLR has to be done in 2
+	 * instructions here because the resulting bitmask doesn't fit in a
+	 * 16-bit value so it cannot be encoded in a single instruction.
+	 */
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+	orr64_imm	r0, r1, (CORTEX_A57_CPUACTLR_DIS_L1_STREAMING | CORTEX_A57_CPUACTLR_DIS_STREAMING)
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+	mov	r1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_828024
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #829520.
+	 * This applies only to revision <= r1p2 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_829520_wa
+	/*
+	 * Compare r0 against revision r1p2
+	 */
+	mov		r2, lr
+	bl		check_errata_829520
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+	mov	r1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_829520
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #833471.
+	 * This applies only to revision <= r1p2 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_833471_wa
+	/*
+	 * Compare r0 against revision r1p2
+	 */
+	mov		r2, lr
+	bl		check_errata_833471
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r1, r1, CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+	mov	r1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_833471
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #859972.
+	 * This applies only to revision <= r1p3 of Cortex A57.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a57_859972_wa
+	mov		r2, lr
+	bl		check_errata_859972
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r1, r1, CORTEX_A57_CPUACTLR_DIS_INSTR_PREFETCH
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a57_859972_wa
+
+func check_errata_859972
+	mov	r1, #0x13
+	b	cpu_rev_var_ls
+endfunc check_errata_859972
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A57.
+	 * Shall clobber: r0-r6
+	 * -------------------------------------------------
+	 */
+func cortex_a57_reset_func
+	mov	r5, lr
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+#if ERRATA_A57_806969
+	mov	r0, r4
+	bl	errata_a57_806969_wa
+#endif
+
+#if ERRATA_A57_813420
+	mov	r0, r4
+	bl	errata_a57_813420_wa
+#endif
+
+#if A57_DISABLE_NON_TEMPORAL_HINT
+	mov	r0, r4
+	bl	a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+	mov	r0, r4
+	bl	errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+	mov	r0, r4
+	bl	errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+	mov	r0, r4
+	bl	errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+	mov	r0, r4
+	bl	errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+	mov	r0, r4
+	bl	errata_a57_833471_wa
+#endif
+
+#if ERRATA_A57_859972
+	mov	r0, r4
+	bl	errata_a57_859972_wa
+#endif
+
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	ldcopr16	r0, r1, CORTEX_A57_ECTLR
+	orr64_imm	r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+	stcopr16	r0, r1,	CORTEX_A57_ECTLR
+	isb
+	bx	r5
+endfunc cortex_a57_reset_func
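Every workaround call in the reset function above is gated by a build-time flag (ERRATA_A57_*, A57_DISABLE_NON_TEMPORAL_HINT), and all of them consume the revision-variant value read once via cpu_get_rev_var. In C terms the structure is roughly the following sketch (the extern declarations are stand-ins for the assembly routines; the flags normally arrive as -D defines from the build system):

    extern unsigned int cpu_get_rev_var(void);
    extern void errata_a57_806969_wa(unsigned int rev_var);
    extern void errata_a57_813420_wa(unsigned int rev_var);
    extern void errata_a57_859972_wa(unsigned int rev_var);
    extern void enable_smp_bit(void);   /* illustrative: sets the ECTLR SMP bit, then ISB */

    void cortex_a57_reset_func_sketch(void)
    {
        unsigned int rev_var = cpu_get_rev_var();

    #if ERRATA_A57_806969
        errata_a57_806969_wa(rev_var);
    #endif
    #if ERRATA_A57_813420
        errata_a57_813420_wa(rev_var);
    #endif
        /* ... one such block per errata build flag ... */
    #if ERRATA_A57_859972
        errata_a57_859972_wa(rev_var);
    #endif

        enable_smp_bit();
    }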
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Cortex-A57.
+	 * ----------------------------------------------------
+	 */
+func cortex_a57_core_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a57_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a57_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Cortex-A57.
+	 * Clobbers: r0-r3
+	 * -------------------------------------------------------
+	 */
+func cortex_a57_cluster_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a57_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* ---------------------------------------------
+	 * Flush L2 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a57_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A57. Must follow AAPCS.
+ */
+func cortex_a57_errata_report
+	push	{r12, lr}
+
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_A57_806969, cortex_a57, 806969
+	report_errata ERRATA_A57_813419, cortex_a57, 813419
+	report_errata ERRATA_A57_813420, cortex_a57, 813420
+	report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
+		disable_ldnp_overread
+	report_errata ERRATA_A57_826974, cortex_a57, 826974
+	report_errata ERRATA_A57_826977, cortex_a57, 826977
+	report_errata ERRATA_A57_828024, cortex_a57, 828024
+	report_errata ERRATA_A57_829520, cortex_a57, 829520
+	report_errata ERRATA_A57_833471, cortex_a57, 833471
+	report_errata ERRATA_A57_859972, cortex_a57, 859972
+
+	pop	{r12, lr}
+	bx	lr
+endfunc cortex_a57_errata_report
+#endif
+
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+	cortex_a57_reset_func, \
+	cortex_a57_core_pwr_dwn, \
+	cortex_a57_cluster_pwr_dwn
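declare_cpu_ops at the end of each of these files emits one entry into the cpu_ops table that the helpers in cpu_helpers.S walk at runtime. The exact layout is owned by cpu_macros.S / cpu_data.h (the CPU_MIDR, CPU_RESET_FUNC, CPU_PWR_DWN_OPS and CPU_ERRATA_* offsets used there); purely as an illustration, one entry can be pictured as:

    #include <stdint.h>

    /* Conceptual view only; the real layout is defined by cpu_macros.S. */
    struct cpu_ops_sketch {
        uint32_t midr;                    /* CPU_MIDR: implementer/part number to match   */
        void (*reset_func)(void);         /* CPU_RESET_FUNC: e.g. cortex_a57_reset_func   */
        void (*pwr_dwn_ops[2])(void);     /* CPU_PWR_DWN_OPS: core, then cluster handler  */
        void (*errata_report)(void);      /* CPU_ERRATA_FUNC, only when REPORT_ERRATA=1   */
        void *errata_lock;                /* CPU_ERRATA_LOCK                              */
        unsigned int *errata_printed;     /* CPU_ERRATA_PRINTED                           */
    };

The MIDR field is what get_cpu_ops_ptr compares against, and the two power-down slots are what prepare_cpu_pwr_dwn indexes by power level.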
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
new file mode 100644
index 0000000..69cc2ea
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+	/* ---------------------------------------------
+	 * Disable all types of L2 prefetches.
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_l2_prefetch
+	ldcopr16	r0, r1, CORTEX_A72_ECTLR
+	orr64_imm	r0, r1, CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+	bic64_imm	r0, r1, (CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK | \
+				CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK)
+	stcopr16	r0, r1, CORTEX_A72_ECTLR
+	isb
+	bx	lr
+endfunc cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_hw_prefetcher
+	ldcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
+	stcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	isb
+	dsb	ish
+	bx	lr
+endfunc cortex_a72_disable_hw_prefetcher
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * Clobbers: r0-r1
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_smp
+	ldcopr16	r0, r1, CORTEX_A72_ECTLR
+	bic64_imm	r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+	stcopr16	r0, r1, CORTEX_A72_ECTLR
+	bx	lr
+endfunc cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Disable debug interfaces
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_ext_debug
+	mov	r0, #1
+	stcopr	r0, DBGOSDLR
+	isb
+	dsb	sy
+	bx	lr
+endfunc cortex_a72_disable_ext_debug
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A72 Errata #859971.
+	 * This applies only to revision <= r0p3 of Cortex A72.
+	 * Inputs:
+	 * r0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: r0-r3
+	 * ---------------------------------------------------
+	 */
+func errata_a72_859971_wa
+	mov		r2, lr
+	bl		check_errata_859971
+	mov		lr, r2
+	cmp		r0, #ERRATA_NOT_APPLIES
+	beq		1f
+	ldcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	orr64_imm	r1, r1, CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH
+	stcopr16	r0, r1, CORTEX_A72_CPUACTLR
+1:
+	bx		lr
+endfunc errata_a72_859971_wa
+
+func check_errata_859971
+	mov		r1, #0x03
+	b		cpu_rev_var_ls
+endfunc check_errata_859971
+
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A72.
+	 * -------------------------------------------------
+	 */
+func cortex_a72_reset_func
+	mov	r5, lr
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+#if ERRATA_A72_859971
+	mov	r0, r4
+	bl	errata_a72_859971_wa
+#endif
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	ldcopr16	r0, r1, CORTEX_A72_ECTLR
+	orr64_imm	r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+	stcopr16	r0, r1,	CORTEX_A72_ECTLR
+	isb
+	bx	lr
+endfunc cortex_a72_reset_func
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Cortex-A72.
+	 * ----------------------------------------------------
+	 */
+func cortex_a72_core_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_hw_prefetcher
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Cortex-A72.
+	 * -------------------------------------------------------
+	 */
+func cortex_a72_cluster_pwr_dwn
+	push	{r12, lr}
+
+	/* Assert if cache is enabled */
+#if ASM_ASSERTION
+	ldcopr	r0, SCTLR
+	tst	r0, #SCTLR_C_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level1
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* -------------------------------------------------
+	 * Flush the L2 caches.
+	 * -------------------------------------------------
+	 */
+	mov	r0, #DC_OP_CISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	pop	{r12, lr}
+	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A72. Must follow AAPCS.
+ */
+func cortex_a72_errata_report
+	push	{r12, lr}
+
+	bl	cpu_get_rev_var
+	mov	r4, r0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_A72_859971, cortex_a72, 859971
+
+	pop	{r12, lr}
+	bx	lr
+endfunc cortex_a72_errata_report
+#endif
+
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+	cortex_a72_reset_func, \
+	cortex_a72_core_pwr_dwn, \
+	cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 0000000..bfdc1e4
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_data.h>
+#include <cpu_macros.S>
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32)
+	/*
+	 * The reset handler common to all platforms.  After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+	 * in the cpu_ops is invoked. The reset handler is invoked very early
+	 * in the boot sequence and it is assumed that we can clobber r0 - r10
+	 * without the need to follow AAPCS.
+	 * Clobbers: r0 - r10
+	 */
+	.globl	reset_handler
+func reset_handler
+	mov	r10, lr
+
+	/* The plat_reset_handler can clobber r0 - r9 */
+	bl	plat_reset_handler
+
+	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+	bl	get_cpu_ops_ptr
+
+#if ENABLE_ASSERTIONS
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the cpu_ops reset handler */
+	ldr	r1, [r0, #CPU_RESET_FUNC]
+	cmp	r1, #0
+	mov	lr, r10
+	bxne	r1
+	bx	lr
+endfunc reset_handler
+
+#endif /* IMAGE_BL1 || IMAGE_BL32 */
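The reset handler therefore reduces to: run the platform hook, locate the cpu_ops entry whose MIDR matches the running core, and call its reset function if one was registered. A compact C rendering of that flow (the minimal struct below only models the two fields the handler touches):

    extern void plat_reset_handler(void);
    extern void *get_cpu_ops_ptr(void);     /* returns 0 when no entry matches */

    struct cpu_ops_min {
        unsigned long midr;                 /* CPU_MIDR */
        void (*reset_func)(void);           /* CPU_RESET_FUNC */
    };

    void reset_handler_sketch(void)
    {
        struct cpu_ops_min *ops;

        plat_reset_handler();               /* platform hook, free to clobber r0 - r9 */
        ops = get_cpu_ops_ptr();            /* asserted non-NULL when assertions are enabled */
        if (ops->reset_func != 0)
            ops->reset_func();
    }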
+
+#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
+	/*
+	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
+	 *
+	 * Prepare CPU power down function for all platforms. The function takes
+	 * a domain level to be powered down as its parameter. After the cpu_ops
+	 * pointer is retrieved from cpu_data, the handler for requested power
+	 * level is called.
+	 */
+	.globl	prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+	/*
+	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+	 * power down handler for the last power level
+	 */
+	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+	cmp	r0, r2
+	movhi	r0, r2
+
+	push	{r0, lr}
+	bl	_cpu_data
+	pop	{r2, lr}
+
+	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+
+	/* Get the appropriate power down handler */
+	mov	r1, #CPU_PWR_DWN_OPS
+	add	r1, r1, r2, lsl #2
+	ldr	r1, [r0, r1]
+	bx	r1
+endfunc prepare_cpu_pwr_dwn
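prepare_cpu_pwr_dwn clamps the requested power domain level to the number of handlers a cpu_ops entry carries and then tail-calls the selected handler. Roughly, in C (the lookup through per-CPU data and the CPU_PWR_DWN_OPS offset are collapsed into one stand-in accessor, and MAX_PWR_DWN_OPS stands in for CPU_MAX_PWR_DWN_OPS):

    #define MAX_PWR_DWN_OPS 2   /* core- and cluster-level handlers, as registered by declare_cpu_ops */

    struct pwr_dwn_ops_sketch {
        void (*handlers[MAX_PWR_DWN_OPS])(void);    /* the CPU_PWR_DWN_OPS slots */
    };

    /* Stand-in for "_cpu_data, load CPU_DATA_CPU_OPS_PTR, add CPU_PWR_DWN_OPS". */
    extern struct pwr_dwn_ops_sketch *get_pwr_dwn_ops(void);

    void prepare_cpu_pwr_dwn_sketch(unsigned int power_level)
    {
        struct pwr_dwn_ops_sketch *ops = get_pwr_dwn_ops();

        if (power_level > MAX_PWR_DWN_OPS - 1)      /* the movhi clamp in the assembly */
            power_level = MAX_PWR_DWN_OPS - 1;

        ops->handlers[power_level]();               /* bx r1: tail call into the handler */
    }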
+
+	/*
+	 * Initializes the cpu_ops_ptr if not already initialized
+	 * in cpu_data. This must only be called after the data cache
+	 * is enabled. AAPCS is followed.
+	 */
+	.globl	init_cpu_ops
+func init_cpu_ops
+	push	{r4 - r6, lr}
+	bl	_cpu_data
+	mov	r6, r0
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+	cmp	r1, #0
+	bne	1f
+	bl	get_cpu_ops_ptr
+#if ENABLE_ASSERTIONS
+	cmp	r0, #0
+	ASM_ASSERT(ne)
+#endif
+	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1:
+	pop	{r4 - r6, pc}
+endfunc init_cpu_ops
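init_cpu_ops is a lazy, one-time cache of the cpu_ops pointer in per-CPU data; later consumers (prepare_cpu_pwr_dwn, print_errata_status) read the cached pointer instead of re-scanning the table. Sketched in C, with a stand-in struct for the per-CPU data field it writes:

    struct cpu_data_sketch {
        void *cpu_ops_ptr;          /* CPU_DATA_CPU_OPS_PTR */
    };

    extern struct cpu_data_sketch *_cpu_data(void);
    extern void *get_cpu_ops_ptr(void);

    void init_cpu_ops_sketch(void)
    {
        struct cpu_data_sketch *data = _cpu_data();

        if (data->cpu_ops_ptr == 0)                 /* already initialized? then done */
            data->cpu_ops_ptr = get_cpu_ops_ptr();  /* asserted non-NULL with assertions on */
    }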
+
+#endif /* IMAGE_BL32 */
+
+	/*
+	 * The below function returns the cpu_ops structure matching the
+	 * MIDR of the core. It reads the MIDR and finds the matching
+	 * entry in the list of registered cpu_ops. Only the implementation
+	 * and part number are used to match the entries.
+	 * Return:
+	 *     r0 - The matching cpu_ops pointer on success
+	 *     r0 - 0 on failure
+	 * Clobbers: r0 - r5
+	 */
+	.globl	get_cpu_ops_ptr
+func get_cpu_ops_ptr
+	/* Get the cpu_ops start and end locations */
+	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
+	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+	/* Initialize the return parameter */
+	mov	r0, #0
+
+	/* Read the MIDR_EL1 */
+	ldcopr	r2, MIDR
+	ldr	r3, =CPU_IMPL_PN_MASK
+
+	/* Retain only the implementation and part number using mask */
+	and	r2, r2, r3
+1:
+	/* Check if we have reached end of list */
+	cmp	r4, r5
+	bhs	error_exit
+
+	/* load the midr from the cpu_ops */
+	ldr	r1, [r4], #CPU_OPS_SIZE
+	and	r1, r1, r3
+
+	/* Check if midr matches to midr of this core */
+	cmp	r1, r2
+	bne	1b
+
+	/* Subtract the increment and offset to get the cpu-ops pointer */
+	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+error_exit:
+	bx	lr
+endfunc get_cpu_ops_ptr
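The lookup itself is a linear scan of the linker-collected cpu_ops section, comparing only the implementer and part-number fields of the MIDR. An equivalent C loop (the entry is treated as if the MIDR were its first field, and the mask/size constants are illustrative stand-ins for CPU_IMPL_PN_MASK and CPU_OPS_SIZE):

    #include <stdint.h>

    extern char __CPU_OPS_START__[], __CPU_OPS_END__[];     /* linker-provided bounds */
    extern uint32_t read_midr(void);                        /* stand-in for ldcopr r2, MIDR */

    #define IMPL_PN_MASK 0xff00fff0u    /* implementer[31:24] | part number[15:4] */
    #define OPS_SIZE     32u            /* placeholder for CPU_OPS_SIZE */

    void *get_cpu_ops_ptr_sketch(void)
    {
        uint32_t midr = read_midr() & IMPL_PN_MASK;
        char *cur;

        for (cur = __CPU_OPS_START__; cur < __CPU_OPS_END__; cur += OPS_SIZE) {
            uint32_t entry_midr = *(uint32_t *)cur;         /* the CPU_MIDR field */
            if ((entry_midr & IMPL_PN_MASK) == midr)
                return cur;                                 /* matching cpu_ops entry */
        }
        return 0;                                           /* no match */
    }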
+
+/*
+ * Extract the CPU revision and variant, and combine them into a single value
+ * for easier comparison.
+ */
+	.globl	cpu_get_rev_var
+func cpu_get_rev_var
+	ldcopr	r1, MIDR
+
+	/*
+	 * Extract the variant[23:20] and revision[3:0] fields from r1 and pack
+	 * them into r0[7:0] as variant[7:4] and revision[3:0]:
+	 *
+	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
+	 * extract r1[3:0] into r0[3:0] retaining other bits.
+	 */
+	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+	bx	lr
+endfunc cpu_get_rev_var
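The packed value is simply 0xVR for revision rVpR of the part: variant in bits [7:4], revision in bits [3:0], so r1p2 becomes 0x12. The ubfx/bfi pair above is equivalent to this C expression:

    #include <stdint.h>

    /* Pack MIDR.Variant[23:20] and MIDR.Revision[3:0] into 0xVR (r1p2 -> 0x12). */
    static unsigned int pack_rev_var(uint32_t midr)
    {
        unsigned int variant  = (midr >> 20) & 0xfu;    /* MIDR_VAR_SHIFT = 20 */
        unsigned int revision = midr & 0xfu;            /* MIDR_REV_SHIFT = 0  */

        return (variant << 4) | revision;
    }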
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is less than or equal to the
+ * given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
+ */
+	.globl	cpu_rev_var_ls
+func cpu_rev_var_ls
+	cmp	r0, r1
+	movls	r0, #ERRATA_APPLIES
+	movhi	r0, #ERRATA_NOT_APPLIES
+	bx	lr
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is greater than or equal to
+ * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
+ */
+	.globl	cpu_rev_var_hs
+func cpu_rev_var_hs
+	cmp	r0, r1
+	movge	r0, #ERRATA_APPLIES
+	movlt	r0, #ERRATA_NOT_APPLIES
+	bx	lr
+endfunc cpu_rev_var_hs
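Both comparison helpers work on the packed 0xVR value, so an encoded cut-off such as 0x12 reads directly as r1p2. In C (the ERRATA_* values are as implied by the zero checks in the callers; the real constants are defined in cpu_macros.S):

    #define ERRATA_APPLIES      1
    #define ERRATA_NOT_APPLIES  0

    /* "ls": the erratum applies up to and including the given revision-variant. */
    static int rev_var_ls(unsigned int rev_var, unsigned int max_rev_var)
    {
        return (rev_var <= max_rev_var) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
    }

    /* "hs": the erratum applies from the given revision-variant onwards. */
    static int rev_var_hs(unsigned int rev_var, unsigned int min_rev_var)
    {
        return (rev_var >= min_rev_var) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
    }

    /* Example: check_errata_855873 uses the "hs" variant with 0x03, i.e. r0p3 and later. */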
+
+#if REPORT_ERRATA
+/*
+ * void print_errata_status(void);
+ *
+ * Function to print errata status for CPUs of its class. Must be called only:
+ *
+ *   - with the MMU and data caches enabled;
+ *   - after cpu_ops have been initialized in per-CPU data.
+ */
+	.globl print_errata_status
+func print_errata_status
+	push	{r4, lr}
+#ifdef IMAGE_BL1
+	/*
+	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
+	 * directly.
+	 */
+	bl	get_cpu_ops_ptr
+	ldr	r0, [r0, #CPU_ERRATA_FUNC]
+	cmp	r0, #0
+	blxne	r0
+#else
+	/*
+	 * Retrieve pointer to cpu_ops, and further, the errata printing
+	 * function. If it's non-NULL, jump to the function in turn.
+	 */
+	bl	_cpu_data
+	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+	ldr	r0, [r1, #CPU_ERRATA_FUNC]
+	cmp	r0, #0
+	beq	1f
+
+	mov	r4, r0
+
+	/*
+	 * Load pointers to errata lock and printed flag. Call
+	 * errata_needs_reporting to check whether this CPU needs to report
+	 * errata status pertaining to its class.
+	 */
+	ldr	r0, [r1, #CPU_ERRATA_LOCK]
+	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
+	bl	errata_needs_reporting
+	cmp	r0, #0
+	blxne	r4
+1:
+#endif
+	pop	{r4, pc}
+endfunc print_errata_status
+#endif
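Outside BL1, the reporting path above goes through per-CPU data: fetch the cached cpu_ops pointer and, if the CPU class registered an errata printing function, run it only when errata_needs_reporting says this CPU should perform the once-per-class report. A C sketch of that branch, with stand-in types for the lock and flag (the exact prototype lives in the errata reporting code):

    struct cpu_ops_errata_sketch {
        void (*errata_report)(void);    /* CPU_ERRATA_FUNC */
        void *errata_lock;              /* CPU_ERRATA_LOCK */
        unsigned int *errata_printed;   /* CPU_ERRATA_PRINTED */
    };

    /* Stand-in for "_cpu_data, then load CPU_DATA_CPU_OPS_PTR". */
    extern struct cpu_ops_errata_sketch *cpu_ops_from_cpu_data(void);
    extern int errata_needs_reporting(void *lock, unsigned int *printed);

    void print_errata_status_sketch(void)
    {
        struct cpu_ops_errata_sketch *ops = cpu_ops_from_cpu_data();

        if (ops->errata_report == 0)
            return;                     /* this CPU class has no errata report function */

        if (errata_needs_reporting(ops->errata_lock, ops->errata_printed))
            ops->errata_report();
    }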
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 58a64a6..7592e3d 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <aem_generic.h>
 #include <arch.h>
@@ -49,6 +25,7 @@
 	 * ---------------------------------------------
 	 */
 	b	dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
 
 
 func aem_generic_cluster_pwr_dwn
@@ -67,6 +44,7 @@
 	 */
 	mov	x0, #DCCISW
 	b	dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
@@ -77,13 +55,22 @@
 	 * reported.
 	 * ---------------------------------------------
 	 */
+.section .rodata.aem_generic_regs, "aS"
+aem_generic_regs:  /* The ascii list of register names to be reported */
+	.asciz	"" /* no registers to report */
+
 func aem_generic_cpu_reg_dump
-	mov	x6, #0 /* no registers to report */
+	adr	x6, aem_generic_regs
 	ret
+endfunc aem_generic_cpu_reg_dump
 
 
 /* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+	aem_generic_core_pwr_dwn, \
+	aem_generic_cluster_pwr_dwn
 
 /* cpu_ops for Foundation FVP */
-declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+	aem_generic_core_pwr_dwn, \
+	aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
new file mode 100644
index 0000000..b22189c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a35.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	/* ---------------------------------------------
+	 * Disable L1 data cache and unified L2 cache
+	 * ---------------------------------------------
+	 */
+func cortex_a35_disable_dcache
+	mrs	x1, sctlr_el3
+	bic	x1, x1, #SCTLR_C_BIT
+	msr	sctlr_el3, x1
+	isb
+	ret
+endfunc cortex_a35_disable_dcache
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * ---------------------------------------------
+	 */
+func cortex_a35_disable_smp
+	mrs	x0, CORTEX_A35_CPUECTLR_EL1
+	bic	x0, x0, #CORTEX_A35_CPUECTLR_SMPEN_BIT
+	msr	CORTEX_A35_CPUECTLR_EL1, x0
+	isb
+	dsb	sy
+	ret
+endfunc cortex_a35_disable_smp
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A35.
+	 * Clobbers: x0
+	 * -------------------------------------------------
+	 */
+func cortex_a35_reset_func
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A35_CPUECTLR_EL1
+	orr	x0, x0, #CORTEX_A35_CPUECTLR_SMPEN_BIT
+	msr	CORTEX_A35_CPUECTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a35_reset_func
+
+func cortex_a35_core_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a35_disable_dcache
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a35_disable_smp
+endfunc cortex_a35_core_pwr_dwn
+
+func cortex_a35_cluster_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a35_disable_dcache
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* ---------------------------------------------
+	 * Flush L2 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a35_disable_smp
+endfunc cortex_a35_cluster_pwr_dwn
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a35 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a35_regs, "aS"
+cortex_a35_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a35_cpu_reg_dump
+	adr	x6, cortex_a35_regs
+	mrs	x8, CORTEX_A35_CPUECTLR_EL1
+	ret
+endfunc cortex_a35_cpu_reg_dump
+
+declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
+	cortex_a35_reset_func, \
+	cortex_a35_core_pwr_dwn, \
+	cortex_a35_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index 188f3c1..3e480bc 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,39 +1,21 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <arch.h>
 #include <asm_macros.S>
 #include <bl_common.h>
 #include <cortex_a53.h>
 #include <cpu_macros.S>
+#include <debug.h>
 #include <plat_macros.S>
 
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870	1
+#endif
+
 	/* ---------------------------------------------
 	 * Disable L1 data cache and unified L2 cache
 	 * ---------------------------------------------
@@ -44,34 +26,174 @@
 	msr	sctlr_el3, x1
 	isb
 	ret
+endfunc cortex_a53_disable_dcache
 
 	/* ---------------------------------------------
 	 * Disable intra-cluster coherency
 	 * ---------------------------------------------
 	 */
 func cortex_a53_disable_smp
-	mrs	x0, CPUECTLR_EL1
-	bic	x0, x0, #CPUECTLR_SMP_BIT
-	msr	CPUECTLR_EL1, x0
+	mrs	x0, CORTEX_A53_ECTLR_EL1
+	bic	x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
+	msr	CORTEX_A53_ECTLR_EL1, x0
 	isb
 	dsb	sy
 	ret
+endfunc cortex_a53_disable_smp
 
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A53 Errata #826319.
+	 * This applies only to revision <= r0p2 of Cortex A53.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_a53_826319_wa
+	/*
+	 * Compare x0 against revision r0p2
+	 */
+	mov	x17, x30
+	bl	check_errata_826319
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A53_L2ACTLR_EL1
+	bic	x1, x1, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+	orr	x1, x1, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+	msr	CORTEX_A53_L2ACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+	mov	x1, #0x02
+	b	cpu_rev_var_ls
+endfunc check_errata_826319
+
+	/* ---------------------------------------------------------------------
+	 * Disable the cache non-temporal hint.
+	 *
+	 * This ignores the Transient allocation hint in the MAIR and treats
+	 * allocations the same as non-transient allocation types. As a result,
+	 * the LDNP and STNP instructions in AArch64 behave the same as the
+	 * equivalent LDP and STP instructions.
+	 *
+	 * This is relevant only for revisions <= r0p3 of Cortex-A53.
+	 * From r0p4 and onwards, the bit to disable the hint is enabled by
+	 * default at reset.
+	 *
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------------------------
+	 */
+func a53_disable_non_temporal_hint
+	/*
+	 * Compare x0 against revision r0p3
+	 */
+	mov	x17, x30
+	bl	check_errata_disable_non_temporal_hint
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A53_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A53_CPUACTLR_EL1_DTAH
+	msr	CORTEX_A53_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+	mov	x1, #0x03
+	b	cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A53 Errata #855873.
+	 *
+	 * This applies only to revisions >= r0p3 of Cortex A53.
+	 * Earlier revisions of the core are affected as well, but don't
+	 * have the chicken bit in the CPUACTLR register. It is expected that
+	 * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_a53_855873_wa
+	/*
+	 * Compare x0 against revision r0p3 and higher
+	 */
+	mov	x17, x30
+	bl	check_errata_855873
+	cbz	x0, 1f
+
+	mrs	x1, CORTEX_A53_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A53_CPUACTLR_EL1_ENDCCASCI
+	msr	CORTEX_A53_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+	mov	x1, #0x03
+	b	cpu_rev_var_hs
+endfunc check_errata_855873
+
+/*
+ * Errata workaround for Cortex A53 Errata #835769.
+ * This applies to revisions <= r0p4 of Cortex A53.
+ * This workaround is statically enabled at build time.
+ */
+func check_errata_835769
+	mov	x1, #0x04
+	b	cpu_rev_var_ls
+endfunc check_errata_835769
+
+/*
+ * Errata workaround for Cortex A53 Errata #843419.
+ * This applies to revisions <= r0p4 of Cortex A53.
+ * This workaround is statically enabled at build time.
+ */
+func check_errata_843419
+	mov	x1, #0x04
+	b	cpu_rev_var_ls
+endfunc check_errata_843419
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A53.
+	 * Shall clobber: x0-x19
+	 * -------------------------------------------------
+	 */
 func cortex_a53_reset_func
+	mov	x19, x30
+	bl	cpu_get_rev_var
+	mov	x18, x0
+
+
+#if ERRATA_A53_826319
+	mov	x0, x18
+	bl	errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+	mov	x0, x18
+	bl	a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+	mov	x0, x18
+	bl	errata_a53_855873_wa
+#endif
+
 	/* ---------------------------------------------
-	 * As a bare minimum enable the SMP bit if it is
-	 * not already set.
-	 * Clobbers : x0
+	 * Enable the SMP bit.
 	 * ---------------------------------------------
 	 */
-	mrs	x0, CPUECTLR_EL1
-	tst	x0, #CPUECTLR_SMP_BIT
-	b.ne	skip_smp_setup
-	orr	x0, x0, #CPUECTLR_SMP_BIT
-	msr	CPUECTLR_EL1, x0
+	mrs	x0, CORTEX_A53_ECTLR_EL1
+	orr	x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
+	msr	CORTEX_A53_ECTLR_EL1, x0
 	isb
-skip_smp_setup:
-	ret
+	ret	x19
+endfunc cortex_a53_reset_func
 
 func cortex_a53_core_pwr_dwn
 	mov	x18, x30
@@ -95,6 +217,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
 
 func cortex_a53_cluster_pwr_dwn
 	mov	x18, x30
@@ -131,6 +254,32 @@
 	 */
 	mov	x30, x18
 	b	cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A53. Must follow AAPCS.
+ */
+func cortex_a53_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_A53_826319, cortex_a53, 826319
+	report_errata ERRATA_A53_835769, cortex_a53, 835769
+	report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+	report_errata ERRATA_A53_843419, cortex_a53, 843419
+	report_errata ERRATA_A53_855873, cortex_a53, 855873
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_a53_errata_report
+#endif
 
 	/* ---------------------------------------------
 	 * This function provides cortex_a53 specific
@@ -143,11 +292,19 @@
 	 */
 .section .rodata.cortex_a53_regs, "aS"
 cortex_a53_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", ""
+	.asciz	"cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", \
+		"cpuactlr_el1", ""
 
 func cortex_a53_cpu_reg_dump
 	adr	x6, cortex_a53_regs
-	mrs	x8, CPUECTLR_EL1
+	mrs	x8, CORTEX_A53_ECTLR_EL1
+	mrs	x9, CORTEX_A53_MERRSR_EL1
+	mrs	x10, CORTEX_A53_L2MERRSR_EL1
+	mrs	x11, CORTEX_A53_CPUACTLR_EL1
 	ret
+endfunc cortex_a53_cpu_reg_dump
 
-declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+	cortex_a53_reset_func, \
+	cortex_a53_core_pwr_dwn, \
+	cortex_a53_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
new file mode 100644
index 0000000..741c773
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a55.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_a55_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A55_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_A55_CORE_PWRDN_EN_MASK
+	msr	CORTEX_A55_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a55_core_pwr_dwn
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a55 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a55_regs, "aS"
+cortex_a55_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a55_cpu_reg_dump
+	adr	x6, cortex_a55_regs
+	mrs	x8, CORTEX_A55_CPUECTLR_EL1
+	ret
+endfunc cortex_a55_cpu_reg_dump
+
+declare_cpu_ops cortex_a55, CORTEX_A55_MIDR, \
+	CPU_NO_RESET_FUNC, \
+	cortex_a55_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index eb6c736..a720e98 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <arch.h>
 #include <asm_macros.S>
@@ -33,6 +9,7 @@
 #include <bl_common.h>
 #include <cortex_a57.h>
 #include <cpu_macros.S>
+#include <debug.h>
 #include <plat_macros.S>
 
 	/* ---------------------------------------------
@@ -45,31 +22,34 @@
 	msr	sctlr_el3, x1
 	isb
 	ret
+endfunc cortex_a57_disable_dcache
 
 	/* ---------------------------------------------
 	 * Disable all types of L2 prefetches.
 	 * ---------------------------------------------
 	 */
 func cortex_a57_disable_l2_prefetch
-	mrs	x0, CPUECTLR_EL1
-	orr	x0, x0, #CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
-	mov	x1, #CPUECTLR_L2_IPFTCH_DIST_MASK
-	orr	x1, x1, #CPUECTLR_L2_DPFTCH_DIST_MASK
+	mrs	x0, CORTEX_A57_ECTLR_EL1
+	orr	x0, x0, #CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+	mov	x1, #CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK
+	orr	x1, x1, #CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK
 	bic	x0, x0, x1
-	msr	CPUECTLR_EL1, x0
+	msr	CORTEX_A57_ECTLR_EL1, x0
 	isb
 	dsb	ish
 	ret
+endfunc cortex_a57_disable_l2_prefetch
 
 	/* ---------------------------------------------
 	 * Disable intra-cluster coherency
 	 * ---------------------------------------------
 	 */
 func cortex_a57_disable_smp
-	mrs	x0, CPUECTLR_EL1
-	bic	x0, x0, #CPUECTLR_SMP_BIT
-	msr	CPUECTLR_EL1, x0
+	mrs	x0, CORTEX_A57_ECTLR_EL1
+	bic	x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
+	msr	CORTEX_A57_ECTLR_EL1, x0
 	ret
+endfunc cortex_a57_disable_smp
 
 	/* ---------------------------------------------
 	 * Disable debug interfaces
@@ -81,113 +61,338 @@
 	isb
 	dsb	sy
 	ret
+endfunc cortex_a57_disable_ext_debug
 
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A57 Errata #806969.
 	 * This applies only to revision r0p0 of Cortex A57.
 	 * Inputs:
 	 * x0: variant[4:7] and revision[0:3] of current cpu.
-	 * Clobbers : x0 - x5
+	 * Shall clobber: x0-x17
 	 * --------------------------------------------------
 	 */
 func errata_a57_806969_wa
 	/*
 	 * Compare x0 against revision r0p0
 	 */
-	cbz	x0, apply_806969
-#if DEBUG
-	b	print_revision_warning
-#else
-	ret
-#endif
-apply_806969:
-	/*
-	 * Test if errata has already been applied in an earlier
-	 * invocation of the reset handler and does not need to
-	 * be applied again.
-	 */
-	mrs	x1, CPUACTLR_EL1
-	tst	x1, #CPUACTLR_NO_ALLOC_WBWA
-	b.ne	skip_806969
-	orr	x1, x1, #CPUACTLR_NO_ALLOC_WBWA
-	msr	CPUACTLR_EL1, x1
-skip_806969:
-	ret
+	mov	x17, x30
+	bl	check_errata_806969
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_806969_wa
 
+func check_errata_806969
+	mov	x1, #0x00
+	b	cpu_rev_var_ls
+endfunc check_errata_806969
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #813419.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * ---------------------------------------------------
+	 */
+func check_errata_813419
+	/*
+	 * Even though this is only needed for revision r0p0, it
+	 * is always applied due to limitations of the current
+	 * errata framework.
+	 */
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_errata_813419
 
 	/* ---------------------------------------------------
 	 * Errata Workaround for Cortex A57 Errata #813420.
 	 * This applies only to revision r0p0 of Cortex A57.
 	 * Inputs:
 	 * x0: variant[4:7] and revision[0:3] of current cpu.
-	 * Clobbers : x0 - x5
+	 * Shall clobber: x0-x17
 	 * ---------------------------------------------------
 	 */
 func errata_a57_813420_wa
 	/*
 	 * Compare x0 against revision r0p0
 	 */
-	cbz	x0, apply_813420
-#if DEBUG
-	b	print_revision_warning
-#else
-	ret
-#endif
-apply_813420:
-	/*
-	 * Test if errata has already been applied in an earlier
-	 * invocation of the reset handler and does not need to
-	 * be applied again.
+	mov	x17, x30
+	bl	check_errata_813420
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+	mov	x1, #0x00
+	b	cpu_rev_var_ls
+endfunc check_errata_813420
+
+	/* --------------------------------------------------------------------
+	 * Disable the over-read from the LDNP instruction.
+	 *
+	 * This applies to all revisions <= r1p2. The performance degradation
+	 * observed with LDNP/STNP has been fixed on r1p3 and onwards.
+	 *
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------------------------
 	 */
-	mrs	x1, CPUACTLR_EL1
-	tst	x1, #CPUACTLR_DCC_AS_DCCI
-	b.ne	skip_813420
-	orr	x1, x1, #CPUACTLR_DCC_AS_DCCI
-	msr	CPUACTLR_EL1, x1
-skip_813420:
-	ret
+func a57_disable_ldnp_overread
+	/*
+	 * Compare x0 against revision r1p2
+	 */
+	mov	x17, x30
+	bl	check_errata_disable_ldnp_overread
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+	mov	x1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #826974.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------
+	 */
+func errata_a57_826974_wa
+	/*
+	 * Compare x0 against revision r1p1
+	 */
+	mov	x17, x30
+	bl	check_errata_826974
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+	mov	x1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_826974
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #826977.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------
+	 */
+func errata_a57_826977_wa
+	/*
+	 * Compare x0 against revision r1p1
+	 */
+	mov	x17, x30
+	bl	check_errata_826977
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+	mov	x1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_826977
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #828024.
+	 * This applies only to revision <= r1p1 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------
+	 */
+func errata_a57_828024_wa
+	/*
+	 * Compare x0 against revision r1p1
+	 */
+	mov	x17, x30
+	bl	check_errata_828024
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	/*
+	 * Setting the relevant bits in CPUACTLR_EL1 has to be done in 2
+	 * instructions here because the combined bitmask cannot be encoded as
+	 * a single logical immediate, so it cannot be set in one instruction.
+	 */
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+	orr	x1, x1, #(CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING | \
+			  CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING)
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+	mov	x1, #0x11
+	b	cpu_rev_var_ls
+endfunc check_errata_828024
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #829520.
+	 * This applies only to revision <= r1p2 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------
+	 */
+func errata_a57_829520_wa
+	/*
+	 * Compare x0 against revision r1p2
+	 */
+	mov	x17, x30
+	bl	check_errata_829520
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+	mov	x1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_829520
+
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #833471.
+	 * This applies only to revision <= r1p2 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * ---------------------------------------------------
+	 */
+func errata_a57_833471_wa
+	/*
+	 * Compare x0 against revision r1p2
+	 */
+	mov	x17, x30
+	bl	check_errata_833471
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+	mov	x1, #0x12
+	b	cpu_rev_var_ls
+endfunc check_errata_833471
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #859972.
+	 * This applies only to revision <= r1p3 of Cortex A57.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_a57_859972_wa
+	mov	x17, x30
+	bl	check_errata_859972
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A57_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+	msr	CORTEX_A57_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a57_859972_wa
+
+func check_errata_859972
+	mov	x1, #0x13
+	b	cpu_rev_var_ls
+endfunc check_errata_859972
 
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
-	 * Clobbers: x0-x5, x15, x19, x30
+	 * Shall clobber: x0-x19
 	 * -------------------------------------------------
 	 */
 func cortex_a57_reset_func
 	mov	x19, x30
-	mrs	x0, midr_el1
-
-	/*
-	 * Extract the variant[20:23] and revision[0:3] from x0
-	 * and pack it in x15[0:7] as variant[4:7] and revision[0:3].
-	 * First extract x0[16:23] to x15[0:7] and zero fill the rest.
-	 * Then extract x0[0:3] into x15[0:3] retaining other bits.
-	 */
-	ubfx	x15, x0, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
-	bfxil	x15, x0, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+	bl	cpu_get_rev_var
+	mov	x18, x0
 
 #if ERRATA_A57_806969
-	mov	x0, x15
+	mov	x0, x18
 	bl	errata_a57_806969_wa
 #endif
 
 #if ERRATA_A57_813420
-	mov	x0, x15
+	mov	x0, x18
 	bl	errata_a57_813420_wa
 #endif
 
+#if A57_DISABLE_NON_TEMPORAL_HINT
+	mov	x0, x18
+	bl	a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+	mov	x0, x18
+	bl	errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+	mov	x0, x18
+	bl	errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+	mov	x0, x18
+	bl	errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+	mov	x0, x18
+	bl	errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+	mov	x0, x18
+	bl	errata_a57_833471_wa
+#endif
+
+#if ERRATA_A57_859972
+	mov	x0, x18
+	bl	errata_a57_859972_wa
+#endif
+
 	/* ---------------------------------------------
-	 * As a bare minimum enable the SMP bit if it is
-	 * not already set.
+	 * Enable the SMP bit.
 	 * ---------------------------------------------
 	 */
-	mrs	x0, CPUECTLR_EL1
-	tst	x0, #CPUECTLR_SMP_BIT
-	b.ne	skip_smp_setup
-	orr	x0, x0, #CPUECTLR_SMP_BIT
-	msr	CPUECTLR_EL1, x0
-skip_smp_setup:
+	mrs	x0, CORTEX_A57_ECTLR_EL1
+	orr	x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
+	msr	CORTEX_A57_ECTLR_EL1, x0
 	isb
 	ret	x19
+endfunc cortex_a57_reset_func
 
 	/* ----------------------------------------------------
 	 * The CPU Ops core power down function for Cortex-A57.
@@ -227,6 +432,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
 
 	/* -------------------------------------------------------
 	 * The CPU Ops cluster power down function for Cortex-A57.
@@ -280,6 +486,39 @@
 	 */
 	mov	x30, x18
 	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A57. Must follow AAPCS.
+ */
+func cortex_a57_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_A57_806969, cortex_a57, 806969
+	report_errata ERRATA_A57_813419, cortex_a57, 813419
+	report_errata ERRATA_A57_813420, cortex_a57, 813420
+	report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
+		disable_ldnp_overread
+	report_errata ERRATA_A57_826974, cortex_a57, 826974
+	report_errata ERRATA_A57_826977, cortex_a57, 826977
+	report_errata ERRATA_A57_828024, cortex_a57, 828024
+	report_errata ERRATA_A57_829520, cortex_a57, 829520
+	report_errata ERRATA_A57_833471, cortex_a57, 833471
+	report_errata ERRATA_A57_859972, cortex_a57, 859972
+
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_a57_errata_report
+#endif
 
 	/* ---------------------------------------------
 	 * This function provides cortex_a57 specific
@@ -292,12 +531,18 @@
 	 */
 .section .rodata.cortex_a57_regs, "aS"
 cortex_a57_regs:  /* The ascii list of register names to be reported */
-	.asciz	"cpuectlr_el1", ""
+	.asciz	"cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
 
 func cortex_a57_cpu_reg_dump
 	adr	x6, cortex_a57_regs
-	mrs	x8, CPUECTLR_EL1
+	mrs	x8, CORTEX_A57_ECTLR_EL1
+	mrs	x9, CORTEX_A57_MERRSR_EL1
+	mrs	x10, CORTEX_A57_L2MERRSR_EL1
 	ret
+endfunc cortex_a57_cpu_reg_dump
 
 
-declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+	cortex_a57_reset_func, \
+	cortex_a57_core_pwr_dwn, \
+	cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
new file mode 100644
index 0000000..b034125
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	/* ---------------------------------------------
+	 * Disable L1 data cache and unified L2 cache
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_dcache
+	mrs	x1, sctlr_el3
+	bic	x1, x1, #SCTLR_C_BIT
+	msr	sctlr_el3, x1
+	isb
+	ret
+endfunc cortex_a72_disable_dcache
+
+	/* ---------------------------------------------
+	 * Disable all types of L2 prefetches.
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_l2_prefetch
+	mrs	x0, CORTEX_A72_ECTLR_EL1
+	orr	x0, x0, #CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+	mov	x1, #CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK
+	orr	x1, x1, #CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK
+	bic	x0, x0, x1
+	msr	CORTEX_A72_ECTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_hw_prefetcher
+	mrs	x0, CORTEX_A72_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+	msr	CORTEX_A72_CPUACTLR_EL1, x0
+	isb
+	dsb	ish
+	ret
+endfunc cortex_a72_disable_hw_prefetcher
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_smp
+	mrs	x0, CORTEX_A72_ECTLR_EL1
+	bic	x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
+	msr	CORTEX_A72_ECTLR_EL1, x0
+	ret
+endfunc cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Disable debug interfaces
+	 * ---------------------------------------------
+	 */
+func cortex_a72_disable_ext_debug
+	mov	x0, #1
+	msr	osdlr_el1, x0
+	isb
+	dsb	sy
+	ret
+endfunc cortex_a72_disable_ext_debug
+
+	/* --------------------------------------------------
+	 * Errata Workaround for Cortex A72 Errata #859971.
+	 * This applies only to revision <= r0p3 of Cortex A72.
+	 * Inputs:
+	 * x0: variant[4:7] and revision[0:3] of current cpu.
+	 * Shall clobber: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_a72_859971_wa
+	mov	x17, x30
+	bl	check_errata_859971
+	cbz	x0, 1f
+	mrs	x1, CORTEX_A72_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+	msr	CORTEX_A72_CPUACTLR_EL1, x1
+1:
+	ret	x17
+endfunc errata_a72_859971_wa
+
+func check_errata_859971
+	mov	x1, #0x03
+	b	cpu_rev_var_ls
+endfunc check_errata_859971
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-A72.
+	 * -------------------------------------------------
+	 */
+func cortex_a72_reset_func
+	mov	x19, x30
+	bl	cpu_get_rev_var
+	mov	x18, x0
+
+#if ERRATA_A72_859971
+	mov	x0, x18
+	bl	errata_a72_859971_wa
+#endif
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A72_ECTLR_EL1
+	orr	x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
+	msr	CORTEX_A72_ECTLR_EL1, x0
+	isb
+	ret	x19
+endfunc cortex_a72_reset_func
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Cortex-A72.
+	 * ----------------------------------------------------
+	 */
+func cortex_a72_core_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_dcache
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_hw_prefetcher
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Cortex-A72.
+	 * -------------------------------------------------------
+	 */
+func cortex_a72_cluster_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_dcache
+
+	/* ---------------------------------------------
+	 * Disable the L2 prefetches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_l2_prefetch
+
+	/* ---------------------------------------------
+	 * Disable the load-store hardware prefetcher.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+#endif
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* -------------------------------------------------
+	 * Flush the L2 caches.
+	 * -------------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a72_disable_smp
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A72. Must follow AAPCS.
+ */
+func cortex_a72_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+	 */
+	report_errata ERRATA_A72_859971, cortex_a72, 859971
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_a72_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a72 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a72_regs, "aS"
+cortex_a72_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+
+func cortex_a72_cpu_reg_dump
+	adr	x6, cortex_a72_regs
+	mrs	x8, CORTEX_A72_ECTLR_EL1
+	mrs	x9, CORTEX_A72_MERRSR_EL1
+	mrs	x10, CORTEX_A72_L2MERRSR_EL1
+	ret
+endfunc cortex_a72_cpu_reg_dump
+
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+	cortex_a72_reset_func, \
+	cortex_a72_core_pwr_dwn, \
+	cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
new file mode 100644
index 0000000..f642816
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a73.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	/* ---------------------------------------------
+	 * Disable L1 data cache
+	 * ---------------------------------------------
+	 */
+func cortex_a73_disable_dcache
+	mrs	x1, sctlr_el3
+	bic	x1, x1, #SCTLR_C_BIT
+	msr	sctlr_el3, x1
+	isb
+	ret
+endfunc cortex_a73_disable_dcache
+
+	/* ---------------------------------------------
+	 * Disable intra-cluster coherency
+	 * ---------------------------------------------
+	 */
+func cortex_a73_disable_smp
+	mrs	x0, CORTEX_A73_CPUECTLR_EL1
+	bic	x0, x0, #CORTEX_A73_CPUECTLR_SMP_BIT
+	msr	CORTEX_A73_CPUECTLR_EL1, x0
+	isb
+	dsb	sy
+	ret
+endfunc cortex_a73_disable_smp
+
+func cortex_a73_reset_func
+	/* ---------------------------------------------
+	 * Enable the SMP bit.
+	 * Clobbers : x0
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A73_CPUECTLR_EL1
+	orr	x0, x0, #CORTEX_A73_CPUECTLR_SMP_BIT
+	msr	CORTEX_A73_CPUECTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a73_reset_func
+
+func cortex_a73_core_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a73_disable_dcache
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a73_disable_smp
+endfunc cortex_a73_core_pwr_dwn
+
+func cortex_a73_cluster_pwr_dwn
+	mov	x18, x30
+
+	/* ---------------------------------------------
+	 * Turn off caches.
+	 * ---------------------------------------------
+	 */
+	bl	cortex_a73_disable_dcache
+
+	/* ---------------------------------------------
+	 * Flush L1 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level1
+
+	/* ---------------------------------------------
+	 * Disable the optional ACP.
+	 * ---------------------------------------------
+	 */
+	bl	plat_disable_acp
+
+	/* ---------------------------------------------
+	 * Flush L2 caches.
+	 * ---------------------------------------------
+	 */
+	mov	x0, #DCCISW
+	bl	dcsw_op_level2
+
+	/* ---------------------------------------------
+	 * Come out of intra cluster coherency
+	 * ---------------------------------------------
+	 */
+	mov	x30, x18
+	b	cortex_a73_disable_smp
+endfunc cortex_a73_cluster_pwr_dwn
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a73 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a73_regs, "aS"
+cortex_a73_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", "l2merrsr_el1", ""
+
+func cortex_a73_cpu_reg_dump
+	adr	x6, cortex_a73_regs
+	mrs	x8, CORTEX_A73_CPUECTLR_EL1
+	mrs	x9, CORTEX_A73_L2MERRSR_EL1
+	ret
+endfunc cortex_a73_cpu_reg_dump
+
+declare_cpu_ops cortex_a73, CORTEX_A73_MIDR, \
+	cortex_a73_reset_func, \
+	cortex_a73_core_pwr_dwn, \
+	cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
new file mode 100644
index 0000000..1f4500c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include <cortex_a75.h>
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_a75_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A75_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_A75_CORE_PWRDN_EN_MASK
+	msr	CORTEX_A75_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a75_core_pwr_dwn
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a75 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a75_regs, "aS"
+cortex_a75_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a75_cpu_reg_dump
+	adr	x6, cortex_a75_regs
+	mrs	x8, CORTEX_A75_CPUECTLR_EL1
+	ret
+endfunc cortex_a75_cpu_reg_dump
+
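+/*
+ * No reset function is registered for this CPU, hence CPU_NO_RESET_FUNC
+ * below.
+ */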
+declare_cpu_ops cortex_a75, CORTEX_A75_MIDR, \
+	CPU_NO_RESET_FUNC, \
+	cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index bebe7c0..2384553 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,43 +1,21 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
 #include <assert_macros.S>
-#include <cpu_macros.S>
-#if IMAGE_BL31
+#ifdef IMAGE_BL31
 #include <cpu_data.h>
 #endif
+#include <cpu_macros.S>
+#include <debug.h>
+#include <errata_report.h>
 
  /* Reset fn is needed in BL at reset vector */
-#if IMAGE_BL1 || IMAGE_BL31
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
 	/*
 	 * The reset handler common to all platforms.  After a matching
 	 * cpu_ops structure entry is found, the correponding reset_handler
@@ -53,7 +31,7 @@
 
 	/* Get the matching cpu_ops pointer */
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
@@ -67,50 +45,48 @@
 	br	x2
 1:
 	ret
+endfunc reset_handler
 
 #endif /* IMAGE_BL1 || IMAGE_BL31 */
 
-#if IMAGE_BL31 /* The power down core and cluster is needed only in  BL31 */
+#ifdef IMAGE_BL31 /* The power down core and cluster are needed only in BL31 */
 	/*
-	 * The prepare core power down function for all platforms.  After
-	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
-	 * pwr_dwn_core in the cpu_ops is invoked.
+	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
+	 *
+	 * Prepares the CPU for power down, for all platforms. The function takes
+	 * the power domain level to be powered down as its parameter. After the
+	 * cpu_ops pointer is retrieved from cpu_data, the handler for the
+	 * requested power level is called.
 	 */
-	.globl	prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
+	.globl	prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+	/*
+	 * If the given power level exceeds the highest implemented one
+	 * (CPU_MAX_PWR_DWN_OPS - 1), use the handler for the last power level.
+	 */
+	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
+	cmp	x0, x2
+	csel	x2, x2, x0, hi
+
 	mrs	x1, tpidr_el3
 	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
 
-	/* Get the cpu_ops core_pwr_dwn handler */
-	ldr	x1, [x0, #CPU_PWR_DWN_CORE]
+	/* Get the appropriate power down handler */
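+	/*
+	 * Handler pointers in cpu_ops are 8 bytes apart, so the entry for
+	 * power level x2 sits at offset CPU_PWR_DWN_OPS + (x2 << 3).
+	 */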
+	mov	x1, #CPU_PWR_DWN_OPS
+	add	x1, x1, x2, lsl #3
+	ldr	x1, [x0, x1]
 	br	x1
-
-	/*
-	 * The prepare cluster power down function for all platforms.  After
-	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
-	 * pwr_dwn_cluster in the cpu_ops is invoked.
-	 */
-	.globl	prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
-	mrs	x1, tpidr_el3
-	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
-	cmp	x0, #0
-	ASM_ASSERT(ne)
-#endif
-
-	/* Get the cpu_ops cluster_pwr_dwn handler */
-	ldr	x1, [x0, #CPU_PWR_DWN_CLUSTER]
-	br	x1
+endfunc prepare_cpu_pwr_dwn
 
 
 	/*
 	 * Initializes the cpu_ops_ptr if not already initialized
-	 * in cpu_data. This can be called without a runtime stack.
+	 * in cpu_data. This can be called without a runtime stack, but may
+	 * only be called after the MMU is enabled.
 	 * clobbers: x0 - x6, x10
 	 */
 	.globl	init_cpu_ops
@@ -120,29 +96,18 @@
 	cbnz	x0, 1f
 	mov	x10, x30
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
 	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
-
-	/*
-	 * Make sure that any pre-fetched cache copies are invalidated.
-	 * Ensure that we are running with cache disable else we
-	 * invalidate our own update.
-	 */
-#if ASM_ASSERTION
-	mrs	x1, sctlr_el3
-	tst	x1, #SCTLR_C_BIT
-	ASM_ASSERT(eq)
-#endif
-	dc	ivac, x6
 	mov x30, x10
 1:
 	ret
+endfunc init_cpu_ops
 #endif /* IMAGE_BL31 */
 
-#if IMAGE_BL31 && CRASH_REPORTING
+#if defined(IMAGE_BL31) && CRASH_REPORTING
 	/*
 	 * The cpu specific registers which need to be reported in a crash
 	 * are reported via cpu_ops cpu_reg_dump function. After a matching
@@ -164,6 +129,7 @@
 1:
 	mov	x30, x16
 	ret
+endfunc do_cpu_reg_dump
 #endif
 
 	/*
@@ -208,29 +174,110 @@
 	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
 error_exit:
 	ret
+endfunc get_cpu_ops_ptr
 
-#if DEBUG
+/*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ */
+	.globl	cpu_get_rev_var
+func cpu_get_rev_var
+	mrs	x1, midr_el1
+
 	/*
-	 * This function prints a warning message to the crash console
-	 * if the CPU revision/part number does not match the errata
-	 * workaround enabled in the build.
-	 * Clobber: x30, x0 - x5
+	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
+	 * as variant[7:4] and revision[3:0] of x0.
+	 *
+	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
+	 * extract x1[3:0] into x0[3:0] retaining other bits.
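+	 *
+	 * For example, an r2p1 part (variant 2, revision 1) packs to 0x21.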
 	 */
-.section .rodata.rev_warn_str, "aS"
-rev_warn_str:
-	.asciz "Warning: Skipping Errata workaround for non matching CPU revision number.\n"
+	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+	ret
+endfunc cpu_get_rev_var
 
-	.globl	print_revision_warning
-func print_revision_warning
-	mov	x5, x30
-	/* Ensure the console is initialized */
-	bl	plat_crash_console_init
-	/* Check if the console is initialized */
-	cbz	x0, 1f
-	/* The console is initialized */
-	adr	x4, rev_warn_str
-	bl	asm_print_str
-1:
-	ret	x5
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is less than or equal to the
+ * given value, return ERRATA_APPLIES; otherwise return ERRATA_NOT_APPLIES.
+ */
+	.globl	cpu_rev_var_ls
+func cpu_rev_var_ls
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
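+	/* Unsigned 'lower or same': x0 <= x1 selects ERRATA_APPLIES */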
+	cmp	x0, x1
+	csel	x0, x2, x3, ls
+	ret
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is greater than or equal to
+ * the given value, return ERRATA_APPLIES; otherwise return ERRATA_NOT_APPLIES.
+ */
+	.globl	cpu_rev_var_hs
+func cpu_rev_var_hs
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+	cmp	x0, x1
+	csel	x0, x2, x3, hs
+	ret
+endfunc cpu_rev_var_hs
+
+#if REPORT_ERRATA
+/*
+ * void print_errata_status(void);
+ *
+ * Function to print errata status for CPUs of its class. Must be called only:
+ *
+ *   - with the MMU and data caches enabled;
+ *   - after cpu_ops have been initialized in per-CPU data.
+ */
+	.globl print_errata_status
+func print_errata_status
+#ifdef IMAGE_BL1
+	/*
+	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
+	 * directly.
+	 */
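+	/* xzr is saved alongside x30 only to keep the stack 16-byte aligned */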
+	stp	xzr, x30, [sp, #-16]!
+	bl	get_cpu_ops_ptr
+	ldp	xzr, x30, [sp], #16
+	ldr	x1, [x0, #CPU_ERRATA_FUNC]
+	cbnz	x1, .Lprint
+#else
+	/*
+	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
+	 * errata printing function. If it is non-NULL, jump to it.
+	 */
+	mrs	x0, tpidr_el3
+	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
+	ldr	x0, [x1, #CPU_ERRATA_FUNC]
+	cbz	x0, .Lnoprint
+
+	/*
+	 * Printing errata status requires atomically testing the printed flag.
+	 */
+	stp	x19, x30, [sp, #-16]!
+	mov	x19, x0
+
+	/*
+	 * Load pointers to errata lock and printed flag. Call
+	 * errata_needs_reporting to check whether this CPU needs to report
+	 * errata status pertaining to its class.
+	 */
+	ldr	x0, [x1, #CPU_ERRATA_LOCK]
+	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
+	bl	errata_needs_reporting
+	mov	x1, x19
+	ldp	x19, x30, [sp], #16
+	cbnz	x0, .Lprint
 #endif
-
+.Lnoprint:
+	ret
+.Lprint:
+	/* Jump to errata reporting function for this CPU */
+	br	x1
+endfunc print_errata_status
+#endif
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
new file mode 100644
index 0000000..a6225d4
--- /dev/null
+++ b/lib/cpus/aarch64/denver.S
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <denver.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	.global	denver_disable_dco
+
+	/* ---------------------------------------------
+	 * Disable debug interfaces
+	 * ---------------------------------------------
+	 */
+func denver_disable_ext_debug
+	mov	x0, #1
+	msr	osdlr_el1, x0
+	isb
+	dsb	sy
+	ret
+endfunc denver_disable_ext_debug
+
+	/* ----------------------------------------------------
+	 * Enable dynamic code optimizer (DCO)
+	 * ----------------------------------------------------
+	 */
+func denver_enable_dco
+	mrs	x0, mpidr_el1
+	and	x0, x0, #0xF
+	mov	x1, #1
+	lsl	x1, x1, x0
+	msr	s3_0_c15_c0_2, x1
+	ret
+endfunc denver_enable_dco
+
+	/* ----------------------------------------------------
+	 * Disable dynamic code optimizer (DCO)
+	 * ----------------------------------------------------
+	 */
+func denver_disable_dco
+
+	/* turn off background work */
+	mrs	x0, mpidr_el1
+	and	x0, x0, #0xF
+	mov	x1, #1
+	lsl	x1, x1, x0
+	lsl	x2, x1, #16
+	msr	s3_0_c15_c0_2, x2
+	isb
+
+	/* wait till the background work turns off */
+1:	mrs	x2, s3_0_c15_c0_2
+	lsr	x2, x2, #32
+	and	w2, w2, 0xFFFF
+	and	x2, x2, x1
+	cbnz	x2, 1b
+
+	ret
+endfunc denver_disable_dco
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Denver.
+	 * -------------------------------------------------
+	 */
+func denver_reset_func
+
+	mov	x19, x30
+
+	/* ----------------------------------------------------
+	 * Enable dynamic code optimizer (DCO)
+	 * ----------------------------------------------------
+	 */
+	bl	denver_enable_dco
+
+	ret	x19
+endfunc denver_reset_func
+
+	/* ----------------------------------------------------
+	 * The CPU Ops core power down function for Denver.
+	 * ----------------------------------------------------
+	 */
+func denver_core_pwr_dwn
+
+	mov	x19, x30
+
+	/* ---------------------------------------------
+	 * Force the debug interfaces to be quiescent
+	 * ---------------------------------------------
+	 */
+	bl	denver_disable_ext_debug
+
+	ret	x19
+endfunc denver_core_pwr_dwn
+
+	/* -------------------------------------------------------
+	 * The CPU Ops cluster power down function for Denver.
+	 * -------------------------------------------------------
+	 */
+func denver_cluster_pwr_dwn
+	ret
+endfunc denver_cluster_pwr_dwn
+
+	/* ---------------------------------------------
+	 * This function provides Denver specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.denver_regs, "aS"
+denver_regs:  /* The ascii list of register names to be reported */
+	.asciz	"actlr_el1", ""
+
+func denver_cpu_reg_dump
+	adr	x6, denver_regs
+	mrs	x8, ACTLR_EL1
+	ret
+endfunc denver_cpu_reg_dump
+
+declare_cpu_ops denver, DENVER_MIDR_PN0, \
+	denver_reset_func, \
+	denver_core_pwr_dwn, \
+	denver_cluster_pwr_dwn
+
+declare_cpu_ops denver, DENVER_MIDR_PN1, \
+	denver_reset_func, \
+	denver_core_pwr_dwn, \
+	denver_cluster_pwr_dwn
+
+declare_cpu_ops denver, DENVER_MIDR_PN2, \
+	denver_reset_func, \
+	denver_core_pwr_dwn, \
+	denver_cluster_pwr_dwn
+
+declare_cpu_ops denver, DENVER_MIDR_PN3, \
+	denver_reset_func, \
+	denver_core_pwr_dwn, \
+	denver_cluster_pwr_dwn
+
+declare_cpu_ops denver, DENVER_MIDR_PN4, \
+	denver_reset_func, \
+	denver_core_pwr_dwn, \
+	denver_cluster_pwr_dwn
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 1c5512e..31adfb4 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,57 +1,170 @@
 #
-# Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 # Cortex A57 specific optimisation to skip L1 cache flush when
 # cluster is powered down.
 SKIP_A57_L1_FLUSH_PWR_DWN	?=0
 
+# Flag to disable the Cortex-A53 cache non-temporal hint.
+# The flag is enabled by default, i.e. the hint is disabled.
+A53_DISABLE_NON_TEMPORAL_HINT	?=1
+
+# Flag to disable the Cortex-A57 cache non-temporal hint.
+# The flag is enabled by default, i.e. the hint is disabled.
+A57_DISABLE_NON_TEMPORAL_HINT	?=1
+
 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag
 $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
 $(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
 
+# Process A53_DISABLE_NON_TEMPORAL_HINT flag
+$(eval $(call assert_boolean,A53_DISABLE_NON_TEMPORAL_HINT))
+$(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
 
-# CPU Errata Build flags. These should be enabled by the
-# platform if the errata needs to be applied.
+# Process A57_DISABLE_NON_TEMPORAL_HINT flag
+$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
+$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
 
-# Flag to apply errata 806969 during reset. This errata applies only to
-# revision r0p0 of the Cortex A57 cpu.
+
+# CPU Errata Build flags.
+# These should be enabled by the platform if the erratum workaround needs to be
+# applied.
+
+# Flag to apply erratum 826319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_826319	?=0
+
+# Flag to apply erratum 835769 workaround at compile and link time.  This
+# erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround can lead the linker to create "*.stub" sections.
+ERRATA_A53_835769	?=0
+
+# Flag to apply erratum 836870 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A53 cpu. From r0p4 and onwards, this
+# erratum workaround is enabled by default in hardware.
+ERRATA_A53_836870	?=0
+
+# Flag to apply erratum 843419 workaround at link time.
+# This erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround could lead the linker to emit "*.stub" sections which are 4kB
+# aligned.
+ERRATA_A53_843419	?=0
+
+# Flag to apply erratum 855873 workaround during reset. This erratum applies
+# to all revisions of the Cortex A53 CPU, but this firmware workaround only
+# works for revisions r0p3 and higher. Earlier revisions are taken care of by
+# the rich OS.
+ERRATA_A53_855873	?=0
+
+# Flag to apply erratum 806969 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_806969	?=0
 
-# Flag to apply errata 813420 during reset. This errata applies only to
-# revision r0p0 of the Cortex A57 cpu.
+# Flag to apply erratum 813419 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_813419	?=0
+
+# Flag to apply erratum 813420 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_813420	?=0
 
+# Flag to apply erratum 826974 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_826974	?=0
+
+# Flag to apply erratum 826977 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_826977	?=0
+
+# Flag to apply erratum 828024 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_828024	?=0
+
+# Flag to apply erratum 829520 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+ERRATA_A57_829520	?=0
+
+# Flag to apply erratum 833471 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+ERRATA_A57_833471	?=0
+
+# Flag to apply erratum 859972 workaround during reset. This erratum applies
+# only to revision <= r1p3 of the Cortex A57 cpu.
+ERRATA_A57_859972	?=0
+
+# Flag to apply erratum 859971 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A72 cpu.
+ERRATA_A72_859971	?=0
+
+# Process ERRATA_A53_826319 flag
+$(eval $(call assert_boolean,ERRATA_A53_826319))
+$(eval $(call add_define,ERRATA_A53_826319))
+
+# Process ERRATA_A53_835769 flag
+$(eval $(call assert_boolean,ERRATA_A53_835769))
+$(eval $(call add_define,ERRATA_A53_835769))
+
+# Process ERRATA_A53_836870 flag
+$(eval $(call assert_boolean,ERRATA_A53_836870))
+$(eval $(call add_define,ERRATA_A53_836870))
+
+# Process ERRATA_A53_843419 flag
+$(eval $(call assert_boolean,ERRATA_A53_843419))
+$(eval $(call add_define,ERRATA_A53_843419))
+
+# Process ERRATA_A53_855873 flag
+$(eval $(call assert_boolean,ERRATA_A53_855873))
+$(eval $(call add_define,ERRATA_A53_855873))
+
 # Process ERRATA_A57_806969 flag
 $(eval $(call assert_boolean,ERRATA_A57_806969))
 $(eval $(call add_define,ERRATA_A57_806969))
 
+# Process ERRATA_A57_813419 flag
+$(eval $(call assert_boolean,ERRATA_A57_813419))
+$(eval $(call add_define,ERRATA_A57_813419))
+
 # Process ERRATA_A57_813420 flag
 $(eval $(call assert_boolean,ERRATA_A57_813420))
 $(eval $(call add_define,ERRATA_A57_813420))
+
+# Process ERRATA_A57_826974 flag
+$(eval $(call assert_boolean,ERRATA_A57_826974))
+$(eval $(call add_define,ERRATA_A57_826974))
+
+# Process ERRATA_A57_826977 flag
+$(eval $(call assert_boolean,ERRATA_A57_826977))
+$(eval $(call add_define,ERRATA_A57_826977))
+
+# Process ERRATA_A57_828024 flag
+$(eval $(call assert_boolean,ERRATA_A57_828024))
+$(eval $(call add_define,ERRATA_A57_828024))
+
+# Process ERRATA_A57_829520 flag
+$(eval $(call assert_boolean,ERRATA_A57_829520))
+$(eval $(call add_define,ERRATA_A57_829520))
+
+# Process ERRATA_A57_833471 flag
+$(eval $(call assert_boolean,ERRATA_A57_833471))
+$(eval $(call add_define,ERRATA_A57_833471))
+
+# Process ERRATA_A57_859972 flag
+$(eval $(call assert_boolean,ERRATA_A57_859972))
+$(eval $(call add_define,ERRATA_A57_859972))
+
+# Process ERRATA_A72_859971 flag
+$(eval $(call assert_boolean,ERRATA_A72_859971))
+$(eval $(call add_define,ERRATA_A72_859971))
+
+# Errata build flags
+ifneq (${ERRATA_A53_843419},0)
+TF_LDFLAGS_aarch64	+= --fix-cortex-a53-843419
+endif
+
+ifneq (${ERRATA_A53_835769},0)
+TF_CFLAGS_aarch64	+= -mfix-cortex-a53-835769
+TF_LDFLAGS_aarch64	+= --fix-cortex-a53-835769
+endif
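+
+# Example (illustrative): a platform that wants both Cortex-A53 toolchain
+# workarounds could build with something like:
+#   make PLAT=<platform> ERRATA_A53_835769=1 ERRATA_A53_843419=1 all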
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
new file mode 100644
index 0000000..8d9f704
--- /dev/null
+++ b/lib/cpus/errata_report.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Runtime firmware routines to report errata status for the current CPU. */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cpu_data.h>
+#include <debug.h>
+#include <errata_report.h>
+#include <spinlock.h>
+#include <utils.h>
+
+#ifdef IMAGE_BL1
+# define BL_STRING	"BL1"
+#elif defined(AARCH64) && defined(IMAGE_BL31)
+# define BL_STRING	"BL31"
+#elif defined(AARCH32) && defined(IMAGE_BL32)
+# define BL_STRING	"BL32"
+#else
+# error This image should not be printing errata status
+#endif
+
+/* Errata format: BL stage, CPU, errata ID, message */
+#define ERRATA_FORMAT	"%s: %s: errata workaround for %s was %s\n"
+
+/*
+ * Returns whether errata need to be reported. The lock and reported flag that
+ * are passed in are private to a CPU type.
+ */
+int errata_needs_reporting(spinlock_t *lock, uint32_t *reported)
+{
+	int report_now;
+
+	/* If already reported, return false. */
+	if (*reported)
+		return 0;
+
+	/*
+	 * Acquire the lock, determine whether the status needs reporting, and
+	 * then mark it as reported.
+	 */
+	spin_lock(lock);
+	report_now = !(*reported);
+	if (report_now)
+		*reported = 1;
+	spin_unlock(lock);
+
+	return report_now;
+}
+
+/*
+ * Print errata status message.
+ *
+ * Unknown: WARN
+ * Missing: WARN
+ * Applied: INFO
+ * Not applied: VERBOSE
+ */
+void errata_print_msg(unsigned int status, const char *cpu, const char *id)
+{
+	/* Errata status strings */
+	static const char *const errata_status_str[] = {
+		[ERRATA_NOT_APPLIES] = "not applied",
+		[ERRATA_APPLIES] = "applied",
+		[ERRATA_MISSING] = "missing!"
+	};
+	static const char *const __unused bl_str = BL_STRING;
+	const char *msg __unused;
+
+	assert(status < ARRAY_SIZE(errata_status_str));
+	assert(cpu);
+	assert(id);
+
+	msg = errata_status_str[status];
+
+	switch (status) {
+	case ERRATA_NOT_APPLIES:
+		VERBOSE(ERRATA_FORMAT, bl_str, cpu, id, msg);
+		break;
+
+	case ERRATA_APPLIES:
+		INFO(ERRATA_FORMAT, bl_str, cpu, id, msg);
+		break;
+
+	case ERRATA_MISSING:
+		WARN(ERRATA_FORMAT, bl_str, cpu, id, msg);
+		break;
+
+	default:
+		WARN(ERRATA_FORMAT, bl_str, cpu, id, "unknown");
+		break;
+	}
+}
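+
+/*
+ * Illustrative pairing of the two helpers above (the lock, flag, CPU name and
+ * erratum ID below are placeholders; the real callers are the per-CPU
+ * assembly errata reporting functions):
+ *
+ *	if (errata_needs_reporting(&cpu_errata_lock, &cpu_errata_reported))
+ *		errata_print_msg(ERRATA_APPLIES, "Cortex-A72", "859971");
+ */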
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
new file mode 100644
index 0000000..3e7a5b7
--- /dev/null
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+#include <utils.h>
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload manages the context(s) corresponding to the secure state.
+ * It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize, but
+	 * that will be done when the BSS is zeroed out
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+	uint32_t scr, sctlr;
+	regs_t *reg_ctx;
+
+	assert(ctx);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Clear any residual register values from the context */
+	zeromem(ctx, sizeof(*ctx));
+
+	reg_ctx = get_regs_ctx(ctx);
+
+	/*
+	 * Base the context SCR on the current value, adjust for entry point
+	 * specific requirements
+	 */
+	scr = read_scr();
+	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);
+
+	if (security_state != SECURE)
+		scr |= SCR_NS_BIT;
+
+	if (security_state != SECURE) {
+		/*
+		 * Set up SCTLR for the Non-secure context.
+		 *
+		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
+		 *
+		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
+		 *  required by PSCI specification)
+		 *
+		 * Set remaining SCTLR fields to their architecturally defined
+		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
+		 *
+		 * SCTLR.TE: Set to zero so that exceptions to an Exception
+		 *  Level executing at PL1 are taken to A32 state.
+		 *
+		 * SCTLR.V: Set to zero to select the normal exception vectors
+		 *  with base address held in VBAR.
+		 */
+		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
+			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));
+
+		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
+		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
+	}
+
+	/*
+	 * The target exception level is based on the spsr mode requested. If
+	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
+	 */
+	if (GET_M32(ep->spsr) == MODE32_hyp)
+		scr |= SCR_HCE_BIT;
+
+	/*
+	 * Store the initialised values for SCTLR and SCR in the cpu_context.
+	 * The Hyp mode registers are not part of the saved context and are
+	 * set-up in cm_prepare_el3_exit().
+	 */
+	write_ctx_reg(reg_ctx, CTX_SCR, scr);
+	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
+	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);
+
+	/*
+	 * Store the r0-r3 value from the entrypoint into the context
+	 * Use memcpy as we are in control of the layout of the structures
+	 */
+	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to hyp mode, HSCTLR is initialized
+ * If execution is requested to non-secure PL1, and the CPU supports
+ * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
+ * registers.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t hsctlr, scr;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
+		if (scr & SCR_HCE_BIT) {
+			/* Use SCTLR value to initialize HSCTLR */
+			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
+						 CTX_NS_SCTLR);
+			hsctlr |= HSCTLR_RES1;
+			/* Temporarily set the NS bit to access HSCTLR */
+			write_scr(read_scr() | SCR_NS_BIT);
+			/*
+			 * Make sure the write to SCR is complete so that
+			 * we can access HSCTLR
+			 */
+			isb();
+			write_hsctlr(hsctlr);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		} else if (read_id_pfr1() &
+			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+			/*
+			 * Set the NS bit to access NS copies of certain banked
+			 * registers
+			 */
+			write_scr(read_scr() | SCR_NS_BIT);
+			isb();
+
+			/*
+			 * Hyp / PL2 present but unused, need to disable safely.
+			 * HSCTLR can be ignored in this case.
+			 *
+			 * Set HCR to its architectural reset value so that
+			 * Non-secure operations do not trap to Hyp mode.
+			 */
+			write_hcr(HCR_RESET_VAL);
+
+			/*
+			 * Set HCPTR to its architectural reset value so that
+			 * Non-secure access from EL1 or EL0 to trace and to
+			 * Advanced SIMD and floating point functionality does
+			 * not trap to Hyp mode.
+			 */
+			write_hcptr(HCPTR_RESET_VAL);
+
+			/*
+			 * Initialise CNTHCTL. All fields are architecturally
+			 * UNKNOWN on reset and are set to zero except for
+			 * field(s) listed below.
+			 *
+			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
+			 *  Non-secure EL0 and EL1 accesses to the physical
+			 *  timer registers.
+			 *
+			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
+			 *  Non-secure EL0 and EL1 accesses to the physical
+			 *  counter registers.
+			 */
+			write_cnthctl(CNTHCTL_RESET_VAL |
+					PL1PCEN_BIT | PL1PCTEN_BIT);
+
+			/*
+			 * Initialise CNTVOFF to zero as it resets to an
+			 * IMPLEMENTATION DEFINED value.
+			 */
+			write64_cntvoff(0);
+
+			/*
+			 * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR
+			 * respectively.
+			 */
+			write_vpidr(read_midr());
+			write_vmpidr(read_mpidr());
+
+			/*
+			 * Initialise VTTBR, setting all fields rather than
+			 * relying on the hw. Some fields are architecturally
+			 * UNKNOWN at reset.
+			 *
+			 * VTTBR.VMID: Set to zero which is the architecturally
+			 *  defined reset value. Even though EL1&0 stage 2
+			 *  address translation is disabled, cache maintenance
+			 *  operations depend on the VMID.
+			 *
+			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
+			 *  translation is disabled.
+			 */
+			write64_vttbr(VTTBR_RESET_VAL &
+				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
+				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
+
+			/*
+			 * Initialise HDCR, setting all the fields rather than
+			 * relying on hw.
+			 *
+			 * HDCR.HPMN: Set to value of PMCR.N which is the
+			 *  architecturally-defined reset value.
+			 */
+			write_hdcr(HDCR_RESET_VAL |
+				((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT));
+
+			/*
+			 * Set HSTR to its architectural reset value so that
+			 * access to system registers in the cproc=1111
+			 * encoding space do not trap to Hyp mode.
+			 */
+			write_hstr(HSTR_RESET_VAL);
+			/*
+			 * Set CNTHP_CTL to its architectural reset value to
+			 * disable the EL2 physical timer and prevent timer
+			 * interrupts. Some fields are architecturally UNKNOWN
+			 * on reset and are set to zero.
+			 */
+			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
+			isb();
+
+			write_scr(read_scr() & ~SCR_NS_BIT);
+			isb();
+		}
+	}
+}
diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S
new file mode 100644
index 0000000..68d6415
--- /dev/null
+++ b/lib/el3_runtime/aarch32/cpu_data.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+	.globl	_cpu_data
+	.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data(void)
+ *
+ * Return the cpu_data structure for the current CPU.
+ * -----------------------------------------------------------------
+ */
+func _cpu_data
+	/* r12 is pushed to meet the 8 byte stack alignment requirement */
+	push	{r12, lr}
+	bl	plat_my_core_pos
+	pop	{r12, lr}
+	b	_cpu_data_by_index
+endfunc _cpu_data
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: r0, r1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
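+	/* r0 = &percpu_data[cpu_index], i.e. base + cpu_index * CPU_DATA_SIZE */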
+	mov_imm	r1, CPU_DATA_SIZE
+	mul	r0, r0, r1
+	ldr	r1, =percpu_data
+	add	r0, r0, r1
+	bx	lr
+endfunc _cpu_data_by_index
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
new file mode 100644
index 0000000..db16a9f
--- /dev/null
+++ b/lib/el3_runtime/aarch64/context.S
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.global	el1_sysregs_context_save
+	.global el1_sysregs_context_save_post_ops
+	.global	el1_sysregs_context_restore
+#if CTX_INCLUDE_FPREGS
+	.global	fpregs_context_save
+	.global	fpregs_context_restore
+#endif
+	.global	save_gp_registers
+	.global	restore_gp_registers_eret
+	.global	restore_gp_registers_callee_eret
+	.global	el3_exit
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to save EL1 system register context. It assumes that
+ * 'x0' is pointing to a 'el1_sys_regs' structure where
+ * the register context will be saved.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save
+
+	mrs	x9, spsr_el1
+	mrs	x10, elr_el1
+	stp	x9, x10, [x0, #CTX_SPSR_EL1]
+
+	mrs	x15, sctlr_el1
+	mrs	x16, actlr_el1
+	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
+
+	mrs	x17, cpacr_el1
+	mrs	x9, csselr_el1
+	stp	x17, x9, [x0, #CTX_CPACR_EL1]
+
+	mrs	x10, sp_el1
+	mrs	x11, esr_el1
+	stp	x10, x11, [x0, #CTX_SP_EL1]
+
+	mrs	x12, ttbr0_el1
+	mrs	x13, ttbr1_el1
+	stp	x12, x13, [x0, #CTX_TTBR0_EL1]
+
+	mrs	x14, mair_el1
+	mrs	x15, amair_el1
+	stp	x14, x15, [x0, #CTX_MAIR_EL1]
+
+	mrs	x16, tcr_el1
+	mrs	x17, tpidr_el1
+	stp	x16, x17, [x0, #CTX_TCR_EL1]
+
+	mrs	x9, tpidr_el0
+	mrs	x10, tpidrro_el0
+	stp	x9, x10, [x0, #CTX_TPIDR_EL0]
+
+	mrs	x13, par_el1
+	mrs	x14, far_el1
+	stp	x13, x14, [x0, #CTX_PAR_EL1]
+
+	mrs	x15, afsr0_el1
+	mrs	x16, afsr1_el1
+	stp	x15, x16, [x0, #CTX_AFSR0_EL1]
+
+	mrs	x17, contextidr_el1
+	mrs	x9, vbar_el1
+	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+
+	mrs	x10, pmcr_el0
+	str	x10, [x0, #CTX_PMCR_EL0]
+
+	/* Save AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+	mrs	x11, spsr_abt
+	mrs	x12, spsr_und
+	stp	x11, x12, [x0, #CTX_SPSR_ABT]
+
+	mrs	x13, spsr_irq
+	mrs	x14, spsr_fiq
+	stp	x13, x14, [x0, #CTX_SPSR_IRQ]
+
+	mrs	x15, dacr32_el2
+	mrs	x16, ifsr32_el2
+	stp	x15, x16, [x0, #CTX_DACR32_EL2]
+
+	mrs	x17, fpexc32_el2
+	str	x17, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
+
+	/* Save NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+	mrs	x10, cntp_ctl_el0
+	mrs	x11, cntp_cval_el0
+	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+
+	mrs	x12, cntv_ctl_el0
+	mrs	x13, cntv_cval_el0
+	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+
+	mrs	x14, cntkctl_el1
+	str	x14, [x0, #CTX_CNTKCTL_EL1]
+#endif
+
+	ret
+endfunc el1_sysregs_context_save
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to do post operations after saving the EL1 system
+ * register context.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save_post_ops
+#if ENABLE_SPE_FOR_LOWER_ELS
+	/* Detect if SPE is implemented */
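+	/* ID_AA64DFR0_EL1.PMSVer == 1 indicates the ARMv8.2 SPE is present */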
+	mrs	x9, id_aa64dfr0_el1
+	ubfx	x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
+	cmp	x9, #0x1
+	b.ne	1f
+
+	/*
+	 * Before switching from normal world to secure world
+	 * the profiling buffers need to be drained out to memory.  This is
+	 * required to avoid an invalid memory access when TTBR is switched
+	 * for entry to SEL1.
+	 */
+	.arch	armv8.2-a+profile
+	psb	csync
+	dsb	nsh
+	.arch	armv8-a
+1:
+#endif
+	ret
+endfunc el1_sysregs_context_save_post_ops
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL1 system register context.  It assumes
+ * that 'x0' is pointing to a 'el1_sys_regs' structure
+ * from where the register context will be restored
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_restore
+
+	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
+	msr	spsr_el1, x9
+	msr	elr_el1, x10
+
+	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
+	msr	sctlr_el1, x15
+	msr	actlr_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
+	msr	cpacr_el1, x17
+	msr	csselr_el1, x9
+
+	ldp	x10, x11, [x0, #CTX_SP_EL1]
+	msr	sp_el1, x10
+	msr	esr_el1, x11
+
+	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
+	msr	ttbr0_el1, x12
+	msr	ttbr1_el1, x13
+
+	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
+	msr	mair_el1, x14
+	msr	amair_el1, x15
+
+	ldp	x16, x17, [x0, #CTX_TCR_EL1]
+	msr	tcr_el1, x16
+	msr	tpidr_el1, x17
+
+	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
+	msr	tpidr_el0, x9
+	msr	tpidrro_el0, x10
+
+	ldp	x13, x14, [x0, #CTX_PAR_EL1]
+	msr	par_el1, x13
+	msr	far_el1, x14
+
+	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
+	msr	afsr0_el1, x15
+	msr	afsr1_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+	msr	contextidr_el1, x17
+	msr	vbar_el1, x9
+
+	ldr	x10, [x0, #CTX_PMCR_EL0]
+	msr	pmcr_el0, x10
+
+	/* Restore AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
+	msr	spsr_abt, x11
+	msr	spsr_und, x12
+
+	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
+	msr	spsr_irq, x13
+	msr	spsr_fiq, x14
+
+	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
+	msr	dacr32_el2, x15
+	msr	ifsr32_el2, x16
+
+	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
+	msr	fpexc32_el2, x17
+#endif
+	/* Restore NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+	msr	cntp_ctl_el0, x10
+	msr	cntp_cval_el0, x11
+
+	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+	msr	cntv_ctl_el0, x12
+	msr	cntv_cval_el0, x13
+
+	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
+	msr	cntkctl_el1, x14
+#endif
+
+	/* No explicit ISB required here as ERET covers it */
+	ret
+endfunc el1_sysregs_context_restore
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to save floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure where the register context will
+ * be saved.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware currently neither uses
+ * VFP registers nor sets this trap, so the bit is assumed
+ * to be clear.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+#if CTX_INCLUDE_FPREGS
+func fpregs_context_save
+	stp	q0, q1, [x0, #CTX_FP_Q0]
+	stp	q2, q3, [x0, #CTX_FP_Q2]
+	stp	q4, q5, [x0, #CTX_FP_Q4]
+	stp	q6, q7, [x0, #CTX_FP_Q6]
+	stp	q8, q9, [x0, #CTX_FP_Q8]
+	stp	q10, q11, [x0, #CTX_FP_Q10]
+	stp	q12, q13, [x0, #CTX_FP_Q12]
+	stp	q14, q15, [x0, #CTX_FP_Q14]
+	stp	q16, q17, [x0, #CTX_FP_Q16]
+	stp	q18, q19, [x0, #CTX_FP_Q18]
+	stp	q20, q21, [x0, #CTX_FP_Q20]
+	stp	q22, q23, [x0, #CTX_FP_Q22]
+	stp	q24, q25, [x0, #CTX_FP_Q24]
+	stp	q26, q27, [x0, #CTX_FP_Q26]
+	stp	q28, q29, [x0, #CTX_FP_Q28]
+	stp	q30, q31, [x0, #CTX_FP_Q30]
+
+	mrs	x9, fpsr
+	str	x9, [x0, #CTX_FP_FPSR]
+
+	mrs	x10, fpcr
+	str	x10, [x0, #CTX_FP_FPCR]
+
+	ret
+endfunc fpregs_context_save
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to restore floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure from where the register context
+ * will be restored.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware currently neither uses
+ * VFP registers nor sets this trap, so the bit is assumed
+ * to be clear.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+func fpregs_context_restore
+	ldp	q0, q1, [x0, #CTX_FP_Q0]
+	ldp	q2, q3, [x0, #CTX_FP_Q2]
+	ldp	q4, q5, [x0, #CTX_FP_Q4]
+	ldp	q6, q7, [x0, #CTX_FP_Q6]
+	ldp	q8, q9, [x0, #CTX_FP_Q8]
+	ldp	q10, q11, [x0, #CTX_FP_Q10]
+	ldp	q12, q13, [x0, #CTX_FP_Q12]
+	ldp	q14, q15, [x0, #CTX_FP_Q14]
+	ldp	q16, q17, [x0, #CTX_FP_Q16]
+	ldp	q18, q19, [x0, #CTX_FP_Q18]
+	ldp	q20, q21, [x0, #CTX_FP_Q20]
+	ldp	q22, q23, [x0, #CTX_FP_Q22]
+	ldp	q24, q25, [x0, #CTX_FP_Q24]
+	ldp	q26, q27, [x0, #CTX_FP_Q26]
+	ldp	q28, q29, [x0, #CTX_FP_Q28]
+	ldp	q30, q31, [x0, #CTX_FP_Q30]
+
+	ldr	x9, [x0, #CTX_FP_FPSR]
+	msr	fpsr, x9
+
+	ldr	x10, [x0, #CTX_FP_FPCR]
+	msr	fpcr, x10
+
+	/*
+	 * No explicit ISB required here as ERET to
+	 * switch to secure EL1 or non-secure world
+	 * covers it
+	 */
+
+	ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+/* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the general purpose registers. Ideally we would
+ * only save and restore the callee saved registers when
+ * a world switch occurs but that type of implementation
+ * is more complex. So currently we will always save and
+ * restore these registers on entry and exit of EL3.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * clobbers: x18
+ * -----------------------------------------------------
+ */
+func save_gp_registers
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	ret
+endfunc save_gp_registers
+
+func restore_gp_registers_eret
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
+
+func restore_gp_registers_callee_eret
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
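+	/*
+	 * The slot after CTX_GPREG_LR holds SP_EL0, so x17 temporarily
+	 * carries it here and is reloaded with its own saved value below.
+	 */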
+	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	msr	sp_el0, x17
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	eret
+endfunc	restore_gp_registers_callee_eret
+
+	/* -----------------------------------------------------
+	 * This routine assumes that the SP_EL3 is pointing to
+	 * a valid context structure from where the gp regs and
+	 * other special registers can be retrieved.
+	 * -----------------------------------------------------
+	 */
+func el3_exit
+	/* -----------------------------------------------------
+	 * Save the current SP_EL0 i.e. the EL3 runtime stack
+	 * which will be used for handling the next SMC. Then
+	 * switch to SP_EL3
+	 * -----------------------------------------------------
+	 */
+	mov	x17, sp
+	msr	spsel, #1
+	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+	/* -----------------------------------------------------
+	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
+	 * -----------------------------------------------------
+	 */
+	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	msr	scr_el3, x18
+	msr	spsr_el3, x16
+	msr	elr_el3, x17
+
+	/* Restore saved general purpose registers and return */
+	b	restore_gp_registers_eret
+endfunc el3_exit
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
new file mode 100644
index 0000000..c8232df
--- /dev/null
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pubsub_events.h>
+#include <smcc_helpers.h>
+#include <string.h>
+#include <utils.h>
+
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload dispatcher service manages the context(s) corresponding to
+ * the secure state. It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu context
+ * which will be used for programming an entry into a lower EL. The same context
+ * will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize, but
+	 * that will be done when the BSS is zeroed out
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry, call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+	uint32_t scr_el3, pmcr_el0;
+	el3_state_t *state;
+	gp_regs_t *gp_regs;
+	unsigned long sctlr_elx;
+
+	assert(ctx);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Clear any residual register values from the context */
+	zeromem(ctx, sizeof(*ctx));
+
+	/*
+	 * SCR_EL3 was initialised during reset sequence in macro
+	 * el3_arch_init_common. This code modifies the SCR_EL3 fields that
+	 * affect the next EL.
+	 *
+	 * The following fields are initially set to zero and then updated to
+	 * the required value depending on the state of the SPSR_EL3 and the
+	 * Security state and entrypoint attributes of the next EL.
+	 */
+	scr_el3 = read_scr();
+	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+			SCR_ST_BIT | SCR_HCE_BIT);
+	/*
+	 * SCR_NS: Set the security state of the next EL.
+	 */
+	if (security_state != SECURE)
+		scr_el3 |= SCR_NS_BIT;
+	/*
+	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
+	 *  Exception level as specified by SPSR.
+	 */
+	if (GET_RW(ep->spsr) == MODE_RW_64)
+		scr_el3 |= SCR_RW_BIT;
+	/*
+	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
+	 *  Secure timer registers to EL3, from AArch64 state only, if specified
+	 *  by the entrypoint attributes.
+	 */
+	if (EP_GET_ST(ep->h.attr))
+		scr_el3 |= SCR_ST_BIT;
+
+#ifndef HANDLE_EA_EL3_FIRST
+	/*
+	 * SCR_EL3.EA: Do not route External Abort and SError Interrupt
+	 *  exceptions to EL3 when executing at a lower EL. When executing at
+	 *  EL3, External Aborts are taken to EL3.
+	 */
+	scr_el3 &= ~SCR_EA_BIT;
+#endif
+
+#ifdef IMAGE_BL31
+	/*
+	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
+	 *  indicated by the interrupt routing model for BL31.
+	 */
+	scr_el3 |= get_scr_el3_from_routing_model(security_state);
+#endif
+
+	/*
+	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
+	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
+	 * next mode is Hyp.
+	 */
+	if ((GET_RW(ep->spsr) == MODE_RW_64
+	     && GET_EL(ep->spsr) == MODE_EL2)
+	    || (GET_RW(ep->spsr) != MODE_RW_64
+		&& GET_M32(ep->spsr) == MODE32_hyp)) {
+		scr_el3 |= SCR_HCE_BIT;
+	}
+
+	/*
+	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
+	 * execution state, setting all fields rather than relying on the hw.
+	 * Some fields have architecturally UNKNOWN reset values and these are
+	 * set to zero.
+	 *
+	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
+	 *
+	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
+	 *  required by PSCI specification)
+	 */
+	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+	if (GET_RW(ep->spsr) == MODE_RW_64)
+		sctlr_elx |= SCTLR_EL1_RES1;
+	else {
+		/*
+		 * If the target execution state is AArch32 then the following
+		 * fields need to be set.
+		 *
+		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
+		 *  instructions are not trapped to EL1.
+		 *
+		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
+		 *  instructions are not trapped to EL1.
+		 *
+		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
+		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
+		 */
+		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
+					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
+	}
+
+	/*
+	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
+	 * and other EL2 registers are set up by cm_prepare_el3_exit() as they
+	 * are not part of the stored cpu_context.
+	 */
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+	if (security_state == SECURE) {
+		/*
+		 * Initialise PMCR_EL0 for secure context only, setting all
+		 * fields rather than relying on hw. Some fields are
+		 * architecturally UNKNOWN on reset.
+		 *
+		 * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+		 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
+		 *  that changes PMCCNTR_EL0[63] from 1 to 0.
+		 *
+		 * PMCR_EL0.DP: Set to one so that the cycle counter,
+		 *  PMCCNTR_EL0 does not count when event counting is prohibited.
+		 *
+		 * PMCR_EL0.X: Set to zero to disable export of events.
+		 *
+		 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+		 *  counts on every clock cycle.
+		 */
+		pmcr_el0 = ((PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT
+				| PMCR_EL0_DP_BIT)
+				& ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT));
+		write_ctx_reg(get_sysregs_ctx(ctx), CTX_PMCR_EL0, pmcr_el0);
+	}
+
+	/* Populate EL3 state so that we have the right context before doing ERET */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+	/*
+	 * Store the X0-X7 value from the entrypoint into the context
+	 * Use memcpy as we are in control of the layout of the structures
+	 */
+	gp_regs = get_gpregs_ctx(ctx);
+	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
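+
+/*
+ * Illustrative sketch, not part of the original file: the entry_point_info_t
+ * fields that cm_init_context_common() consumes. SET_PARAM_HEAD,
+ * SET_SECURITY_STATE, SPSR_64, MODE_EL1, MODE_SP_ELX and
+ * DISABLE_ALL_EXCEPTIONS are assumed to be the usual bl_common.h/arch.h
+ * helpers; bl32_entry and the function name are hypothetical.
+ */
+static void example_init_secure_context(uintptr_t bl32_entry)
+{
+	entry_point_info_t ep;
+
+	zeromem(&ep, sizeof(ep));
+	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
+	SET_SECURITY_STATE(ep.h.attr, SECURE);
+	ep.pc = bl32_entry;
+	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	ep.args.arg0 = 0;	/* becomes X0 at the entry point */
+
+	cm_init_my_context(&ep);
+}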
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU
+ * supports EL2, then EL2 is disabled by configuring all necessary EL2
+ * registers.
+ * For all entries, the EL1 registers are initialized from the cpu_context.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t sctlr_elx, scr_el3, mdcr_el2;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+		if (scr_el3 & SCR_HCE_BIT) {
+			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+						 CTX_SCTLR_EL1);
+			sctlr_elx &= SCTLR_EE_BIT;
+			sctlr_elx |= SCTLR_EL2_RES1;
+			write_sctlr_el2(sctlr_elx);
+		} else if (EL_IMPLEMENTED(2)) {
+			/*
+			 * EL2 present but unused, need to disable safely.
+			 * SCTLR_EL2 can be ignored in this case.
+			 *
+			 * Initialise all fields in HCR_EL2, except HCR_EL2.RW,
+			 * to zero so that Non-secure operations do not trap to
+			 * EL2.
+			 *
+			 * HCR_EL2.RW: Set this field to match SCR_EL3.RW
+			 */
+			write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+			/*
+			 * Initialise CPTR_EL2 setting all fields rather than
+			 * relying on the hw. All fields have architecturally
+			 * UNKNOWN reset values.
+			 *
+			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
+			 *  accesses to the CPACR_EL1 or CPACR from both
+			 *  Execution states do not trap to EL2.
+			 *
+			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
+			 *  register accesses to the trace registers from both
+			 *  Execution states do not trap to EL2.
+			 *
+			 * CPTR_EL2.TFP: Set to zero so that Non-secure accesses
+			 *  to SIMD and floating-point functionality from both
+			 *  Execution states do not trap to EL2.
+			 */
+			write_cptr_el2(CPTR_EL2_RESET_VAL &
+					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
+					| CPTR_EL2_TFP_BIT));
+
+			/*
+			 * Initialise CNTHCTL_EL2. All fields are
+			 * architecturally UNKNOWN on reset and are set to zero
+			 * except for field(s) listed below.
+			 *
+			 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
+			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
+			 *  physical timer registers.
+			 *
+			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
+			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
+			 *  physical counter registers.
+			 */
+			write_cnthctl_el2(CNTHCTL_RESET_VAL |
+						EL1PCEN_BIT | EL1PCTEN_BIT);
+
+			/*
+			 * Initialise CNTVOFF_EL2 to zero as it resets to an
+			 * architecturally UNKNOWN value.
+			 */
+			write_cntvoff_el2(0);
+
+			/*
+			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
+			 * MPIDR_EL1 respectively.
+			 */
+			write_vpidr_el2(read_midr_el1());
+			write_vmpidr_el2(read_mpidr_el1());
+
+			/*
+			 * Initialise VTTBR_EL2. All fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
+			 *  2 address translation is disabled, cache maintenance
+			 *  operations depend on the VMID.
+			 *
+			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
+			 *  translation is disabled.
+			 */
+			write_vttbr_el2(VTTBR_RESET_VAL &
+				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
+				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
+
+			/*
+			 * Initialise MDCR_EL2, setting all fields rather than
+			 * relying on hw. Some fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+			 * profiling controls to EL2.
+			 *
+			 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
+			 * state. Accesses to profiling buffer controls at
+			 * non-secure EL1 are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
+			 *  EL1 System register accesses to the Debug ROM
+			 *  registers are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
+			 *  System register accesses to the powerdown debug
+			 *  registers are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TDA: Set to zero so that System register
+			 *  accesses to the debug registers do not trap to EL2.
+			 *
+			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
+			 *  are not routed to EL2.
+			 *
+			 * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
+			 *  Monitors.
+			 *
+			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
+			 *  EL1 accesses to all Performance Monitors registers
+			 *  are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
+			 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
+			 *  trapped to EL2.
+			 *
+			 * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
+			 *  architecturally-defined reset value.
+			 */
+			mdcr_el2 = ((MDCR_EL2_RESET_VAL |
+					((read_pmcr_el0() & PMCR_EL0_N_BITS)
+					>> PMCR_EL0_N_SHIFT)) &
+					~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT
+					| MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT
+					| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
+					| MDCR_EL2_TPMCR_BIT));
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+			uint64_t id_aa64dfr0_el1;
+
+			/* Detect if SPE is implemented */
+			id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
+				ID_AA64DFR0_PMS_SHIFT;
+			if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
+				/*
+				 * Make sure traps to EL2 are not generated if
+				 * EL2 is implemented but not used.
+				 */
+				mdcr_el2 &= ~MDCR_EL2_TPMS;
+				mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+			}
+#endif
+
+			write_mdcr_el2(mdcr_el2);
+
+			/*
+			 * Initialise HSTR_EL2. All fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * HSTR_EL2.T<n>: Set all these fields to zero so that
+			 *  Non-secure EL0 or EL1 accesses to System registers
+			 *  do not trap to EL2.
+			 */
+			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
+			/*
+			 * Initialise CNTHP_CTL_EL2. All fields are
+			 * architecturally UNKNOWN on reset.
+			 *
+			 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
+			 *  physical timer and prevent timer interrupts.
+			 */
+			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
+						~(CNTHP_CTL_ENABLE_BIT));
+		}
+	}
+
+	cm_el1_sysregs_context_restore(security_state);
+	cm_set_next_eret_context(security_state);
+}
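+
+/*
+ * Illustrative sketch, not part of the original file: a service that needs
+ * to adjust a saved EL1 register before the next ERET can do so through the
+ * same context accessors used above, e.g. clearing the MMU enable bit in the
+ * saved SCTLR_EL1. The function name is hypothetical; SCTLR_M_BIT is the
+ * usual arch.h definition.
+ */
+static void example_clear_saved_sctlr_m(uint32_t security_state)
+{
+	cpu_context_t *ctx = cm_get_context(security_state);
+	uint64_t sctlr;
+
+	assert(ctx);
+	sctlr = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+	sctlr &= ~SCTLR_M_BIT;
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr);
+}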
+
+/*******************************************************************************
+ * The next two functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security
+ * state.
+ ******************************************************************************/
+void cm_el1_sysregs_context_save(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	el1_sysregs_context_save(get_sysregs_ctx(ctx));
+	el1_sysregs_context_save_post_ops();
+
+#if IMAGE_BL31
+	if (security_state == SECURE)
+		PUBLISH_EVENT(cm_exited_secure_world);
+	else
+		PUBLISH_EVENT(cm_exited_normal_world);
+#endif
+}
+
+void cm_el1_sysregs_context_restore(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+#if IMAGE_BL31
+	if (security_state == SECURE)
+		PUBLISH_EVENT(cm_entering_secure_world);
+	else
+		PUBLISH_EVENT(cm_entering_normal_world);
+#endif
+}
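+
+/*
+ * Illustrative sketch, not part of the original file: the usual world-switch
+ * pattern a secure payload dispatcher builds on top of the two functions
+ * above when handling an SMC from the non-secure world. The function name is
+ * hypothetical.
+ */
+static void example_switch_to_secure_world(void)
+{
+	/* Save the caller's (non-secure) EL1 system register state */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/*
+	 * Restore the secure world's EL1 state and make SP_EL3 point to its
+	 * context so that the subsequent el3_exit() returns there.
+	 */
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+}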
+
+/*******************************************************************************
+ * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
+ * given security state with the given entrypoint
+ ******************************************************************************/
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Populate EL3 state so that ERET jumps to the correct entry */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+}
+
+/*******************************************************************************
+ * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
+ * pertaining to the given security state
+ ******************************************************************************/
+void cm_set_elr_spsr_el3(uint32_t security_state,
+			uintptr_t entrypoint, uint32_t spsr)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Populate EL3 state so that ERET jumps to the correct entry */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+}
+
+/*******************************************************************************
+ * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
+ * pertaining to the given security state using the value and bit position
+ * specified in the parameters. It preserves all other bits.
+ ******************************************************************************/
+void cm_write_scr_el3_bit(uint32_t security_state,
+			  uint32_t bit_pos,
+			  uint32_t value)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+	uint32_t scr_el3;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Ensure that the bit position is a valid one */
+	assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
+
+	/* Ensure that the 'value' is only a bit wide */
+	assert(value <= 1);
+
+	/*
+	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
+	 * and set it to its new value.
+	 */
+	state = get_el3state_ctx(ctx);
+	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+	scr_el3 &= ~(1 << bit_pos);
+	scr_el3 |= value << bit_pos;
+	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+}
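+
+/*
+ * Illustrative sketch, not part of the original file: routing physical FIQs
+ * to EL3 while the non-secure world runs, by flipping a single bit of the
+ * saved SCR_EL3. Bit position 2 is assumed to be SCR_EL3.FIQ (matching
+ * SCR_FIQ_BIT); the function name is hypothetical.
+ */
+static void example_route_fiq_to_el3(void)
+{
+	/* bit_pos 2 = SCR_EL3.FIQ, value 1 = route FIQs to EL3 */
+	cm_write_scr_el3_bit(NON_SECURE, 2, 1);
+}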
+
+/*******************************************************************************
+ * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
+ * given security state.
+ ******************************************************************************/
+uint32_t cm_get_scr_el3(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Read SCR_EL3 back from the stored EL3 state */
+	state = get_el3state_ctx(ctx);
+	return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+void cm_set_next_eret_context(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	cm_set_next_context(ctx);
+}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
new file mode 100644
index 0000000..96be081
--- /dev/null
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl	init_cpu_data_ptr
+.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before cm_get_cpu_data()
+ *
+ * This can be called without a valid stack. It assumes that
+ * plat_my_core_pos() does not clobber register x10.
+ * clobbers: x0, x1, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+	mov	x10, x30
+	bl	plat_my_core_pos
+	bl	_cpu_data_by_index
+	msr	tpidr_el3, x0
+	ret	x10
+endfunc init_cpu_data_ptr
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	mov_imm	x1, CPU_DATA_SIZE
+	mul	x0, x0, x1
+	adr	x1, percpu_data
+	add	x0, x0, x1
+	ret
+endfunc _cpu_data_by_index
diff --git a/lib/el3_runtime/cpu_data_array.c b/lib/el3_runtime/cpu_data_array.c
new file mode 100644
index 0000000..10b1a7c
--- /dev/null
+++ b/lib/el3_runtime/cpu_data_array.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* The per_cpu_ptr_cache_t space allocation */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
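+
+/*
+ * Illustrative sketch, not part of the original file: the C view of the
+ * lookup that _cpu_data_by_index() in cpu_data.S performs on this array.
+ * The function name is hypothetical; the real accessors live in cpu_data.h.
+ */
+static inline cpu_data_t *example_cpu_data_by_index(unsigned int cpu_idx)
+{
+	/* TPIDR_EL3 ends up holding &percpu_data[plat_my_core_pos()] */
+	return &percpu_data[cpu_idx];
+}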
diff --git a/lib/libfdt/fdt.c b/lib/libfdt/fdt.c
new file mode 100644
index 0000000..22286a1
--- /dev/null
+++ b/lib/libfdt/fdt.c
@@ -0,0 +1,251 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_check_header(const void *fdt)
+{
+	if (fdt_magic(fdt) == FDT_MAGIC) {
+		/* Complete tree */
+		if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+			return -FDT_ERR_BADVERSION;
+		if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
+			return -FDT_ERR_BADVERSION;
+	} else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
+		/* Unfinished sequential-write blob */
+		if (fdt_size_dt_struct(fdt) == 0)
+			return -FDT_ERR_BADSTATE;
+	} else {
+		return -FDT_ERR_BADMAGIC;
+	}
+
+	return 0;
+}
+
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
+{
+	unsigned absoffset = offset + fdt_off_dt_struct(fdt);
+
+	if ((absoffset < offset)
+	    || ((absoffset + len) < absoffset)
+	    || (absoffset + len) > fdt_totalsize(fdt))
+		return NULL;
+
+	if (fdt_version(fdt) >= 0x11)
+		if (((offset + len) < offset)
+		    || ((offset + len) > fdt_size_dt_struct(fdt)))
+			return NULL;
+
+	return _fdt_offset_ptr(fdt, offset);
+}
+
+uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
+{
+	const fdt32_t *tagp, *lenp;
+	uint32_t tag;
+	int offset = startoffset;
+	const char *p;
+
+	*nextoffset = -FDT_ERR_TRUNCATED;
+	tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
+	if (!tagp)
+		return FDT_END; /* premature end */
+	tag = fdt32_to_cpu(*tagp);
+	offset += FDT_TAGSIZE;
+
+	*nextoffset = -FDT_ERR_BADSTRUCTURE;
+	switch (tag) {
+	case FDT_BEGIN_NODE:
+		/* skip name */
+		do {
+			p = fdt_offset_ptr(fdt, offset++, 1);
+		} while (p && (*p != '\0'));
+		if (!p)
+			return FDT_END; /* premature end */
+		break;
+
+	case FDT_PROP:
+		lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
+		if (!lenp)
+			return FDT_END; /* premature end */
+		/* skip-name offset, length and value */
+		offset += sizeof(struct fdt_property) - FDT_TAGSIZE
+			+ fdt32_to_cpu(*lenp);
+		break;
+
+	case FDT_END:
+	case FDT_END_NODE:
+	case FDT_NOP:
+		break;
+
+	default:
+		return FDT_END;
+	}
+
+	if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset))
+		return FDT_END; /* premature end */
+
+	*nextoffset = FDT_TAGALIGN(offset);
+	return tag;
+}
+
+int _fdt_check_node_offset(const void *fdt, int offset)
+{
+	if ((offset < 0) || (offset % FDT_TAGSIZE)
+	    || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE))
+		return -FDT_ERR_BADOFFSET;
+
+	return offset;
+}
+
+int _fdt_check_prop_offset(const void *fdt, int offset)
+{
+	if ((offset < 0) || (offset % FDT_TAGSIZE)
+	    || (fdt_next_tag(fdt, offset, &offset) != FDT_PROP))
+		return -FDT_ERR_BADOFFSET;
+
+	return offset;
+}
+
+int fdt_next_node(const void *fdt, int offset, int *depth)
+{
+	int nextoffset = 0;
+	uint32_t tag;
+
+	if (offset >= 0)
+		if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0)
+			return nextoffset;
+
+	do {
+		offset = nextoffset;
+		tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+		switch (tag) {
+		case FDT_PROP:
+		case FDT_NOP:
+			break;
+
+		case FDT_BEGIN_NODE:
+			if (depth)
+				(*depth)++;
+			break;
+
+		case FDT_END_NODE:
+			if (depth && ((--(*depth)) < 0))
+				return nextoffset;
+			break;
+
+		case FDT_END:
+			if ((nextoffset >= 0)
+			    || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth))
+				return -FDT_ERR_NOTFOUND;
+			else
+				return nextoffset;
+		}
+	} while (tag != FDT_BEGIN_NODE);
+
+	return offset;
+}
+
+int fdt_first_subnode(const void *fdt, int offset)
+{
+	int depth = 0;
+
+	offset = fdt_next_node(fdt, offset, &depth);
+	if (offset < 0 || depth != 1)
+		return -FDT_ERR_NOTFOUND;
+
+	return offset;
+}
+
+int fdt_next_subnode(const void *fdt, int offset)
+{
+	int depth = 1;
+
+	/*
+	 * With respect to the parent, the depth of the next subnode will be
+	 * the same as the last.
+	 */
+	do {
+		offset = fdt_next_node(fdt, offset, &depth);
+		if (offset < 0 || depth < 1)
+			return -FDT_ERR_NOTFOUND;
+	} while (depth > 1);
+
+	return offset;
+}
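+
+/*
+ * Illustrative sketch, not part of the original file: the iteration pattern
+ * the two helpers above are designed for. The function name is hypothetical.
+ */
+static int example_count_subnodes(const void *fdt, int parent)
+{
+	int node, count = 0;
+
+	for (node = fdt_first_subnode(fdt, parent);
+	     node >= 0;
+	     node = fdt_next_subnode(fdt, node))
+		count++;
+
+	return count;
+}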
+
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
+{
+	int len = strlen(s) + 1;
+	const char *last = strtab + tabsize - len;
+	const char *p;
+
+	for (p = strtab; p <= last; p++)
+		if (memcmp(p, s, len) == 0)
+			return p;
+	return NULL;
+}
+
+int fdt_move(const void *fdt, void *buf, int bufsize)
+{
+	FDT_CHECK_HEADER(fdt);
+
+	if (fdt_totalsize(fdt) > bufsize)
+		return -FDT_ERR_NOSPACE;
+
+	memmove(buf, fdt, fdt_totalsize(fdt));
+	return 0;
+}
diff --git a/lib/libfdt/fdt_addresses.c b/lib/libfdt/fdt_addresses.c
new file mode 100644
index 0000000..eff4dbc
--- /dev/null
+++ b/lib/libfdt/fdt_addresses.c
@@ -0,0 +1,96 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2014 David Gibson <david@gibson.dropbear.id.au>
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_address_cells(const void *fdt, int nodeoffset)
+{
+	const fdt32_t *ac;
+	int val;
+	int len;
+
+	ac = fdt_getprop(fdt, nodeoffset, "#address-cells", &len);
+	if (!ac)
+		return 2;
+
+	if (len != sizeof(*ac))
+		return -FDT_ERR_BADNCELLS;
+
+	val = fdt32_to_cpu(*ac);
+	if ((val <= 0) || (val > FDT_MAX_NCELLS))
+		return -FDT_ERR_BADNCELLS;
+
+	return val;
+}
+
+int fdt_size_cells(const void *fdt, int nodeoffset)
+{
+	const fdt32_t *sc;
+	int val;
+	int len;
+
+	sc = fdt_getprop(fdt, nodeoffset, "#size-cells", &len);
+	if (!sc)
+		return 2;
+
+	if (len != sizeof(*sc))
+		return -FDT_ERR_BADNCELLS;
+
+	val = fdt32_to_cpu(*sc);
+	if ((val < 0) || (val > FDT_MAX_NCELLS))
+		return -FDT_ERR_BADNCELLS;
+
+	return val;
+}
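+
+/*
+ * Illustrative sketch, not part of the original file: callers normally query
+ * the *parent* node's #address-cells/#size-cells before decoding a child's
+ * "reg" property; 2 is the default when the properties are absent. The
+ * function name is hypothetical; fdt_parent_offset() is part of libfdt.
+ */
+static int example_reg_cells(const void *fdt, int child, int *ac, int *sc)
+{
+	int parent = fdt_parent_offset(fdt, child);
+
+	if (parent < 0)
+		return parent;	/* negative libfdt error code */
+
+	*ac = fdt_address_cells(fdt, parent);
+	if (*ac < 0)
+		return *ac;
+
+	*sc = fdt_size_cells(fdt, parent);
+	if (*sc < 0)
+		return *sc;
+
+	return 0;
+}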
diff --git a/lib/libfdt/fdt_empty_tree.c b/lib/libfdt/fdt_empty_tree.c
new file mode 100644
index 0000000..f72d13b
--- /dev/null
+++ b/lib/libfdt/fdt_empty_tree.c
@@ -0,0 +1,84 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2012 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_create_empty_tree(void *buf, int bufsize)
+{
+	int err;
+
+	err = fdt_create(buf, bufsize);
+	if (err)
+		return err;
+
+	err = fdt_finish_reservemap(buf);
+	if (err)
+		return err;
+
+	err = fdt_begin_node(buf, "");
+	if (err)
+		return err;
+
+	err =  fdt_end_node(buf);
+	if (err)
+		return err;
+
+	err = fdt_finish(buf);
+	if (err)
+		return err;
+
+	return fdt_open_into(buf, buf, bufsize);
+}
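+
+/*
+ * Illustrative sketch, not part of the original file: building a tiny tree
+ * in a caller-supplied buffer and adding one property to the root node
+ * (offset 0). The function name and property values are hypothetical;
+ * fdt_setprop_string() is the libfdt.h wrapper around fdt_setprop().
+ */
+static int example_build_tree(void *buf, int bufsize)
+{
+	int err;
+
+	err = fdt_create_empty_tree(buf, bufsize);
+	if (err)
+		return err;
+
+	/* After fdt_open_into() the blob is in read-write form */
+	return fdt_setprop_string(buf, 0, "compatible", "example,board");
+}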
+
diff --git a/lib/libfdt/fdt_ro.c b/lib/libfdt/fdt_ro.c
new file mode 100644
index 0000000..0459098
--- /dev/null
+++ b/lib/libfdt/fdt_ro.c
@@ -0,0 +1,703 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int _fdt_nodename_eq(const void *fdt, int offset,
+			    const char *s, int len)
+{
+	const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
+
+	if (! p)
+		/* short match */
+		return 0;
+
+	if (memcmp(p, s, len) != 0)
+		return 0;
+
+	if (p[len] == '\0')
+		return 1;
+	else if (!memchr(s, '@', len) && (p[len] == '@'))
+		return 1;
+	else
+		return 0;
+}
+
+const char *fdt_string(const void *fdt, int stroffset)
+{
+	return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
+}
+
+static int _fdt_string_eq(const void *fdt, int stroffset,
+			  const char *s, int len)
+{
+	const char *p = fdt_string(fdt, stroffset);
+
+	return (strlen(p) == len) && (memcmp(p, s, len) == 0);
+}
+
+uint32_t fdt_get_max_phandle(const void *fdt)
+{
+	uint32_t max_phandle = 0;
+	int offset;
+
+	for (offset = fdt_next_node(fdt, -1, NULL);;
+	     offset = fdt_next_node(fdt, offset, NULL)) {
+		uint32_t phandle;
+
+		if (offset == -FDT_ERR_NOTFOUND)
+			return max_phandle;
+
+		if (offset < 0)
+			return (uint32_t)-1;
+
+		phandle = fdt_get_phandle(fdt, offset);
+		if (phandle == (uint32_t)-1)
+			continue;
+
+		if (phandle > max_phandle)
+			max_phandle = phandle;
+	}
+
+	return 0;
+}
+
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
+{
+	FDT_CHECK_HEADER(fdt);
+	*address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
+	*size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
+	return 0;
+}
+
+int fdt_num_mem_rsv(const void *fdt)
+{
+	int i = 0;
+
+	while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
+		i++;
+	return i;
+}
+
+static int _nextprop(const void *fdt, int offset)
+{
+	uint32_t tag;
+	int nextoffset;
+
+	do {
+		tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+		switch (tag) {
+		case FDT_END:
+			if (nextoffset >= 0)
+				return -FDT_ERR_BADSTRUCTURE;
+			else
+				return nextoffset;
+
+		case FDT_PROP:
+			return offset;
+		}
+		offset = nextoffset;
+	} while (tag == FDT_NOP);
+
+	return -FDT_ERR_NOTFOUND;
+}
+
+int fdt_subnode_offset_namelen(const void *fdt, int offset,
+			       const char *name, int namelen)
+{
+	int depth;
+
+	FDT_CHECK_HEADER(fdt);
+
+	for (depth = 0;
+	     (offset >= 0) && (depth >= 0);
+	     offset = fdt_next_node(fdt, offset, &depth))
+		if ((depth == 1)
+		    && _fdt_nodename_eq(fdt, offset, name, namelen))
+			return offset;
+
+	if (depth < 0)
+		return -FDT_ERR_NOTFOUND;
+	return offset; /* error */
+}
+
+int fdt_subnode_offset(const void *fdt, int parentoffset,
+		       const char *name)
+{
+	return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen)
+{
+	const char *end = path + namelen;
+	const char *p = path;
+	int offset = 0;
+
+	FDT_CHECK_HEADER(fdt);
+
+	/* see if we have an alias */
+	if (*path != '/') {
+		const char *q = memchr(path, '/', end - p);
+
+		if (!q)
+			q = end;
+
+		p = fdt_get_alias_namelen(fdt, p, q - p);
+		if (!p)
+			return -FDT_ERR_BADPATH;
+		offset = fdt_path_offset(fdt, p);
+
+		p = q;
+	}
+
+	while (p < end) {
+		const char *q;
+
+		while (*p == '/') {
+			p++;
+			if (p == end)
+				return offset;
+		}
+		q = memchr(p, '/', end - p);
+		if (! q)
+			q = end;
+
+		offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
+		if (offset < 0)
+			return offset;
+
+		p = q;
+	}
+
+	return offset;
+}
+
+int fdt_path_offset(const void *fdt, const char *path)
+{
+	return fdt_path_offset_namelen(fdt, path, strlen(path));
+}
+
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
+{
+	const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
+	int err;
+
+	if (((err = fdt_check_header(fdt)) != 0)
+	    || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
+			goto fail;
+
+	if (len)
+		*len = strlen(nh->name);
+
+	return nh->name;
+
+ fail:
+	if (len)
+		*len = err;
+	return NULL;
+}
+
+int fdt_first_property_offset(const void *fdt, int nodeoffset)
+{
+	int offset;
+
+	if ((offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
+		return offset;
+
+	return _nextprop(fdt, offset);
+}
+
+int fdt_next_property_offset(const void *fdt, int offset)
+{
+	if ((offset = _fdt_check_prop_offset(fdt, offset)) < 0)
+		return offset;
+
+	return _nextprop(fdt, offset);
+}
+
+const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
+						      int offset,
+						      int *lenp)
+{
+	int err;
+	const struct fdt_property *prop;
+
+	if ((err = _fdt_check_prop_offset(fdt, offset)) < 0) {
+		if (lenp)
+			*lenp = err;
+		return NULL;
+	}
+
+	prop = _fdt_offset_ptr(fdt, offset);
+
+	if (lenp)
+		*lenp = fdt32_to_cpu(prop->len);
+
+	return prop;
+}
+
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+						    int offset,
+						    const char *name,
+						    int namelen, int *lenp)
+{
+	for (offset = fdt_first_property_offset(fdt, offset);
+	     (offset >= 0);
+	     (offset = fdt_next_property_offset(fdt, offset))) {
+		const struct fdt_property *prop;
+
+		if (!(prop = fdt_get_property_by_offset(fdt, offset, lenp))) {
+			offset = -FDT_ERR_INTERNAL;
+			break;
+		}
+		if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff),
+				   name, namelen))
+			return prop;
+	}
+
+	if (lenp)
+		*lenp = offset;
+	return NULL;
+}
+
+const struct fdt_property *fdt_get_property(const void *fdt,
+					    int nodeoffset,
+					    const char *name, int *lenp)
+{
+	return fdt_get_property_namelen(fdt, nodeoffset, name,
+					strlen(name), lenp);
+}
+
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+				const char *name, int namelen, int *lenp)
+{
+	const struct fdt_property *prop;
+
+	prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
+	if (! prop)
+		return NULL;
+
+	return prop->data;
+}
+
+const void *fdt_getprop_by_offset(const void *fdt, int offset,
+				  const char **namep, int *lenp)
+{
+	const struct fdt_property *prop;
+
+	prop = fdt_get_property_by_offset(fdt, offset, lenp);
+	if (!prop)
+		return NULL;
+	if (namep)
+		*namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+	return prop->data;
+}
+
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+			const char *name, int *lenp)
+{
+	return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
+}
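+
+/*
+ * Illustrative sketch, not part of the original file: the common
+ * lookup-by-path pattern built on fdt_path_offset() and fdt_getprop().
+ * "/chosen", "bootargs" and the function name are examples only.
+ */
+static const char *example_get_bootargs(const void *fdt, int *lenp)
+{
+	int node = fdt_path_offset(fdt, "/chosen");
+
+	if (node < 0) {
+		if (lenp)
+			*lenp = node;	/* propagate the libfdt error code */
+		return NULL;
+	}
+
+	return fdt_getprop(fdt, node, "bootargs", lenp);
+}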
+
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
+{
+	const fdt32_t *php;
+	int len;
+
+	/* FIXME: This is a bit sub-optimal, since we potentially scan
+	 * over all the properties twice. */
+	php = fdt_getprop(fdt, nodeoffset, "phandle", &len);
+	if (!php || (len != sizeof(*php))) {
+		php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
+		if (!php || (len != sizeof(*php)))
+			return 0;
+	}
+
+	return fdt32_to_cpu(*php);
+}
+
+const char *fdt_get_alias_namelen(const void *fdt,
+				  const char *name, int namelen)
+{
+	int aliasoffset;
+
+	aliasoffset = fdt_path_offset(fdt, "/aliases");
+	if (aliasoffset < 0)
+		return NULL;
+
+	return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL);
+}
+
+const char *fdt_get_alias(const void *fdt, const char *name)
+{
+	return fdt_get_alias_namelen(fdt, name, strlen(name));
+}
+
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
+{
+	int pdepth = 0, p = 0;
+	int offset, depth, namelen;
+	const char *name;
+
+	FDT_CHECK_HEADER(fdt);
+
+	if (buflen < 2)
+		return -FDT_ERR_NOSPACE;
+
+	for (offset = 0, depth = 0;
+	     (offset >= 0) && (offset <= nodeoffset);
+	     offset = fdt_next_node(fdt, offset, &depth)) {
+		while (pdepth > depth) {
+			do {
+				p--;
+			} while (buf[p-1] != '/');
+			pdepth--;
+		}
+
+		if (pdepth >= depth) {
+			name = fdt_get_name(fdt, offset, &namelen);
+			if (!name)
+				return namelen;
+			if ((p + namelen + 1) <= buflen) {
+				memcpy(buf + p, name, namelen);
+				p += namelen;
+				buf[p++] = '/';
+				pdepth++;
+			}
+		}
+
+		if (offset == nodeoffset) {
+			if (pdepth < (depth + 1))
+				return -FDT_ERR_NOSPACE;
+
+			if (p > 1) /* special case so that root path is "/", not "" */
+				p--;
+			buf[p] = '\0';
+			return 0;
+		}
+	}
+
+	if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+		return -FDT_ERR_BADOFFSET;
+	else if (offset == -FDT_ERR_BADOFFSET)
+		return -FDT_ERR_BADSTRUCTURE;
+
+	return offset; /* error from fdt_next_node() */
+}
+
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+				 int supernodedepth, int *nodedepth)
+{
+	int offset, depth;
+	int supernodeoffset = -FDT_ERR_INTERNAL;
+
+	FDT_CHECK_HEADER(fdt);
+
+	if (supernodedepth < 0)
+		return -FDT_ERR_NOTFOUND;
+
+	for (offset = 0, depth = 0;
+	     (offset >= 0) && (offset <= nodeoffset);
+	     offset = fdt_next_node(fdt, offset, &depth)) {
+		if (depth == supernodedepth)
+			supernodeoffset = offset;
+
+		if (offset == nodeoffset) {
+			if (nodedepth)
+				*nodedepth = depth;
+
+			if (supernodedepth > depth)
+				return -FDT_ERR_NOTFOUND;
+			else
+				return supernodeoffset;
+		}
+	}
+
+	if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+		return -FDT_ERR_BADOFFSET;
+	else if (offset == -FDT_ERR_BADOFFSET)
+		return -FDT_ERR_BADSTRUCTURE;
+
+	return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_depth(const void *fdt, int nodeoffset)
+{
+	int nodedepth;
+	int err;
+
+	err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
+	if (err)
+		return (err < 0) ? err : -FDT_ERR_INTERNAL;
+	return nodedepth;
+}
+
+int fdt_parent_offset(const void *fdt, int nodeoffset)
+{
+	int nodedepth = fdt_node_depth(fdt, nodeoffset);
+
+	if (nodedepth < 0)
+		return nodedepth;
+	return fdt_supernode_atdepth_offset(fdt, nodeoffset,
+					    nodedepth - 1, NULL);
+}
+
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+				  const char *propname,
+				  const void *propval, int proplen)
+{
+	int offset;
+	const void *val;
+	int len;
+
+	FDT_CHECK_HEADER(fdt);
+
+	/* FIXME: The algorithm here is pretty horrible: we scan each
+	 * property of a node in fdt_getprop(), then if that didn't
+	 * find what we want, we scan over them again making our way
+	 * to the next node.  Still it's the easiest to implement
+	 * approach; performance can come later. */
+	for (offset = fdt_next_node(fdt, startoffset, NULL);
+	     offset >= 0;
+	     offset = fdt_next_node(fdt, offset, NULL)) {
+		val = fdt_getprop(fdt, offset, propname, &len);
+		if (val && (len == proplen)
+		    && (memcmp(val, propval, len) == 0))
+			return offset;
+	}
+
+	return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
+{
+	int offset;
+
+	if ((phandle == 0) || (phandle == -1))
+		return -FDT_ERR_BADPHANDLE;
+
+	FDT_CHECK_HEADER(fdt);
+
+	/* FIXME: The algorithm here is pretty horrible: we
+	 * potentially scan each property of a node in
+	 * fdt_get_phandle(), then if that didn't find what
+	 * we want, we scan over them again making our way to the next
+	 * node.  Still it's the easiest to implement approach;
+	 * performance can come later. */
+	for (offset = fdt_next_node(fdt, -1, NULL);
+	     offset >= 0;
+	     offset = fdt_next_node(fdt, offset, NULL)) {
+		if (fdt_get_phandle(fdt, offset) == phandle)
+			return offset;
+	}
+
+	return offset; /* error from fdt_next_node() */
+}
+
+int fdt_stringlist_contains(const char *strlist, int listlen, const char *str)
+{
+	int len = strlen(str);
+	const char *p;
+
+	while (listlen >= len) {
+		if (memcmp(str, strlist, len+1) == 0)
+			return 1;
+		p = memchr(strlist, '\0', listlen);
+		if (!p)
+			return 0; /* malformed strlist.. */
+		listlen -= (p-strlist) + 1;
+		strlist = p + 1;
+	}
+	return 0;
+}
+
+int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property)
+{
+	const char *list, *end;
+	int length, count = 0;
+
+	list = fdt_getprop(fdt, nodeoffset, property, &length);
+	if (!list)
+		return -length;
+
+	end = list + length;
+
+	while (list < end) {
+		length = strnlen(list, end - list) + 1;
+
+		/* Abort if the last string isn't properly NUL-terminated. */
+		if (list + length > end)
+			return -FDT_ERR_BADVALUE;
+
+		list += length;
+		count++;
+	}
+
+	return count;
+}
+
+int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property,
+			  const char *string)
+{
+	int length, len, idx = 0;
+	const char *list, *end;
+
+	list = fdt_getprop(fdt, nodeoffset, property, &length);
+	if (!list)
+		return -length;
+
+	len = strlen(string) + 1;
+	end = list + length;
+
+	while (list < end) {
+		length = strnlen(list, end - list) + 1;
+
+		/* Abort if the last string isn't properly NUL-terminated. */
+		if (list + length > end)
+			return -FDT_ERR_BADVALUE;
+
+		if (length == len && memcmp(list, string, length) == 0)
+			return idx;
+
+		list += length;
+		idx++;
+	}
+
+	return -FDT_ERR_NOTFOUND;
+}
+
+const char *fdt_stringlist_get(const void *fdt, int nodeoffset,
+			       const char *property, int idx,
+			       int *lenp)
+{
+	const char *list, *end;
+	int length;
+
+	list = fdt_getprop(fdt, nodeoffset, property, &length);
+	if (!list) {
+		if (lenp)
+			*lenp = length;
+
+		return NULL;
+	}
+
+	end = list + length;
+
+	while (list < end) {
+		length = strnlen(list, end - list) + 1;
+
+		/* Abort if the last string isn't properly NUL-terminated. */
+		if (list + length > end) {
+			if (lenp)
+				*lenp = -FDT_ERR_BADVALUE;
+
+			return NULL;
+		}
+
+		if (idx == 0) {
+			if (lenp)
+				*lenp = length - 1;
+
+			return list;
+		}
+
+		list += length;
+		idx--;
+	}
+
+	if (lenp)
+		*lenp = -FDT_ERR_NOTFOUND;
+
+	return NULL;
+}
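+
+/*
+ * Illustrative sketch, not part of the original file: the typical use of
+ * fdt_stringlist_search(), mapping a name in a "clock-names"-style property
+ * to the index of the corresponding entry. Names are examples only.
+ */
+static int example_clock_index(const void *fdt, int node, const char *name)
+{
+	/* Returns the index of 'name', or a negative libfdt error code */
+	return fdt_stringlist_search(fdt, node, "clock-names", name);
+}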
+
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+			      const char *compatible)
+{
+	const void *prop;
+	int len;
+
+	prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
+	if (!prop)
+		return len;
+
+	return !fdt_stringlist_contains(prop, len, compatible);
+}
+
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+				  const char *compatible)
+{
+	int offset, err;
+
+	FDT_CHECK_HEADER(fdt);
+
+	/* FIXME: The algorithm here is pretty horrible: we scan each
+	 * property of a node in fdt_node_check_compatible(), then if
+	 * that didn't find what we want, we scan over them again
+	 * making our way to the next node.  Still it's the easiest to
+	 * implement approach; performance can come later. */
+	for (offset = fdt_next_node(fdt, startoffset, NULL);
+	     offset >= 0;
+	     offset = fdt_next_node(fdt, offset, NULL)) {
+		err = fdt_node_check_compatible(fdt, offset, compatible);
+		if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
+			return err;
+		else if (err == 0)
+			return offset;
+	}
+
+	return offset; /* error from fdt_next_node() */
+}
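+
+/*
+ * Illustrative sketch, not part of the original file: walking every node
+ * that matches a compatible string with the helper above, starting from
+ * offset -1 to cover the whole tree. The function name is hypothetical.
+ */
+static int example_count_compatible(const void *fdt, const char *compat)
+{
+	int node, count = 0;
+
+	for (node = fdt_node_offset_by_compatible(fdt, -1, compat);
+	     node >= 0;
+	     node = fdt_node_offset_by_compatible(fdt, node, compat))
+		count++;
+
+	return count;
+}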
diff --git a/lib/libfdt/fdt_rw.c b/lib/libfdt/fdt_rw.c
new file mode 100644
index 0000000..2eed4f5
--- /dev/null
+++ b/lib/libfdt/fdt_rw.c
@@ -0,0 +1,490 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int _fdt_blocks_misordered(const void *fdt,
+			      int mem_rsv_size, int struct_size)
+{
+	return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8))
+		|| (fdt_off_dt_struct(fdt) <
+		    (fdt_off_mem_rsvmap(fdt) + mem_rsv_size))
+		|| (fdt_off_dt_strings(fdt) <
+		    (fdt_off_dt_struct(fdt) + struct_size))
+		|| (fdt_totalsize(fdt) <
+		    (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
+}
+
+static int _fdt_rw_check_header(void *fdt)
+{
+	FDT_CHECK_HEADER(fdt);
+
+	if (fdt_version(fdt) < 17)
+		return -FDT_ERR_BADVERSION;
+	if (_fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry),
+				   fdt_size_dt_struct(fdt)))
+		return -FDT_ERR_BADLAYOUT;
+	if (fdt_version(fdt) > 17)
+		fdt_set_version(fdt, 17);
+
+	return 0;
+}
+
+#define FDT_RW_CHECK_HEADER(fdt) \
+	{ \
+		int __err; \
+		if ((__err = _fdt_rw_check_header(fdt)) != 0) \
+			return __err; \
+	}
+
+static inline int _fdt_data_size(void *fdt)
+{
+	return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+}
+
+static int _fdt_splice(void *fdt, void *splicepoint, int oldlen, int newlen)
+{
+	char *p = splicepoint;
+	char *end = (char *)fdt + _fdt_data_size(fdt);
+
+	if (((p + oldlen) < p) || ((p + oldlen) > end))
+		return -FDT_ERR_BADOFFSET;
+	if ((p < (char *)fdt) || ((end - oldlen + newlen) < (char *)fdt))
+		return -FDT_ERR_BADOFFSET;
+	if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt)))
+		return -FDT_ERR_NOSPACE;
+	memmove(p + newlen, p + oldlen, end - p - oldlen);
+	return 0;
+}
+
+static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p,
+			       int oldn, int newn)
+{
+	int delta = (newn - oldn) * sizeof(*p);
+	int err;
+	err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
+	if (err)
+		return err;
+	fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta);
+	fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+	return 0;
+}
+
+static int _fdt_splice_struct(void *fdt, void *p,
+			      int oldlen, int newlen)
+{
+	int delta = newlen - oldlen;
+	int err;
+
+	if ((err = _fdt_splice(fdt, p, oldlen, newlen)))
+		return err;
+
+	fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta);
+	fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+	return 0;
+}
+
+static int _fdt_splice_string(void *fdt, int newlen)
+{
+	void *p = (char *)fdt
+		+ fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+	int err;
+
+	if ((err = _fdt_splice(fdt, p, 0, newlen)))
+		return err;
+
+	fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen);
+	return 0;
+}
+
+static int _fdt_find_add_string(void *fdt, const char *s)
+{
+	char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
+	const char *p;
+	char *new;
+	int len = strlen(s) + 1;
+	int err;
+
+	p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s);
+	if (p)
+		/* found it */
+		return (p - strtab);
+
+	new = strtab + fdt_size_dt_strings(fdt);
+	err = _fdt_splice_string(fdt, len);
+	if (err)
+		return err;
+
+	memcpy(new, s, len);
+	return (new - strtab);
+}
+
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
+{
+	struct fdt_reserve_entry *re;
+	int err;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt));
+	err = _fdt_splice_mem_rsv(fdt, re, 0, 1);
+	if (err)
+		return err;
+
+	re->address = cpu_to_fdt64(address);
+	re->size = cpu_to_fdt64(size);
+	return 0;
+}
+
+int fdt_del_mem_rsv(void *fdt, int n)
+{
+	struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n);
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	if (n >= fdt_num_mem_rsv(fdt))
+		return -FDT_ERR_NOTFOUND;
+
+	return _fdt_splice_mem_rsv(fdt, re, 1, 0);
+}
+
+static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name,
+				int len, struct fdt_property **prop)
+{
+	int oldlen;
+	int err;
+
+	*prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+	if (! (*prop))
+		return oldlen;
+
+	if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen),
+				      FDT_TAGALIGN(len))))
+		return err;
+
+	(*prop)->len = cpu_to_fdt32(len);
+	return 0;
+}
+
+static int _fdt_add_property(void *fdt, int nodeoffset, const char *name,
+			     int len, struct fdt_property **prop)
+{
+	int proplen;
+	int nextoffset;
+	int namestroff;
+	int err;
+
+	if ((nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
+		return nextoffset;
+
+	namestroff = _fdt_find_add_string(fdt, name);
+	if (namestroff < 0)
+		return namestroff;
+
+	*prop = _fdt_offset_ptr_w(fdt, nextoffset);
+	proplen = sizeof(**prop) + FDT_TAGALIGN(len);
+
+	err = _fdt_splice_struct(fdt, *prop, 0, proplen);
+	if (err)
+		return err;
+
+	(*prop)->tag = cpu_to_fdt32(FDT_PROP);
+	(*prop)->nameoff = cpu_to_fdt32(namestroff);
+	(*prop)->len = cpu_to_fdt32(len);
+	return 0;
+}
+
+int fdt_set_name(void *fdt, int nodeoffset, const char *name)
+{
+	char *namep;
+	int oldlen, newlen;
+	int err;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen);
+	if (!namep)
+		return oldlen;
+
+	newlen = strlen(name);
+
+	err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen+1),
+				 FDT_TAGALIGN(newlen+1));
+	if (err)
+		return err;
+
+	memcpy(namep, name, newlen+1);
+	return 0;
+}
+
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+		const void *val, int len)
+{
+	struct fdt_property *prop;
+	int err;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop);
+	if (err == -FDT_ERR_NOTFOUND)
+		err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
+	if (err)
+		return err;
+
+	memcpy(prop->data, val, len);
+	return 0;
+}
+
+int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
+		   const void *val, int len)
+{
+	struct fdt_property *prop;
+	int err, oldlen, newlen;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+	if (prop) {
+		newlen = len + oldlen;
+		err = _fdt_splice_struct(fdt, prop->data,
+					 FDT_TAGALIGN(oldlen),
+					 FDT_TAGALIGN(newlen));
+		if (err)
+			return err;
+		prop->len = cpu_to_fdt32(newlen);
+		memcpy(prop->data + oldlen, val, len);
+	} else {
+		err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
+		if (err)
+			return err;
+		memcpy(prop->data, val, len);
+	}
+	return 0;
+}
+
+int fdt_delprop(void *fdt, int nodeoffset, const char *name)
+{
+	struct fdt_property *prop;
+	int len, proplen;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+	if (! prop)
+		return len;
+
+	proplen = sizeof(*prop) + FDT_TAGALIGN(len);
+	return _fdt_splice_struct(fdt, prop, proplen, 0);
+}
+
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+			    const char *name, int namelen)
+{
+	struct fdt_node_header *nh;
+	int offset, nextoffset;
+	int nodelen;
+	int err;
+	uint32_t tag;
+	fdt32_t *endtag;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
+	if (offset >= 0)
+		return -FDT_ERR_EXISTS;
+	else if (offset != -FDT_ERR_NOTFOUND)
+		return offset;
+
+	/* Try to place the new node after the parent's properties */
+	fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
+	do {
+		offset = nextoffset;
+		tag = fdt_next_tag(fdt, offset, &nextoffset);
+	} while ((tag == FDT_PROP) || (tag == FDT_NOP));
+
+	nh = _fdt_offset_ptr_w(fdt, offset);
+	nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE;
+
+	err = _fdt_splice_struct(fdt, nh, 0, nodelen);
+	if (err)
+		return err;
+
+	nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+	memset(nh->name, 0, FDT_TAGALIGN(namelen+1));
+	memcpy(nh->name, name, namelen);
+	endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE);
+	*endtag = cpu_to_fdt32(FDT_END_NODE);
+
+	return offset;
+}
+
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name)
+{
+	return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_del_node(void *fdt, int nodeoffset)
+{
+	int endoffset;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+	if (endoffset < 0)
+		return endoffset;
+
+	return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset),
+				  endoffset - nodeoffset, 0);
+}
+
+static void _fdt_packblocks(const char *old, char *new,
+			    int mem_rsv_size, int struct_size)
+{
+	int mem_rsv_off, struct_off, strings_off;
+
+	mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
+	struct_off = mem_rsv_off + mem_rsv_size;
+	strings_off = struct_off + struct_size;
+
+	memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size);
+	fdt_set_off_mem_rsvmap(new, mem_rsv_off);
+
+	memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size);
+	fdt_set_off_dt_struct(new, struct_off);
+	fdt_set_size_dt_struct(new, struct_size);
+
+	memmove(new + strings_off, old + fdt_off_dt_strings(old),
+		fdt_size_dt_strings(old));
+	fdt_set_off_dt_strings(new, strings_off);
+	fdt_set_size_dt_strings(new, fdt_size_dt_strings(old));
+}
+
+int fdt_open_into(const void *fdt, void *buf, int bufsize)
+{
+	int err;
+	int mem_rsv_size, struct_size;
+	int newsize;
+	const char *fdtstart = fdt;
+	const char *fdtend = fdtstart + fdt_totalsize(fdt);
+	char *tmp;
+
+	FDT_CHECK_HEADER(fdt);
+
+	mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+		* sizeof(struct fdt_reserve_entry);
+
+	if (fdt_version(fdt) >= 17) {
+		struct_size = fdt_size_dt_struct(fdt);
+	} else {
+		struct_size = 0;
+		while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
+			;
+		if (struct_size < 0)
+			return struct_size;
+	}
+
+	if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
+		/* no further work necessary */
+		err = fdt_move(fdt, buf, bufsize);
+		if (err)
+			return err;
+		fdt_set_version(buf, 17);
+		fdt_set_size_dt_struct(buf, struct_size);
+		fdt_set_totalsize(buf, bufsize);
+		return 0;
+	}
+
+	/* Need to reorder */
+	newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
+		+ struct_size + fdt_size_dt_strings(fdt);
+
+	if (bufsize < newsize)
+		return -FDT_ERR_NOSPACE;
+
+	/* First attempt to build converted tree at beginning of buffer */
+	tmp = buf;
+	/* But if that overlaps with the old tree... */
+	if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) {
+		/* Try right after the old tree instead */
+		tmp = (char *)(uintptr_t)fdtend;
+		if ((tmp + newsize) > ((char *)buf + bufsize))
+			return -FDT_ERR_NOSPACE;
+	}
+
+	_fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size);
+	memmove(buf, tmp, newsize);
+
+	fdt_set_magic(buf, FDT_MAGIC);
+	fdt_set_totalsize(buf, bufsize);
+	fdt_set_version(buf, 17);
+	fdt_set_last_comp_version(buf, 16);
+	fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt));
+
+	return 0;
+}
+
+int fdt_pack(void *fdt)
+{
+	int mem_rsv_size;
+
+	FDT_RW_CHECK_HEADER(fdt);
+
+	mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+		* sizeof(struct fdt_reserve_entry);
+	_fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
+	fdt_set_totalsize(fdt, _fdt_data_size(fdt));
+
+	return 0;
+}
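
A minimal usage sketch for the read-write API above (not part of this patch): fdt_open_into() re-packs an existing blob into a larger buffer so the splice helpers have room to grow the structure and string blocks, properties can then be added or resized, and fdt_pack() trims the blob back down. The device-tree path and property below are hypothetical.

/* Sketch only: grow a property in a scratch buffer, then re-pack. */
#include <libfdt.h>

int update_dtb(void *dtb, void *buf, int bufsize)
{
	uint32_t freq = cpu_to_fdt32(100000000);
	int node, err;

	err = fdt_open_into(dtb, buf, bufsize);	/* copy and make room */
	if (err)
		return err;

	node = fdt_path_offset(buf, "/uart@0");	/* hypothetical node */
	if (node < 0)
		return node;

	err = fdt_setprop(buf, node, "clock-frequency", &freq, sizeof(freq));
	if (err)
		return err;

	return fdt_pack(buf);	/* shrink totalsize back to what is used */
}
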
diff --git a/lib/libfdt/fdt_strerror.c b/lib/libfdt/fdt_strerror.c
new file mode 100644
index 0000000..e6c3cee
--- /dev/null
+++ b/lib/libfdt/fdt_strerror.c
@@ -0,0 +1,96 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+struct fdt_errtabent {
+	const char *str;
+};
+
+#define FDT_ERRTABENT(val) \
+	[(val)] = { .str = #val, }
+
+static struct fdt_errtabent fdt_errtable[] = {
+	FDT_ERRTABENT(FDT_ERR_NOTFOUND),
+	FDT_ERRTABENT(FDT_ERR_EXISTS),
+	FDT_ERRTABENT(FDT_ERR_NOSPACE),
+
+	FDT_ERRTABENT(FDT_ERR_BADOFFSET),
+	FDT_ERRTABENT(FDT_ERR_BADPATH),
+	FDT_ERRTABENT(FDT_ERR_BADSTATE),
+
+	FDT_ERRTABENT(FDT_ERR_TRUNCATED),
+	FDT_ERRTABENT(FDT_ERR_BADMAGIC),
+	FDT_ERRTABENT(FDT_ERR_BADVERSION),
+	FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE),
+	FDT_ERRTABENT(FDT_ERR_BADLAYOUT),
+};
+#define FDT_ERRTABSIZE	(sizeof(fdt_errtable) / sizeof(fdt_errtable[0]))
+
+const char *fdt_strerror(int errval)
+{
+	if (errval > 0)
+		return "<valid offset/length>";
+	else if (errval == 0)
+		return "<no error>";
+	else if (errval > -FDT_ERRTABSIZE) {
+		const char *s = fdt_errtable[-errval].str;
+
+		if (s)
+			return s;
+	}
+
+	return "<unknown error>";
+}
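
The FDT_ERRTABENT designated initializers above index the table directly by error value, so a lookup is just a negation and a bounds check. A sketch of typical call-site usage (not part of this patch; tf_printf is assumed to come from the TF debug facilities):

/* Sketch only: report a libfdt failure in readable form. */
#include <debug.h>
#include <libfdt.h>

static void report_fdt_error(const char *what, int rc)
{
	if (rc < 0)
		tf_printf("%s: %s\n", what, fdt_strerror(rc));
}
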
diff --git a/lib/libfdt/fdt_sw.c b/lib/libfdt/fdt_sw.c
new file mode 100644
index 0000000..6a80485
--- /dev/null
+++ b/lib/libfdt/fdt_sw.c
@@ -0,0 +1,288 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int _fdt_sw_check_header(void *fdt)
+{
+	if (fdt_magic(fdt) != FDT_SW_MAGIC)
+		return -FDT_ERR_BADMAGIC;
+	/* FIXME: should check more details about the header state */
+	return 0;
+}
+
+#define FDT_SW_CHECK_HEADER(fdt) \
+	{ \
+		int err; \
+		if ((err = _fdt_sw_check_header(fdt)) != 0) \
+			return err; \
+	}
+
+static void *_fdt_grab_space(void *fdt, size_t len)
+{
+	int offset = fdt_size_dt_struct(fdt);
+	int spaceleft;
+
+	spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
+		- fdt_size_dt_strings(fdt);
+
+	if ((offset + len < offset) || (offset + len > spaceleft))
+		return NULL;
+
+	fdt_set_size_dt_struct(fdt, offset + len);
+	return _fdt_offset_ptr_w(fdt, offset);
+}
+
+int fdt_create(void *buf, int bufsize)
+{
+	void *fdt = buf;
+
+	if (bufsize < sizeof(struct fdt_header))
+		return -FDT_ERR_NOSPACE;
+
+	memset(buf, 0, bufsize);
+
+	fdt_set_magic(fdt, FDT_SW_MAGIC);
+	fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
+	fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
+	fdt_set_totalsize(fdt,  bufsize);
+
+	fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header),
+					      sizeof(struct fdt_reserve_entry)));
+	fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
+	fdt_set_off_dt_strings(fdt, bufsize);
+
+	return 0;
+}
+
+int fdt_resize(void *fdt, void *buf, int bufsize)
+{
+	size_t headsize, tailsize;
+	char *oldtail, *newtail;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	headsize = fdt_off_dt_struct(fdt);
+	tailsize = fdt_size_dt_strings(fdt);
+
+	if ((headsize + tailsize) > bufsize)
+		return -FDT_ERR_NOSPACE;
+
+	oldtail = (char *)fdt + fdt_totalsize(fdt) - tailsize;
+	newtail = (char *)buf + bufsize - tailsize;
+
+	/* Two cases to avoid clobbering data if the old and new
+	 * buffers partially overlap */
+	if (buf <= fdt) {
+		memmove(buf, fdt, headsize);
+		memmove(newtail, oldtail, tailsize);
+	} else {
+		memmove(newtail, oldtail, tailsize);
+		memmove(buf, fdt, headsize);
+	}
+
+	fdt_set_off_dt_strings(buf, bufsize);
+	fdt_set_totalsize(buf, bufsize);
+
+	return 0;
+}
+
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
+{
+	struct fdt_reserve_entry *re;
+	int offset;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	if (fdt_size_dt_struct(fdt))
+		return -FDT_ERR_BADSTATE;
+
+	offset = fdt_off_dt_struct(fdt);
+	if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
+		return -FDT_ERR_NOSPACE;
+
+	re = (struct fdt_reserve_entry *)((char *)fdt + offset);
+	re->address = cpu_to_fdt64(addr);
+	re->size = cpu_to_fdt64(size);
+
+	fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
+
+	return 0;
+}
+
+int fdt_finish_reservemap(void *fdt)
+{
+	return fdt_add_reservemap_entry(fdt, 0, 0);
+}
+
+int fdt_begin_node(void *fdt, const char *name)
+{
+	struct fdt_node_header *nh;
+	int namelen = strlen(name) + 1;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen));
+	if (! nh)
+		return -FDT_ERR_NOSPACE;
+
+	nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+	memcpy(nh->name, name, namelen);
+	return 0;
+}
+
+int fdt_end_node(void *fdt)
+{
+	fdt32_t *en;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	en = _fdt_grab_space(fdt, FDT_TAGSIZE);
+	if (! en)
+		return -FDT_ERR_NOSPACE;
+
+	*en = cpu_to_fdt32(FDT_END_NODE);
+	return 0;
+}
+
+static int _fdt_find_add_string(void *fdt, const char *s)
+{
+	char *strtab = (char *)fdt + fdt_totalsize(fdt);
+	const char *p;
+	int strtabsize = fdt_size_dt_strings(fdt);
+	int len = strlen(s) + 1;
+	int struct_top, offset;
+
+	p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
+	if (p)
+		return p - strtab;
+
+	/* Add it */
+	offset = -strtabsize - len;
+	struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+	if (fdt_totalsize(fdt) + offset < struct_top)
+		return 0; /* no more room :( */
+
+	memcpy(strtab + offset, s, len);
+	fdt_set_size_dt_strings(fdt, strtabsize + len);
+	return offset;
+}
+
+int fdt_property(void *fdt, const char *name, const void *val, int len)
+{
+	struct fdt_property *prop;
+	int nameoff;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	nameoff = _fdt_find_add_string(fdt, name);
+	if (nameoff == 0)
+		return -FDT_ERR_NOSPACE;
+
+	prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len));
+	if (! prop)
+		return -FDT_ERR_NOSPACE;
+
+	prop->tag = cpu_to_fdt32(FDT_PROP);
+	prop->nameoff = cpu_to_fdt32(nameoff);
+	prop->len = cpu_to_fdt32(len);
+	memcpy(prop->data, val, len);
+	return 0;
+}
+
+int fdt_finish(void *fdt)
+{
+	char *p = (char *)fdt;
+	fdt32_t *end;
+	int oldstroffset, newstroffset;
+	uint32_t tag;
+	int offset, nextoffset;
+
+	FDT_SW_CHECK_HEADER(fdt);
+
+	/* Add terminator */
+	end = _fdt_grab_space(fdt, sizeof(*end));
+	if (! end)
+		return -FDT_ERR_NOSPACE;
+	*end = cpu_to_fdt32(FDT_END);
+
+	/* Relocate the string table */
+	oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
+	newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+	memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
+	fdt_set_off_dt_strings(fdt, newstroffset);
+
+	/* Walk the structure, correcting string offsets */
+	offset = 0;
+	while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
+		if (tag == FDT_PROP) {
+			struct fdt_property *prop =
+				_fdt_offset_ptr_w(fdt, offset);
+			int nameoff;
+
+			nameoff = fdt32_to_cpu(prop->nameoff);
+			nameoff += fdt_size_dt_strings(fdt);
+			prop->nameoff = cpu_to_fdt32(nameoff);
+		}
+		offset = nextoffset;
+	}
+	if (nextoffset < 0)
+		return nextoffset;
+
+	/* Finally, adjust the header */
+	fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
+	fdt_set_magic(fdt, FDT_MAGIC);
+	return 0;
+}
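
The sequential-write interface above is driven in a fixed order: fdt_create(), reserve-map entries, nested begin/end node and property calls, then fdt_finish(), which appends FDT_END, relocates the string table (built downward from the end of the buffer) and fixes up the property name offsets. A sketch of that flow with hypothetical contents (not part of this patch):

/* Sketch only: build a minimal tree with the SW interface. */
#include <libfdt.h>

int build_minimal_tree(void *buf, int bufsize)
{
	int err;

	if ((err = fdt_create(buf, bufsize)) != 0)
		return err;
	if ((err = fdt_finish_reservemap(buf)) != 0)	/* empty reserve map */
		return err;
	if ((err = fdt_begin_node(buf, "")) != 0)	/* root node */
		return err;
	if ((err = fdt_property_string(buf, "compatible", "vendor,board")) != 0)
		return err;
	if ((err = fdt_end_node(buf)) != 0)
		return err;
	return fdt_finish(buf);		/* terminate, relocate strings */
}
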
diff --git a/lib/libfdt/fdt_wip.c b/lib/libfdt/fdt_wip.c
new file mode 100644
index 0000000..6aaab39
--- /dev/null
+++ b/lib/libfdt/fdt_wip.c
@@ -0,0 +1,139 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset,
+					const char *name, int namelen,
+					uint32_t idx, const void *val,
+					int len)
+{
+	void *propval;
+	int proplen;
+
+	propval = fdt_getprop_namelen_w(fdt, nodeoffset, name, namelen,
+					&proplen);
+	if (!propval)
+		return proplen;
+
+	if (proplen < (len + idx))
+		return -FDT_ERR_NOSPACE;
+
+	memcpy((char *)propval + idx, val, len);
+	return 0;
+}
+
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+			const void *val, int len)
+{
+	const void *propval;
+	int proplen;
+
+	propval = fdt_getprop(fdt, nodeoffset, name, &proplen);
+	if (! propval)
+		return proplen;
+
+	if (proplen != len)
+		return -FDT_ERR_NOSPACE;
+
+	return fdt_setprop_inplace_namelen_partial(fdt, nodeoffset, name,
+						   strlen(name), 0,
+						   val, len);
+}
+
+static void _fdt_nop_region(void *start, int len)
+{
+	fdt32_t *p;
+
+	for (p = start; (char *)p < ((char *)start + len); p++)
+		*p = cpu_to_fdt32(FDT_NOP);
+}
+
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
+{
+	struct fdt_property *prop;
+	int len;
+
+	prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+	if (! prop)
+		return len;
+
+	_fdt_nop_region(prop, len + sizeof(*prop));
+
+	return 0;
+}
+
+int _fdt_node_end_offset(void *fdt, int offset)
+{
+	int depth = 0;
+
+	while ((offset >= 0) && (depth >= 0))
+		offset = fdt_next_node(fdt, offset, &depth);
+
+	return offset;
+}
+
+int fdt_nop_node(void *fdt, int nodeoffset)
+{
+	int endoffset;
+
+	endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+	if (endoffset < 0)
+		return endoffset;
+
+	_fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0),
+			endoffset - nodeoffset);
+	return 0;
+}
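
The write-in-place calls above never change the blob's size: fdt_setprop_inplace() only overwrites a property whose current length matches, and fdt_nop_node() merely papers the node over with FDT_NOP tags. A sketch of how a caller might use them (the node path and property value are hypothetical, not part of this patch):

/* Sketch only: patch a same-sized value, or NOP out the whole node. */
#include <libfdt.h>

int quiesce_node(void *fdt, const char *path)
{
	int node = fdt_path_offset(fdt, path);

	if (node < 0)
		return node;

	/* Succeeds only if "status" already exists with length 9. */
	if (fdt_setprop_inplace(fdt, node, "status", "disabled", 9) == 0)
		return 0;

	return fdt_nop_node(fdt, node);	/* fallback: hide the node */
}
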
diff --git a/lib/libfdt/libfdt.mk b/lib/libfdt/libfdt.mk
new file mode 100644
index 0000000..d03dde2
--- /dev/null
+++ b/lib/libfdt/libfdt.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+LIBFDT_SRCS	:=	$(addprefix lib/libfdt/,	\
+			fdt.c				\
+			fdt_addresses.c			\
+			fdt_empty_tree.c		\
+			fdt_ro.c			\
+			fdt_rw.c			\
+			fdt_strerror.c			\
+			fdt_sw.c			\
+			fdt_wip.c)			\
+
+INCLUDES	+=	-Iinclude/lib/libfdt
diff --git a/lib/libfdt/libfdt_internal.h b/lib/libfdt/libfdt_internal.h
new file mode 100644
index 0000000..02cfa6f
--- /dev/null
+++ b/lib/libfdt/libfdt_internal.h
@@ -0,0 +1,95 @@
+#ifndef _LIBFDT_INTERNAL_H
+#define _LIBFDT_INTERNAL_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fdt.h>
+
+#define FDT_ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
+#define FDT_TAGALIGN(x)		(FDT_ALIGN((x), FDT_TAGSIZE))
+
+#define FDT_CHECK_HEADER(fdt) \
+	{ \
+		int __err; \
+		if ((__err = fdt_check_header(fdt)) != 0) \
+			return __err; \
+	}
+
+int _fdt_check_node_offset(const void *fdt, int offset);
+int _fdt_check_prop_offset(const void *fdt, int offset);
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
+int _fdt_node_end_offset(void *fdt, int nodeoffset);
+
+static inline const void *_fdt_offset_ptr(const void *fdt, int offset)
+{
+	return (const char *)fdt + fdt_off_dt_struct(fdt) + offset;
+}
+
+static inline void *_fdt_offset_ptr_w(void *fdt, int offset)
+{
+	return (void *)(uintptr_t)_fdt_offset_ptr(fdt, offset);
+}
+
+static inline const struct fdt_reserve_entry *_fdt_mem_rsv(const void *fdt, int n)
+{
+	const struct fdt_reserve_entry *rsv_table =
+		(const struct fdt_reserve_entry *)
+		((const char *)fdt + fdt_off_mem_rsvmap(fdt));
+
+	return rsv_table + n;
+}
+static inline struct fdt_reserve_entry *_fdt_mem_rsv_w(void *fdt, int n)
+{
+	return (void *)(uintptr_t)_fdt_mem_rsv(fdt, n);
+}
+
+#define FDT_SW_MAGIC		(~FDT_MAGIC)
+
+#endif /* _LIBFDT_INTERNAL_H */
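
For reference, FDT_ALIGN() rounds x up to the next multiple of a power-of-two a, and FDT_TAGALIGN() pads to the 4-byte tag cell used throughout the structure block. A worked example (illustrative only):

/* Illustrative only:
 *   FDT_ALIGN(5, 4)  == (5 + 3) & ~3 == 8
 *   FDT_ALIGN(8, 4)  == 8              (already aligned)
 *   FDT_TAGALIGN(5)  == 8              (a 5-byte value occupies 8 bytes)
 */
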
diff --git a/lib/locks/bakery/bakery_lock_coherent.c b/lib/locks/bakery/bakery_lock_coherent.c
index 5d538ce..a857e03 100644
--- a/lib/locks/bakery/bakery_lock_coherent.c
+++ b/lib/locks/bakery/bakery_lock_coherent.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -63,27 +39,15 @@
 	assert(entry < BAKERY_LOCK_MAX_CPUS);		\
 } while (0)
 
-/* Convert a ticket to priority */
-#define PRIORITY(t, pos)	(((t) << 8) | (pos))
-
-
-/* Initialize Bakery Lock to reset ownership and all ticket values */
-void bakery_lock_init(bakery_lock_t *bakery)
-{
-	assert(bakery);
-
-	/* All ticket values need to be 0 */
-	memset(bakery, 0, sizeof(*bakery));
-	bakery->owner = NO_OWNER;
-}
-
-
 /* Obtain a ticket for a given CPU */
 static unsigned int bakery_get_ticket(bakery_lock_t *bakery, unsigned int me)
 {
 	unsigned int my_ticket, their_ticket;
 	unsigned int they;
 
+	/* Prevent recursive acquisition */
+	assert(!bakery_ticket_number(bakery->lock_data[me]));
+
 	/*
 	 * Flag that we're busy getting our ticket. All CPUs are iterated in the
 	 * order of their ordinal position to decide the maximum ticket value
@@ -95,9 +59,9 @@
 	 * value, not the ticket value alone.
 	 */
 	my_ticket = 0;
-	bakery->entering[me] = 1;
+	bakery->lock_data[me] = make_bakery_data(CHOOSING_TICKET, my_ticket);
 	for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
-		their_ticket = bakery->number[they];
+		their_ticket = bakery_ticket_number(bakery->lock_data[they]);
 		if (their_ticket > my_ticket)
 			my_ticket = their_ticket;
 	}
@@ -107,8 +71,7 @@
 	 * finish calculating our ticket value that we're done
 	 */
 	++my_ticket;
-	bakery->number[me] = my_ticket;
-	bakery->entering[me] = 0;
+	bakery->lock_data[me] = make_bakery_data(CHOSEN_TICKET, my_ticket);
 
 	return my_ticket;
 }
@@ -129,14 +92,12 @@
 {
 	unsigned int they, me;
 	unsigned int my_ticket, my_prio, their_ticket;
+	unsigned int their_bakery_data;
 
-	me = platform_get_core_pos(read_mpidr_el1());
+	me = plat_my_core_pos();
 
 	assert_bakery_entry_valid(me, bakery);
 
-	/* Prevent recursive acquisition */
-	assert(bakery->owner != me);
-
 	/* Get a ticket */
 	my_ticket = bakery_get_ticket(bakery, me);
 
@@ -150,14 +111,15 @@
 			continue;
 
 		/* Wait for the contender to get their ticket */
-		while (bakery->entering[they])
-			;
+		do {
+			their_bakery_data = bakery->lock_data[they];
+		} while (bakery_is_choosing(their_bakery_data));
 
 		/*
 		 * If the other party is a contender, they'll have non-zero
 		 * (valid) ticket value. If they do, compare priorities
 		 */
-		their_ticket = bakery->number[they];
+		their_ticket = bakery_ticket_number(their_bakery_data);
 		if (their_ticket && (PRIORITY(their_ticket, they) < my_prio)) {
 			/*
 			 * They have higher priority (lower value). Wait for
@@ -167,29 +129,27 @@
 			 */
 			do {
 				wfe();
-			} while (their_ticket == bakery->number[they]);
+			} while (their_ticket ==
+				bakery_ticket_number(bakery->lock_data[they]));
 		}
 	}
-
 	/* Lock acquired */
-	bakery->owner = me;
 }
 
 
 /* Release the lock and signal contenders */
 void bakery_lock_release(bakery_lock_t *bakery)
 {
-	unsigned int me = platform_get_core_pos(read_mpidr_el1());
+	unsigned int me = plat_my_core_pos();
 
 	assert_bakery_entry_valid(me, bakery);
-	assert(bakery->owner == me);
+	assert(bakery_ticket_number(bakery->lock_data[me]));
 
 	/*
-	 * Release lock by resetting ownership and ticket. Then signal other
+	 * Release lock by resetting ticket. Then signal other
 	 * waiting contenders
 	 */
-	bakery->owner = NO_OWNER;
-	bakery->number[me] = 0;
+	bakery->lock_data[me] = 0;
 	dsb();
 	sev();
 }
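
This change folds the old entering[]/number[] pair into a single per-CPU lock_data word; the bakery_* helpers and the CHOOSING_TICKET/CHOSEN_TICKET constants are assumed to now live in the bakery_lock header, which is not shown in this hunk. A sketch of how the packing and tie-breaking behave (illustrative only):

/* Illustrative only: bit 0 is the "choosing" flag, bits 15:1 the ticket. */
static void bakery_data_example(void)
{
	unsigned int data = make_bakery_data(CHOSEN_TICKET, 42);

	assert(!bakery_is_choosing(data));
	assert(bakery_ticket_number(data) == 42);

	/* Equal tickets are ordered by CPU position: lower wins. */
	assert(PRIORITY(42, 3) < PRIORITY(42, 4));
}
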
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index a325fd4..8f59215 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -56,35 +32,57 @@
  * accesses regardless of status of address translation.
  */
 
-/* Convert a ticket to priority */
-#define PRIORITY(t, pos)	(((t) << 8) | (pos))
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+/*
+ * Verify that the platform defined value for the per-cpu space for bakery locks is
+ * a multiple of the cache line size, to prevent multiple CPUs writing to the same
+ * bakery lock cache line
+ *
+ * Using this value, if provided, rather than the linker generated value results in
+ * more efficient code
+ */
+CASSERT((PLAT_PERCPU_BAKERY_LOCK_SIZE & (CACHE_WRITEBACK_GRANULE - 1)) == 0, \
+	PLAT_PERCPU_BAKERY_LOCK_SIZE_not_cacheline_multiple);
+#define PERCPU_BAKERY_LOCK_SIZE (PLAT_PERCPU_BAKERY_LOCK_SIZE)
+#else
+/*
+ * Use the linker defined symbol which has evaluated the size requirement.
+ * This is not as efficient as using a platform defined constant
+ */
+extern void *__PERCPU_BAKERY_LOCK_SIZE__;
+#define PERCPU_BAKERY_LOCK_SIZE ((uintptr_t)&__PERCPU_BAKERY_LOCK_SIZE__)
+#endif
 
-#define CHOOSING_TICKET		0x1
-#define CHOOSING_DONE		0x0
-
-#define bakery_is_choosing(info)	(info & 0x1)
-#define bakery_ticket_number(info)	((info >> 1) & 0x7FFF)
-#define make_bakery_data(choosing, number) \
-		(((choosing & 0x1) | (number << 1)) & 0xFFFF)
-
-/* This macro assumes that the bakery_info array is located at the offset specified */
-#define get_my_bakery_info(offset, id)		\
-	(((bakery_info_t *) (((uint8_t *)_cpu_data()) + offset)) + id)
-
-#define get_bakery_info_by_index(offset, id, ix)	\
-	(((bakery_info_t *) (((uint8_t *)_cpu_data_by_index(ix)) + offset)) + id)
+#define get_bakery_info(cpu_ix, lock)	\
+	(bakery_info_t *)((uintptr_t)lock + cpu_ix * PERCPU_BAKERY_LOCK_SIZE)
 
 #define write_cache_op(addr, cached)	\
 				do {	\
-					(cached ? dccvac((uint64_t)addr) :\
-						dcivac((uint64_t)addr));\
+					(cached ? dccvac((uintptr_t)addr) :\
+						dcivac((uintptr_t)addr));\
 						dsbish();\
 				} while (0)
 
 #define read_cache_op(addr, cached)	if (cached) \
-					    dccivac((uint64_t)addr)
+					    dccivac((uintptr_t)addr)
 
-static unsigned int bakery_get_ticket(int id, unsigned int offset,
+/* Helper function to check if the lock is acquired */
+static inline int is_lock_acquired(const bakery_info_t *my_bakery_info,
+							int is_cached)
+{
+	/*
+	 * Even though lock data is updated only by the owning cpu and
+	 * appropriate cache maintenance operations are performed,
+	 * if the previous update was done when the cpu was not participating
+	 * in coherency, then there is a chance that cache maintenance
+	 * operations were not propagated to all the caches in the system.
+	 * Hence do a `read_cache_op()` prior to read.
+	 */
+	read_cache_op(my_bakery_info, is_cached);
+	return !!(bakery_ticket_number(my_bakery_info->lock_data));
+}
+
+static unsigned int bakery_get_ticket(bakery_lock_t *lock,
 						unsigned int me, int is_cached)
 {
 	unsigned int my_ticket, their_ticket;
@@ -95,9 +93,12 @@
 	 * Obtain a reference to the bakery information for this cpu and ensure
 	 * it is not NULL.
 	 */
-	my_bakery_info = get_my_bakery_info(offset, id);
+	my_bakery_info = get_bakery_info(me, lock);
 	assert(my_bakery_info);
 
+	/* Prevent recursive acquisition.*/
+	assert(!is_lock_acquired(my_bakery_info, is_cached));
+
 	/*
 	 * Tell other contenders that we are through the bakery doorway i.e.
 	 * going to allocate a ticket for this cpu.
@@ -119,7 +120,7 @@
 		 * Get a reference to the other contender's bakery info and
 		 * ensure that a stale copy is not read.
 		 */
-		their_bakery_info = get_bakery_info_by_index(offset, id, they);
+		their_bakery_info = get_bakery_info(they, lock);
 		assert(their_bakery_info);
 
 		read_cache_op(their_bakery_info, is_cached);
@@ -138,26 +139,29 @@
 	 * finish calculating our ticket value that we're done
 	 */
 	++my_ticket;
-	my_bakery_info->lock_data = make_bakery_data(CHOOSING_DONE, my_ticket);
+	my_bakery_info->lock_data = make_bakery_data(CHOSEN_TICKET, my_ticket);
 
 	write_cache_op(my_bakery_info, is_cached);
 
 	return my_ticket;
 }
 
-void bakery_lock_get(unsigned int id, unsigned int offset)
+void bakery_lock_get(bakery_lock_t *lock)
 {
 	unsigned int they, me, is_cached;
 	unsigned int my_ticket, my_prio, their_ticket;
 	bakery_info_t *their_bakery_info;
-	uint16_t their_bakery_data;
+	unsigned int their_bakery_data;
 
-	me = platform_get_core_pos(read_mpidr_el1());
-
+	me = plat_my_core_pos();
+#ifdef AARCH32
+	is_cached = read_sctlr() & SCTLR_C_BIT;
+#else
 	is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#endif
 
 	/* Get a ticket */
-	my_ticket = bakery_get_ticket(id, offset, me, is_cached);
+	my_ticket = bakery_get_ticket(lock, me, is_cached);
 
 	/*
 	 * Now that we got our ticket, compute our priority value, then compare
@@ -172,17 +176,14 @@
 		 * Get a reference to the other contender's bakery info and
 		 * ensure that a stale copy is not read.
 		 */
-		their_bakery_info = get_bakery_info_by_index(offset, id, they);
+		their_bakery_info = get_bakery_info(they, lock);
 		assert(their_bakery_info);
-		read_cache_op(their_bakery_info, is_cached);
-
-		their_bakery_data = their_bakery_info->lock_data;
 
 		/* Wait for the contender to get their ticket */
-		while (bakery_is_choosing(their_bakery_data)) {
+		do {
 			read_cache_op(their_bakery_info, is_cached);
 			their_bakery_data = their_bakery_info->lock_data;
-		}
+		} while (bakery_is_choosing(their_bakery_data));
 
 		/*
 		 * If the other party is a contender, they'll have non-zero
@@ -203,14 +204,22 @@
 				== bakery_ticket_number(their_bakery_info->lock_data));
 		}
 	}
+	/* Lock acquired */
 }
 
-void bakery_lock_release(unsigned int id, unsigned int offset)
+void bakery_lock_release(bakery_lock_t *lock)
 {
 	bakery_info_t *my_bakery_info;
+#ifdef AARCH32
+	unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
+#else
 	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#endif
 
-	my_bakery_info = get_my_bakery_info(offset, id);
+	my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
+
+	assert(is_lock_acquired(my_bakery_info, is_cached));
+
 	my_bakery_info->lock_data = 0;
 	write_cache_op(my_bakery_info, is_cached);
 	sev();
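
With the per-lock arrays replaced by one cache-line-sized slot per CPU, get_bakery_info() becomes plain pointer arithmetic on the lock's base address. A sketch of the layout it implies (illustrative only; bakery_lock_t and bakery_info_t come from the bakery_lock header, not shown here):

/* Illustrative only: two CPUs' slots for the same lock never share a line. */
static size_t slot_distance(bakery_lock_t *lock, unsigned int a, unsigned int b)
{
	bakery_info_t *sa = get_bakery_info(a, lock);
	bakery_info_t *sb = get_bakery_info(b, lock);

	/* Assuming b > a: a whole number of PERCPU_BAKERY_LOCK_SIZE slots. */
	return (size_t)((uintptr_t)sb - (uintptr_t)sa);
}
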
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
new file mode 100644
index 0000000..bc77bc9
--- /dev/null
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+
+func spin_lock
+	mov	r2, #1
+1:
+	ldrex	r1, [r0]
+	cmp	r1, #0
+	wfene
+	strexeq	r1, r2, [r0]
+	cmpeq	r1, #0
+	bne	1b
+	dmb
+	bx	lr
+endfunc spin_lock
+
+
+func spin_unlock
+	mov	r1, #0
+	stl	r1, [r0]
+	bx	lr
+endfunc spin_unlock
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
new file mode 100644
index 0000000..e2f9eaa
--- /dev/null
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+	.globl	spin_lock
+	.globl	spin_unlock
+
+#if ARM_ARCH_AT_LEAST(8, 1)
+
+/*
+ * When compiled for ARMv8.1 or later, choose spin locks based on Compare and
+ * Swap instruction.
+ */
+# define USE_CAS	1
+
+/*
+ * Lock contenders using CAS, upon failing to acquire the lock, wait with the
+ * monitor in open state. Therefore, a normal store upon unlocking won't
+ * generate an SEV. Use explicit SEV instruction with CAS unlock.
+ */
+# define COND_SEV()	sev
+
+#else
+
+# define USE_CAS	0
+
+/*
+ * Lock contenders using exclusive pairs, upon failing to acquire the lock, wait
+ * with the monitor in exclusive state. A normal store upon unlocking will
+ * implicitly generate an event; so, no explicit SEV with unlock is required.
+ */
+# define COND_SEV()
+
+#endif
+
+#if USE_CAS
+
+	.arch	armv8.1-a
+
+/*
+ * Acquire lock using Compare and Swap instruction.
+ *
+ * Compare for 0 with acquire semantics, and swap 1. Wait until CAS returns
+ * 0.
+ *
+ * void spin_lock(spinlock_t *lock);
+ */
+func spin_lock
+	mov	w2, #1
+	sevl
+1:
+	wfe
+	mov	w1, wzr
+	casa	w1, w2, [x0]
+	cbnz	w1, 1b
+	ret
+endfunc spin_lock
+
+	.arch	armv8-a
+
+#else /* !USE_CAS */
+
+/*
+ * Acquire lock using load-/store-exclusive instruction pair.
+ *
+ * void spin_lock(spinlock_t *lock);
+ */
+func spin_lock
+	mov	w2, #1
+	sevl
+l1:	wfe
+l2:	ldaxr	w1, [x0]
+	cbnz	w1, l1
+	stxr	w1, w2, [x0]
+	cbnz	w1, l2
+	ret
+endfunc spin_lock
+
+#endif /* USE_CAS */
+
+/*
+ * Release lock previously acquired by spin_lock.
+ *
+ * Unconditionally write 0, and conditionally generate an event.
+ *
+ * void spin_unlock(spinlock_t *lock);
+ */
+func spin_unlock
+	stlr	wzr, [x0]
+	COND_SEV()
+	ret
+endfunc spin_unlock
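
For readers more comfortable with C than with the exclusives/CAS assembly above, a rough C11-atomics analogue of the same acquire/release semantics (a sketch only, not part of the firmware, which has to stay in assembly at this level):

#include <stdatomic.h>

typedef struct { atomic_uint v; } c_spinlock_t;	/* illustrative analogue */

static void c_spin_lock(c_spinlock_t *l)
{
	unsigned int expected;

	do {
		expected = 0;
		/* acquire on success, like CASA / LDAXR-STXR above */
	} while (!atomic_compare_exchange_weak_explicit(&l->v, &expected, 1,
			memory_order_acquire, memory_order_relaxed));
}

static void c_spin_unlock(c_spinlock_t *l)
{
	/* release store, like STLR above */
	atomic_store_explicit(&l->v, 0, memory_order_release);
}
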
diff --git a/lib/locks/exclusive/spinlock.S b/lib/locks/exclusive/spinlock.S
index 5eae2b0..2141f98 100644
--- a/lib/locks/exclusive/spinlock.S
+++ b/lib/locks/exclusive/spinlock.S
@@ -1,50 +1,9 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <asm_macros.S>
-
-	.globl	spin_lock
-	.globl	spin_unlock
-
-
-func spin_lock
-	mov	w2, #1
-	sevl
-l1:	wfe
-l2:	ldaxr	w1, [x0]
-	cbnz	w1, l1
-	stxr	w1, w2, [x0]
-	cbnz	w1, l2
-	ret
-
-
-func spin_unlock
-	stlr	wzr, [x0]
-	ret
+#if !ERROR_DEPRECATED
+#include "./aarch64/spinlock.S"
+#endif
diff --git a/lib/optee/optee_utils.c b/lib/optee/optee_utils.c
new file mode 100644
index 0000000..deb948c
--- /dev/null
+++ b/lib/optee/optee_utils.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <errno.h>
+#include <optee_utils.h>
+
+/*
+ * load_addr_hi and load_addr_lo: image load address.
+ * image_id: 0 - pager, 1 - paged
+ * size: image size in bytes.
+ */
+typedef struct optee_image {
+	uint32_t load_addr_hi;
+	uint32_t load_addr_lo;
+	uint32_t image_id;
+	uint32_t size;
+} optee_image_t;
+
+#define OPTEE_PAGER_IMAGE_ID		0
+#define OPTEE_PAGED_IMAGE_ID		1
+#define OPTEE_MAX_IMAGE_NUM		2
+
+#define TEE_MAGIC_NUM_OPTEE		0x4554504f
+/*
+ * magic: header magic number.
+ * version: OPTEE header version:
+ * 	1 - not supported
+ * 	2 - supported
+ * arch: OPTEE os architecture type: 0 - AARCH32, 1 - AARCH64.
+ * flags: unused currently.
+ * nb_images: number of images.
+ */
+typedef struct optee_header {
+	uint32_t magic;
+	uint8_t version;
+	uint8_t arch;
+	uint16_t flags;
+	uint32_t nb_images;
+	optee_image_t optee_image[];
+} optee_header_t;
+
+/*******************************************************************************
+ * Check if it is a valid tee header
+ * Return 1 if valid
+ * Return 0 if invalid
+ ******************************************************************************/
+static inline int tee_validate_header(optee_header_t *optee_header)
+{
+	if ((optee_header->magic == TEE_MAGIC_NUM_OPTEE) &&
+		(optee_header->version == 2) &&
+		(optee_header->nb_images <= OPTEE_MAX_IMAGE_NUM)) {
+		return 1;
+	}
+
+	WARN("Not a known TEE, use default loading options.\n");
+	return 0;
+}
+
+/*******************************************************************************
+ * Parse the OPTEE image
+ * Return 0 on success or a negative error code otherwise.
+ ******************************************************************************/
+static int parse_optee_image(image_info_t *image_info,
+		optee_image_t *optee_image)
+{
+	uintptr_t init_load_addr, free_end, requested_end;
+	size_t init_size;
+
+	init_load_addr = ((uint64_t)optee_image->load_addr_hi << 32) |
+					optee_image->load_addr_lo;
+	init_size = optee_image->size;
+
+	/*
+	 * A load address of -1 means the loader decides; use our pre-mapped area
+	 * for the current image, since arm-tf cannot allocate memory dynamically.
+	 */
+	if (init_load_addr == -1)
+		init_load_addr = image_info->image_base;
+
+	/* Check that the default end address doesn't overflow */
+	if (check_uptr_overflow(image_info->image_base,
+				image_info->image_max_size - 1))
+		return -1;
+	free_end = image_info->image_base + (image_info->image_max_size - 1);
+
+	/* Check that the image end address doesn't overflow */
+	if (check_uptr_overflow(init_load_addr, init_size - 1))
+		return -1;
+	requested_end = init_load_addr + (init_size - 1);
+	/*
+	 * Check that the requested RAM location is within reserved
+	 * space for OPTEE.
+	 */
+	if (!((init_load_addr >= image_info->image_base) &&
+			(requested_end <= free_end))) {
+		WARN("The load address in optee header %p - %p is not in reserved area: %p - %p.\n",
+				(void *)init_load_addr,
+				(void *)(init_load_addr + init_size),
+				(void *)image_info->image_base,
+				(void *)(image_info->image_base +
+					image_info->image_max_size));
+		return -1;
+	}
+
+	/*
+	 * Remove the skip attr from image_info so that the image will be
+	 * loaded. The default attr in image_info is "IMAGE_ATTRIB_SKIP_LOADING",
+	 * which means the image will not be loaded. Having parsed the header, we
+	 * know the extra image needs to be loaded, so remove the skip attr.
+	 */
+	image_info->h.attr &= ~IMAGE_ATTRIB_SKIP_LOADING;
+
+	/* Update image base and size of image_info */
+	image_info->image_base = init_load_addr;
+	image_info->image_size = init_size;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Parse the OPTEE header
+ * Return 0 on success or a negative error code otherwise.
+ ******************************************************************************/
+int parse_optee_header(entry_point_info_t *header_ep,
+		image_info_t *pager_image_info,
+		image_info_t *paged_image_info)
+
+{
+	optee_header_t *optee_header;
+	int num, ret;
+
+	assert(header_ep);
+	optee_header = (optee_header_t *)header_ep->pc;
+	assert(optee_header);
+
+	/*
+	 * OPTEE image has 3 types:
+	 *
+	 * 1. Plain OPTEE bin without header.
+	 *	Original bin without header, return directly,
+	 *	BL32_EXTRA1_IMAGE_ID and BL32_EXTRA2_IMAGE_ID will be skipped.
+	 *
+	 * 2. OPTEE bin with header bin, but no paging.
+	 *	Header available and nb_images = 1, remove skip attr for
+	 *	BL32_EXTRA1_IMAGE_ID. BL32_EXTRA1_IMAGE_ID will be loaded,
+	 *	and BL32_EXTRA2_IMAGE_ID be skipped.
+	 *
+	 * 3. OPTEE image with paging support.
+	 *	Header available and nb_images = 2, there are 3 bins: header,
+	 *	pager and pageable. Remove skip attr for BL32_EXTRA1_IMAGE_ID
+	 *	and BL32_EXTRA2_IMAGE_ID to load pager and paged bin.
+	 */
+	if (!tee_validate_header(optee_header)) {
+		INFO("Invalid OPTEE header, legacy mode.\n");
+		/* Set legacy OPTEE runtime arch - aarch64 */
+		header_ep->args.arg0 = MODE_RW_64;
+		return 0;
+	}
+
+	/* Print the OPTEE header information */
+	INFO("OPTEE ep=0x%x\n", (unsigned int)header_ep->pc);
+	INFO("OPTEE header info:\n");
+	INFO("      magic=0x%x\n", optee_header->magic);
+	INFO("      version=0x%x\n", optee_header->version);
+	INFO("      arch=0x%x\n", optee_header->arch);
+	INFO("      flags=0x%x\n", optee_header->flags);
+	INFO("      nb_images=0x%x\n", optee_header->nb_images);
+
+	/* Parse OPTEE image */
+	for (num = 0; num < optee_header->nb_images; num++) {
+		if (optee_header->optee_image[num].image_id ==
+				OPTEE_PAGER_IMAGE_ID) {
+			ret = parse_optee_image(pager_image_info,
+				&optee_header->optee_image[num]);
+		} else if (optee_header->optee_image[num].image_id ==
+				OPTEE_PAGED_IMAGE_ID) {
+			ret = parse_optee_image(paged_image_info,
+				&optee_header->optee_image[num]);
+		} else {
+			ERROR("Parse optee image failed.\n");
+			return -1;
+		}
+
+		if (ret != 0)
+			return -1;
+	}
+
+	/*
+	 * Update the "pc" value, which should come from the pager image. Once
+	 * the header image has been parsed it is no longer needed; the image
+	 * actually executed after BL31 is the pager image.
+	 */
+	header_ep->pc =	pager_image_info->image_base;
+
+	/*
+	 * The paged image load address and size are passed in the
+	 * header image arguments so that they can be read by the
+	 * BL32 SPD.
+	 */
+	header_ep->args.arg1 = paged_image_info->image_base;
+	header_ep->args.arg2 = paged_image_info->image_size;
+
+	/* Set OPTEE runtime arch - aarch32/aarch64 */
+	if (optee_header->arch == 0)
+		header_ep->args.arg0 = MODE_RW_32;
+	else
+		header_ep->args.arg0 = MODE_RW_64;
+
+	return 0;
+}
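
For reference, a sketch of the producer side of the header contract that parse_optee_image() relies on: the 64-bit load address is split across load_addr_hi/lo, and an all-ones value (-1) means "let the loader pick". The helper below is hypothetical and not part of this patch:

/* Illustrative only: fill one image entry of an OPTEE header. */
static void set_optee_image(optee_image_t *img, uint32_t id,
				uint64_t load_addr, uint32_t size)
{
	img->image_id = id;			/* pager (0) or paged (1) */
	img->load_addr_hi = (uint32_t)(load_addr >> 32);
	img->load_addr_lo = (uint32_t)load_addr;
	img->size = size;
}
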
diff --git a/lib/pmf/pmf_main.c b/lib/pmf/pmf_main.c
new file mode 100644
index 0000000..2cf260e
--- /dev/null
+++ b/lib/pmf/pmf_main.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <pmf.h>
+#include <string.h>
+
+/*******************************************************************************
+ * The 'pmf_svc_descs' array holds the PMF service descriptors exported by
+ * services by placing them in the 'pmf_svc_descs' linker section.
+ * The 'pmf_svc_descs_indices' array holds the index of a descriptor in the
+ * 'pmf_svc_descs' array. The TID[15:10] bits in the time-stamp id are used
+ * to get an index into the 'pmf_svc_descs_indices' array. This gives the
+ * index of the descriptor in the 'pmf_svc_descs' array  which contains the
+ * service function pointers.
+ ******************************************************************************/
+extern uintptr_t __PMF_SVC_DESCS_START__;
+extern uintptr_t __PMF_SVC_DESCS_END__;
+#define PMF_SVC_DESCS_START		((uintptr_t)(&__PMF_SVC_DESCS_START__))
+#define PMF_SVC_DESCS_END		((uintptr_t)(&__PMF_SVC_DESCS_END__))
+extern void *__PERCPU_TIMESTAMP_SIZE__;
+#define PMF_PERCPU_TIMESTAMP_SIZE	((uintptr_t)&__PERCPU_TIMESTAMP_SIZE__)
+extern uintptr_t __PMF_TIMESTAMP_START__;
+#define PMF_TIMESTAMP_ARRAY_START	((uintptr_t)&__PMF_TIMESTAMP_START__)
+extern uintptr_t __PMF_TIMESTAMP_END__;
+#define PMF_TIMESTAMP_ARRAY_END		((uintptr_t)&__PMF_TIMESTAMP_END__)
+
+#define PMF_SVC_DESCS_MAX		10
+
+/*
+ * This is used to traverse through registered PMF services.
+ */
+static pmf_svc_desc_t *pmf_svc_descs;
+
+/*
+ * This array is used to store registered PMF services in sorted order.
+ */
+static int pmf_svc_descs_indices[PMF_SVC_DESCS_MAX];
+
+/*
+ * This is used to track total number of successfully registered PMF services.
+ */
+static int pmf_num_services;
+
+/*
+ * This is the main PMF function that initializes the registered
+ * PMF services and sorts them in ascending order of service ID.
+ */
+int pmf_setup(void)
+{
+	int rc, ii, jj = 0;
+	int pmf_svc_descs_num, temp_val;
+
+	/* If no PMF services are registered then simply bail out */
+	pmf_svc_descs_num = (PMF_SVC_DESCS_END - PMF_SVC_DESCS_START)/
+				 sizeof(pmf_svc_desc_t);
+	if (pmf_svc_descs_num == 0)
+		return 0;
+
+	assert(pmf_svc_descs_num < PMF_SVC_DESCS_MAX);
+
+	pmf_svc_descs = (pmf_svc_desc_t *) PMF_SVC_DESCS_START;
+	for (ii = 0; ii < pmf_svc_descs_num; ii++) {
+
+		assert(pmf_svc_descs[ii].get_ts);
+
+		/*
+		 * Call the initialization routine for this
+		 * PMF service, if it is defined.
+		 */
+		if (pmf_svc_descs[ii].init) {
+			rc = pmf_svc_descs[ii].init();
+			if (rc) {
+				WARN("Could not initialize PMF "
+					"service %s - skipping\n",
+					pmf_svc_descs[ii].name);
+				continue;
+			}
+		}
+
+		/* Update the pmf_svc_descs_indices array */
+		pmf_svc_descs_indices[jj++] = ii;
+	}
+
+	pmf_num_services = jj;
+
+	/*
+	 * Sort the successfully registered PMF services
+	 * according to service ID
+	 */
+	for (ii = 1; ii < pmf_num_services; ii++) {
+		for (jj = 0; jj < (pmf_num_services - ii); jj++) {
+			if ((pmf_svc_descs[jj].svc_config & PMF_SVC_ID_MASK) >
+				(pmf_svc_descs[jj + 1].svc_config &
+						PMF_SVC_ID_MASK)) {
+				temp_val = pmf_svc_descs_indices[jj];
+				pmf_svc_descs_indices[jj] =
+						pmf_svc_descs_indices[jj+1];
+				pmf_svc_descs_indices[jj+1] = temp_val;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * This function performs a binary search to find the registered
+ * PMF service corresponding to the Service ID provided in the `tid` argument.
+ */
+static pmf_svc_desc_t *get_service(unsigned int tid)
+{
+	int low = 0;
+	int mid;
+	int high = pmf_num_services;
+	unsigned int svc_id = tid & PMF_SVC_ID_MASK;
+	int index;
+	unsigned int desc_svc_id;
+
+	if (pmf_num_services == 0)
+		return NULL;
+
+	assert(pmf_svc_descs);
+
+	do {
+		mid = (low + high) / 2;
+		index = pmf_svc_descs_indices[mid];
+
+		desc_svc_id = pmf_svc_descs[index].svc_config & PMF_SVC_ID_MASK;
+		if (svc_id < desc_svc_id)
+			high = mid - 1;
+		if (svc_id > desc_svc_id)
+			low = mid + 1;
+	} while ((svc_id != desc_svc_id) && (low <= high));
+
+	/*
+	 * Make sure the Service found supports the tid range.
+	 */
+	if ((svc_id == desc_svc_id) && ((tid & PMF_TID_MASK) <
+		(pmf_svc_descs[index].svc_config & PMF_TID_MASK)))
+		return (pmf_svc_desc_t *)&pmf_svc_descs[index];
+
+	return NULL;
+}
+
+/*
+ * This function gets the time-stamp value for the PMF services
+ * registered for SMC interface based on `tid` and `mpidr`.
+ */
+int pmf_get_timestamp_smc(unsigned int tid,
+		u_register_t mpidr,
+		unsigned int flags,
+		unsigned long long *ts_value)
+{
+	pmf_svc_desc_t *svc_desc;
+	assert(ts_value);
+
+	/* Search for registered service. */
+	svc_desc = get_service(tid);
+
+	if ((svc_desc == NULL) || (plat_core_pos_by_mpidr(mpidr) < 0)) {
+		*ts_value = 0;
+		return -EINVAL;
+	} else {
+		/* Call the service time-stamp handler. */
+		*ts_value = svc_desc->get_ts(tid, mpidr, flags);
+		return 0;
+	}
+}
+
+/*
+ * This function can be used to dump `ts` value for given `tid`.
+ * Assumption is that the console is already initialized.
+ */
+void __pmf_dump_timestamp(unsigned int tid, unsigned long long ts)
+{
+	tf_printf("PMF:cpu %u	tid %u	ts %llu\n",
+		plat_my_core_pos(), tid, ts);
+}
+
+/*
+ * This function calculates the time-stamp address identified by
+ * `base_addr`, `tid` and `cpuid`.
+ */
+static inline uintptr_t calc_ts_addr(uintptr_t base_addr,
+		unsigned int tid,
+		unsigned int cpuid)
+{
+	assert(cpuid < PLATFORM_CORE_COUNT);
+	assert(base_addr >= PMF_TIMESTAMP_ARRAY_START);
+	assert(base_addr < ((PMF_TIMESTAMP_ARRAY_START +
+		PMF_PERCPU_TIMESTAMP_SIZE) - ((tid & PMF_TID_MASK) *
+		sizeof(unsigned long long))));
+
+	base_addr += ((cpuid * PMF_PERCPU_TIMESTAMP_SIZE) +
+		((tid & PMF_TID_MASK) * sizeof(unsigned long long)));
+
+	return base_addr;
+}
+
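+/*
+ * Worked example for calc_ts_addr() (sizes are illustrative): with a
+ * hypothetical PMF_PERCPU_TIMESTAMP_SIZE of 0x100, cpuid 2 and
+ * (tid & PMF_TID_MASK) equal to 3, the returned address is
+ * base_addr + (2 * 0x100) + (3 * sizeof(unsigned long long)), i.e.
+ * base_addr + 0x218.
+ */
+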
+/*
+ * This function stores the `ts` value to the storage identified by
+ * `base_addr`, `tid` and current cpu id.
+ * Note: The timestamp addresses are cache line aligned per cpu
+ * and only the owning CPU would ever write into it.
+ */
+void __pmf_store_timestamp(uintptr_t base_addr,
+			unsigned int tid,
+			unsigned long long ts)
+{
+	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
+				 tid, plat_my_core_pos());
+	*ts_addr = ts;
+}
+
+/*
+ * This is the cache-maintaining variant of `pmf_store_my_timestamp`:
+ * it stores the time-stamp and then flushes it to main memory.
+ * Note: The timestamp addresses are cache line aligned per cpu
+ * and only the owning CPU would ever write into it.
+ */
+void __pmf_store_timestamp_with_cache_maint(uintptr_t base_addr,
+			unsigned int tid,
+			unsigned long long ts)
+{
+	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
+				 tid, plat_my_core_pos());
+	*ts_addr = ts;
+	flush_dcache_range((uintptr_t)ts_addr, sizeof(unsigned long long));
+}
+
+/*
+ * This function retrieves the `ts` value from the storage identified by
+ * `base_addr`, `tid` and `cpuid`.
+ * Note: The timestamp addresses are cache line aligned per cpu.
+ */
+unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
+			unsigned int tid,
+			unsigned int cpuid,
+			unsigned int flags)
+{
+	assert(cpuid < PLATFORM_CORE_COUNT);
+	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
+				tid, cpuid);
+
+	if (flags & PMF_CACHE_MAINT)
+		inv_dcache_range((uintptr_t)ts_addr, sizeof(unsigned long long));
+
+	return *ts_addr;
+}
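+
+/*
+ * Illustrative usage sketch of the store/retrieve pair above (assumes a base
+ * address within the PMF time-stamp region and a tid belonging to a
+ * registered service; the counter read is only an example source of a
+ * time-stamp):
+ *
+ *	__pmf_store_timestamp_with_cache_maint(base_addr, tid, read_cntpct_el0());
+ *	...
+ *	ts = __pmf_get_timestamp(base_addr, tid, cpuid, PMF_CACHE_MAINT);
+ *
+ * Passing PMF_CACHE_MAINT to the reader invalidates the cache line before the
+ * read, matching the flush performed by the store variant.
+ */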
diff --git a/lib/pmf/pmf_smc.c b/lib/pmf/pmf_smc.c
new file mode 100644
index 0000000..248c1fa
--- /dev/null
+++ b/lib/pmf/pmf_smc.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <pmf.h>
+#include <smcc_helpers.h>
+
+/*
+ * This function is responsible for handling all PMF SMC calls.
+ */
+uintptr_t pmf_smc_handler(unsigned int smc_fid,
+			u_register_t x1,
+			u_register_t x2,
+			u_register_t x3,
+			u_register_t x4,
+			void *cookie,
+			void *handle,
+			u_register_t flags)
+{
+	int rc;
+	unsigned long long ts_value;
+
+	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+
+		x1 = (uint32_t)x1;
+		x2 = (uint32_t)x2;
+		x3 = (uint32_t)x3;
+
+		switch (smc_fid) {
+		case PMF_SMC_GET_TIMESTAMP_32:
+			/*
+			 * Return error code and the captured
+			 * time-stamp to the caller.
+			 * x0 --> error code.
+			 * x1 - x2 --> time-stamp value.
+			 */
+			rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+			SMC_RET3(handle, rc, (uint32_t)ts_value,
+					(uint32_t)(ts_value >> 32));
+
+		default:
+			break;
+		}
+	} else {
+		switch (smc_fid) {
+		case PMF_SMC_GET_TIMESTAMP_64:
+			/*
+			 * Return error code and the captured
+			 * time-stamp to the caller.
+			 * x0 --> error code.
+			 * x1 --> time-stamp value.
+			 */
+			rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+			SMC_RET2(handle, rc, ts_value);
+
+		default:
+			break;
+		}
+	}
+
+	WARN("Unimplemented PMF Call: 0x%x \n", smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
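+
+/*
+ * Note on the calling convention above: for the SMC32 function ID the 64-bit
+ * time-stamp is split across the two return registers (low half first), so a
+ * caller would reassemble it as ((uint64_t)x2 << 32) | x1, whereas the SMC64
+ * function ID returns the whole value in x1.
+ */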
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
new file mode 100644
index 0000000..9373d4f
--- /dev/null
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+	.globl	psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level passed as the argument, e.g. level 0 results in a flush of
+ * the L1 cache only, while a higher power level flushes both the L1
+ * and L2 caches.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	push	{r4, lr}
+
+	/* ----------------------------------------------
+	 * Turn OFF the cache and do stack maintenance
+	 * prior to cpu operations. This sequence differs
+	 * from AArch64 because in AArch32 the assembler
+	 * routines for cpu operations use the stack,
+	 * whereas in AArch64 they do not.
+	 * ----------------------------------------------
+	 */
+	mov	r4, r0
+	bl	do_stack_maintenance
+
+	/* ---------------------------------------------
+	 * Invoke CPU-specific power down operations for
+	 * the appropriate level
+	 * ---------------------------------------------
+	 */
+	mov	r0, r4
+	pop	{r4, lr}
+	b	prepare_cpu_pwr_dwn
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+	/* r12 is pushed to meet the 8 byte stack alignment requirement */
+	push	{r12, lr}
+
+	/* ---------------------------------------------
+	 * Ensure any inflight stack writes have made it
+	 * to main memory.
+	 * ---------------------------------------------
+	 */
+	dmb	st
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	bl	plat_get_my_stack
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Enable the data cache.
+	 * ---------------------------------------------
+	 */
+	ldcopr	r0, SCTLR
+	orr	r0, r0, #SCTLR_C_BIT
+	stcopr	r0, SCTLR
+	isb
+
+	pop	{r12, pc}
+endfunc psci_do_pwrup_cache_maintenance
+
+	/* ---------------------------------------------
+	 * void do_stack_maintenance(void)
+	 * Do stack maintenance by flushing the used
+	 * stack to the main memory and invalidating the
+	 * remainder.
+	 * ---------------------------------------------
+	 */
+func do_stack_maintenance
+	push	{r4, lr}
+	bl	plat_get_my_stack
+
+	/* Turn off the D-cache */
+	ldcopr	r1, SCTLR
+	bic	r1, #SCTLR_C_BIT
+	stcopr	r1, SCTLR
+	isb
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in r1.
+	 * ---------------------------------------------
+	 */
+	mov	r4, r0
+	mov	r1, sp
+	sub	r1, r0, r1
+	mov	r0, sp
+	bl	flush_dcache_range
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the unused
+	 * stack memory in r1. Calculate and store the
+	 * stack base address in r0.
+	 * ---------------------------------------------
+	 */
+	sub	r0, r4, #PLATFORM_STACK_SIZE
+	sub	r1, sp, r0
+	bl	inv_dcache_range
+
+	pop	{r4, pc}
+endfunc do_stack_maintenance
+
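+/* -----------------------------------------------------------------------
+ * Worked example for do_stack_maintenance (sizes are illustrative): if
+ * plat_get_my_stack() returns a stack top of 0x1000, PLATFORM_STACK_SIZE is
+ * 0x800 and 0x100 bytes of stack are in use (sp == 0xF00), then the used
+ * region [0xF00, 0x1000) is flushed and the unused region [0x800, 0xF00)
+ * is invalidated.
+ * -----------------------------------------------------------------------
+ */
+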
+/* -----------------------------------------------------------------------
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. It should not exit the wfi and will
+ * be released from reset upon power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+	dsb	sy		// ensure write buffer empty
+	wfi
+	no_ret	plat_panic_handler
+endfunc psci_power_down_wfi
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
new file mode 100644
index 0000000..afe21eb
--- /dev/null
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+	.globl	psci_power_down_wfi
+#if !ERROR_DEPRECATED
+	.globl psci_entrypoint
+#endif
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level passed as the argument, e.g. level 0 results in a flush of
+ * the L1 cache only, while a higher power level flushes both the L1
+ * and L2 caches.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	stp     x29, x30, [sp,#-16]!
+	stp     x19, x20, [sp,#-16]!
+
+	/* ---------------------------------------------
+	 * Invoke CPU-specific power down operations for
+	 * the appropriate level
+	 * ---------------------------------------------
+	 */
+	bl	prepare_cpu_pwr_dwn
+
+	/* ---------------------------------------------
+	 * Do stack maintenance by flushing the used
+	 * stack to the main memory and invalidating the
+	 * remainder.
+	 * ---------------------------------------------
+	 */
+	bl	plat_get_my_stack
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in x1.
+	 * ---------------------------------------------
+	 */
+	mov	x19, x0
+	mov	x1, sp
+	sub	x1, x0, x1
+	mov	x0, sp
+	bl	flush_dcache_range
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the unused
+	 * stack memory in x1. Calculate and store the
+	 * stack base address in x0.
+	 * ---------------------------------------------
+	 */
+	sub	x0, x19, #PLATFORM_STACK_SIZE
+	sub	x1, sp, x0
+	bl	inv_dcache_range
+
+	ldp	x19, x20, [sp], #16
+	ldp	x29, x30, [sp], #16
+	ret
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+	stp	x29, x30, [sp,#-16]!
+
+	/* ---------------------------------------------
+	 * Ensure any inflight stack writes have made it
+	 * to main memory.
+	 * ---------------------------------------------
+	 */
+	dmb	st
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in x1. Calculate and store the
+	 * stack base address in x0.
+	 * ---------------------------------------------
+	 */
+	bl	plat_get_my_stack
+	mov	x1, sp
+	sub	x1, x0, x1
+	mov	x0, sp
+	bl	inv_dcache_range
+
+	/* ---------------------------------------------
+	 * Enable the data cache.
+	 * ---------------------------------------------
+	 */
+	mrs	x0, sctlr_el3
+	orr	x0, x0, #SCTLR_C_BIT
+	msr	sctlr_el3, x0
+	isb
+
+	ldp	x29, x30, [sp], #16
+	ret
+endfunc psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_power_down_wfi(void);
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. It should not exit the wfi and will
+ * be released from reset upon power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+	dsb	sy		// ensure write buffer empty
+	wfi
+	no_ret	plat_panic_handler
+endfunc psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_entrypoint(void);
+ * The deprecated entry point for PSCI on warm boot for AArch64.
+ * -----------------------------------------------------------------------
+ */
+func_deprecated psci_entrypoint
+	b	bl31_warm_entrypoint
+endfunc_deprecated psci_entrypoint
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
new file mode 100644
index 0000000..4502c24
--- /dev/null
+++ b/lib/psci/psci_common.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include <utils.h>
+#include "psci_private.h"
+
+/*
+ * SPD power management operations, expected to be supplied by the registered
+ * SPD on successful SP initialization
+ */
+const spd_pm_ops_t *psci_spd_pm;
+
+/*
+ * PSCI requested local power state map. This array is used to store the local
+ * power states requested by a CPU for power levels from level 1 to
+ * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
+ * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
+ * CPU are the same.
+ *
+ * During state coordination, the platform is passed an array containing the
+ * local states requested for a particular non cpu power domain by each cpu
+ * within the domain.
+ *
+ * TODO: Dense packing of the requested states will cause cache thrashing
+ * when multiple power domains write to it. If we allocate the requested
+ * states at each power level in a cache-line aligned per-domain memory,
+ * the cache thrashing can be avoided.
+ */
+static plat_local_state_t
+	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
+
+
+/*******************************************************************************
+ * Arrays that hold the platform's power domain tree information for state
+ * management of power domains.
+ * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
+ * which is an ancestor of a CPU power domain.
+ * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
+ ******************************************************************************/
+non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+#if USE_COHERENT_MEM
+__section("tzfw_coherent_mem")
+#endif
+;
+
+/* Lock for PSCI state coordination */
+DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt. ops
+ ******************************************************************************/
+const plat_psci_ops_t *psci_plat_pm_ops;
+
+/******************************************************************************
+ * Check that the maximum power level supported by the platform makes sense
+ *****************************************************************************/
+CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
+		PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
+		assert_platform_max_pwrlvl_check);
+
+/*
+ * The plat_local_state used by the platform is one of these types: RUN,
+ * RETENTION and OFF. The platform can define further sub-states for each type
+ * apart from RUN. This categorization is done to verify the sanity of the
+ * psci_power_state passed by the platform and to print debug information. The
+ * categorization is done on the basis of the following conditions:
+ *
+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
+ *
+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_RETN.
+ *
+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_OFF.
+ */
+typedef enum plat_local_state_type {
+	STATE_TYPE_RUN = 0,
+	STATE_TYPE_RETN,
+	STATE_TYPE_OFF
+} plat_local_state_type_t;
+
+/* The macro used to categorize plat_local_state. */
+#define find_local_state_type(plat_local_state)					\
+		((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE)	\
+		? STATE_TYPE_OFF : STATE_TYPE_RETN)				\
+		: STATE_TYPE_RUN)
+
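+/*
+ * Example of the categorization above (the state values are platform-defined
+ * and used here only for illustration): with PLAT_MAX_RET_STATE == 1 and
+ * PLAT_MAX_OFF_STATE == 2, a plat_local_state of 0 maps to STATE_TYPE_RUN,
+ * 1 maps to STATE_TYPE_RETN and 2 maps to STATE_TYPE_OFF.
+ */
+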
+/******************************************************************************
+ * Check that the maximum retention level supported by the platform is less
+ * than the maximum off level.
+ *****************************************************************************/
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+		assert_platform_max_off_and_retn_state_check);
+
+/******************************************************************************
+ * This function ensures that the power state parameter in a CPU_SUSPEND request
+ * is valid. If so, it returns the requested states for each power level.
+ *****************************************************************************/
+int psci_validate_power_state(unsigned int power_state,
+			      psci_power_state_t *state_info)
+{
+	/* Check SBZ bits in power state are zero */
+	if (psci_check_power_state(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	assert(psci_plat_pm_ops->validate_power_state);
+
+	/* Validate the power_state using platform pm_ops */
+	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
+}
+
+/******************************************************************************
+ * This function retrieves the `psci_power_state_t` for system suspend from
+ * the platform.
+ *****************************************************************************/
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
+{
+	/*
+	 * Assert that the required pm_ops hook is implemented to ensure that
+	 * the capability detected during psci_setup() is valid.
+	 */
+	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+
+	/*
+	 * Query the platform for the power_state required for system suspend
+	 */
+	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
+}
+
+/*******************************************************************************
+ * This function verifies that all the other cores in the system have been
+ * turned OFF and the current CPU is the last running CPU in the system.
+ * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
+ * otherwise.
+ ******************************************************************************/
+unsigned int psci_is_last_on_cpu(void)
+{
+	unsigned int cpu_idx, my_idx = plat_my_core_pos();
+
+	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+		if (cpu_idx == my_idx) {
+			assert(psci_get_aff_info_state() == AFF_STATE_ON);
+			continue;
+		}
+
+		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
+			return 0;
+	}
+
+	return 1;
+}
+
+/*******************************************************************************
+ * Routine to return the maximum power level to traverse to after a cpu has
+ * been physically powered up. It is expected to be called immediately after
+ * reset from assembler code.
+ ******************************************************************************/
+static unsigned int get_power_on_target_pwrlvl(void)
+{
+	unsigned int pwrlvl;
+
+	/*
+	 * Assume that this cpu was suspended and retrieve its target power
+	 * level. If it is invalid then it could only have been turned off
+	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
+	 * cpu can be turned off to.
+	 */
+	pwrlvl = psci_get_suspend_pwrlvl();
+	if (pwrlvl == PSCI_INVALID_PWR_LVL)
+		pwrlvl = PLAT_MAX_PWR_LVL;
+	return pwrlvl;
+}
+
+/******************************************************************************
+ * Helper function to update the requested local power state array. This array
+ * does not store the requested state for the CPU power level. Hence an
+ * assertion is added to prevent us from accessing the wrong index.
+ *****************************************************************************/
+static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
+					 unsigned int cpu_idx,
+					 plat_local_state_t req_pwr_state)
+{
+	/*
+	 * This should never happen; we have this here to avoid
+	 * "array subscript is above array bounds" errors in GCC.
+	 */
+	assert(pwrlvl > PSCI_CPU_PWR_LVL);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+#pragma GCC diagnostic pop
+}
+
+/******************************************************************************
+ * This function initializes the psci_req_local_pwr_states.
+ *****************************************************************************/
+void psci_init_req_local_pwr_states(void)
+{
+	/* Initialize the requested state of all non CPU power domains as OFF */
+	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
+			sizeof(psci_req_local_pwr_states));
+}
+
+/******************************************************************************
+ * Helper function to return a reference to an array containing the local power
+ * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
+ * array will be the number of cpu power domains of which this power domain is
+ * an ancestor. These requested states will be used to determine a suitable
+ * target state for this power domain during psci state coordination. An
+ * assertion is added to prevent us from accessing the CPU power level.
+ *****************************************************************************/
+static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
+							 unsigned int cpu_idx)
+{
+	assert(pwrlvl > PSCI_CPU_PWR_LVL);
+
+	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+}
+
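+/*
+ * Example of how the returned pointer is used (indices are illustrative):
+ * for a level-1 power domain whose first CPU has index 4 and which contains
+ * 4 CPUs, psci_get_req_local_pwr_states(1, 4) returns
+ * &psci_req_local_pwr_states[0][4], which the caller treats as a 4-entry
+ * array holding the states requested by CPUs 4 to 7 for that domain.
+ */
+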
+/*
+ * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
+ * memory.
+ *
+ * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory and
+ * is accessed by both cached and non-cached participants. To serve the lowest
+ * common denominator, a cache flush is performed before reads and after writes
+ * so that non-cached participants operate on the latest data in main memory.
+ *
+ * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
+ * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
+ * In both cases, no cache operations are required.
+ */
+
+/*
+ * Retrieve local state of non-CPU power domain node from a non-cached CPU,
+ * after any required cache maintenance operation.
+ */
+static plat_local_state_t get_non_cpu_pd_node_local_state(
+		unsigned int parent_idx)
+{
+#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+	return psci_non_cpu_pd_nodes[parent_idx].local_state;
+}
+
+/*
+ * Update local state of non-CPU power domain node from a cached CPU; perform
+ * any required cache maintenance operation afterwards.
+ */
+static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
+		plat_local_state_t state)
+{
+	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
+#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+}
+
+/******************************************************************************
+ * Helper function to return the current local power state of each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
+ * function will be called after a cpu is powered on to find the local state
+ * each power domain has emerged from.
+ *****************************************************************************/
+void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+				      psci_power_state_t *target_state)
+{
+	unsigned int parent_idx, lvl;
+	plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+	/* Copy the local power state from node to state_info */
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+	/* Set the higher levels to RUN */
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level.
+ *****************************************************************************/
+static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+					const psci_power_state_t *target_state)
+{
+	unsigned int parent_idx, lvl;
+	const plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
+
+	/*
+	 * Need to flush as local_state might be accessed with Data Cache
+	 * disabled during power on
+	 */
+	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
+
+	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+	/* Copy the local_state from state_info */
+	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+}
+
+
+/*******************************************************************************
+ * PSCI helper function to get the parent nodes corresponding to a cpu_index.
+ ******************************************************************************/
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+				      unsigned int end_lvl,
+				      unsigned int node_index[])
+{
+	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
+	unsigned int i;
+
+	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
+		*node_index++ = parent_node;
+		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
+	}
+}
+
+/******************************************************************************
+ * This function is invoked post CPU power up and initialization. It sets the
+ * affinity info state, target power state and requested power state for the
+ * current CPU and all its ancestor power domains to RUN.
+ *****************************************************************************/
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
+{
+	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+	/* Reset the local_state to RUN for the non cpu power domains. */
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+		set_non_cpu_pd_node_local_state(parent_idx,
+				PSCI_LOCAL_STATE_RUN);
+		psci_set_req_local_pwr_state(lvl,
+					     cpu_idx,
+					     PSCI_LOCAL_STATE_RUN);
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+	/* Set the affinity info state to ON */
+	psci_set_aff_info_state(AFF_STATE_ON);
+
+	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+	psci_flush_cpu_data(psci_svc_cpu_data);
+}
+
+/******************************************************************************
+ * This function is passed the local power states requested for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl). It updates the array of requested power
+ * states with this information.
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * retrieves the states requested by all the cpus of which the power domain at
+ * that level is an ancestor. It passes this information to the platform to
+ * coordinate and return the target power state. If the target state for a level
+ * is RUN then subsequent levels are not considered. At the CPU level, state
+ * coordination is not required. Hence, the requested and the target states are
+ * the same.
+ *
+ * The 'state_info' is updated with the target state for each level between the
+ * CPU and the 'end_pwrlvl' and returned to the caller.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ *****************************************************************************/
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+				psci_power_state_t *state_info)
+{
+	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+	unsigned int start_idx, ncpus;
+	plat_local_state_t target_state, *req_states;
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+	/*
+	 * For level 0, the requested state will be equivalent
+	 * to the target state.
+	 */
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+		/* First update the requested power state */
+		psci_set_req_local_pwr_state(lvl, cpu_idx,
+					     state_info->pwr_domain_state[lvl]);
+
+		/* Get the requested power states for this power level */
+		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
+		req_states = psci_get_req_local_pwr_states(lvl, start_idx);
+
+		/*
+		 * Let the platform coordinate amongst the requested states at
+		 * this power level and return the target local power state.
+		 */
+		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+		target_state = plat_get_target_pwr_state(lvl,
+							 req_states,
+							 ncpus);
+
+		state_info->pwr_domain_state[lvl] = target_state;
+
+		/* Break early if the negotiated target power state is RUN */
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+			break;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+	/*
+	 * This is for cases when we break out of the above loop early because
+	 * the target power state is RUN at a power level < end_pwrlvl.
+	 * We update the requested power state from state_info and then
+	 * set the target state as RUN.
+	 */
+	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+		psci_set_req_local_pwr_state(lvl, cpu_idx,
+					     state_info->pwr_domain_state[lvl]);
+		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+	}
+
+	/* Update the target state in the power domain nodes */
+	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+}
+
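+/*
+ * Illustrative walk-through of the coordination above, assuming a simple
+ * two-level topology (CPUs at level 0 grouped into clusters at level 1): if
+ * this CPU requests an OFF state for its cluster while a sibling CPU in the
+ * same cluster is still RUNning, the platform coordination is expected to
+ * return RUN for the cluster; the loop then breaks at level 1, the cluster's
+ * target state is left as RUN and only this CPU is powered down.
+ */
+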
+/******************************************************************************
+ * This function validates a suspend request by making sure that if a standby
+ * state is requested then no power level is turned off and the highest power
+ * level is placed in a standby/retention state.
+ *
+ * It also ensures that the state that level X will enter is not shallower than
+ * the state that level X + 1 will enter.
+ *
+ * This validation will be enabled only for DEBUG builds as the platform is
+ * expected to perform these validations as well.
+ *****************************************************************************/
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+			      unsigned int is_power_down_state)
+{
+	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
+	plat_local_state_t state;
+	plat_local_state_type_t req_state_type, deepest_state_type;
+	int i;
+
+	/* Find the target suspend power level */
+	target_lvl = psci_find_target_suspend_lvl(state_info);
+	if (target_lvl == PSCI_INVALID_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* All power domain levels are in a RUN state to begin with */
+	deepest_state_type = STATE_TYPE_RUN;
+
+	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+		state = state_info->pwr_domain_state[i];
+		req_state_type = find_local_state_type(state);
+
+		/*
+		 * While traversing from the highest power level to the lowest,
+		 * the state requested for lower levels has to be the same or
+		 * deeper i.e. equal to or greater than the state at the higher
+		 * levels. If this condition is true, then the requested state
+		 * becomes the deepest state encountered so far.
+		 */
+		if (req_state_type < deepest_state_type)
+			return PSCI_E_INVALID_PARAMS;
+		deepest_state_type = req_state_type;
+	}
+
+	/* Find the highest off power level */
+	max_off_lvl = psci_find_max_off_lvl(state_info);
+
+	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
+	max_retn_lvl = PSCI_INVALID_PWR_LVL;
+	if (target_lvl != max_off_lvl)
+		max_retn_lvl = target_lvl;
+
+	/*
+	 * If this is not a request for a power down state then max off level
+	 * has to be invalid and max retention level has to be a valid power
+	 * level.
+	 */
+	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
+				    max_retn_lvl == PSCI_INVALID_PWR_LVL))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
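+/*
+ * Example of the validation above, for an illustrative platform with
+ * PLAT_MAX_PWR_LVL == 1: a standby request with pwr_domain_state
+ * { RETENTION, RUN } is accepted, because no level is turned off and the
+ * target level (0) is in a retention state. A request with { RUN, OFF } is
+ * rejected, because level 0 would request a shallower state than level 1.
+ */
+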
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
+{
+	int i;
+
+	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+		if (is_local_state_off(state_info->pwr_domain_state[i]))
+			return i;
+	}
+
+	return PSCI_INVALID_PWR_LVL;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
+{
+	int i;
+
+	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+		if (!is_local_state_run(state_info->pwr_domain_state[i]))
+			return i;
+	}
+
+	return PSCI_INVALID_PWR_LVL;
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It picks up locks in order of
+ * increasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx)
+{
+	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+	unsigned int level;
+
+	/* No locking required for level 0. Hence start locking from level 1 */
+	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It releases the locks in order
+ * of decreasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx)
+{
+	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
+	int level;
+
+	/* Get the parent nodes */
+	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
+
+	/* Unlock top down. No unlocking required for level 0. */
+	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
+		parent_idx = parent_nodes[level - 1];
+		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
+	}
+}
+
+/*******************************************************************************
+ * Simple routine to determine whether an mpidr is valid or not.
+ ******************************************************************************/
+int psci_validate_mpidr(u_register_t mpidr)
+{
+	if (plat_core_pos_by_mpidr(mpidr) < 0)
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function determines the full entrypoint information for the requested
+ * PSCI entrypoint on power on/resume and returns it.
+ ******************************************************************************/
+#ifdef AARCH32
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+			       uintptr_t entrypoint,
+			       u_register_t context_id)
+{
+	u_register_t ep_attr;
+	unsigned int aif, ee, mode;
+	u_register_t scr = read_scr();
+	u_register_t ns_sctlr, sctlr;
+
+	/* Switch to non secure state */
+	write_scr(scr | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_sctlr();
+
+	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+	/* Return to original state */
+	write_scr(scr);
+	isb();
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep->pc = entrypoint;
+	zeromem(&ep->args, sizeof(ep->args));
+	ep->args.arg0 = context_id;
+
+	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Choose async. exception bits if HYP mode is not
+	 * implemented according to the values of SCR.{AW, FW} bits
+	 */
+	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+	return PSCI_E_SUCCESS;
+}
+
+#else
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+			       uintptr_t entrypoint,
+			       u_register_t context_id)
+{
+	u_register_t ep_attr, sctlr;
+	unsigned int daif, ee, mode;
+	u_register_t ns_scr_el3 = read_scr_el3();
+	u_register_t ns_sctlr_el1 = read_sctlr_el1();
+
+	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep->pc = entrypoint;
+	zeromem(&ep->args, sizeof(ep->args));
+	ep->args.arg0 = context_id;
+
+	/*
+	 * Figure out whether the cpu enters the non-secure address space
+	 * in aarch32 or aarch64
+	 */
+	if (ns_scr_el3 & SCR_RW_BIT) {
+
+		/*
+		 * Check whether a Thumb entry point has been provided for an
+		 * aarch64 EL
+		 */
+		if (entrypoint & 0x1)
+			return PSCI_E_INVALID_ADDRESS;
+
+		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+
+		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	} else {
+
+		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+		/*
+		 * TODO: Choose async. exception bits if HYP mode is not
+		 * implemented according to the values of SCR.{AW, FW} bits
+		 */
+		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+
+		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+#endif
+
+/*******************************************************************************
+ * This function validates the entrypoint with the platform layer if the
+ * appropriate pm_ops hook is exported by the platform and returns the
+ * 'entry_point_info'.
+ ******************************************************************************/
+int psci_validate_entry_point(entry_point_info_t *ep,
+			      uintptr_t entrypoint,
+			      u_register_t context_id)
+{
+	int rc;
+
+	/* Validate the entrypoint using platform psci_ops */
+	if (psci_plat_pm_ops->validate_ns_entrypoint) {
+		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+		if (rc != PSCI_E_SUCCESS)
+			return PSCI_E_INVALID_ADDRESS;
+	}
+
+	/*
+	 * Verify and derive the re-entry information for
+	 * the non-secure world from the non-secure state in
+	 * which this call originated.
+	 */
+	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
+	return rc;
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * traverses the node information and finds the highest power level powered
+ * off and performs generic, architectural, platform setup and state management
+ * to power on that power level and power levels below it.
+ * e.g. For a cpu that's been powered on, it will call the platform specific
+ * code to enable the gic cpu interface and for a cluster it will enable
+ * coherency at the interconnect level in addition to gic cpu interface.
+ ******************************************************************************/
+void psci_warmboot_entrypoint(void)
+{
+	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+
+	/*
+	 * Verify that we have been explicitly turned ON or resumed from
+	 * suspend.
+	 */
+	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
+		ERROR("Unexpected affinity info state");
+		panic();
+	}
+
+	/*
+	 * Get the maximum power domain level to traverse to after this cpu
+	 * has been physically powered up.
+	 */
+	end_pwrlvl = get_power_on_target_pwrlvl();
+
+	/*
+	 * This function acquires the lock corresponding to each power level so
+	 * that by the time all locks are taken, a consistent snapshot of the
+	 * system topology is held and state management can be done safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      cpu_idx);
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_stop(&state_info);
+#endif
+
+	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
+	/*
+	 * This CPU could be resuming from suspend or it could have just been
+	 * turned on. To distinguish between these 2 cases, we examine the
+	 * affinity state of the CPU:
+	 *  - If the affinity state is ON_PENDING then it has just been
+	 *    turned on.
+	 *  - Else it is resuming from suspend.
+	 *
+	 * Depending on the type of warm reset identified, choose the right set
+	 * of power management handlers and perform the generic, architectural
+	 * and platform specific handling.
+	 */
+	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
+		psci_cpu_on_finish(cpu_idx, &state_info);
+	else
+		psci_cpu_suspend_finish(cpu_idx, &state_info);
+
+	/*
+	 * Set the requested and target state of this CPU and all the higher
+	 * power domains which are ancestors of this CPU to run.
+	 */
+	psci_set_pwr_domains_to_run(end_pwrlvl);
+
+#if ENABLE_PSCI_STAT
+	/*
+	 * Update PSCI stats.
+	 * Caches are off when writing stats data on the power down path.
+	 * Since caches are now enabled, it's necessary to do cache
+	 * maintenance before reading that same data.
+	 */
+	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
+#endif
+
+	/*
+	 * This loop releases the lock corresponding to each power level
+	 * in the reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				      cpu_idx);
+}
+
+/*******************************************************************************
+ * This function initializes the set of hooks that PSCI invokes as part of power
+ * management operation. The power management hooks are expected to be provided
+ * by the SPD, after it finishes all its initialization
+ ******************************************************************************/
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
+{
+	assert(pm);
+	psci_spd_pm = pm;
+
+	if (pm->svc_migrate)
+		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
+
+	if (pm->svc_migrate_info)
+		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
+				| define_psci_cap(PSCI_MIG_INFO_TYPE);
+}
+
+/*******************************************************************************
+ * This function invokes the migrate info hook in the spd_pm_ops. It performs
+ * the necessary return value validation. If the Secure Payload is UP and
+ * migrate capable, it returns the mpidr of the CPU on which the Secure payload
+ * is resident through the mpidr parameter. Else the value of the parameter on
+ * return is undefined.
+ ******************************************************************************/
+int psci_spd_migrate_info(u_register_t *mpidr)
+{
+	int rc;
+
+	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
+		return PSCI_E_NOT_SUPPORTED;
+
+	rc = psci_spd_pm->svc_migrate_info(mpidr);
+
+	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
+		|| rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
+
+	return rc;
+}
+
+
+/*******************************************************************************
+ * This function prints the state of all power domains present in the
+ * system
+ ******************************************************************************/
+void psci_print_power_domain_map(void)
+{
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+	unsigned int idx;
+	plat_local_state_t state;
+	plat_local_state_type_t state_type;
+
+	/* This array maps to the PSCI_STATE_X definitions in psci.h */
+	static const char * const psci_state_type_str[] = {
+		"ON",
+		"RETENTION",
+		"OFF",
+	};
+
+	INFO("PSCI Power Domain Map:\n");
+	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
+							idx++) {
+		state_type = find_local_state_type(
+				psci_non_cpu_pd_nodes[idx].local_state);
+		INFO("  Domain Node : Level %u, parent_node %d,"
+				" State %s (0x%x)\n",
+				psci_non_cpu_pd_nodes[idx].level,
+				psci_non_cpu_pd_nodes[idx].parent_node,
+				psci_state_type_str[state_type],
+				psci_non_cpu_pd_nodes[idx].local_state);
+	}
+
+	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
+		state = psci_get_cpu_local_state_by_idx(idx);
+		state_type = find_local_state_type(state);
+		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
+				" State %s (0x%x)\n",
+				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
+				psci_cpu_pd_nodes[idx].parent_node,
+				psci_state_type_str[state_type],
+				psci_get_cpu_local_state_by_idx(idx));
+	}
+#endif
+}
+
+/******************************************************************************
+ * Return whether any secondaries were powered up with a CPU_ON call. A CPU
+ * that has ever been powered up would have set its MPIDR value to something
+ * other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
+ * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
+ * meaningful only when called on the primary CPU during early boot.
+ *****************************************************************************/
+int psci_secondaries_brought_up(void)
+{
+	unsigned int idx, n_valid = 0;
+
+	for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
+		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
+			n_valid++;
+	}
+
+	assert(n_valid);
+
+	return (n_valid > 1);
+}
+
+#if ENABLE_PLAT_COMPAT
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the 'power_state' parameter of
+ * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_powerstate(void)
+{
+	/* Sanity check to verify that CPU is within CPU_SUSPEND */
+	if (psci_get_aff_info_state() == AFF_STATE_ON &&
+		!is_local_state_run(psci_get_cpu_local_state()))
+		return psci_power_state_compat[plat_my_core_pos()];
+
+	return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id of the current
+ * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_stateid(void)
+{
+	unsigned int power_state;
+	power_state = psci_get_suspend_powerstate();
+	if (power_state != PSCI_INVALID_DATA)
+		return psci_get_pstate_id(power_state);
+
+	return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id encoded in the
+ * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
+ * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
+{
+	int cpu_idx = plat_core_pos_by_mpidr(mpidr);
+
+	if (cpu_idx == -1)
+		return PSCI_INVALID_DATA;
+
+	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
+	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
+		!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
+		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
+
+	return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * This function returns the highest affinity level which is in the OFF
+ * state. The affinity instance with which the level is associated is
+ * determined by the caller.
+ ******************************************************************************/
+unsigned int psci_get_max_phys_off_afflvl(void)
+{
+	psci_power_state_t state_info;
+
+	zeromem(&state_info, sizeof(state_info));
+	psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
+
+	return psci_find_target_suspend_lvl(&state_info);
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return target affinity level requested
+ * for the CPU_SUSPEND. This function assumes affinity levels correspond to
+ * power domain levels on the platform.
+ ******************************************************************************/
+int psci_get_suspend_afflvl(void)
+{
+	return psci_get_suspend_pwrlvl();
+}
+
+#endif
+
+/*******************************************************************************
+ * Initiate power down sequence, by calling power down operations registered for
+ * this CPU.
+ ******************************************************************************/
+void psci_do_pwrdown_sequence(unsigned int power_level)
+{
+#if HW_ASSISTED_COHERENCY
+	/*
+	 * With hardware-assisted coherency, the CPU drivers only initiate the
+	 * power down sequence, without performing cache-maintenance operations
+	 * in software. Data caches and MMU remain enabled both before and after
+	 * this call.
+	 */
+	prepare_cpu_pwr_dwn(power_level);
+#else
+	/*
+	 * Without hardware-assisted coherency, the CPU drivers disable data
+	 * caches and MMU, then perform cache-maintenance operations in
+	 * software.
+	 *
+	 * We ought to call prepare_cpu_pwr_dwn() to initiate the power down
+	 * sequence. We currently have data caches and MMU enabled, but the
+	 * function will return with data caches and MMU disabled. We must
+	 * ensure that the stack memory is flushed out to memory before we start
+	 * popping from it again.
+	 */
+	psci_do_pwrdown_cache_maintenance(power_level);
+#endif
+}
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
new file mode 100644
index 0000000..1d4aac4
--- /dev/null
+++ b/lib/psci/psci_lib.mk
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PSCI_LIB_SOURCES	:=	lib/el3_runtime/cpu_data_array.c	\
+				lib/el3_runtime/${ARCH}/cpu_data.S	\
+				lib/el3_runtime/${ARCH}/context_mgmt.c	\
+				lib/cpus/${ARCH}/cpu_helpers.S		\
+				lib/cpus/errata_report.c		\
+				lib/locks/exclusive/${ARCH}/spinlock.S	\
+				lib/psci/psci_off.c			\
+				lib/psci/psci_on.c			\
+				lib/psci/psci_suspend.c			\
+				lib/psci/psci_common.c			\
+				lib/psci/psci_main.c			\
+				lib/psci/psci_setup.c			\
+				lib/psci/psci_system_off.c		\
+				lib/psci/psci_mem_protect.c		\
+				lib/psci/${ARCH}/psci_helpers.S
+
+ifeq (${ARCH}, aarch64)
+PSCI_LIB_SOURCES	+=	lib/el3_runtime/aarch64/context.S
+endif
+
+ifeq (${USE_COHERENT_MEM}, 1)
+PSCI_LIB_SOURCES		+=	lib/locks/bakery/bakery_lock_coherent.c
+else
+PSCI_LIB_SOURCES		+=	lib/locks/bakery/bakery_lock_normal.c
+endif
+
+ifeq (${ENABLE_PSCI_STAT}, 1)
+PSCI_LIB_SOURCES		+=	lib/psci/psci_stat.c
+endif
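+
+# Usage sketch (the image makefile and variable names are assumptions, shown
+# only for illustration): a BL image that provides PSCI would typically do
+#
+#   include lib/psci/psci_lib.mk
+#   BL31_SOURCES += ${PSCI_LIB_SOURCES}
+#
+# so that the sources listed above are built into that image.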
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
new file mode 100644
index 0000000..4105e63
--- /dev/null
+++ b/lib/psci/psci_main.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <pmf.h>
+#include <runtime_instr.h>
+#include <smcc.h>
+#include <string.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+int psci_cpu_on(u_register_t target_cpu,
+		uintptr_t entrypoint,
+		u_register_t context_id)
+
+{
+	int rc;
+	entry_point_info_t ep;
+
+	/* Determine if the cpu exists or not */
+	rc = psci_validate_mpidr(target_cpu);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Validate the entry point and get the entry_point_info */
+	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/*
+	 * To turn this cpu on, specify which power
+	 * levels need to be turned on
+	 */
+	return psci_cpu_on_start(target_cpu, &ep);
+}
+
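+/*
+ * Return the implemented PSCI version, with the major and minor version
+ * fields combined into a single word as defined by the PSCI specification.
+ */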
+unsigned int psci_version(void)
+{
+	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+int psci_cpu_suspend(unsigned int power_state,
+		     uintptr_t entrypoint,
+		     u_register_t context_id)
+{
+	int rc;
+	unsigned int target_pwrlvl, is_power_down_state;
+	entry_point_info_t ep;
+	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+	plat_local_state_t cpu_pd_state;
+
+	/* Validate the power_state parameter */
+	rc = psci_validate_power_state(power_state, &state_info);
+	if (rc != PSCI_E_SUCCESS) {
+		assert(rc == PSCI_E_INVALID_PARAMS);
+		return rc;
+	}
+
+	/*
+	 * Get the value of the state type bit from the power state parameter.
+	 */
+	is_power_down_state = psci_get_pstate_type(power_state);
+
+	/* Sanity check the requested suspend levels */
+	assert(psci_validate_suspend_req(&state_info, is_power_down_state)
+			== PSCI_E_SUCCESS);
+
+	target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
+	if (target_pwrlvl == PSCI_INVALID_PWR_LVL) {
+		ERROR("Invalid target power level for suspend operation\n");
+		panic();
+	}
+
+	/* Fast path for CPU standby. */
+	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
+		if (!psci_plat_pm_ops->cpu_standby)
+			return PSCI_E_INVALID_PARAMS;
+
+		/*
+		 * Set the state of the CPU power domain to the platform
+		 * specific retention state and enter the standby state.
+		 */
+		cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
+		psci_set_cpu_local_state(cpu_pd_state);
+
+#if ENABLE_PSCI_STAT
+		plat_psci_stat_accounting_start(&state_info);
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_ENTER_HW_LOW_PWR,
+		    PMF_NO_CACHE_MAINT);
+#endif
+
+		psci_plat_pm_ops->cpu_standby(cpu_pd_state);
+
+		/* Upon exit from standby, set the state back to RUN. */
+		psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_EXIT_HW_LOW_PWR,
+		    PMF_NO_CACHE_MAINT);
+#endif
+
+#if ENABLE_PSCI_STAT
+		plat_psci_stat_accounting_stop(&state_info);
+
+		/* Update PSCI stats */
+		psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info);
+#endif
+
+		return PSCI_E_SUCCESS;
+	}
+
+	/*
+	 * If a power down state has been requested, we need to verify entry
+	 * point and program entry information.
+	 */
+	if (is_power_down_state) {
+		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+		if (rc != PSCI_E_SUCCESS)
+			return rc;
+	}
+
+	/*
+	 * Do what is needed to enter the power down state. Upon success,
+	 * enter the final wfi which will power down this CPU. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * the arrival of an interrupt.
+	 */
+	psci_cpu_suspend_start(&ep,
+			    target_pwrlvl,
+			    &state_info,
+			    is_power_down_state);
+
+	return PSCI_E_SUCCESS;
+}
+
+
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
+{
+	int rc;
+	psci_power_state_t state_info;
+	entry_point_info_t ep;
+
+	/* Check if the current CPU is the last ON CPU in the system */
+	if (!psci_is_last_on_cpu())
+		return PSCI_E_DENIED;
+
+	/* Validate the entry point and get the entry_point_info */
+	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/* Query the psci_power_state for system suspend */
+	psci_query_sys_suspend_pwrstate(&state_info);
+
+	/* Ensure that the psci_power_state makes sense */
+	assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
+	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
+						== PSCI_E_SUCCESS);
+	assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+
+	/*
+	 * Do what is needed to enter the system suspend state. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * the arrival of an interrupt.
+	 */
+	psci_cpu_suspend_start(&ep,
+			    PLAT_MAX_PWR_LVL,
+			    &state_info,
+			    PSTATE_TYPE_POWERDOWN);
+
+	return PSCI_E_SUCCESS;
+}
+
+int psci_cpu_off(void)
+{
+	int rc;
+	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
+
+	 * Do what is needed to power off this CPU and possibly higher power
+	 * levels if it is able to do so. Upon success, enter the final wfi
+	 * levels if it able to do so. Upon success, enter the final wfi
+	 * which will power down this CPU.
+	 */
+	rc = psci_do_cpu_off(target_pwrlvl);
+
+	/*
+	 * The only error cpu_off can return is E_DENIED. So check if that's
+	 * indeed the case.
+	 */
+	assert(rc == PSCI_E_DENIED);
+
+	return rc;
+}
+
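+/*
+ * Report the affinity info state (ON/OFF/ON_PENDING) of the target CPU. Only
+ * queries at the CPU power level are supported.
+ */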
+int psci_affinity_info(u_register_t target_affinity,
+		       unsigned int lowest_affinity_level)
+{
+	int target_idx;
+
+	/* We don't support levels higher than PSCI_CPU_PWR_LVL */
+	if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Calculate the cpu index of the target */
+	target_idx = plat_core_pos_by_mpidr(target_affinity);
+	if (target_idx == -1)
+		return PSCI_E_INVALID_PARAMS;
+
+	return psci_get_aff_info_state_by_idx(target_idx);
+}
+
+int psci_migrate(u_register_t target_cpu)
+{
+	int rc;
+	u_register_t resident_cpu_mpidr;
+
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_UP_MIG_CAP)
+		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+	/*
+	 * Migrate should only be invoked on the CPU where
+	 * the Secure OS is resident.
+	 */
+	if (resident_cpu_mpidr != read_mpidr_el1())
+		return PSCI_E_NOT_PRESENT;
+
+	/* Check the validity of the specified target cpu */
+	rc = psci_validate_mpidr(target_cpu);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+
+	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+	return rc;
+}
+
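+/*
+ * Return the Trusted OS migrate capability as reported by the Secure Payload
+ * Dispatcher; the resident CPU MPIDR queried here is not used.
+ */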
+int psci_migrate_info_type(void)
+{
+	u_register_t resident_cpu_mpidr;
+
+	return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+long psci_migrate_info_up_cpu(void)
+{
+	u_register_t resident_cpu_mpidr;
+	int rc;
+
+	/*
+	 * The return value of this function depends upon what
+	 * psci_spd_migrate_info() returns.
+	 */
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
+		return PSCI_E_INVALID_PARAMS;
+
+	return resident_cpu_mpidr;
+}
+
+int psci_node_hw_state(u_register_t target_cpu,
+		       unsigned int power_level)
+{
+	int rc;
+
+	/* Validate target_cpu */
+	rc = psci_validate_mpidr(target_cpu);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Validate power_level against PLAT_MAX_PWR_LVL */
+	if (power_level > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * Dispatch this call to the platform to query the power controller, and
+	 * pass on to the caller what it returns.
+	 */
+	assert(psci_plat_pm_ops->get_node_hw_state);
+	rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level);
+	assert((rc >= HW_ON && rc <= HW_STANDBY) || rc == PSCI_E_NOT_SUPPORTED
+			|| rc == PSCI_E_INVALID_PARAMS);
+	return rc;
+}
+
+int psci_features(unsigned int psci_fid)
+{
+	unsigned int local_caps = psci_caps;
+
+	/* Check if it is a 64-bit function */
+	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
+		local_caps &= PSCI_CAP_64BIT_MASK;
+
+	/* Check for invalid fid */
+	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
+			&& is_psci_fid(psci_fid)))
+		return PSCI_E_NOT_SUPPORTED;
+
+
+	/* Check if the psci fid is supported or not */
+	if (!(local_caps & define_psci_cap(psci_fid)))
+		return PSCI_E_NOT_SUPPORTED;
+
+	/* Format the feature flags */
+	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
+			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+		/*
+		 * The trusted firmware does not support OS Initiated Mode.
+		 */
+		return (FF_PSTATE << FF_PSTATE_SHIFT) |
+			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+	}
+
+	/* Return 0 for all other FIDs */
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * PSCI top level handler for servicing SMCs.
+ ******************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
+			  u_register_t x1,
+			  u_register_t x2,
+			  u_register_t x3,
+			  u_register_t x4,
+			  void *cookie,
+			  void *handle,
+			  u_register_t flags)
+{
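+	/* PSCI is only exposed to the normal world; reject secure callers. */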
+	if (is_caller_secure(flags))
+		return SMC_UNK;
+
+	/* Check the fid against the capabilities */
+	if (!(psci_caps & define_psci_cap(smc_fid)))
+		return SMC_UNK;
+
+	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+		/* 32-bit PSCI function, clear top parameter bits */
+
+		x1 = (uint32_t)x1;
+		x2 = (uint32_t)x2;
+		x3 = (uint32_t)x3;
+
+		switch (smc_fid) {
+		case PSCI_VERSION:
+			return psci_version();
+
+		case PSCI_CPU_OFF:
+			return psci_cpu_off();
+
+		case PSCI_CPU_SUSPEND_AARCH32:
+			return psci_cpu_suspend(x1, x2, x3);
+
+		case PSCI_CPU_ON_AARCH32:
+			return psci_cpu_on(x1, x2, x3);
+
+		case PSCI_AFFINITY_INFO_AARCH32:
+			return psci_affinity_info(x1, x2);
+
+		case PSCI_MIG_AARCH32:
+			return psci_migrate(x1);
+
+		case PSCI_MIG_INFO_TYPE:
+			return psci_migrate_info_type();
+
+		case PSCI_MIG_INFO_UP_CPU_AARCH32:
+			return psci_migrate_info_up_cpu();
+
+		case PSCI_NODE_HW_STATE_AARCH32:
+			return psci_node_hw_state(x1, x2);
+
+		case PSCI_SYSTEM_SUSPEND_AARCH32:
+			return psci_system_suspend(x1, x2);
+
+		case PSCI_SYSTEM_OFF:
+			psci_system_off();
+			/* We should never return from psci_system_off() */
+
+		case PSCI_SYSTEM_RESET:
+			psci_system_reset();
+			/* We should never return from psci_system_reset() */
+
+		case PSCI_FEATURES:
+			return psci_features(x1);
+
+#if ENABLE_PSCI_STAT
+		case PSCI_STAT_RESIDENCY_AARCH32:
+			return psci_stat_residency(x1, x2);
+
+		case PSCI_STAT_COUNT_AARCH32:
+			return psci_stat_count(x1, x2);
+#endif
+		case PSCI_MEM_PROTECT:
+			return psci_mem_protect(x1);
+
+		case PSCI_MEM_CHK_RANGE_AARCH32:
+			return psci_mem_chk_range(x1, x2);
+
+		case PSCI_SYSTEM_RESET2_AARCH32:
+			/* We should never return from psci_system_reset2() */
+			return psci_system_reset2(x1, x2);
+
+		default:
+			break;
+		}
+	} else {
+		/* 64-bit PSCI function */
+
+		switch (smc_fid) {
+		case PSCI_CPU_SUSPEND_AARCH64:
+			return psci_cpu_suspend(x1, x2, x3);
+
+		case PSCI_CPU_ON_AARCH64:
+			return psci_cpu_on(x1, x2, x3);
+
+		case PSCI_AFFINITY_INFO_AARCH64:
+			return psci_affinity_info(x1, x2);
+
+		case PSCI_MIG_AARCH64:
+			return psci_migrate(x1);
+
+		case PSCI_MIG_INFO_UP_CPU_AARCH64:
+			return psci_migrate_info_up_cpu();
+
+		case PSCI_NODE_HW_STATE_AARCH64:
+			return psci_node_hw_state(x1, x2);
+
+		case PSCI_SYSTEM_SUSPEND_AARCH64:
+			return psci_system_suspend(x1, x2);
+
+#if ENABLE_PSCI_STAT
+		case PSCI_STAT_RESIDENCY_AARCH64:
+			return psci_stat_residency(x1, x2);
+
+		case PSCI_STAT_COUNT_AARCH64:
+			return psci_stat_count(x1, x2);
+#endif
+
+		case PSCI_MEM_CHK_RANGE_AARCH64:
+			return psci_mem_chk_range(x1, x2);
+
+		case PSCI_SYSTEM_RESET2_AARCH64:
+			/* We should never return from psci_system_reset2() */
+			return psci_system_reset2(x1, x2);
+
+		default:
+			break;
+		}
+	}
+
+	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+	return SMC_UNK;
+}
diff --git a/lib/psci/psci_mem_protect.c b/lib/psci/psci_mem_protect.c
new file mode 100644
index 0000000..fca84e9
--- /dev/null
+++ b/lib/psci/psci_mem_protect.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <utils.h>
+#include "psci_private.h"
+
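+/*
+ * Enable or disable memory protection via the platform hooks and return the
+ * previous enable state (non-zero if it was enabled), or PSCI_E_NOT_SUPPORTED
+ * if either platform hook fails.
+ */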
+int psci_mem_protect(unsigned int enable)
+{
+	int val;
+
+	assert(psci_plat_pm_ops->read_mem_protect);
+	assert(psci_plat_pm_ops->write_mem_protect);
+
+	if (psci_plat_pm_ops->read_mem_protect(&val) < 0)
+		return PSCI_E_NOT_SUPPORTED;
+	if (psci_plat_pm_ops->write_mem_protect(enable) < 0)
+		return PSCI_E_NOT_SUPPORTED;
+
+	return val != 0;
+}
+
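+/*
+ * Check whether the memory range [base, base + length) can be protected by
+ * the platform. Zero-length and overflowing ranges are denied.
+ */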
+int psci_mem_chk_range(uintptr_t base, u_register_t length)
+{
+	int ret;
+
+	assert(psci_plat_pm_ops->mem_protect_chk);
+
+	if (length == 0 || check_uptr_overflow(base, length-1))
+		return PSCI_E_DENIED;
+
+	ret = psci_plat_pm_ops->mem_protect_chk(base, length);
+	return (ret < 0) ? PSCI_E_DENIED : PSCI_E_SUCCESS;
+}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
new file mode 100644
index 0000000..231deea
--- /dev/null
+++ b/lib/psci/psci_off.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <pmf.h>
+#include <runtime_instr.h>
+#include <string.h>
+#include "psci_private.h"
+
+/******************************************************************************
+ * Construct the psci_power_state to request power OFF at all power levels.
+ ******************************************************************************/
+static void psci_set_power_off_state(psci_power_state_t *state_info)
+{
+	unsigned int lvl;
+
+	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu power domain off, power
+ * domains at higher levels will be turned off as far as possible. It finds
+ * the highest level where a domain has to be powered off by traversing the
+ * node information and then performs generic, architectural, platform setup
+ * and state management required to turn OFF that power domain and domains
+ * below it. E.g. for a cpu that's to be powered OFF, it could mean programming
+ * the power controller, whereas for a cluster that's to be powered off, it
+ * will call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster, and also program
+ * the power controller.
+ ******************************************************************************/
+int psci_do_cpu_off(unsigned int end_pwrlvl)
+{
+	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
+	psci_power_state_t state_info;
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_OFF platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_off);
+
+	/* Construct the psci_power_state for CPU_OFF */
+	psci_set_power_off_state(&state_info);
+
+	/*
+	 * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, a consistent snapshot
+	 * of the system topology is held and state management can be done
+	 * safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * Call the cpu off handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. Assume that the SPD always reports an
+	 * E_DENIED error if the SP refuses to power down.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_off) {
+		rc = psci_spd_pm->svc_off(0);
+		if (rc)
+			goto exit;
+	}
+
+	/*
+	 * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+	 * the end level specified.
+	 */
+	psci_do_state_coordination(end_pwrlvl, &state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level until end_pwrlvl */
+	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+	/*
+	 * Flush cache line so that even if CPU power down happens
+	 * the timestamp update is reflected in memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_ENTER_CFLUSH,
+		PMF_CACHE_MAINT);
+#endif
+
+	/*
+	 * Arch. management. Initiate power down sequence.
+	 */
+	psci_do_pwrdown_sequence(psci_find_max_off_lvl(&state_info));
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_EXIT_CFLUSH,
+		PMF_NO_CACHE_MAINT);
+#endif
+
+	/*
+	 * Plat. management: Perform platform specific actions to turn this
+	 * cpu off e.g. exit cpu coherency, program the power controller etc.
+	 */
+	psci_plat_pm_ops->pwr_domain_off(&state_info);
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_start(&state_info);
+#endif
+
+exit:
+	/*
+	 * Release the locks corresponding to each power level in the
+	 * reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * Check if all actions needed to safely power down this cpu have
+	 * successfully completed.
+	 */
+	if (rc == PSCI_E_SUCCESS) {
+		/*
+		 * Set the affinity info state to OFF. When caches are disabled,
+		 * this writes directly to main memory, so cache maintenance is
+		 * required to ensure that later cached reads of aff_info_state
+		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
+		 * update to the affinity info state prior to cache line
+		 * invalidation.
+		 */
+		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_set_aff_info_state(AFF_STATE_OFF);
+		psci_dsbish();
+		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+		/*
+		 * Update the timestamp with cache off.  We assume this
+		 * timestamp can only be read from the current CPU and the
+		 * timestamp cache line will be flushed before return to
+		 * normal world on wakeup.
+		 */
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_ENTER_HW_LOW_PWR,
+		    PMF_NO_CACHE_MAINT);
+#endif
+
+		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
+			/* This function must not return */
+			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
+		} else {
+			/*
+			 * Enter a wfi loop which will allow the power
+			 * controller to physically power down this cpu.
+			 */
+			psci_power_down_wfi();
+		}
+	}
+
+	return rc;
+}
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
new file mode 100644
index 0000000..53b044e
--- /dev/null
+++ b/lib/psci/psci_on.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <pubsub_events.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_info_state_t aff_state)
+{
+	if (aff_state == AFF_STATE_ON)
+		return PSCI_E_ALREADY_ON;
+
+	if (aff_state == AFF_STATE_ON_PENDING)
+		return PSCI_E_ON_PENDING;
+
+	assert(aff_state == AFF_STATE_OFF);
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It performs the generic, architectural, platform setup and state
+ * management to power on the target cpu e.g. it will ensure that
+ * enough information is stashed for it to resume execution in the non-secure
+ * security state.
+ *
+ * The state of all the relevant power domains is changed after calling the
+ * platform handler, as it can return an error.
+ ******************************************************************************/
+int psci_cpu_on_start(u_register_t target_cpu,
+		      entry_point_info_t *ep)
+{
+	int rc;
+	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
+	aff_info_state_t target_aff_state;
+
+	/* Calling function must supply valid input arguments */
+	assert((int) target_idx >= 0);
+	assert(ep != NULL);
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_ON platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_on &&
+			psci_plat_pm_ops->pwr_domain_on_finish);
+
+	/* Protect against multiple CPUs trying to turn ON the same target CPU */
+	psci_spin_lock_cpu(target_idx);
+
+	/*
+	 * Generic management: Ensure that the cpu to be turned on is
+	 * currently off.
+	 * Perform cache maintenance ahead of reading the target CPU state to
+	 * ensure that the data is not stale.
+	 * There is a theoretical edge case where the cache may contain stale
+	 * data for the target CPU - this can occur under the following
+	 * conditions:
+	 * - the target CPU is in another cluster from the current one
+	 * - the target CPU was the last CPU to shut down on its cluster
+	 * - the cluster was removed from coherency as part of the CPU shutdown
+	 *
+	 * In this case the cache maintenance that was performed as part of the
+	 * target CPU's shutdown was not seen by the current CPU's cluster. And
+	 * so the cache may contain stale data for the target CPU.
+	 */
+	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
+	if (rc != PSCI_E_SUCCESS)
+		goto exit;
+
+	/*
+	 * Call the cpu on handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. If the handler encounters an error, it
+	 * is expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_on)
+		psci_spd_pm->svc_on(target_cpu);
+
+	/*
+	 * Set the Affinity info state of the target cpu to ON_PENDING.
+	 * Flush aff_info_state as it will be accessed with caches
+	 * turned OFF.
+	 */
+	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+	/*
+	 * The cache line invalidation by the target CPU after setting the
+	 * state to OFF (see psci_do_cpu_off()), could cause the update to
+	 * aff_info_state to be invalidated. Retry the update if the target
+	 * CPU aff_info_state is not ON_PENDING.
+	 */
+	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
+	if (target_aff_state != AFF_STATE_ON_PENDING) {
+		assert(target_aff_state == AFF_STATE_OFF);
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific handling.
+	 */
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
+	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+	if (rc == PSCI_E_SUCCESS)
+		/* Store the re-entry information for the non-secure world. */
+		cm_init_context_by_index(target_idx, ep);
+	else {
+		/* Restore the state on error. */
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
+		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+	}
+
+exit:
+	psci_spin_unlock_cpu(target_idx);
+	return rc;
+}
+
+/*******************************************************************************
+ * The following function finishes an earlier power on request. It is called
+ * by the common finisher routine in psci_common.c. The `state_info` is the
+ * psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_on_finish(unsigned int cpu_idx,
+			psci_power_state_t *state_info)
+{
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * for this cpu e.g. enabling the gic or zeroing the mailbox
+	 * register. The actual state of this cpu has already been
+	 * changed.
+	 */
+	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
+
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+	/*
+	 * Arch. management: Enable data cache and manage stack memory
+	 */
+	psci_do_pwrup_cache_maintenance();
+#endif
+
+	/*
+	 * All the platform specific actions for turning this cpu
+	 * on have completed. Perform enough arch. initialization
+	 * to run in the non-secure address space.
+	 */
+	psci_arch_setup();
+
+	/*
+	 * Lock the CPU spin lock to make sure that the context initialization
+	 * is done. Since the lock is only used in this function to create
+	 * a synchronization point with cpu_on_start(), it can be released
+	 * immediately.
+	 */
+	psci_spin_lock_cpu(cpu_idx);
+	psci_spin_unlock_cpu(cpu_idx);
+
+	/* Ensure we have been explicitly woken up by another cpu */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
+	/*
+	 * Call the cpu on finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
+	 * error, it is expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+		psci_spd_pm->svc_on_finish(0);
+
+	PUBLISH_EVENT(psci_cpu_on_finish);
+
+	/* Populate the mpidr field within the cpu node array */
+	/* This needs to be done only once */
+	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the cpu_on
+	 * call to set this cpu on its way.
+	 */
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
new file mode 100644
index 0000000..504fb9e
--- /dev/null
+++ b/lib/psci/psci_private.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include <arch.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <cpu_data.h>
+#include <psci.h>
+#include <spinlock.h>
+
+#if HW_ASSISTED_COHERENCY
+
+/*
+ * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
+ * as PSCI participants are cache-coherent, and there's no need for explicit
+ * cache maintenance operations or barriers to coordinate their state.
+ */
+#define psci_flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+#define psci_dsbish()
+
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
+#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)
+
+#define psci_lock_get(non_cpu_pd_node)				\
+	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node)			\
+	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])
+
+#else
+
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
+ */
+#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)		flush_cpu_data(member)
+#define psci_inv_cpu_data(member)		inv_cpu_data(member)
+
+#define psci_dsbish()				dsbish()
+
+/*
+ * Use bakery locks for state coordination as not all PSCI participants are
+ * cache coherent.
+ */
+#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
+#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)
+
+#define psci_lock_get(non_cpu_pd_node)				\
+	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node)			\
+	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
+
+#endif
+
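+/* Initialize the index of the psci_locks entry used by a non-CPU power domain node */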
+#define psci_lock_init(non_cpu_pd_node, idx)			\
+	((non_cpu_pd_node)[(idx)].lock_index = (idx))
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP	\
+			(define_psci_cap(PSCI_VERSION) |		\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64 bit functions.
+ */
+#define PSCI_CAP_64BIT_MASK	\
+			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
+			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_MIG_AARCH64) |		\
+			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
+			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
+			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
+			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
+			define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
+			define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
+			define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
+
+/*
+ * Helper macros to get/set the fields of PSCI per-cpu data.
+ */
+#define psci_set_aff_info_state(aff_state) \
+		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_get_aff_info_state() \
+		get_cpu_data(psci_svc_cpu_data.aff_info_state)
+#define psci_get_aff_info_state_by_idx(idx) \
+		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(idx, aff_state) \
+		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
+					aff_state)
+#define psci_get_suspend_pwrlvl() \
+		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
+#define psci_set_suspend_pwrlvl(target_lvl) \
+		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
+#define psci_set_cpu_local_state(state) \
+		set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_get_cpu_local_state() \
+		get_cpu_data(psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(idx) \
+		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+
+/*
+ * Helper macros for the CPU level spinlocks
+ */
+#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+
+/* Helper macro to identify a CPU standby request in PSCI Suspend call */
+#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
+		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
+
+/*******************************************************************************
+ * The following two data structures implement the power domain tree. The tree
+ * is used to track the state of all the nodes i.e. power domain instances
+ * described by the platform. The tree consists of nodes that describe CPU power
+ * domains i.e. leaf nodes and all other power domains which are parents of a
+ * CPU power domain i.e. non-leaf nodes.
+ ******************************************************************************/
+typedef struct non_cpu_pwr_domain_node {
+	/*
+	 * Index of the first CPU power domain node level 0 which has this node
+	 * as its parent.
+	 */
+	unsigned int cpu_start_idx;
+
+	/*
+	 * Number of CPU power domains which are siblings of the domain indexed
+	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+	 * -> cpu_start_idx + ncpus' have this node as their parent.
+	 */
+	unsigned int ncpus;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+	 */
+	unsigned int parent_node;
+
+	plat_local_state_t local_state;
+
+	unsigned char level;
+
+	/* For indexing the psci_locks array */
+	unsigned char lock_index;
+} non_cpu_pd_node_t;
+
+typedef struct cpu_pwr_domain_node {
+	u_register_t mpidr;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+	 */
+	unsigned int parent_node;
+
+	/*
+	 * A CPU power domain does not require state coordination like its
+	 * parent power domains. Hence this node does not include a bakery
+	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
+	 * when multiple CPUs try to turn ON the same target CPU.
+	 */
+	spinlock_t cpu_lock;
+} cpu_pd_node_t;
+
+/*******************************************************************************
+ * Data prototypes
+ ******************************************************************************/
+extern const plat_psci_ops_t *psci_plat_pm_ops;
+extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
+extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+extern unsigned int psci_caps;
+
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*******************************************************************************
+ * SPD's power management hooks registered with PSCI
+ ******************************************************************************/
+extern const spd_pm_ops_t *psci_spd_pm;
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+/* Private exported functions from psci_common.c */
+int psci_validate_power_state(unsigned int power_state,
+			      psci_power_state_t *state_info);
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
+int psci_validate_mpidr(u_register_t mpidr);
+void psci_init_req_local_pwr_states(void);
+void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+				      psci_power_state_t *target_state);
+int psci_validate_entry_point(entry_point_info_t *ep,
+			uintptr_t entrypoint, u_register_t context_id);
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+				      unsigned int end_lvl,
+				      unsigned int node_index[]);
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+				psci_power_state_t *state_info);
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx);
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx);
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+			      unsigned int is_power_down_state_req);
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
+void psci_print_power_domain_map(void);
+unsigned int psci_is_last_on_cpu(void);
+int psci_spd_migrate_info(u_register_t *mpidr);
+void psci_do_pwrdown_sequence(unsigned int power_level);
+
+/*
+ * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
+ * available. Otherwise, this needs post-call stack maintenance, which is
+ * handled in assembly.
+ */
+void prepare_cpu_pwr_dwn(unsigned int power_level);
+
+/* Private exported functions from psci_on.c */
+int psci_cpu_on_start(u_register_t target_cpu,
+		      entry_point_info_t *ep);
+
+void psci_cpu_on_finish(unsigned int cpu_idx,
+			psci_power_state_t *state_info);
+
+/* Private exported functions from psci_off.c */
+int psci_do_cpu_off(unsigned int end_pwrlvl);
+
+/* Private exported functions from psci_suspend.c */
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+			unsigned int end_pwrlvl,
+			psci_power_state_t *state_info,
+			unsigned int is_power_down_state_req);
+
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+			psci_power_state_t *state_info);
+
+/* Private exported functions from psci_helpers.S */
+void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
+void psci_do_pwrup_cache_maintenance(void);
+
+/* Private exported functions from psci_system_off.c */
+void __dead2 psci_system_off(void);
+void __dead2 psci_system_reset(void);
+int psci_system_reset2(uint32_t reset_type, u_register_t cookie);
+
+/* Private exported functions from psci_stat.c */
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info);
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info);
+u_register_t psci_stat_residency(u_register_t target_cpu,
+			unsigned int power_state);
+u_register_t psci_stat_count(u_register_t target_cpu,
+			unsigned int power_state);
+
+/* Private exported functions from psci_mem_protect.c */
+int psci_mem_protect(unsigned int enable);
+int psci_mem_chk_range(uintptr_t base, u_register_t length);
+
+#endif /* __PSCI_PRIVATE_H__ */
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
new file mode 100644
index 0000000..a841dda
--- /dev/null
+++ b/lib/psci/psci_setup.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <errata_report.h>
+#include <platform.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * to the return to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants.
+ ******************************************************************************/
+static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the psci capability variable.
+ *****************************************************************************/
+unsigned int psci_caps;
+
+/*******************************************************************************
+ * Function which initializes the 'psci_non_cpu_pd_nodes' or the
+ * 'psci_cpu_pd_nodes' corresponding to the power level.
+ ******************************************************************************/
+static void psci_init_pwr_domain_node(unsigned int node_idx,
+					unsigned int parent_idx,
+					unsigned int level)
+{
+	if (level > PSCI_CPU_PWR_LVL) {
+		psci_non_cpu_pd_nodes[node_idx].level = level;
+		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
+		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+		psci_non_cpu_pd_nodes[node_idx].local_state =
+							 PLAT_MAX_OFF_STATE;
+	} else {
+		psci_cpu_data_t *svc_cpu_data;
+
+		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+
+		/* Initialize with an invalid mpidr */
+		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
+
+		svc_cpu_data =
+			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
+
+		/* Set the Affinity Info for the cores as OFF */
+		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
+
+		/* Invalidate the suspend level for the cpu */
+		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+
+		/* Set the power state to OFF state */
+		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
+						 sizeof(*svc_cpu_data));
+
+		cm_set_context_by_index(node_idx,
+					(void *) &psci_ns_context[node_idx],
+					NON_SECURE);
+	}
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
+ * the CPUs and checking whether they match the parent of the previous
+ * CPU. The basic assumption for this to work is that children of the same
+ * parent are allocated adjacent indices. The platform should ensure this
+ * through proper mapping of the CPUs to indices via the
+ * plat_core_pos_by_mpidr() and plat_my_core_pos() APIs.
+ *******************************************************************************/
+static void psci_update_pwrlvl_limits(void)
+{
+	int j;
+	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+
+	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+		psci_get_parent_pwr_domain_nodes(cpu_idx,
+						 PLAT_MAX_PWR_LVL,
+						 temp_index);
+		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+			if (temp_index[j] != nodes_idx[j]) {
+				nodes_idx[j] = temp_index[j];
+				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+					= cpu_idx;
+			}
+			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+		}
+	}
+}
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed by
+ * the platform is populated breadth-first and the first entry in the map
+ * informs the number of root power domains. The parent nodes of the root nodes
+ * will point to an invalid entry (-1).
+ ******************************************************************************/
+static void populate_power_domain_tree(const unsigned char *topology)
+{
+	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
+	unsigned int node_index = 0, parent_node_index = 0, num_children;
+	int level = PLAT_MAX_PWR_LVL;
+
+	/*
+	 * For each level the inputs are:
+	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl.
+	 *   This is the sum of values of nodes at the parent level.
+	 * - Index of first entry at this level in the plat_array i.e.
+	 *   parent_node_index.
+	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
+	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
+	 */
+	while (level >= PSCI_CPU_PWR_LVL) {
+		num_nodes_at_next_lvl = 0;
+		/*
+		 * For each entry (parent node) at this level in the plat_array:
+		 * - Find the number of children
+		 * - Allocate a node in a power domain array for each child
+		 * - Set the parent of the child to the parent_node_index - 1
+		 * - Increment parent_node_index to point to the next parent
+		 * - Accumulate the number of children at next level.
+		 */
+		for (i = 0; i < num_nodes_at_lvl; i++) {
+			assert(parent_node_index <=
+					PSCI_NUM_NON_CPU_PWR_DOMAINS);
+			num_children = topology[parent_node_index];
+
+			for (j = node_index;
+				j < node_index + num_children; j++)
+				psci_init_pwr_domain_node(j,
+							  parent_node_index - 1,
+							  level);
+
+			node_index = j;
+			num_nodes_at_next_lvl += num_children;
+			parent_node_index++;
+		}
+
+		num_nodes_at_lvl = num_nodes_at_next_lvl;
+		level--;
+
+		/* Reset the index for the cpu power domain array */
+		if (level == PSCI_CPU_PWR_LVL)
+			node_index = 0;
+	}
+
+	/* Validate the sanity of the array exported by the platform */
+	assert(j == PLATFORM_CORE_COUNT);
+}
+
+/*******************************************************************************
+ * This function does the architectural setup and takes the warm boot
+ * entry-point `mailbox_ep` as an argument. The function also initializes the
+ * power domain topology tree by querying the platform. The power domain nodes
+ * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
+ * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
+ * exports its static topology map through the
+ * plat_get_power_domain_tree_desc() API. The algorithm populates the
+ * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
+ * topology map. On a platform that implements two clusters of 2 cpus each,
+ * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
+ * look like this:
+ *
+ * ---------------------------------------------------
+ * | system node | cluster 0 node  | cluster 1 node  |
+ * ---------------------------------------------------
+ *
+ * And the populated psci_cpu_pd_nodes would look like this:
+ * <-    cpus cluster0   -><-   cpus cluster1   ->
+ * ------------------------------------------------
+ * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
+ * ------------------------------------------------
+ ******************************************************************************/
+int psci_setup(const psci_lib_args_t *lib_args)
+{
+	const unsigned char *topology_tree;
+
+	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));
+
+	/* Do the Architectural initialization */
+	psci_arch_setup();
+
+	/* Query the topology map from the platform */
+	topology_tree = plat_get_power_domain_tree_desc();
+
+	/* Populate the power domain arrays using the platform topology map */
+	populate_power_domain_tree(topology_tree);
+
+	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
+	psci_update_pwrlvl_limits();
+
+	/* Populate the mpidr field of cpu node for this CPU */
+	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
+		read_mpidr() & MPIDR_AFFINITY_MASK;
+
+	psci_init_req_local_pwr_states();
+
+	/*
+	 * Set the requested and target state of this CPU and all the higher
+	 * power domain levels for this CPU to run.
+	 */
+	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
+
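+	/*
+	 * Retrieve the platform's PSCI handlers, passing the warm boot entry
+	 * point (typically so the platform can program its mailbox).
+	 */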
+	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
+	assert(psci_plat_pm_ops);
+
+	/*
+	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
+	 * during warm boot, possibly before data cache is enabled.
+	 */
+	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
+					sizeof(psci_plat_pm_ops));
+
+	/* Initialize the psci capability */
+	psci_caps = PSCI_GENERIC_CAP;
+
+	if (psci_plat_pm_ops->pwr_domain_off)
+		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
+	if (psci_plat_pm_ops->pwr_domain_on &&
+			psci_plat_pm_ops->pwr_domain_on_finish)
+		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
+	if (psci_plat_pm_ops->pwr_domain_suspend &&
+			psci_plat_pm_ops->pwr_domain_suspend_finish) {
+		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
+		if (psci_plat_pm_ops->get_sys_suspend_power_state)
+			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
+	}
+	if (psci_plat_pm_ops->system_off)
+		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
+	if (psci_plat_pm_ops->system_reset)
+		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
+	if (psci_plat_pm_ops->get_node_hw_state)
+		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
+	if (psci_plat_pm_ops->read_mem_protect &&
+			psci_plat_pm_ops->write_mem_protect)
+		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
+	if (psci_plat_pm_ops->mem_protect_chk)
+		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
+	if (psci_plat_pm_ops->system_reset2)
+		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
+
+#if ENABLE_PSCI_STAT
+	psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
+	psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
+#endif
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This duplicates what the primary cpu did after a cold boot in BL1. The same
+ * needs to be done when a cpu is hotplugged in. This function could also
+ * override any EL3 setup done by BL1 as this code resides in RW memory.
+ ******************************************************************************/
+void psci_arch_setup(void)
+{
+	/* Program the counter frequency */
+	write_cntfrq_el0(plat_get_syscnt_freq2());
+
+	/* Initialize the cpu_ops pointer. */
+	init_cpu_ops();
+
+	/* Having initialized cpu_ops, we can now print errata status */
+	print_errata_status();
+}
+
+/******************************************************************************
+ * PSCI Library interface to initialize the cpu context for the next
+ * non-secure image during cold boot. The relevant registers in the cpu context
+ * need to be retrieved and programmed on return from this interface.
+ *****************************************************************************/
+void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
+{
+	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
+	cm_init_my_context(next_image_info);
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c
new file mode 100644
index 0000000..3e79c5d
--- /dev/null
+++ b/lib/psci/psci_stat.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include "psci_private.h"
+
+#ifndef PLAT_MAX_PWR_LVL_STATES
+#define PLAT_MAX_PWR_LVL_STATES 2
+#endif
+
+/* Following structure is used for PSCI STAT */
+typedef struct psci_stat {
+	u_register_t residency;
+	u_register_t count;
+} psci_stat_t;
+
+/*
+ * The following is used to keep track of the last cpu
+ * that goes to power down in non-cpu power domains.
+ */
+static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
+	[0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1] = -1};
+
+/*
+ * The following are used to store PSCI STAT values for
+ * CPU and non-CPU power domains.
+ */
+static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
+				[PLAT_MAX_PWR_LVL_STATES];
+static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+				[PLAT_MAX_PWR_LVL_STATES];
+
+/*
+ * This function returns the index into the `psci_stat_t` array given the
+ * local power state and power domain level. If the platform implements the
+ * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
+ */
+static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
+{
+	int idx;
+
+	if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
+		assert(PLAT_MAX_PWR_LVL_STATES == 2);
+		if (is_local_state_retn(local_state))
+			return 0;
+
+		assert(is_local_state_off(local_state));
+		return 1;
+	}
+
+	idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
+	assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
+	return idx;
+}
+
+/*******************************************************************************
+ * This function is passed the target local power states for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl).
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * updates `last_cpu_in_non_cpu_pd[]` with the id of the last cpu to power down.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ ******************************************************************************/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info)
+{
+	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info);
+
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+		/* Break early if the target power state is RUN */
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+			break;
+
+		/*
+		 * The power domain is entering a low power state, so this is
+		 * the last CPU for this power domain
+		 */
+		last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+}
+
+/*******************************************************************************
+ * This function updates the PSCI STATS (residency time and count) for CPU
+ * and non-CPU power domains.
+ * It is called with caches enabled and locks acquired (for non-CPU domains).
+ ******************************************************************************/
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info)
+{
+	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+	int stat_idx;
+	plat_local_state_t local_state;
+	u_register_t residency;
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info);
+
+	/* Get the index into the stats array */
+	local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+	stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
+
+	/* Call into platform interface to calculate residency. */
+	residency = plat_psci_stat_get_residency(PSCI_CPU_PWR_LVL,
+	    state_info, cpu_idx);
+
+	/* Update CPU stats. */
+	psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
+	psci_cpu_stat[cpu_idx][stat_idx].count++;
+
+	/*
+	 * Check what power domains above CPU were off
+	 * prior to this CPU powering on.
+	 */
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+		local_state = state_info->pwr_domain_state[lvl];
+		if (is_local_state_run(local_state)) {
+			/* Break early */
+			break;
+		}
+
+		assert(last_cpu_in_non_cpu_pd[parent_idx] != -1);
+
+		/* Call into platform interface to calculate residency. */
+		residency = plat_psci_stat_get_residency(lvl, state_info,
+		    last_cpu_in_non_cpu_pd[parent_idx]);
+
+		/* Initialize back to reset value */
+		last_cpu_in_non_cpu_pd[parent_idx] = -1;
+
+		/* Get the index into the stats array */
+		stat_idx = get_stat_idx(local_state, lvl);
+
+		/* Update non cpu stats */
+		psci_non_cpu_stat[parent_idx][stat_idx].residency += residency;
+		psci_non_cpu_stat[parent_idx][stat_idx].count++;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+}
+
+/*******************************************************************************
+ * This function returns the appropriate count and residency time of the
+ * local state for the highest power level expressed in the `power_state`
+ * for the node represented by `target_cpu`.
+ ******************************************************************************/
+static int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
+			 psci_stat_t *psci_stat)
+{
+	int rc;
+	unsigned int pwrlvl, lvl, parent_idx, stat_idx, target_idx;
+	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+	plat_local_state_t local_state;
+
+	/* Validate the target_cpu parameter and determine the cpu index */
+	target_idx = plat_core_pos_by_mpidr(target_cpu);
+	if (target_idx == -1)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Validate the power_state parameter */
+	if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
+		rc = psci_validate_power_state(power_state, &state_info);
+	else
+		rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
+				target_cpu, power_state, &state_info);
+
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Find the highest power level */
+	pwrlvl = psci_find_target_suspend_lvl(&state_info);
+	if (pwrlvl == PSCI_INVALID_PWR_LVL) {
+		ERROR("Invalid target power level for PSCI statistics operation\n");
+		panic();
+	}
+
+	/* Get the index into the stats array */
+	local_state = state_info.pwr_domain_state[pwrlvl];
+	stat_idx = get_stat_idx(local_state, pwrlvl);
+
+	if (pwrlvl > PSCI_CPU_PWR_LVL) {
+		/* Get the power domain index */
+		parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
+		for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
+			parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+
+		/* Get the non cpu power domain stats */
+		*psci_stat = psci_non_cpu_stat[parent_idx][stat_idx];
+	} else {
+		/* Get the cpu power domain stats */
+		*psci_stat = psci_cpu_stat[target_idx][stat_idx];
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */
+u_register_t psci_stat_residency(u_register_t target_cpu,
+		unsigned int power_state)
+{
+	psci_stat_t psci_stat;
+	int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+
+	if (rc == PSCI_E_SUCCESS)
+		return psci_stat.residency;
+	else
+		return 0;
+}
+
+/* This is the top level function for PSCI_STAT_COUNT SMC. */
+u_register_t psci_stat_count(u_register_t target_cpu,
+	unsigned int power_state)
+{
+	psci_stat_t psci_stat;
+	int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+
+	if (rc == PSCI_E_SUCCESS)
+		return psci_stat.count;
+	else
+		return 0;
+}
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
new file mode 100644
index 0000000..40ecdee
--- /dev/null
+++ b/lib/psci/psci_suspend.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cpu_data.h>
+#include <debug.h>
+#include <platform.h>
+#include <pmf.h>
+#include <runtime_instr.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function does generic and platform specific operations after a wake-up
+ * from standby/retention states at multiple power levels.
+ ******************************************************************************/
+static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
+					     unsigned int end_pwrlvl)
+{
+	psci_power_state_t state_info;
+
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				cpu_idx);
+
+	/*
+	 * Find out which retention states this CPU has exited, up to the
+	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
+	 * state as a result of state coordination amongst other CPUs post wfi.
+	 */
+	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
+	/*
+	 * Plat. management: Allow the platform to do operations
+	 * on waking up from retention.
+	 */
+	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);
+
+	/*
+	 * Set the requested and target state of this CPU and all the higher
+	 * power domain levels for this CPU to run.
+	 */
+	psci_set_pwr_domains_to_run(end_pwrlvl);
+
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				cpu_idx);
+}
+
+/*******************************************************************************
+ * This function does generic and platform specific suspend to power down
+ * operations.
+ ******************************************************************************/
+static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
+					  entry_point_info_t *ep,
+					  psci_power_state_t *state_info)
+{
+	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+
+	/* Save PSCI target power level for the suspend finisher handler */
+	psci_set_suspend_pwrlvl(end_pwrlvl);
+
+	/*
+	 * Flush the target power level as it might be accessed on power up with
+	 * the data cache disabled.
+	 */
+	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+
+	/*
+	 * Call the cpu suspend handler registered by the Secure Payload
+	 * Dispatcher to let it do any book-keeping. If the handler encounters an
+	 * error, it is expected to assert within the handler.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_suspend)
+		psci_spd_pm->svc_suspend(max_off_lvl);
+
+#if !HW_ASSISTED_COHERENCY
+	/*
+	 * Plat. management: Allow the platform to perform any early
+	 * actions required to power down the CPU. This might be useful for
+	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
+	 * actions with data caches enabled.
+	 */
+	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
+		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
+#endif
+
+	/*
+	 * Store the re-entry information for the non-secure world.
+	 */
+	cm_init_my_context(ep);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+	/*
+	 * Flush cache line so that even if CPU power down happens
+	 * the timestamp update is reflected in memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_ENTER_CFLUSH,
+		PMF_CACHE_MAINT);
+#endif
+
+	/*
+	 * Arch. management. Initiate power down sequence.
+	 * TODO : Introduce a mechanism to query the cache level to flush
+	 * and the cpu-ops power down to perform from the platform.
+	 */
+	psci_do_pwrdown_sequence(max_off_lvl);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_EXIT_CFLUSH,
+		PMF_NO_CACHE_MAINT);
+#endif
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to suspend its execution.
+ * It is assumed that along with suspending the cpu power domain, power domains
+ * at higher levels up to the target power level will be suspended as well. It
+ * coordinates with the platform to negotiate the target state for each of
+ * the power domain levels up to the target power domain level. It then performs
+ * the generic, architectural and platform setup and state management required
+ * to suspend that power domain level and the power domain levels below it.
+ * e.g. For a cpu that's to be suspended, it could mean programming the
+ * power controller, whereas for a cluster that's to be suspended, it will call
+ * the platform specific code which will disable coherency at the interconnect
+ * level if the cpu is the last in the cluster, and also program the power
+ * controller.
+ *
+ * All the required parameter checks are performed at the beginning and after
+ * the state transition has been done, no further error is expected and it is
+ * not possible to undo any of the actions taken beyond that point.
+ ******************************************************************************/
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+			    unsigned int end_pwrlvl,
+			    psci_power_state_t *state_info,
+			    unsigned int is_power_down_state)
+{
+	int skip_wfi = 0;
+	unsigned int idx = plat_my_core_pos();
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_SUSPEND platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_suspend &&
+			psci_plat_pm_ops->pwr_domain_suspend_finish);
+
+	/*
+	 * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, the system topology
+	 * has been snapshotted and state management can be done safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * We check if there are any pending interrupts after the delay
+	 * introduced by lock contention to increase the chances of early
+	 * detection that a wake-up interrupt has fired.
+	 */
+	if (read_isr_el1()) {
+		skip_wfi = 1;
+		goto exit;
+	}
+
+	/*
+	 * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+	 * the end level specified.
+	 */
+	psci_do_state_coordination(end_pwrlvl, state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level till end_pwrlvl */
+	psci_stats_update_pwr_down(end_pwrlvl, state_info);
+#endif
+
+	if (is_power_down_state)
+		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
+
+	/*
+	 * Plat. management: Allow the platform to perform the
+	 * necessary actions to turn off this cpu e.g. set the
+	 * platform defined mailbox with the psci entrypoint,
+	 * program the power controller etc.
+	 */
+	psci_plat_pm_ops->pwr_domain_suspend(state_info);
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_start(state_info);
+#endif
+
+exit:
+	/*
+	 * Release the locks corresponding to each power level in the
+	 * reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				  idx);
+	if (skip_wfi)
+		return;
+
+	if (is_power_down_state) {
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+		/*
+		 * Update the timestamp with cache off.  We assume this
+		 * timestamp can only be read from the current CPU and the
+		 * timestamp cache line will be flushed before return to
+		 * normal world on wakeup.
+		 */
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_ENTER_HW_LOW_PWR,
+		    PMF_NO_CACHE_MAINT);
+#endif
+
+		/* The function calls below must not return */
+		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
+			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
+		else
+			psci_power_down_wfi();
+	}
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+	    RT_INSTR_ENTER_HW_LOW_PWR,
+	    PMF_NO_CACHE_MAINT);
+#endif
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_start(state_info);
+#endif
+
+	/*
+	 * We will reach here if only retention/standby states have been
+	 * requested at multiple power levels. This means that the cpu
+	 * context will be preserved.
+	 */
+	wfi();
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_stop(state_info);
+	psci_stats_update_pwr_up(end_pwrlvl, state_info);
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+	    RT_INSTR_EXIT_HW_LOW_PWR,
+	    PMF_NO_CACHE_MAINT);
+#endif
+
+	/*
+	 * After we wake up from context retaining suspend, call the
+	 * context retaining suspend finisher.
+	 */
+	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
+}
+
+/*******************************************************************************
+ * The following function finishes an earlier suspend request. It is
+ * called by the common finisher routine in psci_common.c. The `state_info`
+ * is the psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+			     psci_power_state_t *state_info)
+{
+	unsigned int counter_freq;
+	unsigned int max_off_lvl;
+
+	/* Ensure we have been woken up from a suspended state */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
+			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * before we change the state of the cpu e.g. enabling the
+	 * gic or zeroing the mailbox register. If anything goes
+	 * wrong then assert as there is no way to recover from this
+	 * situation.
+	 */
+	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+	/* Arch. management: Enable the data cache, stack memory maintenance. */
+	psci_do_pwrup_cache_maintenance();
+#endif
+
+	/* Re-init the cntfrq_el0 register */
+	counter_freq = plat_get_syscnt_freq2();
+	write_cntfrq_el0(counter_freq);
+
+	/*
+	 * Call the cpu suspend finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
+	 * error, it is expected to assert within the handler.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
+		max_off_lvl = psci_find_max_off_lvl(state_info);
+		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
+		psci_spd_pm->svc_suspend_finish(max_off_lvl);
+	}
+
+	/* Invalidate the suspend level for the cpu */
+	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the suspend
+	 * call to set this cpu on its way.
+	 */
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
new file mode 100644
index 0000000..13e9f4a
--- /dev/null
+++ b/lib/psci/psci_system_off.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <platform.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+void __dead2 psci_system_off(void)
+{
+	psci_print_power_domain_map();
+
+	assert(psci_plat_pm_ops->system_off);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+		psci_spd_pm->svc_system_off();
+	}
+
+	console_flush();
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_off();
+
+	/* This function does not return. We should never get here */
+}
+
+void __dead2 psci_system_reset(void)
+{
+	psci_print_power_domain_map();
+
+	assert(psci_plat_pm_ops->system_reset);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+		psci_spd_pm->svc_system_reset();
+	}
+
+	console_flush();
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_reset();
+
+	/* This function does not return. We should never get here */
+}
+
+int psci_system_reset2(uint32_t reset_type, u_register_t cookie)
+{
+	int is_vendor;
+
+	psci_print_power_domain_map();
+
+	assert(psci_plat_pm_ops->system_reset2);
+
+	is_vendor = (reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1;
+	if (!is_vendor) {
+		/*
+		 * Only WARM_RESET is allowed for architectural type resets.
+		 */
+		if (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET)
+			return PSCI_E_INVALID_PARAMS;
+		if (psci_plat_pm_ops->write_mem_protect &&
+		    psci_plat_pm_ops->write_mem_protect(0) < 0) {
+			return PSCI_E_NOT_SUPPORTED;
+		}
+	}
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+		psci_spd_pm->svc_system_reset();
+	}
+	console_flush();
+
+	return psci_plat_pm_ops->system_reset2(is_vendor, reset_type, cookie);
+}
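+
+/*
+ * Note: in the SYSTEM_RESET2 `reset_type` encoding, the bit selected by
+ * PSCI_RESET2_TYPE_VENDOR_SHIFT distinguishes vendor-specific resets from
+ * architectural ones, and SYSTEM_WARM_RESET is the only architectural type
+ * accepted here, e.g. (sketch):
+ *   psci_system_reset2(PSCI_RESET2_SYSTEM_WARM_RESET, 0);
+ */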
diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S
new file mode 100644
index 0000000..aced3d1
--- /dev/null
+++ b/lib/semihosting/aarch32/semihosting_call.S
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+	.globl	semihosting_call
+
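+/*
+ * SVC 0x123456 is the semihosting trap for AArch32 ARM state: the operation
+ * number is passed in r0, a pointer to the argument block in r1, and the
+ * result is returned in r0.
+ */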
+func semihosting_call
+	svc	#0x123456
+	bx	lr
+endfunc semihosting_call
diff --git a/lib/semihosting/aarch64/semihosting_call.S b/lib/semihosting/aarch64/semihosting_call.S
index e6a9675..97d2bca 100644
--- a/lib/semihosting/aarch64/semihosting_call.S
+++ b/lib/semihosting/aarch64/semihosting_call.S
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <asm_macros.S>
@@ -35,3 +11,4 @@
 func semihosting_call
 	hlt	#0xf000
 	ret
+endfunc semihosting_call
diff --git a/lib/semihosting/semihosting.c b/lib/semihosting/semihosting.c
index 849ec12..2ba43f3 100644
--- a/lib/semihosting/semihosting.c
+++ b/lib/semihosting/semihosting.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <assert.h>
@@ -125,6 +101,7 @@
 			    const uintptr_t buffer)
 {
 	smh_file_read_write_block_t write_block;
+	long result = -EINVAL;
 
 	if ((length == NULL) || (buffer == (uintptr_t)NULL))
 		return -EINVAL;
@@ -133,10 +110,12 @@
 	write_block.buffer = (uintptr_t)buffer; /* cast away const */
 	write_block.length = *length;
 
-	*length = semihosting_call(SEMIHOSTING_SYS_WRITE,
+	result = semihosting_call(SEMIHOSTING_SYS_WRITE,
 				   (void *) &write_block);
 
-	return *length;
+	*length = result;
+
+	return (result == 0) ? 0 : -EINVAL;
 }
 
 long semihosting_file_close(long file_handle)
diff --git a/lib/stack_protector/aarch32/asm_stack_protector.S b/lib/stack_protector/aarch32/asm_stack_protector.S
new file mode 100644
index 0000000..19b7525
--- /dev/null
+++ b/lib/stack_protector/aarch32/asm_stack_protector.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	update_stack_protector_canary
+
+/* -----------------------------------------------------------------------
+ * void update_stack_protector_canary(void)
+ *
+ * Change the value of the canary used for stack smashing protection.
+ * Note: This must be called when it is safe to call C code, but it cannot be
+ * called from C code itself: doing so would make the canary check fail when
+ * the calling function returns.
+ * -----------------------------------------------------------------------
+ */
+
+func update_stack_protector_canary
+	/* Use r4 as it is callee-saved */
+	mov	r4, lr
+	bl	plat_get_stack_protector_canary
+
+	/* Update the canary with the returned value */
+	ldr	r1,  =__stack_chk_guard
+	str	r0, [r1]
+	bx	r4
+endfunc update_stack_protector_canary
+
+
diff --git a/lib/stack_protector/aarch64/asm_stack_protector.S b/lib/stack_protector/aarch64/asm_stack_protector.S
new file mode 100644
index 0000000..c2245d3
--- /dev/null
+++ b/lib/stack_protector/aarch64/asm_stack_protector.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+	.globl	update_stack_protector_canary
+
+/* -----------------------------------------------------------------------
+ * void update_stack_protector_canary(void)
+ *
+ * Change the value of the canary used for stack smashing protection.
+ * Note: This must be called when it is safe to call C code, but it cannot be
+ * called from C code itself: doing so would make the canary check fail when
+ * the calling function returns.
+ * -----------------------------------------------------------------------
+ */
+
+func update_stack_protector_canary
+	/* Use x19 as it is callee-saved */
+	mov	x19, x30
+	bl	plat_get_stack_protector_canary
+
+	/* Update the canary with the returned value */
+	adrp	x1,  __stack_chk_guard
+	str	x0, [x1, #:lo12:__stack_chk_guard]
+	ret	x19
+endfunc update_stack_protector_canary
+
+
diff --git a/lib/stack_protector/stack_protector.c b/lib/stack_protector/stack_protector.c
new file mode 100644
index 0000000..fba5e1f
--- /dev/null
+++ b/lib/stack_protector/stack_protector.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <debug.h>
+#include <platform.h>
+#include <stdint.h>
+
+/*
+ * Canary value used by the compiler runtime checks to detect stack corruption.
+ *
+ * Force the canary to be in .data to allow a predictable memory layout relative
+ * to the stacks.
+ */
+u_register_t  __attribute__((section(".data.stack_protector_canary")))
+	__stack_chk_guard = (u_register_t) 3288484550995823360ULL;
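+
+/*
+ * Note: update_stack_protector_canary() (see the per-arch assembly) refreshes
+ * this value by calling the platform-provided plat_get_stack_protector_canary().
+ * A minimal sketch of such a hook, assuming a hypothetical entropy helper:
+ *
+ *   u_register_t plat_get_stack_protector_canary(void)
+ *   {
+ *       return (u_register_t)plat_get_entropy();
+ *   }
+ */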
+
+/*
+ * Function called when the stack's canary check fails, which means the stack
+ * was corrupted. It must not return.
+ */
+void __dead2 __stack_chk_fail(void)
+{
+#if DEBUG
+	ERROR("Stack corruption detected\n");
+#endif
+	panic();
+}
+
diff --git a/lib/stack_protector/stack_protector.mk b/lib/stack_protector/stack_protector.mk
new file mode 100644
index 0000000..0f0d90f
--- /dev/null
+++ b/lib/stack_protector/stack_protector.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Boolean macro to be used in C code
+STACK_PROTECTOR_ENABLED := 0
+
+ifneq (${ENABLE_STACK_PROTECTOR},0)
+STACK_PROTECTOR_ENABLED := 1
+BL_COMMON_SOURCES	+=	lib/stack_protector/stack_protector.c			\
+				lib/stack_protector/${ARCH}/asm_stack_protector.S
+
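+# ENABLE_STACK_PROTECTOR is expected to name a compiler stack-protector
+# variant (e.g. "strong" or "all"), so the flag below expands to something
+# like -fstack-protector-strong or -fstack-protector-all.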
+TF_CFLAGS		+=	-fstack-protector-${ENABLE_STACK_PROTECTOR}
+endif
+
+$(eval $(call add_define,STACK_PROTECTOR_ENABLED))
+
diff --git a/lib/stdlib/abort.c b/lib/stdlib/abort.c
index 862bf9c..af19ccf 100644
--- a/lib/stdlib/abort.c
+++ b/lib/stdlib/abort.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <debug.h>
diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c
index 90a1afe..97fab4b 100644
--- a/lib/stdlib/assert.c
+++ b/lib/stdlib/assert.c
@@ -1,41 +1,36 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
+#include <console.h>
 #include <debug.h>
+#include <platform.h>
 
 /*
- * This is a basic implementation. This could be improved.
- */
-void __assert (const char *function, const char *file, unsigned int line,
-		const char *assertion)
+ * Only print the output if PLAT_LOG_LEVEL_ASSERT is higher than or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
+
+#if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_VERBOSE
+void __assert(const char *file, unsigned int line, const char *assertion)
 {
-	tf_printf("ASSERT: %s <%d> : %s\n", function, line, assertion);
-	while(1);
+	tf_printf("ASSERT: %s:%d:%s\n", file, line, assertion);
+	console_flush();
+	plat_panic_handler();
 }
+#elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
+void __assert(const char *file, unsigned int line)
+{
+	tf_printf("ASSERT: %s:%d\n", file, line);
+	console_flush();
+	plat_panic_handler();
+}
+#else
+void __assert(void)
+{
+	plat_panic_handler();
+}
+#endif
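+
+/*
+ * Note: the signature of __assert() changes with PLAT_LOG_LEVEL_ASSERT, so the
+ * assert() macro in the matching header is expected to pass the file, line and
+ * expression arguments only to the variants that consume them.
+ */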
diff --git a/lib/stdlib/exit.c b/lib/stdlib/exit.c
index 3e77591..3d23d7b 100644
--- a/lib/stdlib/exit.c
+++ b/lib/stdlib/exit.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <debug.h>
diff --git a/lib/stdlib/mem.c b/lib/stdlib/mem.c
index f1f335a..65b62fd 100644
--- a/lib/stdlib/mem.c
+++ b/lib/stdlib/mem.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stddef.h> /* size_t */
@@ -48,10 +24,10 @@
  */
 int memcmp(const void *s1, const void *s2, size_t len)
 {
-	const char *s = s1;
-	const char *d = s2;
-	char dc;
-	char sc;
+	const unsigned char *s = s1;
+	const unsigned char *d = s2;
+	unsigned char sc;
+	unsigned char dc;
 
 	while (len--) {
 		sc = *s++;
diff --git a/lib/stdlib/printf.c b/lib/stdlib/printf.c
index 323ec0f..f615641 100644
--- a/lib/stdlib/printf.c
+++ b/lib/stdlib/printf.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stdio.h>
diff --git a/lib/stdlib/putchar.c b/lib/stdlib/putchar.c
index 85e4fbd..8265667 100644
--- a/lib/stdlib/putchar.c
+++ b/lib/stdlib/putchar.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stdio.h>
diff --git a/lib/stdlib/puts.c b/lib/stdlib/puts.c
index ca88fc5..693a6bf 100644
--- a/lib/stdlib/puts.c
+++ b/lib/stdlib/puts.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stdio.h>
diff --git a/lib/stdlib/sscanf.c b/lib/stdlib/sscanf.c
index e9f5c4a..674ae79 100644
--- a/lib/stdlib/sscanf.c
+++ b/lib/stdlib/sscanf.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <sys/cdefs.h>
diff --git a/lib/stdlib/std.c b/lib/stdlib/std.c
deleted file mode 100644
index 5f6ef75..0000000
--- a/lib/stdlib/std.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/* Include the various implemented functions */
-#include "abort.c"
-#include "assert.c"
-#include "exit.c"
-#include "mem.c"
-#include "printf.c"
-#include "putchar.c"
-#include "puts.c"
-#include "sscanf.c"
-#include "strchr.c"
-#include "strcmp.c"
-#include "strlen.c"
-#include "strncmp.c"
-#include "subr_prf.c"
diff --git a/lib/stdlib/stdlib.mk b/lib/stdlib/stdlib.mk
new file mode 100644
index 0000000..8211623
--- /dev/null
+++ b/lib/stdlib/stdlib.mk
@@ -0,0 +1,25 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+STDLIB_SRCS	:=	$(addprefix lib/stdlib/,	\
+			abort.c				\
+			assert.c			\
+			exit.c				\
+			mem.c				\
+			printf.c			\
+			putchar.c			\
+			puts.c				\
+			sscanf.c			\
+			strchr.c			\
+			strcmp.c			\
+			strlen.c			\
+			strncmp.c			\
+			strnlen.c			\
+			subr_prf.c			\
+			timingsafe_bcmp.c)
+
+INCLUDES	+=	-Iinclude/lib/stdlib		\
+			-Iinclude/lib/stdlib/sys
diff --git a/lib/stdlib/strnlen.c b/lib/stdlib/strnlen.c
new file mode 100644
index 0000000..d48502b
--- /dev/null
+++ b/lib/stdlib/strnlen.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2009 David Schultz <das@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ */
+
+#include <sys/cdefs.h>
+
+#include <string.h>
+
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+	size_t len;
+
+	for (len = 0; len < maxlen; len++, s++) {
+		if (!*s)
+			break;
+	}
+	return (len);
+}
diff --git a/lib/stdlib/timingsafe_bcmp.c b/lib/stdlib/timingsafe_bcmp.c
new file mode 100644
index 0000000..d098158
--- /dev/null
+++ b/lib/stdlib/timingsafe_bcmp.c
@@ -0,0 +1,36 @@
+/*	$OpenBSD: timingsafe_bcmp.c,v 1.3 2015/08/31 02:53:57 guenther Exp $	*/
+/*
+ * Copyright (c) 2010 Damien Miller.  All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <string.h>
+
+int __timingsafe_bcmp(const void *, const void *, size_t);
+
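+/*
+ * Unlike memcmp(), this accumulates the XOR of every byte pair and only
+ * reports whether any difference was seen, so the execution time does not
+ * depend on the position of the first mismatch.
+ */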
+int
+__timingsafe_bcmp(const void *b1, const void *b2, size_t n)
+{
+	const unsigned char *p1 = b1, *p2 = b2;
+	int ret = 0;
+
+	for (; n > 0; n--)
+		ret |= *p1++ ^ *p2++;
+	return (ret != 0);
+}
+
+__weak_reference(__timingsafe_bcmp, timingsafe_bcmp);
diff --git a/lib/utils/mem_region.c b/lib/utils/mem_region.c
new file mode 100644
index 0000000..31c6231
--- /dev/null
+++ b/lib/utils/mem_region.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <utils.h>
+
+/*
+ * All the regions defined in mem_region_t must have the following properties
+ *
+ * - Any contiguous regions must be merged into a single entry.
+ * - The number of bytes of each region must be greater than zero.
+ * - The calculation of the highest address within the region (base + nbytes-1)
+ *   must not produce an overflow.
+ *
+ * These conditions must be fulfilled by the caller and they aren't checked
+ * at runtime.
+ */
+
+/*
+ * Zero (using zero_normalmem) all the regions defined in tbl.
+ * It assumes that the MMU is enabled and the memory is Normal memory.
+ * tbl must be a valid pointer to a mem_region_t array and
+ * nregions is the number of entries in the array.
+ */
+void clear_mem_regions(mem_region_t *tbl, size_t nregions)
+{
+	size_t i;
+
+	assert(tbl);
+	assert(nregions > 0);
+
+	for (i = 0; i < nregions; i++) {
+		assert(tbl->nbytes > 0);
+		assert(!check_uptr_overflow(tbl->base, tbl->nbytes-1));
+		zero_normalmem((void *) (tbl->base), tbl->nbytes);
+		tbl++;
+	}
+}
+
+/*
+ * This function checks whether the memory region [addr, addr + nbytes - 1] is
+ * fully covered by one of the regions defined in tbl.
+ * tbl must be a valid pointer to a mem_region_t array, nregions is the
+ * number of entries in the array, and the region described by addr and
+ * nbytes must not generate an overflow.
+ * Returns:
+ *  -1 means that the region is not covered by any of the regions
+ *     described in tbl.
+ *   0 the region (addr + nbytes-1) is covered by one of the regions described
+ *     in tbl
+ */
+int mem_region_in_array_chk(mem_region_t *tbl, size_t nregions,
+			    uintptr_t addr, size_t nbytes)
+{
+	uintptr_t region_start, region_end, start, end;
+	size_t i;
+
+	assert(tbl);
+	assert(nbytes > 0);
+	assert(!check_uptr_overflow(addr, nbytes-1));
+
+	region_start = addr;
+	region_end = addr + (nbytes - 1);
+	for (i = 0; i < nregions; i++) {
+		assert(tbl->nbytes > 0);
+		assert(!check_uptr_overflow(tbl->base, tbl->nbytes-1));
+		start = tbl->base;
+		end = start + (tbl->nbytes - 1);
+		if (region_start >= start && region_end <= end)
+			return 0;
+		tbl++;
+	}
+
+	return -1;
+}
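+
+/*
+ * Illustrative usage (sketch; DRAM_BASE/DRAM_SIZE are placeholder names): a
+ * platform implementing the PSCI MEM_PROTECT_CHECK_RANGE call can validate a
+ * requested range against its protectable regions like this:
+ *
+ *   static mem_region_t dram_regions[] = {
+ *       { .base = DRAM_BASE, .nbytes = DRAM_SIZE },
+ *   };
+ *   ret = mem_region_in_array_chk(dram_regions, ARRAY_SIZE(dram_regions),
+ *                                 addr, nbytes);
+ */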
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
new file mode 100644
index 0000000..c7e34f2
--- /dev/null
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables.h>
+#include "../xlat_tables_private.h"
+
+#define XLAT_TABLE_LEVEL_BASE	\
+       GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
+
+#define NUM_BASE_LEVEL_ENTRIES	\
+       GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
+
+static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
+		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
+
+#if ENABLE_ASSERTIONS
+static unsigned long long get_max_supported_pa(void)
+{
+	/* Physical address space size for long descriptor format. */
+	return (1ULL << 40) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+int xlat_arch_current_el(void)
+{
+	/*
+	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
+	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 */
+	return 3;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el __unused)
+{
+	return UPPER_ATTRS(XN);
+}
+
+void init_xlat_tables(void)
+{
+	unsigned long long max_pa;
+	uintptr_t max_va;
+	print_mmap();
+	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+						&max_va, &max_pa);
+
+	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in Secure PL1, assuming that the
+ * page-tables have already been created.
+ ******************************************************************************/
+void enable_mmu_secure(unsigned int flags)
+{
+	unsigned int mair0, ttbcr, sctlr;
+	uint64_t ttbr0;
+
+	assert(IS_IN_SECURE());
+	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+
+	/* Set attributes in the right indices of the MAIR */
+	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+	write_mair0(mair0);
+
+	/* Invalidate TLBs at the current exception level */
+	tlbiall();
+
+	/*
+	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
+	 */
+	if (flags & XLAT_TABLE_NC) {
+		/* Inner & outer non-cacheable non-shareable. */
+		ttbcr = TTBCR_EAE_BIT |
+			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
+			TTBCR_RGN0_INNER_NC |
+			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+	} else {
+		/* Inner & outer WBWA & shareable. */
+		ttbcr = TTBCR_EAE_BIT |
+			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+			TTBCR_RGN0_INNER_WBA |
+			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+	}
+	ttbcr |= TTBCR_EPD1_BIT;
+	write_ttbcr(ttbcr);
+
+	/* Set TTBR0 bits as well */
+	ttbr0 = (uintptr_t) base_xlation_table;
+	write64_ttbr0(ttbr0);
+	write64_ttbr1(0);
+
+	/*
+	 * Ensure all translation table writes have drained
+	 * into memory, the TLB invalidation is complete,
+	 * and translation register writes are committed
+	 * before enabling the MMU
+	 */
+	dsbish();
+	isb();
+
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
+
+	if (flags & DISABLE_DCACHE)
+		sctlr &= ~SCTLR_C_BIT;
+	else
+		sctlr |= SCTLR_C_BIT;
+
+	write_sctlr(sctlr);
+
+	/* Ensure the MMU enable takes effect immediately */
+	isb();
+}
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
new file mode 100644
index 0000000..28ae1f7
--- /dev/null
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <common_def.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include <utils.h>
+#include <xlat_tables.h>
+#include <xlat_tables_arch.h>
+#include "../xlat_tables_private.h"
+
+#define XLAT_TABLE_LEVEL_BASE	\
+       GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
+
+#define NUM_BASE_LEVEL_ENTRIES	\
+       GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
+
+static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
+		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
+
+static unsigned long long tcr_ps_bits;
+
+static unsigned long long calc_physical_addr_size_bits(
+					unsigned long long max_addr)
+{
+	/* Physical address can't exceed 48 bits */
+	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+
+	/* 48 bits address */
+	if (max_addr & ADDR_MASK_44_TO_47)
+		return TCR_PS_BITS_256TB;
+
+	/* 44 bits address */
+	if (max_addr & ADDR_MASK_42_TO_43)
+		return TCR_PS_BITS_16TB;
+
+	/* 42 bits address */
+	if (max_addr & ADDR_MASK_40_TO_41)
+		return TCR_PS_BITS_4TB;
+
+	/* 40 bits address */
+	if (max_addr & ADDR_MASK_36_TO_39)
+		return TCR_PS_BITS_1TB;
+
+	/* 36 bits address */
+	if (max_addr & ADDR_MASK_32_TO_35)
+		return TCR_PS_BITS_64GB;
+
+	return TCR_PS_BITS_4GB;
+}
+
+#if ENABLE_ASSERTIONS
+/* Physical Address ranges supported in the AArch64 Memory Model */
+static const unsigned int pa_range_bits_arr[] = {
+	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+	PARANGE_0101
+};
+
+static unsigned long long get_max_supported_pa(void)
+{
+	u_register_t pa_range = read_id_aa64mmfr0_el1() &
+						ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+	/* All other values are reserved */
+	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+int xlat_arch_current_el(void)
+{
+	int el = GET_EL(read_CurrentEl());
+
+	assert(el > 0);
+
+	return el;
+}
+
+uint64_t xlat_arch_get_xn_desc(int el)
+{
+	if (el == 3) {
+		return UPPER_ATTRS(XN);
+	} else {
+		assert(el == 1);
+		return UPPER_ATTRS(PXN);
+	}
+}
+
+void init_xlat_tables(void)
+{
+	unsigned long long max_pa;
+	uintptr_t max_va;
+	print_mmap();
+	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+			   &max_va, &max_pa);
+
+	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+
+	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+}
+
+/*******************************************************************************
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ *   _el:		Exception level at which the function will run
+ *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
+ *			be OR'ed with the default TCR value.
+ *   _tlbi_fct:		Function to invalidate the TLBs at the current
+ *			exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
+	void enable_mmu_el##_el(unsigned int flags)				\
+	{								\
+		uint64_t mair, tcr, ttbr;				\
+		uint32_t sctlr;						\
+									\
+		assert(IS_IN_EL(_el));					\
+		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
+									\
+		/* Set attributes in the right indices of the MAIR */	\
+		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
+		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
+				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
+		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
+				ATTR_NON_CACHEABLE_INDEX);		\
+		write_mair_el##_el(mair);				\
+									\
+		/* Invalidate TLBs at the current exception level */	\
+		_tlbi_fct();						\
+									\
+		/* Set TCR bits as well. */				\
+		/* Set T0SZ to (64 - width of virtual address space) */	\
+		if (flags & XLAT_TABLE_NC) {				\
+			/* Inner & outer non-cacheable non-shareable. */\
+			tcr = TCR_SH_NON_SHAREABLE |			\
+				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
+				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+		} else {						\
+			/* Inner & outer WBWA & shareable. */		\
+			tcr = TCR_SH_INNER_SHAREABLE |			\
+				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
+				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+		}							\
+		tcr |= _tcr_extra;					\
+		write_tcr_el##_el(tcr);					\
+									\
+		/* Set TTBR bits as well */				\
+		ttbr = (uint64_t) base_xlation_table;			\
+		write_ttbr0_el##_el(ttbr);				\
+									\
+		/* Ensure all translation table writes have drained */	\
+		/* into memory, the TLB invalidation is complete, */	\
+		/* and translation register writes are committed */	\
+		/* before enabling the MMU */				\
+		dsbish();						\
+		isb();							\
+									\
+		sctlr = read_sctlr_el##_el();				\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
+									\
+		if (flags & DISABLE_DCACHE)				\
+			sctlr &= ~SCTLR_C_BIT;				\
+		else							\
+			sctlr |= SCTLR_C_BIT;				\
+									\
+		write_sctlr_el##_el(sctlr);				\
+									\
+		/* Ensure the MMU enable takes effect immediately */	\
+		isb();							\
+	}
+
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1,
+		/*
+		 * TCR_EL1.EPD1: Disable translation table walk for addresses
+		 * that are translated using TTBR1_EL1.
+		 */
+		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
+		tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3,
+		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
+		tlbialle3)
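+
+/*
+ * Typical usage sequence (sketch): a BL image first describes its memory map
+ * with mmap_add()/mmap_add_region(), then calls init_xlat_tables(), and
+ * finally enables the MMU at its exception level, e.g. enable_mmu_el3(0) at
+ * EL3 or enable_mmu_el1(0) at EL1.
+ */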
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
new file mode 100644
index 0000000..c6fa10e
--- /dev/null
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <common_def.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <string.h>
+#include <types.h>
+#include <utils.h>
+#include <xlat_tables.h>
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+#define LVL0_SPACER ""
+#define LVL1_SPACER "  "
+#define LVL2_SPACER "    "
+#define LVL3_SPACER "      "
+#define get_level_spacer(level)		\
+			(((level) == U(0)) ? LVL0_SPACER : \
+			(((level) == U(1)) ? LVL1_SPACER : \
+			(((level) == U(2)) ? LVL2_SPACER : LVL3_SPACER)))
+#define debug_print(...) tf_printf(__VA_ARGS__)
+#else
+#define debug_print(...) ((void)0)
+#endif
+
+#define UNSET_DESC	~0ull
+
+static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
+
+static unsigned int next_xlat;
+static unsigned long long xlat_max_pa;
+static uintptr_t xlat_max_va;
+
+static uint64_t execute_never_mask;
+
+/*
+ * Array of all memory regions stored in order of ascending base address.
+ * The list is terminated by the first entry with size == 0.
+ */
+static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
+
+
+void print_mmap(void)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	debug_print("mmap:\n");
+	mmap_region_t *mm = mmap;
+	while (mm->size) {
+		debug_print(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+				(void *)mm->base_va, mm->base_pa,
+				mm->size, mm->attr);
+		++mm;
+	}
+	debug_print("\n");
+#endif
+}
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+			size_t size, mmap_attr_t attr)
+{
+	mmap_region_t *mm = mmap;
+	mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
+	unsigned long long end_pa = base_pa + size - 1;
+	uintptr_t end_va = base_va + size - 1;
+
+	assert(IS_PAGE_ALIGNED(base_pa));
+	assert(IS_PAGE_ALIGNED(base_va));
+	assert(IS_PAGE_ALIGNED(size));
+
+	if (!size)
+		return;
+
+	assert(base_pa < end_pa); /* Check for overflows */
+	assert(base_va < end_va);
+
+	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
+					(PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+	assert((base_pa + (unsigned long long)size - 1ULL) <=
+					(PLAT_PHY_ADDR_SPACE_SIZE - 1));
+
+#if ENABLE_ASSERTIONS
+
+	/* Check for PAs and VAs overlaps with all other regions */
+	for (mm = mmap; mm->size; ++mm) {
+
+		uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+
+		/*
+		 * Check if one of the regions is completely inside the other
+		 * one.
+		 */
+		int fully_overlapped_va =
+			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
+			((mm->base_va >= base_va) && (mm_end_va <= end_va));
+
+		/*
+		 * Full VA overlaps are only allowed if both regions are
+		 * identity mapped (zero offset) or have the same VA to PA
+		 * offset. Also, make sure that it's not the exact same area.
+		 */
+		if (fully_overlapped_va) {
+			assert((mm->base_va - mm->base_pa) ==
+			       (base_va - base_pa));
+			assert((base_va != mm->base_va) || (size != mm->size));
+		} else {
+			/*
+			 * If the regions do not have fully overlapping VAs,
+			 * then they must have fully separated VAs and PAs.
+			 * Partial overlaps are not allowed
+			 */
+
+			unsigned long long mm_end_pa =
+						     mm->base_pa + mm->size - 1;
+
+			int separated_pa =
+				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
+			int separated_va =
+				(end_va < mm->base_va) || (base_va > mm_end_va);
+
+			assert(separated_va && separated_pa);
+		}
+	}
+
+	mm = mmap; /* Restore pointer to the start of the array */
+
+#endif /* ENABLE_ASSERTIONS */
+
+	/* Find correct place in mmap to insert new region */
+	while (mm->base_va < base_va && mm->size)
+		++mm;
+
+	/*
+	 * If a section is contained inside another one with the same base
+	 * address, it must be placed after the one it is contained in:
+	 *
+	 * 1st |-----------------------|
+	 * 2nd |------------|
+	 * 3rd |------|
+	 *
+	 * This is required for mmap_region_attr() to get the attributes of the
+	 * small region correctly.
+	 */
+	while ((mm->base_va == base_va) && (mm->size > size))
+		++mm;
+
+	/* Make room for new region by moving other regions up by one place */
+	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
+
+	/* Check we haven't lost the empty sentinel from the end of the array */
+	assert(mm_last->size == 0);
+
+	mm->base_pa = base_pa;
+	mm->base_va = base_va;
+	mm->size = size;
+	mm->attr = attr;
+
+	if (end_pa > xlat_max_pa)
+		xlat_max_pa = end_pa;
+	if (end_va > xlat_max_va)
+		xlat_max_va = end_va;
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+	while (mm->size) {
+		mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
+		++mm;
+	}
+}
+
+static uint64_t mmap_desc(mmap_attr_t attr, unsigned long long addr_pa,
+							unsigned int level)
+{
+	uint64_t desc;
+	int mem_type;
+
+	/* Make sure that the granularity is fine enough to map this address. */
+	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+
+	desc = addr_pa;
+	/*
+	 * There are different translation table descriptors for level 3 and the
+	 * rest.
+	 */
+	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
+	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	/*
+	 * Deduce shareability domain and executability of the memory region
+	 * from the memory type.
+	 *
+	 * Data accesses to device memory and non-cacheable normal memory are
+	 * coherent for all observers in the system, and correspondingly are
+	 * always treated as being Outer Shareable. Therefore, for these 2 types
+	 * of memory, it is not strictly needed to set the shareability field
+	 * in the translation tables.
+	 */
+	mem_type = MT_TYPE(attr);
+	if (mem_type == MT_DEVICE) {
+		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+		/*
+		 * Always map device memory as execute-never.
+		 * This is to avoid the possibility of a speculative instruction
+		 * fetch, which could be an issue if this memory region
+		 * corresponds to a read-sensitive peripheral.
+		 */
+		desc |= execute_never_mask;
+
+	} else { /* Normal memory */
+		/*
+		 * Always map read-write normal memory as execute-never.
+		 * (Trusted Firmware doesn't self-modify its code, therefore
+		 * R/W memory is reserved for data storage, which must not be
+		 * executable.)
+		 * Note that setting the XN bit here is for consistency only.
+		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
+		 * which causes any writable memory region to be treated as
+		 * execute-never, regardless of the value of the XN bit in the
+		 * translation table.
+		 *
+		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+		 * attribute to figure out the value of the XN bit.
+		 */
+		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+			desc |= execute_never_mask;
+		}
+
+		if (mem_type == MT_MEMORY) {
+			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+		} else {
+			assert(mem_type == MT_NON_CACHEABLE);
+			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+		}
+	}
+
+	debug_print((mem_type == MT_MEMORY) ? "MEM" :
+		((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
+	debug_print(attr & MT_RW ? "-RW" : "-RO");
+	debug_print(attr & MT_NS ? "-NS" : "-S");
+	debug_print(attr & MT_EXECUTE_NEVER ? "-XN" : "-EXEC");
+	return desc;
+}
+
+/*
+ * Look for the innermost region that contains the area at `base_va` with size
+ * `size`. Populate *attr with the attributes of this region.
+ *
+ * On success, this function returns 0.
+ * If there are partial overlaps (meaning that a smaller size is needed) or if
+ * the region can't be found in the given area, it returns -1. In this case the
+ * value pointed to by attr should be ignored by the caller.
+ */
+static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
+					size_t size, mmap_attr_t *attr)
+{
+	/* Don't assume that the area is contained in the first region */
+	int ret = -1;
+
+	/*
+	 * Get attributes from last (innermost) region that contains the
+	 * requested area. Don't stop as soon as one region doesn't contain it
+	 * because there may be other internal regions that contain this area:
+	 *
+	 * |-----------------------------1-----------------------------|
+	 * |----2----|     |-------3-------|    |----5----|
+	 *                   |--4--|
+	 *
+	 *                   |---| <- Area we want the attributes of.
+	 *
+	 * In this example, the area is contained in regions 1, 3 and 4 but not
+	 * in region 2. The loop shouldn't stop at region 2 as inner regions
+	 * have priority over outer regions, it should stop at region 5.
+	 */
+	for (;; ++mm) {
+
+		if (!mm->size)
+			return ret; /* Reached end of list */
+
+		if (mm->base_va > base_va + size - 1)
+			return ret; /* Next region is after area so end */
+
+		if (mm->base_va + mm->size - 1 < base_va)
+			continue; /* This region ends before the area, skip it */
+
+		if (!ret && mm->attr == *attr)
+			continue; /* Region doesn't override attribs so skip */
+
+		if (mm->base_va > base_va ||
+			mm->base_va + mm->size - 1 < base_va + size - 1)
+			return -1; /* Region doesn't fully cover our area */
+
+		*attr = mm->attr;
+		ret = 0;
+	}
+	return ret;
+}
+
+static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
+					uintptr_t base_va,
+					uint64_t *table,
+					unsigned int level)
+{
+	assert(level >= XLAT_TABLE_LEVEL_MIN && level <= XLAT_TABLE_LEVEL_MAX);
+
+	unsigned int level_size_shift =
+		       L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
+	u_register_t level_size = (u_register_t)1 << level_size_shift;
+	u_register_t level_index_mask =
+		((u_register_t)XLAT_TABLE_ENTRIES_MASK) << level_size_shift;
+
+	debug_print("New xlat table:\n");
+
+	do  {
+		uint64_t desc = UNSET_DESC;
+
+		if (!mm->size) {
+			/* Done mapping regions; finish zeroing the table */
+			desc = INVALID_DESC;
+		} else if (mm->base_va + mm->size - 1 < base_va) {
+			/* This area is after the region so get next region */
+			++mm;
+			continue;
+		}
+
+		debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
+			(void *)base_va, (unsigned long long)level_size);
+
+		if (mm->base_va > base_va + level_size - 1) {
+			/* Next region is after this area. Nothing to map yet */
+			desc = INVALID_DESC;
+		/* Make sure that the current level allows block descriptors */
+		} else if (level >= XLAT_BLOCK_LEVEL_MIN) {
+			/*
+			 * Try to get attributes of this area. It will fail if
+			 * there are partially overlapping regions. On success,
+			 * it will return the innermost region's attributes.
+			 */
+			mmap_attr_t attr;
+			int r = mmap_region_attr(mm, base_va, level_size, &attr);
+
+			if (!r) {
+				desc = mmap_desc(attr,
+					base_va - mm->base_va + mm->base_pa,
+					level);
+			}
+		}
+
+		if (desc == UNSET_DESC) {
+			/* Area not covered by a region so need finer table */
+			uint64_t *new_table = xlat_tables[next_xlat++];
+			assert(next_xlat <= MAX_XLAT_TABLES);
+			desc = TABLE_DESC | (uintptr_t)new_table;
+
+			/* Recurse to fill in new table */
+			mm = init_xlation_table_inner(mm, base_va,
+						new_table, level+1);
+		}
+
+		debug_print("\n");
+
+		*table++ = desc;
+		base_va += level_size;
+	} while ((base_va & level_index_mask) &&
+		 (base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+
+	return mm;
+}
+
+void init_xlation_table(uintptr_t base_va, uint64_t *table,
+			unsigned int level, uintptr_t *max_va,
+			unsigned long long *max_pa)
+{
+	execute_never_mask = xlat_arch_get_xn_desc(xlat_arch_current_el());
+	init_xlation_table_inner(mmap, base_va, table, level);
+	*max_va = xlat_max_va;
+	*max_pa = xlat_max_pa;
+}
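As a usage sketch (illustrative only, not part of this patch): a platform port describes its memory map through mmap_add_region()/mmap_add() before the translation tables are built and the MMU is turned on. The base addresses, sizes and attribute combinations below are example assumptions; the init/enable helpers named in the final comment are the public entry points from xlat_tables.h that eventually reach init_xlation_table() above.

#include <xlat_tables.h>

static void plat_example_setup_xlat(void)
{
	/* Example values: normal cacheable RAM for code and data. */
	mmap_add_region(0x80000000ULL, 0x80000000U, 0x04000000U,
			MT_MEMORY | MT_RW);

	/* Example values: device registers, mapped execute-never. */
	mmap_add_region(0x1C000000ULL, 0x1C000000U, 0x00100000U,
			MT_DEVICE | MT_RW | MT_NS);

	/* The image then calls e.g. init_xlat_tables() and enable_mmu_el3(),
	 * declared in xlat_tables.h. */
}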
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
new file mode 100644
index 0000000..50d6bd5
--- /dev/null
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_PRIVATE_H__
+#define __XLAT_TABLES_PRIVATE_H__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <xlat_tables_arch.h>
+
+/*
+ * If the platform hasn't defined a physical and a virtual address space size
+ * default to ADDR_SPACE_SIZE.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+#endif
+
+CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(PLAT_VIRT_ADDR_SPACE_SIZE),
+	assert_valid_virt_addr_space_size);
+
+CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
+	assert_valid_phy_addr_space_size);
+
+/* Alias to retain compatibility with the old #define name */
+#define XLAT_BLOCK_LEVEL_MIN	MIN_LVL_BLOCK_DESC
+
+void print_mmap(void);
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+int xlat_arch_current_el(void);
+
+/*
+ * Returns the bit mask that has to be ORed to the rest of a translation table
+ * descriptor so that execution of code is prohibited at the given Exception
+ * Level.
+ */
+uint64_t xlat_arch_get_xn_desc(int el);
+
+void init_xlation_table(uintptr_t base_va, uint64_t *table,
+			unsigned int level, uintptr_t *max_va,
+			unsigned long long *max_pa);
+
+#endif /* __XLAT_TABLES_PRIVATE_H__ */
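For reference, the PLAT_VIRT_ADDR_SPACE_SIZE and PLAT_PHY_ADDR_SPACE_SIZE values checked by the CASSERTs above are normally provided by the platform's platform_def.h. A hedged sketch with example values follows; the 4 GiB sizes are illustrative assumptions, not requirements of the library.

/* platform_def.h (excerpt, example values only) */
#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)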
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
new file mode 100644
index 0000000..642f799
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+#if ENABLE_ASSERTIONS
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+	/* Physical address space size for long descriptor format. */
+	return (1ull << 40) - 1ull;
+}
+#endif /* ENABLE_ASSERTIONS*/
+
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
+{
+	return (read_sctlr() & SCTLR_M_BIT) != 0;
+}
+
+void xlat_arch_tlbi_va(uintptr_t va)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	tlbimvaais(TLBI_ADDR(va));
+}
+
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	tlbimvaais(TLBI_ADDR(va));
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+	/* Invalidate all entries from branch predictors. */
+	bpiallis();
+
+	/*
+	 * A TLB maintenance instruction can complete at any time after
+	 * it is issued, but is only guaranteed to be complete after the
+	 * execution of DSB by the PE that executed the TLB maintenance
+	 * instruction. After the TLB invalidate instruction is
+	 * complete, no new memory accesses using the invalidated TLB
+	 * entries will be observed by any observer of the system
+	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+	 * "Ordering and completion of TLB maintenance instructions".
+	 */
+	dsbish();
+
+	/*
+	 * The effects of a completed TLB maintenance instruction are
+	 * only guaranteed to be visible on the PE that executed the
+	 * instruction after the execution of an ISB instruction by the
+	 * PE that executed the TLB maintenance instruction.
+	 */
+	isb();
+}
+
+int xlat_arch_current_el(void)
+{
+	/*
+	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
+	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 */
+	return 3;
+}
+
+/*******************************************************************************
+ * Function for enabling the MMU in Secure PL1, assuming that the page tables
+ * have already been created.
+ ******************************************************************************/
+void enable_mmu_arch(unsigned int flags,
+		uint64_t *base_table,
+		unsigned long long max_pa,
+		uintptr_t max_va)
+{
+	u_register_t mair0, ttbcr, sctlr;
+	uint64_t ttbr0;
+
+	assert(IS_IN_SECURE());
+
+	sctlr = read_sctlr();
+	assert((sctlr & SCTLR_M_BIT) == 0);
+
+	/* Invalidate TLBs at the current exception level */
+	tlbiall();
+
+	/* Set attributes in the right indices of the MAIR */
+	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+			ATTR_NON_CACHEABLE_INDEX);
+
+	/*
+	 * Configure the control register for stage 1 of the PL1&0 translation
+	 * regime.
+	 */
+
+	/* Use the Long-descriptor translation table format. */
+	ttbcr = TTBCR_EAE_BIT;
+
+	/*
+	 * Disable translation table walk for addresses that are translated
+	 * using TTBR1. Therefore, only TTBR0 is used.
+	 */
+	ttbcr |= TTBCR_EPD1_BIT;
+
+	/*
+	 * Limit the input address ranges and memory region sizes translated
+	 * using TTBR0 to the given virtual address space size, if smaller than
+	 * 32 bits.
+	 */
+	if (max_va != UINT32_MAX) {
+		uintptr_t virtual_addr_space_size = max_va + 1;
+		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+		/*
+		 * __builtin_ctzll(0) is undefined but here we are guaranteed
+		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
+		 */
+		ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+	}
+
+	/*
+	 * Set the cacheability and shareability attributes for memory
+	 * associated with translation table walks using TTBR0.
+	 */
+	if (flags & XLAT_TABLE_NC) {
+		/* Inner & outer non-cacheable non-shareable. */
+		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
+			TTBCR_RGN0_INNER_NC;
+	} else {
+		/* Inner & outer WBWA & shareable. */
+		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+			TTBCR_RGN0_INNER_WBA;
+	}
+
+	/* Set TTBR0 bits as well */
+	ttbr0 = (uint64_t)(uintptr_t) base_table;
+#if ARM_ARCH_AT_LEAST(8, 2)
+	/*
+	 * Enable CnP bit so as to share page tables with all PEs.
+	 * Mandatory for ARMv8.2 implementations.
+	 */
+	ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+	/* Now program the relevant system registers */
+	write_mair0(mair0);
+	write_ttbcr(ttbcr);
+	write64_ttbr0(ttbr0);
+	write64_ttbr1(0);
+
+	/*
+	 * Ensure all translation table writes have drained
+	 * into memory, the TLB invalidation is complete,
+	 * and translation register writes are committed
+	 * before enabling the MMU
+	 */
+	dsbish();
+	isb();
+
+	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
+
+	if (flags & DISABLE_DCACHE)
+		sctlr &= ~SCTLR_C_BIT;
+	else
+		sctlr |= SCTLR_C_BIT;
+
+	write_sctlr(sctlr);
+
+	/* Ensure the MMU enable takes effect immediately */
+	isb();
+}
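The T0SZ computation in enable_mmu_arch() above is the 32-bit counterpart of the AArch64 case: 32 - __builtin_ctzll(virtual_addr_space_size). A small standalone check of that arithmetic, assuming an illustrative 2 GiB virtual address space (the value is an assumption for the example only):

#include <assert.h>

int main(void)
{
	/* Example only: max_va = 0x7FFFFFFF, i.e. a 2 GiB VA space. */
	unsigned long long va_space_size = 1ULL << 31;

	/* TTBCR.T0SZ as computed in enable_mmu_arch() above. */
	unsigned int t0sz = 32 - __builtin_ctzll(va_space_size);

	/* TTBR0 then translates 2^(32 - T0SZ) = 2 GiB of VA. */
	assert(t0sz == 1);

	return 0;
}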
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
new file mode 100644
index 0000000..509395d
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+{
+	return UPPER_ATTRS(XN);
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
new file mode 100644
index 0000000..eda38d3
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <common_def.h>
+#include <sys/types.h>
+#include <utils.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../xlat_tables_private.h"
+
+static unsigned long long calc_physical_addr_size_bits(
+					unsigned long long max_addr)
+{
+	/* Physical address can't exceed 48 bits */
+	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+
+	/* 48 bits address */
+	if (max_addr & ADDR_MASK_44_TO_47)
+		return TCR_PS_BITS_256TB;
+
+	/* 44 bits address */
+	if (max_addr & ADDR_MASK_42_TO_43)
+		return TCR_PS_BITS_16TB;
+
+	/* 42 bits address */
+	if (max_addr & ADDR_MASK_40_TO_41)
+		return TCR_PS_BITS_4TB;
+
+	/* 40 bits address */
+	if (max_addr & ADDR_MASK_36_TO_39)
+		return TCR_PS_BITS_1TB;
+
+	/* 36 bits address */
+	if (max_addr & ADDR_MASK_32_TO_35)
+		return TCR_PS_BITS_64GB;
+
+	return TCR_PS_BITS_4GB;
+}
+
+#if ENABLE_ASSERTIONS
+/* Physical Address ranges supported in the AArch64 Memory Model */
+static const unsigned int pa_range_bits_arr[] = {
+	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+	PARANGE_0101
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+	u_register_t pa_range = read_id_aa64mmfr0_el1() &
+						ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+	/* All other values are reserved */
+	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
+}
+#endif /* ENABLE_ASSERTIONS*/
+
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1);
+		return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		assert(xlat_arch_current_el() >= 3);
+		return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+	}
+}
+
+
+void xlat_arch_tlbi_va(uintptr_t va)
+{
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
+#endif
+}
+
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	/*
+	 * This function only supports invalidation of TLB entries for the EL3
+	 * and EL1&0 translation regimes.
+	 *
+	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+	 */
+	if (xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1);
+		tlbivaae1is(TLBI_ADDR(va));
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		assert(xlat_arch_current_el() >= 3);
+		tlbivae3is(TLBI_ADDR(va));
+	}
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+	/*
+	 * A TLB maintenance instruction can complete at any time after
+	 * it is issued, but is only guaranteed to be complete after the
+	 * execution of DSB by the PE that executed the TLB maintenance
+	 * instruction. After the TLB invalidate instruction is
+	 * complete, no new memory accesses using the invalidated TLB
+	 * entries will be observed by any observer of the system
+	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
+	 * "Ordering and completion of TLB maintenance instructions".
+	 */
+	dsbish();
+
+	/*
+	 * The effects of a completed TLB maintenance instruction are
+	 * only guaranteed to be visible on the PE that executed the
+	 * instruction after the execution of an ISB instruction by the
+	 * PE that executed the TLB maintenance instruction.
+	 */
+	isb();
+}
+
+int xlat_arch_current_el(void)
+{
+	int el = GET_EL(read_CurrentEl());
+
+	assert(el > 0);
+
+	return el;
+}
+
+/*******************************************************************************
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the page tables have already been created.
+ *
+ *   _el:		Exception level at which the function will run
+ *   _tlbi_fct:		Function to invalidate the TLBs at the current
+ *			exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
+	static void enable_mmu_internal_el##_el(int flags,		\
+						uint64_t mair,		\
+						uint64_t tcr,		\
+						uint64_t ttbr)		\
+	{								\
+		uint32_t sctlr = read_sctlr_el##_el();			\
+		assert((sctlr & SCTLR_M_BIT) == 0);			\
+									\
+		/* Invalidate TLBs at the current exception level */	\
+		_tlbi_fct();						\
+									\
+		write_mair_el##_el(mair);				\
+		write_tcr_el##_el(tcr);					\
+									\
+		/* Set TTBR bits as well */				\
+		if (ARM_ARCH_AT_LEAST(8, 2)) {				\
+			/* Enable CnP bit so as to share page tables */	\
+			/* with all PEs. This is mandatory for */	\
+			/* ARMv8.2 implementations. */			\
+			ttbr |= TTBR_CNP_BIT;				\
+		}							\
+		write_ttbr0_el##_el(ttbr);				\
+									\
+		/* Ensure all translation table writes have drained */	\
+		/* into memory, the TLB invalidation is complete, */	\
+		/* and translation register writes are committed */	\
+		/* before enabling the MMU */				\
+		dsbish();						\
+		isb();							\
+									\
+		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
+		if (flags & DISABLE_DCACHE)				\
+			sctlr &= ~SCTLR_C_BIT;				\
+		else							\
+			sctlr |= SCTLR_C_BIT;				\
+									\
+		write_sctlr_el##_el(sctlr);				\
+									\
+		/* Ensure the MMU enable takes effect immediately */	\
+		isb();							\
+	}
+
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+#if IMAGE_EL == 1
+DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
+#elif IMAGE_EL == 3
+DEFINE_ENABLE_MMU_EL(3, tlbialle3)
+#endif
+
+void enable_mmu_arch(unsigned int flags,
+		uint64_t *base_table,
+		unsigned long long max_pa,
+		uintptr_t max_va)
+{
+	uint64_t mair, ttbr, tcr;
+
+	/* Set attributes in the right indices of the MAIR. */
+	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+	ttbr = (uint64_t) base_table;
+
+	/*
+	 * Set TCR bits as well.
+	 */
+
+	/*
+	 * Limit the input address ranges and memory region sizes translated
+	 * using TTBR0 to the given virtual address space size.
+	 */
+	assert(max_va < UINTPTR_MAX);
+	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+	/*
+	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
+	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
+	 */
+	tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+	/*
+	 * Set the cacheability and shareability attributes for memory
+	 * associated with translation table walks.
+	 */
+	if (flags & XLAT_TABLE_NC) {
+		/* Inner & outer non-cacheable non-shareable. */
+		tcr |= TCR_SH_NON_SHAREABLE |
+			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+	} else {
+		/* Inner & outer WBWA & shareable. */
+		tcr |= TCR_SH_INNER_SHAREABLE |
+			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+	}
+
+	/*
+	 * It is safer to restrict the max physical address accessible by the
+	 * hardware as much as possible.
+	 */
+	unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	/*
+	 * TCR_EL1.EPD1: Disable translation table walk for addresses that are
+	 * translated using TTBR1_EL1.
+	 */
+	tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
+#endif
+}
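To make the physical-address-size selection above concrete, the following is a standalone restatement of calc_physical_addr_size_bits() using the architectural PS/IPS encodings (0: 32-bit, 1: 36-bit, 2: 40-bit, 3: 42-bit, 4: 44-bit, 5: 48-bit physical addresses). It is a sketch for illustration only, and the example addresses are assumptions rather than values from this patch.

#include <assert.h>

static unsigned int ps_bits_for_max_pa(unsigned long long max_pa)
{
	if (max_pa & 0xFFFF000000000000ULL)	/* bits 48-63: unsupported */
		return ~0U;
	if (max_pa & 0x0000F00000000000ULL)	/* bits 44-47 -> 48-bit PA */
		return 5;
	if (max_pa & 0x00000C0000000000ULL)	/* bits 42-43 -> 44-bit PA */
		return 4;
	if (max_pa & 0x0000030000000000ULL)	/* bits 40-41 -> 42-bit PA */
		return 3;
	if (max_pa & 0x000000F000000000ULL)	/* bits 36-39 -> 40-bit PA */
		return 2;
	if (max_pa & 0x0000000F00000000ULL)	/* bits 32-35 -> 36-bit PA */
		return 1;
	return 0;				/* 32-bit PA (4 GiB) */
}

int main(void)
{
	/* Example: highest mapped PA just under 1 TiB -> 40-bit encoding. */
	assert(ps_bits_for_max_pa(0xFFFFFFFFFFULL) == 2);

	/* Example: everything fits in 4 GiB -> 32-bit encoding. */
	assert(ps_bits_for_max_pa(0xFFFFFFFFULL) == 0);

	return 0;
}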
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
new file mode 100644
index 0000000..d201590
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <assert.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at all ELs
+ * that are part of the given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+{
+	if (regime == EL1_EL0_REGIME) {
+		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+	} else {
+		assert(regime == EL3_REGIME);
+		return UPPER_ATTRS(XN);
+	}
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
new file mode 100644
index 0000000..06dd844
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
+				${ARCH}/xlat_tables_arch.c		\
+				xlat_tables_internal.c)
+
+INCLUDES		+=	-Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
new file mode 100644
index 0000000..0acfacb
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -0,0 +1,1662 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common_def.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <string.h>
+#include <types.h>
+#include <utils.h>
+#include <xlat_tables_arch_private.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * Each platform can define the size of its physical and virtual address spaces.
+ * If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the BL image
+ * currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * The following functions assume that they will only be used on subtables.
+ * The base table can't be unmapped, so no special handling is needed for it.
+ */
+
+/*
+ * Returns the index of the array corresponding to the specified translation
+ * table.
+ */
+static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	for (unsigned int i = 0; i < ctx->tables_num; i++)
+		if (ctx->tables[i] == table)
+			return i;
+
+	/*
+	 * Maybe we were asked to get the index of the base level table, which
+	 * should never happen.
+	 */
+	assert(0);
+
+	return -1;
+}
+
+/* Returns a pointer to an empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+	for (unsigned int i = 0; i < ctx->tables_num; i++)
+		if (ctx->tables_mapped_regions[i] == 0)
+			return ctx->tables[i];
+
+	return NULL;
+}
+
+/* Increments region count for a given table. */
+static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
+}
+
+/* Decrements region count for a given table. */
+static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
+}
+
+/* Returns 0 if the specified table isn't empty, otherwise 1. */
+static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
+{
+	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
+}
+
+#else /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Returns a pointer to the first empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+	assert(ctx->next_table < ctx->tables_num);
+
+	return ctx->tables[ctx->next_table++];
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
+		   unsigned long long addr_pa, int level)
+{
+	uint64_t desc;
+	int mem_type;
+
+	/* Make sure that the granularity is fine enough to map this address. */
+	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+
+	desc = addr_pa;
+	/*
+	 * There are different translation table descriptors for level 3 and the
+	 * rest.
+	 */
+	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+	/*
+	 * Always set the access flag, as TF doesn't manage access flag faults.
+	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
+	 * memory region attributes.
+	 */
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
+	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+
+	/*
+	 * Do not allow unprivileged access when the mapping is for a privileged
+	 * EL. For translation regimes without mappings accessible to lower
+	 * exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
+	 */
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		if (attr & MT_USER) {
+			/* EL0 mapping requested, so we give User access */
+			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+		} else {
+			/* EL1 mapping requested, no User access granted */
+			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+		}
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+	}
+
+	/*
+	 * Deduce shareability domain and executability of the memory region
+	 * from the memory type of the attributes (MT_TYPE).
+	 *
+	 * Data accesses to device memory and non-cacheable normal memory are
+	 * coherent for all observers in the system, and correspondingly are
+	 * always treated as being Outer Shareable. Therefore, for these 2 types
+	 * of memory, it is not strictly needed to set the shareability field
+	 * in the translation tables.
+	 */
+	mem_type = MT_TYPE(attr);
+	if (mem_type == MT_DEVICE) {
+		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+		/*
+		 * Always map device memory as execute-never.
+		 * This is to avoid the possibility of a speculative instruction
+		 * fetch, which could be an issue if this memory region
+		 * corresponds to a read-sensitive peripheral.
+		 */
+		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+	} else { /* Normal memory */
+		/*
+		 * Always map read-write normal memory as execute-never.
+		 * (Trusted Firmware doesn't self-modify its code, therefore
+		 * R/W memory is reserved for data storage, which must not be
+		 * executable.)
+		 * Note that setting the XN bit here is for consistency only.
+		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
+		 * which causes any writable memory region to be treated as
+		 * execute-never, regardless of the value of the XN bit in the
+		 * translation table.
+		 *
+		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+		 * attribute to figure out the value of the XN bit.  The actual
+		 * XN bit(s) to set in the descriptor depends on the context's
+		 * translation regime and the policy applied in
+		 * xlat_arch_regime_get_xn_desc().
+		 */
+		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+		}
+
+		if (mem_type == MT_MEMORY) {
+			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+		} else {
+			assert(mem_type == MT_NON_CACHEABLE);
+			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+		}
+	}
+
+	return desc;
+}
+
+/*
+ * Enumeration of actions that can be made when mapping table entries depending
+ * on the previous value in that entry and information about the region being
+ * mapped.
+ */
+typedef enum {
+
+	/* Do nothing */
+	ACTION_NONE,
+
+	/* Write a block (or page, if in level 3) entry. */
+	ACTION_WRITE_BLOCK_ENTRY,
+
+	/*
+	 * Create a new table and write a table entry pointing to it. Recurse
+	 * into it for further processing.
+	 */
+	ACTION_CREATE_NEW_TABLE,
+
+	/*
+	 * There is a table descriptor in this entry, read it and recurse into
+	 * that table for further processing.
+	 */
+	ACTION_RECURSE_INTO_TABLE,
+
+} action_t;
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * Recursive function that writes to the translation tables and unmaps the
+ * specified region.
+ */
+static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+				     const uintptr_t table_base_va,
+				     uint64_t *const table_base,
+				     const int table_entries,
+				     const unsigned int level)
+{
+	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+
+	uint64_t *subtable;
+	uint64_t desc;
+
+	uintptr_t table_idx_va;
+	uintptr_t table_idx_end_va; /* End VA of this entry */
+
+	uintptr_t region_end_va = mm->base_va + mm->size - 1;
+
+	int table_idx;
+
+	if (mm->base_va > table_base_va) {
+		/* Find the first index of the table affected by the region. */
+		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+		table_idx = (table_idx_va - table_base_va) >>
+			    XLAT_ADDR_SHIFT(level);
+
+		assert(table_idx < table_entries);
+	} else {
+		/* Start from the beginning of the table. */
+		table_idx_va = table_base_va;
+		table_idx = 0;
+	}
+
+	while (table_idx < table_entries) {
+
+		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
+
+		desc = table_base[table_idx];
+		uint64_t desc_type = desc & DESC_MASK;
+
+		action_t action = ACTION_NONE;
+
+		if ((mm->base_va <= table_idx_va) &&
+		    (region_end_va >= table_idx_end_va)) {
+
+			/* Region covers the whole block */
+
+			if (level == 3) {
+				/*
+				 * Last level, only page descriptors allowed,
+				 * erase it.
+				 */
+				assert(desc_type == PAGE_DESC);
+
+				action = ACTION_WRITE_BLOCK_ENTRY;
+			} else {
+				/*
+				 * Other levels can have table descriptors. If
+				 * so, recurse into it and erase descriptors
+				 * inside it as needed. If there is a block
+				 * descriptor, just erase it. If an invalid
+				 * descriptor is found, this table isn't
+				 * actually mapped, which shouldn't happen.
+				 */
+				if (desc_type == TABLE_DESC) {
+					action = ACTION_RECURSE_INTO_TABLE;
+				} else {
+					assert(desc_type == BLOCK_DESC);
+					action = ACTION_WRITE_BLOCK_ENTRY;
+				}
+			}
+
+		} else if ((mm->base_va <= table_idx_end_va) ||
+			   (region_end_va >= table_idx_va)) {
+
+			/*
+			 * Region partially covers block.
+			 *
+			 * It can't happen in level 3.
+			 *
+			 * There must be a table descriptor here, if not there
+			 * was a problem when mapping the region.
+			 */
+
+			assert(level < 3);
+
+			assert(desc_type == TABLE_DESC);
+
+			action = ACTION_RECURSE_INTO_TABLE;
+		}
+
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+			table_base[table_idx] = INVALID_DESC;
+			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+
+		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+
+			/* Recurse to write into subtable */
+			xlat_tables_unmap_region(ctx, mm, table_idx_va,
+						 subtable, XLAT_TABLE_ENTRIES,
+						 level + 1);
+
+			/*
+			 * If the subtable is now empty, remove its reference.
+			 */
+			if (xlat_table_is_empty(ctx, subtable)) {
+				table_base[table_idx] = INVALID_DESC;
+				xlat_arch_tlbi_va_regime(table_idx_va,
+						ctx->xlat_regime);
+			}
+
+		} else {
+			assert(action == ACTION_NONE);
+		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (region_end_va <= table_idx_va)
+			break;
+	}
+
+	if (level > ctx->base_level)
+		xlat_table_dec_regions_count(ctx, table_base);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Decide which action to take when mapping the specified region, based on the
+ * given arguments and on the descriptor currently present in the table entry.
+ */
+static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
+		const int desc_type, const unsigned long long dest_pa,
+		const uintptr_t table_entry_base_va, const unsigned int level)
+{
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+	uintptr_t table_entry_end_va =
+			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+
+	/*
+	 * The descriptor types allowed depend on the current table level.
+	 */
+
+	if ((mm->base_va <= table_entry_base_va) &&
+	    (mm_end_va >= table_entry_end_va)) {
+
+		/*
+		 * Table entry is covered by region
+		 * --------------------------------
+		 *
+		 * This means that this table entry can describe the whole
+		 * translation with this granularity in principle.
+		 */
+
+		if (level == 3) {
+			/*
+			 * Last level, only page descriptors are allowed.
+			 */
+			if (desc_type == PAGE_DESC) {
+				/*
+				 * There's another region mapped here, don't
+				 * overwrite.
+				 */
+				return ACTION_NONE;
+			} else {
+				assert(desc_type == INVALID_DESC);
+				return ACTION_WRITE_BLOCK_ENTRY;
+			}
+
+		} else {
+
+			/*
+			 * Other levels. Table descriptors are allowed. Block
+			 * descriptors too, but they have some limitations.
+			 */
+
+			if (desc_type == TABLE_DESC) {
+				/* There's already a table, recurse into it. */
+				return ACTION_RECURSE_INTO_TABLE;
+
+			} else if (desc_type == INVALID_DESC) {
+				/*
+				 * There's nothing mapped here, create a new
+				 * entry.
+				 *
+				 * Check if the destination granularity allows
+				 * us to use a block descriptor or we need a
+				 * finer table for it.
+				 *
+				 * Also, check if the current level allows block
+				 * descriptors. If not, create a table instead.
+				 */
+				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
+				    (level < MIN_LVL_BLOCK_DESC) ||
+				    (mm->granularity < XLAT_BLOCK_SIZE(level)))
+					return ACTION_CREATE_NEW_TABLE;
+				else
+					return ACTION_WRITE_BLOCK_ENTRY;
+
+			} else {
+				/*
+				 * There's another region mapped here, don't
+				 * overwrite.
+				 */
+				assert(desc_type == BLOCK_DESC);
+
+				return ACTION_NONE;
+			}
+		}
+
+	} else if ((mm->base_va <= table_entry_end_va) ||
+		   (mm_end_va >= table_entry_base_va)) {
+
+		/*
+		 * Region partially covers table entry
+		 * -----------------------------------
+		 *
+		 * This means that this table entry can't describe the whole
+		 * translation, a finer table is needed.
+		 *
+		 * There cannot be partial block overlaps in level 3. If that
+		 * happens, some of the preliminary checks when adding the
+		 * mmap region failed to detect that PA and VA must at least be
+		 * aligned to PAGE_SIZE.
+		 */
+		assert(level < 3);
+
+		if (desc_type == INVALID_DESC) {
+			/*
+			 * The block is not fully covered by the region. Create
+			 * a new table, recurse into it and try to map the
+			 * region with finer granularity.
+			 */
+			return ACTION_CREATE_NEW_TABLE;
+
+		} else {
+			assert(desc_type == TABLE_DESC);
+			/*
+			 * The block is not fully covered by the region, but
+			 * there is already a table here. Recurse into it and
+			 * try to map with finer granularity.
+			 *
+			 * PAGE_DESC for level 3 has the same value as
+			 * TABLE_DESC, but this code can't run on a level 3
+			 * table because there can't be overlaps in level 3.
+			 */
+			return ACTION_RECURSE_INTO_TABLE;
+		}
+	}
+
+	/*
+	 * This table entry is outside of the region specified in the arguments,
+	 * don't write anything to it.
+	 */
+	return ACTION_NONE;
+}
+
+/*
+ * Recursive function that writes to the translation tables and maps the
+ * specified region. On success, it returns the VA of the last byte that was
+ * successfully mapped. On error, it returns the VA of the next entry that
+ * should have been mapped.
+ */
+static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+				   const uintptr_t table_base_va,
+				   uint64_t *const table_base,
+				   const int table_entries,
+				   const unsigned int level)
+{
+	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+
+	uintptr_t table_idx_va;
+	unsigned long long table_idx_pa;
+
+	uint64_t *subtable;
+	uint64_t desc;
+
+	int table_idx;
+
+	if (mm->base_va > table_base_va) {
+		/* Find the first index of the table affected by the region. */
+		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+
+		table_idx = (table_idx_va - table_base_va) >>
+			    XLAT_ADDR_SHIFT(level);
+
+		assert(table_idx < table_entries);
+	} else {
+		/* Start from the beginning of the table. */
+		table_idx_va = table_base_va;
+		table_idx = 0;
+	}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+	if (level > ctx->base_level)
+		xlat_table_inc_regions_count(ctx, table_base);
+#endif
+
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
+
+		action_t action = xlat_tables_map_region_action(mm,
+			desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+
+		if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+			table_base[table_idx] =
+				xlat_desc(ctx, mm->attr, table_idx_pa, level);
+
+		} else if (action == ACTION_CREATE_NEW_TABLE) {
+
+			subtable = xlat_table_get_empty(ctx);
+			if (subtable == NULL) {
+				/* Not enough free tables to map this region */
+				return table_idx_va;
+			}
+
+			/* Point to new subtable from this one. */
+			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+			/* Recurse to write into subtable */
+			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1);
+			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+				return end_va;
+
+		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+			/* Recurse to write into subtable */
+			uintptr_t end_va =  xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1);
+			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+				return end_va;
+
+		} else {
+
+			assert(action == ACTION_NONE);
+
+		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (mm_end_va <= table_idx_va)
+			break;
+	}
+
+	return table_idx_va - 1;
+}
+
+void print_mmap(mmap_region_t *const mmap)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	tf_printf("mmap:\n");
+	mmap_region_t *mm = mmap;
+
+	while (mm->size) {
+		tf_printf(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x",
+				(void *)mm->base_va, mm->base_pa,
+				mm->size, mm->attr);
+		tf_printf(" granularity:0x%zx\n", mm->granularity);
+		++mm;
+	}
+	tf_printf("\n");
+#endif
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns:
+ *        0: Success, the mapping is allowed.
+ *   EINVAL: Invalid values were used as arguments.
+ *   ERANGE: The memory limits were surpassed.
+ *   ENOMEM: There is not enough memory in the mmap array.
+ *    EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	unsigned long long base_pa = mm->base_pa;
+	uintptr_t base_va = mm->base_va;
+	size_t size = mm->size;
+	size_t granularity = mm->granularity;
+
+	unsigned long long end_pa = base_pa + size - 1;
+	uintptr_t end_va = base_va + size - 1;
+
+	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+			!IS_PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	if ((granularity != XLAT_BLOCK_SIZE(1)) &&
+		(granularity != XLAT_BLOCK_SIZE(2)) &&
+		(granularity != XLAT_BLOCK_SIZE(3))) {
+		return -EINVAL;
+	}
+
+	/* Check for overflows */
+	if ((base_pa > end_pa) || (base_va > end_va))
+		return -ERANGE;
+
+	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
+		return -ERANGE;
+
+	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
+		return -ERANGE;
+
+	/* Check that there is space in the ctx->mmap array */
+	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+		return -ENOMEM;
+
+	/* Check for PAs and VAs overlaps with all other regions */
+	for (mmap_region_t *mm_cursor = ctx->mmap;
+						mm_cursor->size; ++mm_cursor) {
+
+		uintptr_t mm_cursor_end_va = mm_cursor->base_va
+							+ mm_cursor->size - 1;
+
+		/*
+		 * Check if one of the regions is completely inside the other
+		 * one.
+		 */
+		int fully_overlapped_va =
+			((base_va >= mm_cursor->base_va) &&
+					(end_va <= mm_cursor_end_va)) ||
+
+			((mm_cursor->base_va >= base_va) &&
+						(mm_cursor_end_va <= end_va));
+
+		/*
+		 * Full VA overlaps are only allowed if both regions are
+		 * identity mapped (zero offset) or have the same VA to PA
+		 * offset. Also, make sure that it's not the exact same area.
+		 * This can only be done with static regions.
+		 */
+		if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+			if ((mm->attr & MT_DYNAMIC) ||
+						(mm_cursor->attr & MT_DYNAMIC))
+				return -EPERM;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+			if ((mm_cursor->base_va - mm_cursor->base_pa) !=
+							(base_va - base_pa))
+				return -EPERM;
+
+			if ((base_va == mm_cursor->base_va) &&
+						(size == mm_cursor->size))
+				return -EPERM;
+
+		} else {
+			/*
+			 * If the regions do not have fully overlapping VAs,
+			 * then they must have fully separated VAs and PAs.
+			 * Partial overlaps are not allowed
+			 */
+
+			unsigned long long mm_cursor_end_pa =
+				     mm_cursor->base_pa + mm_cursor->size - 1;
+
+			int separated_pa =
+				(end_pa < mm_cursor->base_pa) ||
+				(base_pa > mm_cursor_end_pa);
+			int separated_va =
+				(end_va < mm_cursor->base_va) ||
+				(base_va > mm_cursor_end_va);
+
+			if (!(separated_va && separated_pa))
+				return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	mmap_region_t *mm_cursor = ctx->mmap;
+	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1;
+	uintptr_t end_va = mm->base_va + mm->size - 1;
+	int ret;
+
+	/* Ignore empty regions */
+	if (!mm->size)
+		return;
+
+	/* Static regions must be added before initializing the xlat tables. */
+	assert(!ctx->initialized);
+
+	ret = mmap_add_region_check(ctx, mm);
+	if (ret != 0) {
+		ERROR("mmap_add_region_check() failed. error %d\n", ret);
+		assert(0);
+		return;
+	}
+
+	/*
+	 * Find correct place in mmap to insert new region.
+	 *
+	 * 1 - Lower region VA end first.
+	 * 2 - Smaller region size first.
+	 *
+	 * VA  0                                   0xFF
+	 *
+	 * 1st |------|
+	 * 2nd |------------|
+	 * 3rd                 |------|
+	 * 4th                            |---|
+	 * 5th                                   |---|
+	 * 6th                            |----------|
+	 * 7th |-------------------------------------|
+	 *
+	 * This is required for overlapping regions only. It simplifies adding
+	 * regions with the loop in xlat_tables_init_internal because the outer
+	 * ones won't overwrite block or page descriptors of regions added
+	 * previously.
+	 *
+	 * Overlapping is only allowed for static regions.
+	 */
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
+	       && mm_cursor->size)
+		++mm_cursor;
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
+	       && (mm_cursor->size < mm->size))
+		++mm_cursor;
+
+	/* Make room for new region by moving other regions up by one place */
+	memmove(mm_cursor + 1, mm_cursor,
+		(uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+	/*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_last->size == 0);
+
+	*mm_cursor = *mm;
+
+	if (end_pa > ctx->max_pa)
+		ctx->max_pa = end_pa;
+	if (end_va > ctx->max_va)
+		ctx->max_va = end_va;
+}
+
+void mmap_add_region(unsigned long long base_pa,
+				uintptr_t base_va,
+				size_t size,
+				mmap_attr_t attr)
+{
+	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	while (mm->size) {
+		mmap_add_region_ctx(ctx, mm);
+		mm++;
+	}
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+	mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
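As a usage sketch for the static half of this API (illustrative only): a BL image built on the default tf context registered above fills in its regions with MAP_REGION() and mmap_add(), then calls the public init/enable helpers from xlat_tables_v2.h. The addresses, sizes and attribute combinations below are example assumptions.

#include <xlat_tables_v2.h>

static const mmap_region_t plat_example_mmap[] = {
	/* Example values: normal cacheable RAM for code and data. */
	MAP_REGION(0x80000000ULL, 0x80000000U, 0x04000000U,
		   MT_MEMORY | MT_RW),
	/* Example values: device registers, mapped execute-never. */
	MAP_REGION(0x1C000000ULL, 0x1C000000U, 0x00100000U,
		   MT_DEVICE | MT_RW | MT_NS),
	{0} /* Terminating entry, size == 0. */
};

static void plat_example_setup_xlat_v2(void)
{
	/* Populates the default 'tf' context used by mmap_add(). */
	mmap_add(plat_example_mmap);

	/* The image then calls e.g. init_xlat_tables() and enable_mmu_el3(). */
}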
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+	mmap_region_t *mm_cursor = ctx->mmap;
+	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1;
+	uintptr_t end_va = mm->base_va + mm->size - 1;
+	int ret;
+
+	/* Nothing to do */
+	if (!mm->size)
+		return 0;
+
+	/* Now this region is a dynamic one */
+	mm->attr |= MT_DYNAMIC;
+
+	ret = mmap_add_region_check(ctx, mm);
+	if (ret != 0)
+		return ret;
+
+	/*
+	 * Find the adequate entry in the mmap array in the same way done for
+	 * static regions in mmap_add_region_ctx().
+	 */
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1)
+					< end_va && mm_cursor->size)
+		++mm_cursor;
+
+	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
+				&& (mm_cursor->size < mm->size))
+		++mm_cursor;
+
+	/* Make room for new region by moving other regions up by one place */
+	memmove(mm_cursor + 1, mm_cursor,
+		     (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+	/*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check
+	 * that there is free space.
+	 */
+	assert(mm_last->size == 0);
+
+	*mm_cursor = *mm;
+
+	/*
+	 * Update the translation tables if the xlat tables are initialized. If
+	 * not, this region will be mapped when they are initialized.
+	 */
+	if (ctx->initialized) {
+		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
+				0, ctx->base_table, ctx->base_table_entries,
+				ctx->base_level);
+
+		/* Failed to map, remove mmap entry, unmap and return error. */
+		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
+			memmove(mm_cursor, mm_cursor + 1,
+				(uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+			/*
+			 * Check if the mapping function actually managed to map
+			 * anything. If not, just return now.
+			 */
+			if (mm_cursor->base_va >= end_va)
+				return -ENOMEM;
+
+			/*
+			 * Something went wrong after mapping some table
+			 * entries, undo every change done up to this point.
+			 */
+			mmap_region_t unmap_mm = {
+					.base_pa = 0,
+					.base_va = mm->base_va,
+					.size = end_va - mm->base_va,
+					.attr = 0
+			};
+			xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
+							ctx->base_table_entries, ctx->base_level);
+
+			return -ENOMEM;
+		}
+
+		/*
+		 * Make sure that all entries are written to the memory. There
+		 * is no need to invalidate entries when mapping dynamic regions
+		 * because new table/block/page descriptors only replace old
+		 * invalid descriptors, that aren't TLB cached.
+		 */
+		dsbishst();
+	}
+
+	if (end_pa > ctx->max_pa)
+		ctx->max_pa = end_pa;
+	if (end_va > ctx->max_va)
+		ctx->max_va = end_va;
+
+	return 0;
+}
+
+int mmap_add_dynamic_region(unsigned long long base_pa,
+			    uintptr_t base_va, size_t size, mmap_attr_t attr)
+{
+	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+/*
+ * Removes the region with given base Virtual Address and size from the given
+ * context.
+ *
+ * Returns:
+ *        0: Success.
+ *   EINVAL: Invalid values were used as arguments (region not found).
+ *    EPERM: Tried to remove a static region.
+ */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size)
+{
+	mmap_region_t *mm = ctx->mmap;
+	mmap_region_t *mm_last = mm + ctx->mmap_num;
+	int update_max_va_needed = 0;
+	int update_max_pa_needed = 0;
+
+	/* Check sanity of mmap array. */
+	assert(mm[ctx->mmap_num].size == 0);
+
+	while (mm->size) {
+		if ((mm->base_va == base_va) && (mm->size == size))
+			break;
+		++mm;
+	}
+
+	/* Check that the region was found */
+	if (mm->size == 0)
+		return -EINVAL;
+
+	/* If the region is static it can't be removed */
+	if (!(mm->attr & MT_DYNAMIC))
+		return -EPERM;
+
+	/* Check if this region is using the top VAs or PAs. */
+	if ((mm->base_va + mm->size - 1) == ctx->max_va)
+		update_max_va_needed = 1;
+	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+		update_max_pa_needed = 1;
+
+	/* Update the translation tables if needed */
+	if (ctx->initialized) {
+		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+					 ctx->base_table_entries,
+					 ctx->base_level);
+		xlat_arch_tlbi_va_sync();
+	}
+
+	/* Remove this region by moving the rest down by one place. */
+	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+
+	/* Check if we need to update the max VAs and PAs */
+	if (update_max_va_needed) {
+		ctx->max_va = 0;
+		mm = ctx->mmap;
+		while (mm->size) {
+			if ((mm->base_va + mm->size - 1) > ctx->max_va)
+				ctx->max_va = mm->base_va + mm->size - 1;
+			++mm;
+		}
+	}
+
+	if (update_max_pa_needed) {
+		ctx->max_pa = 0;
+		mm = ctx->mmap;
+		while (mm->size) {
+			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
+				ctx->max_pa = mm->base_pa + mm->size - 1;
+			++mm;
+		}
+	}
+
+	return 0;
+}
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+					base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
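When PLAT_XLAT_TABLES_DYNAMIC is enabled, the add/remove calls above return the error codes documented for mmap_add_region_check() instead of asserting, so callers are expected to check them. A hedged sketch of mapping and later unmapping a dynamic window follows; the address, size and attributes are illustrative assumptions.

#include <stddef.h>
#include <stdint.h>
#include <xlat_tables_v2.h>

static int plat_example_map_window(unsigned long long pa, uintptr_t va,
				   size_t size)
{
	/* May fail with e.g. -ENOMEM or -EPERM, as documented above. */
	int ret = mmap_add_dynamic_region(pa, va, size,
					  MT_DEVICE | MT_RW | MT_NS);
	if (ret != 0)
		return ret;

	/* ... access the mapping here ... */

	/* Only regions added as dynamic may be removed again. */
	return mmap_remove_dynamic_region(va, size);
}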
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+	int mem_type_index = ATTR_INDEX_GET(desc);
+	xlat_regime_t xlat_regime = ctx->xlat_regime;
+
+	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+		tf_printf("MEM");
+	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+		tf_printf("NC");
+	} else {
+		assert(mem_type_index == ATTR_DEVICE_INDEX);
+		tf_printf("DEV");
+	}
+
+	const char *priv_str = "(PRIV)";
+	const char *user_str = "(USER)";
+
+	/*
+	 * Showing Privileged vs Unprivileged only makes sense for EL1&0
+	 * mappings
+	 */
+	const char *ro_str = "-RO";
+	const char *rw_str = "-RW";
+	const char *no_access_str = "-NOACCESS";
+
+	if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the AP[2] bit is all that matters */
+		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
+	} else {
+		const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
+		tf_printf(ap_str);
+		tf_printf(priv_str);
+		/*
+		 * EL0 can only have the same permissions as EL1 or no
+		 * permissions at all.
+		 */
+		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+			  ? ap_str : no_access_str);
+		tf_printf(user_str);
+	}
+
+	const char *xn_str = "-XN";
+	const char *exec_str = "-EXEC";
+
+	if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the XN bit is all that matters */
+		tf_printf(LOWER_ATTRS(XN) & desc ? xn_str : exec_str);
+	} else {
+		/* For EL0 and EL1, we need to know who has which rights */
+		tf_printf(LOWER_ATTRS(PXN) & desc ? xn_str : exec_str);
+		tf_printf(priv_str);
+
+		tf_printf(LOWER_ATTRS(UXN) & desc ? xn_str : exec_str);
+		tf_printf(user_str);
+	}
+
+	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+}
+
+static const char * const level_spacers[] = {
+	"[LV0] ",
+	"  [LV1] ",
+	"    [LV2] ",
+	"      [LV3] "
+};
+
+static const char *invalid_descriptors_omitted =
+		"%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx,
+		const uintptr_t table_base_va,
+		uint64_t *const table_base, const int table_entries,
+		const unsigned int level)
+{
+	assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+	uint64_t desc;
+	uintptr_t table_idx_va = table_base_va;
+	int table_idx = 0;
+
+	size_t level_size = XLAT_BLOCK_SIZE(level);
+
+	/*
+	 * Keep track of how many invalid descriptors are counted in a row.
+	 * Whenever multiple invalid descriptors are found, only the first one
+	 * is printed, and a line is added to inform about how many descriptors
+	 * have been omitted.
+	 */
+	int invalid_row_count = 0;
+
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		if ((desc & DESC_MASK) == INVALID_DESC) {
+
+			if (invalid_row_count == 0) {
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+			}
+			invalid_row_count++;
+
+		} else {
+
+			if (invalid_row_count > 1) {
+				tf_printf(invalid_descriptors_omitted,
+					  level_spacers[level],
+					  invalid_row_count - 1);
+			}
+			invalid_row_count = 0;
+
+			/*
+			 * Check if this is a table or a block. Tables are only
+			 * allowed in levels other than 3, but DESC_PAGE has the
+			 * same value as DESC_TABLE, so we need to check.
+			 */
+			if (((desc & DESC_MASK) == TABLE_DESC) &&
+					(level < XLAT_TABLE_LEVEL_MAX)) {
+				/*
+				 * Do not print any PA for a table descriptor,
+				 * as it doesn't directly map physical memory
+				 * but instead points to the next translation
+				 * table in the translation table walk.
+				 */
+				tf_printf("%sVA:%p size:0x%zx\n",
+					  level_spacers[level],
+					  (void *)table_idx_va, level_size);
+
+				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+				xlat_tables_print_internal(ctx, table_idx_va,
+					(uint64_t *)addr_inner,
+					XLAT_TABLE_ENTRIES, level + 1);
+			} else {
+				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+					  level_spacers[level],
+					  (void *)table_idx_va,
+					  (unsigned long long)(desc & TABLE_ADDR_MASK),
+					  level_size);
+				xlat_desc_print(ctx, desc);
+				tf_printf("\n");
+			}
+		}
+
+		table_idx++;
+		table_idx_va += level_size;
+	}
+
+	if (invalid_row_count > 1) {
+		tf_printf(invalid_descriptors_omitted,
+			  level_spacers[level], invalid_row_count - 1);
+	}
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	const char *xlat_regime_str;
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		xlat_regime_str = "1&0";
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		xlat_regime_str = "3";
+	}
+	VERBOSE("Translation tables state:\n");
+	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
+	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
+	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
+	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
+	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);
+
+	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
+	VERBOSE("  Entries @initial lookup level: %i\n",
+		ctx->base_table_entries);
+
+	int used_page_tables;
+#if PLAT_XLAT_TABLES_DYNAMIC
+	used_page_tables = 0;
+	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+		if (ctx->tables_mapped_regions[i] != 0)
+			++used_page_tables;
+	}
+#else
+	used_page_tables = ctx->next_table;
+#endif
+	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
+		used_page_tables, ctx->tables_num,
+		ctx->tables_num - used_page_tables);
+
+	xlat_tables_print_internal(ctx, 0, ctx->base_table,
+				   ctx->base_table_entries, ctx->base_level);
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+}
+
+void init_xlat_tables_ctx(xlat_ctx_t *ctx)
+{
+	assert(ctx != NULL);
+	assert(!ctx->initialized);
+	assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
+	assert(!is_mmu_enabled_ctx(ctx));
+
+	mmap_region_t *mm = ctx->mmap;
+
+	print_mmap(mm);
+
+	/* All tables must be zeroed before mapping any region. */
+
+	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
+		ctx->base_table[i] = INVALID_DESC;
+
+	for (unsigned int j = 0; j < ctx->tables_num; j++) {
+#if PLAT_XLAT_TABLES_DYNAMIC
+		ctx->tables_mapped_regions[j] = 0;
+#endif
+		for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+			ctx->tables[j][i] = INVALID_DESC;
+	}
+
+	while (mm->size) {
+		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
+				ctx->base_table_entries, ctx->base_level);
+
+		if (end_va != mm->base_va + mm->size - 1) {
+			ERROR("Not enough memory to map region:\n"
+			      " VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+			      (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
+			panic();
+		}
+
+		mm++;
+	}
+
+	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
+	assert(ctx->max_va <= ctx->va_max_address);
+	assert(ctx->max_pa <= ctx->pa_max_address);
+
+	ctx->initialized = 1;
+
+	xlat_tables_print(ctx);
+}
+
+void init_xlat_tables(void)
+{
+	init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_secure(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+}
+
+#endif /* AARCH32 */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ *   Base address for the initial lookup level.
+ * xlat_table_base_entries
+ *   Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ *   Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+				       void *xlat_table_base,
+				       int xlat_table_base_entries,
+				       unsigned long long virt_addr_space_size,
+				       int *out_level)
+{
+	unsigned int start_level;
+	uint64_t *table;
+	int entries;
+
+	VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
+
+	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+	VERBOSE("Starting translation table walk from level %i\n", start_level);
+
+	table = xlat_table_base;
+	entries = xlat_table_base_entries;
+
+	for (unsigned int level = start_level;
+	     level <= XLAT_TABLE_LEVEL_MAX;
+	     ++level) {
+		int idx;
+		uint64_t desc;
+		uint64_t desc_type;
+
+		VERBOSE("Table address: %p\n", (void *)table);
+
+		idx = XLAT_TABLE_IDX(virtual_addr, level);
+		VERBOSE("Index into level %i table: %i\n", level, idx);
+		if (idx >= entries) {
+			VERBOSE("Invalid address\n");
+			return NULL;
+		}
+
+		desc = table[idx];
+		desc_type = desc & DESC_MASK;
+		VERBOSE("Descriptor at level %i: 0x%llx\n", level,
+				(unsigned long long)desc);
+
+		if (desc_type == INVALID_DESC) {
+			VERBOSE("Invalid entry (memory not mapped)\n");
+			return NULL;
+		}
+
+		if (level == XLAT_TABLE_LEVEL_MAX) {
+			/*
+			 * There can't be table entries at the final lookup
+			 * level.
+			 */
+			assert(desc_type == PAGE_DESC);
+			VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
+				(unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
+			*out_level = level;
+			return &table[idx];
+		}
+
+		if (desc_type == BLOCK_DESC) {
+			VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
+				(unsigned long long)XLAT_BLOCK_SIZE(level));
+			*out_level = level;
+			return &table[idx];
+		}
+
+		assert(desc_type == TABLE_DESC);
+		VERBOSE("Table descriptor, continuing xlat table walk...\n");
+		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+		entries = XLAT_TABLE_ENTRIES;
+	}
+
+	/*
+	 * This shouldn't be reached, the translation table walk should end at
+	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+	 */
+	assert(0);
+
+	return NULL;
+}
+
+
+static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
+		mmap_attr_t *attributes, uint64_t **table_entry,
+		unsigned long long *addr_pa, int *table_level)
+{
+	uint64_t *entry;
+	uint64_t desc;
+	int level;
+	unsigned long long virt_addr_space_size;
+
+	/*
+	 * Sanity-check arguments.
+	 */
+	assert(ctx != NULL);
+	assert(ctx->initialized);
+	assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+
+	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
+	assert(virt_addr_space_size > 0);
+
+	entry = find_xlat_table_entry(base_va,
+				ctx->base_table,
+				ctx->base_table_entries,
+				virt_addr_space_size,
+				&level);
+	if (entry == NULL) {
+		WARN("Address %p is not mapped.\n", (void *)base_va);
+		return -EINVAL;
+	}
+
+	if (addr_pa != NULL) {
+		*addr_pa = *entry & TABLE_ADDR_MASK;
+	}
+
+	if (table_entry != NULL) {
+		*table_entry = entry;
+	}
+
+	if (table_level != NULL) {
+		*table_level = level;
+	}
+
+	desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	VERBOSE("Attributes: ");
+	xlat_desc_print(ctx, desc);
+	tf_printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+	assert(attributes != NULL);
+	*attributes = 0;
+
+	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+		*attributes |= MT_MEMORY;
+	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+		*attributes |= MT_NON_CACHEABLE;
+	} else {
+		assert(attr_index == ATTR_DEVICE_INDEX);
+		*attributes |= MT_DEVICE;
+	}
+
+	int ap2_bit = (desc >> AP2_SHIFT) & 1;
+
+	if (ap2_bit == AP2_RW)
+		*attributes |= MT_RW;
+
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		int ap1_bit = (desc >> AP1_SHIFT) & 1;
+		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+			*attributes |= MT_USER;
+	}
+
+	int ns_bit = (desc >> NS_SHIFT) & 1;
+
+	if (ns_bit == 1)
+		*attributes |= MT_NS;
+
+	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+	if ((desc & xn_mask) == xn_mask) {
+		*attributes |= MT_EXECUTE_NEVER;
+	} else {
+		assert((desc & xn_mask) == 0);
+	}
+
+	return 0;
+}
+
+
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+		mmap_attr_t *attributes)
+{
+	return get_mem_attributes_internal(ctx, base_va, attributes,
+					   NULL, NULL, NULL);
+}
+
+
+int change_mem_attributes(xlat_ctx_t *ctx,
+			uintptr_t base_va,
+			size_t size,
+			mmap_attr_t attr)
+{
+	/* Note: This implementation isn't optimized. */
+
+	assert(ctx != NULL);
+	assert(ctx->initialized);
+
+	unsigned long long virt_addr_space_size =
+		(unsigned long long)ctx->va_max_address + 1;
+	assert(virt_addr_space_size > 0);
+
+	if (!IS_PAGE_ALIGNED(base_va)) {
+		WARN("%s: Address %p is not aligned on a page boundary.\n",
+		     __func__, (void *)base_va);
+		return -EINVAL;
+	}
+
+	if (size == 0) {
+		WARN("%s: Size is 0.\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((size % PAGE_SIZE) != 0) {
+		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
+		     __func__, size);
+		return -EINVAL;
+	}
+
+	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+		WARN("%s() doesn't allow remapping memory as both read-write and executable.\n",
+		     __func__);
+		return -EINVAL;
+	}
+
+	int pages_count = size / PAGE_SIZE;
+
+	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
+		pages_count, (void *)base_va);
+
+	uintptr_t base_va_original = base_va;
+
+	/*
+	 * Sanity checks.
+	 */
+	for (int i = 0; i < pages_count; ++i) {
+		uint64_t *entry;
+		uint64_t desc;
+		int level;
+
+		entry = find_xlat_table_entry(base_va,
+					      ctx->base_table,
+					      ctx->base_table_entries,
+					      virt_addr_space_size,
+					      &level);
+		if (entry == NULL) {
+			WARN("Address %p is not mapped.\n", (void *)base_va);
+			return -EINVAL;
+		}
+
+		desc = *entry;
+
+		/*
+		 * Check that all the required pages are mapped at page
+		 * granularity.
+		 */
+		if (((desc & DESC_MASK) != PAGE_DESC) ||
+			(level != XLAT_TABLE_LEVEL_MAX)) {
+			WARN("Address %p is not mapped at the right granularity.\n",
+			     (void *)base_va);
+			WARN("Granularity is 0x%llx, should be 0x%x.\n",
+			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+			return -EINVAL;
+		}
+
+		/*
+		 * If the region type is device, it shouldn't be executable.
+		 */
+		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+		if (attr_index == ATTR_DEVICE_INDEX) {
+			if ((attr & MT_EXECUTE_NEVER) == 0) {
+				WARN("Setting device memory as executable at address %p.\n",
+				     (void *)base_va);
+				return -EINVAL;
+			}
+		}
+
+		base_va += PAGE_SIZE;
+	}
+
+	/* Restore original value. */
+	base_va = base_va_original;
+
+	VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
+		__func__);
+
+	for (int i = 0; i < pages_count; ++i) {
+
+		mmap_attr_t old_attr, new_attr;
+		uint64_t *entry;
+		int level;
+		unsigned long long addr_pa;
+
+		get_mem_attributes_internal(ctx, base_va, &old_attr,
+					    &entry, &addr_pa, &level);
+
+		VERBOSE("Old attributes: 0x%x\n", old_attr);
+
+		/*
+		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
+		 * information is ignored.
+		 */
+
+		/* Clean the old attributes so that they can be rebuilt. */
+		new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+		/*
+		 * Update attributes, but filter out the ones this function
+		 * isn't allowed to change.
+		 */
+		new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+		VERBOSE("New attributes: 0x%x\n", new_attr);
+
+		/*
+		 * The break-before-make sequence requires writing an invalid
+		 * descriptor and making sure that the system sees the change
+		 * before writing the new descriptor.
+		 */
+		*entry = INVALID_DESC;
+
+		/* Invalidate any cached copy of this mapping in the TLBs. */
+		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+
+		/* Ensure completion of the invalidation. */
+		xlat_arch_tlbi_va_sync();
+
+		/* Write new descriptor */
+		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
+
+		base_va += PAGE_SIZE;
+	}
+
+	/* Ensure that the last descriptor written is seen by the system. */
+	dsbish();
+
+	return 0;
+}
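+
+/*
+ * Illustrative sketch (not part of the library): to make a single page of the
+ * active context read-only and non-executable, a caller could do the
+ * following, where BUF_VA is a hypothetical page-aligned virtual address that
+ * is already mapped at page granularity:
+ *
+ *   int ret = change_mem_attributes(&tf_xlat_ctx, BUF_VA, PAGE_SIZE,
+ *                                   MT_EXECUTE_NEVER);
+ *
+ * Only MT_RW, MT_EXECUTE_NEVER and MT_USER are taken from the attr argument,
+ * so leaving MT_RW out requests a read-only, privileged mapping.
+ */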
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
new file mode 100644
index 0000000..79efbeb
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_PRIVATE_H__
+#define __XLAT_TABLES_PRIVATE_H__
+
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Shifts and masks to access fields of an mmap_attr_t
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT		30 /* 31 would cause undefined behaviours */
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the mmap_attr_t enum.
+ */
+typedef enum {
+	/*
+	 * Regions mapped before the MMU can't be unmapped dynamically (they are
+	 * static) and regions mapped with MMU enabled can be unmapped. This
+	 * behaviour can't be overridden.
+	 *
+	 * Static regions can overlap each other, dynamic regions can't.
+	 */
+	MT_STATIC	= 0 << MT_DYN_SHIFT,
+	MT_DYNAMIC	= 1 << MT_DYN_SHIFT
+} mmap_priv_attr_t;
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified.
+ *
+ * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
+ * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
+ * the given translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
+ * pertaining to a higher exception level, e.g. invalidating EL3 entries from
+ * S-EL1.
+ */
+void xlat_arch_tlbi_va(uintptr_t va);
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
+
+/*
+ * This function has to be called at the end of any code that uses the function
+ * xlat_arch_tlbi_va().
+ */
+void xlat_arch_tlbi_va_sync(void);
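+
+/*
+ * Sketch of the break-before-make pattern these helpers support (mirroring
+ * the sequence used when a live descriptor is modified):
+ *
+ *   *entry = INVALID_DESC;                    (break)
+ *   xlat_arch_tlbi_va_regime(va, regime);     (invalidate stale TLB entries)
+ *   xlat_arch_tlbi_va_sync();                 (wait for completion)
+ *   *entry = new_desc;                        (make)
+ */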
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void print_mmap(mmap_region_t *const mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+int xlat_arch_current_el(void);
+
+/*
+ * Return the maximum physical address supported by the hardware.
+ * This value depends on the execution state (AArch32/AArch64).
+ */
+unsigned long long xlat_arch_get_max_supported_pa(void);
+
+/* Enable MMU and configure it to use the specified translation tables. */
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+		unsigned long long pa, uintptr_t max_va);
+
+/*
+ * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
+ * is enabled, 0 otherwise.
+ */
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+
+#endif /* __XLAT_TABLES_PRIVATE_H__ */
diff --git a/license.md b/license.md
deleted file mode 100644
index 941b741..0000000
--- a/license.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-
-* Neither the name of ARM nor the names of its contributors may be used to
-  endorse or promote products derived from this software without specific prior
-  written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/license.rst b/license.rst
new file mode 100644
index 0000000..c51e595
--- /dev/null
+++ b/license.rst
@@ -0,0 +1,38 @@
+Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+-  Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+-  Redistributions in binary form must reproduce the above copyright notice, this
+   list of conditions and the following disclaimer in the documentation and/or
+   other materials provided with the distribution.
+
+-  Neither the name of ARM nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific prior
+   written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------
+
+Note:
+Individual files contain the following tag instead of the full license text.
+
+::
+
+    SPDX-License-Identifier:    BSD-3-Clause
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: http://spdx.org/licenses/
diff --git a/maintainers.rst b/maintainers.rst
new file mode 100644
index 0000000..388073e
--- /dev/null
+++ b/maintainers.rst
@@ -0,0 +1,102 @@
+ARM Trusted Firmware Maintainers
+================================
+
+ARM Trusted Firmware is an ARM maintained project. All contributions are
+ultimately merged by the maintainers listed below. Technical ownership of some
+parts of the codebase is delegated to the sub-maintainers listed below. An
+acknowledgement from these sub-maintainers may be required before the
+maintainers merge a contribution.
+
+Maintainers
+-----------
+
+Dan Handley (dan.handley@arm.com, `danh-arm`_)
+
+David Cunado (david.cunado@arm.com, `davidcunado-arm`_)
+
+OPTEE and QEMU platform sub-maintainer
+--------------------------------------
+
+Jens Wiklander (jens.wiklander@linaro.org, `jenswi-linaro`_)
+
+Files:
+
+-  docs/spd/optee-dispatcher.md
+-  docs/plat/qemu.md
+-  services/spd/opteed/\*
+-  plat/qemu/\*
+
+TLK/Trusty SPDs and NVidia platforms sub-maintainer
+---------------------------------------------------
+
+Varun Wadekar (vwadekar@nvidia.com, `vwadekar`_)
+
+Files:
+
+-  docs/spd/tlk-dispatcher.md
+-  docs/spd/trusty-dispatcher.md
+-  include/bl32/payloads/tlk.h
+-  include/lib/cpus/aarch64/denver.h
+-  lib/cpus/aarch64/denver.S
+-  services/spd/tlkd/\*
+-  services/spd/trusty/\*
+-  plat/nvidia/\*
+
+eMMC/UFS drivers and HiSilicon platform sub-maintainer
+------------------------------------------------------
+
+Haojian Zhuang (haojian.zhuang@linaro.org, `hzhuang1`_)
+
+Files:
+
+-  docs/plat/hikey.md
+-  docs/plat/hikey960.md
+-  drivers/emmc/\*
+-  drivers/partition/\*
+-  drivers/synopsys/emmc/\*
+-  drivers/synopsys/ufs/\*
+-  drivers/ufs/\*
+-  include/drivers/dw\_ufs.h
+-  include/drivers/emmc.h
+-  include/drivers/ufs.h
+-  include/drivers/synopsys/dw\_mmc.h
+-  plat/hisilicon/\*
+
+MediaTek platform sub-maintainer
+--------------------------------
+
+Yidi Lin (林以廸 yidi.lin@mediatek.com, `mtk09422`_)
+
+Files:
+
+-  plat/mediatek/\*
+
+RockChip platform sub-maintainer
+--------------------------------
+
+Tony Xie (tony.xie@rock-chips.com, `TonyXie06`_
+or `rkchrome`_)
+
+Files:
+
+-  plat/rockchip/\*
+
+Xilinx platform sub-maintainer
+------------------------------
+
+Sören Brinkmann (soren.brinkmann@xilinx.com, `sorenb-xlnx`_)
+
+Files:
+
+-  docs/plat/xilinx-zynqmp.md
+-  plat/xilinx/\*
+
+.. _danh-arm: https://github.com/danh-arm
+.. _davidcunado-arm: https://github.com/davidcunado-arm
+.. _jenswi-linaro: https://github.com/jenswi-linaro
+.. _vwadekar: https://github.com/vwadekar
+.. _hzhuang1: https://github.com/hzhuang1
+.. _mtk09422: https://github.com/mtk09422
+.. _TonyXie06: https://github.com/TonyXie06
+.. _rkchrome: https://github.com/rkchrome
+.. _sorenb-xlnx: https://github.com/sorenb-xlnx
diff --git a/make_helpers/build_env.mk b/make_helpers/build_env.mk
new file mode 100644
index 0000000..83093bd
--- /dev/null
+++ b/make_helpers/build_env.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# This file contains the logic to identify and include any relevant
+# build environment specific make include files.
+
+ifndef BUILD_ENV_MK
+    BUILD_ENV_MK        :=      $(lastword $(MAKEFILE_LIST))
+
+    # Block possible built-in command definitions that are not fully portable.
+    # This traps occurrences that need replacing with our OS portable macros.
+    COPY                :=      $$(error "Replace COPY with call to SHELL_COPY or SHELL_COPY_TREE.")
+    CP                  :=      $$(error "Replace CP with call to SHELL_COPY or SHELL_COPY_TREE.")
+    DEL                 :=      $$(error "Replace DEL with call to SHELL_DELETE.")
+    MD                  :=      $$(error "Replace MD with call to MAKE_PREREQ_DIR.")
+    MKDIR               :=      $$(error "Replace MKDIR with call to MAKE_PREREQ_DIR.")
+    RD                  :=      $$(error "Replace RD with call to SHELL_REMOVE_DIR.")
+    RM                  :=      $$(error "Replace RM with call to SHELL_DELETE.")
+    RMDIR               :=      $$(error "Replace RMDIR with call to SHELL_REMOVE_DIR.")
+
+    ENV_FILE_TO_INCLUDE := unix.mk
+    ifdef OSTYPE
+        ifneq ($(findstring ${OSTYPE}, cygwin),)
+            ENV_FILE_TO_INCLUDE := cygwin.mk
+        else
+            ifneq ($(findstring ${OSTYPE}, MINGW32 mingw msys),)
+                ENV_FILE_TO_INCLUDE := msys.mk
+            endif
+        endif
+    else
+        ifdef MSYSTEM
+            # Although the MINGW MSYS shell sets OSTYPE as msys in its environment,
+            # it does not appear in the GNU make view of environment variables.
+            # We use MSYSTEM as an alternative, as that is seen by make.
+            ifneq ($(findstring ${MSYSTEM}, MINGW32 mingw msys),)
+                OSTYPE ?= msys
+                ENV_FILE_TO_INCLUDE := msys.mk
+            endif
+        else
+            ifdef OS
+                ifneq ($(findstring ${OS}, Windows_NT),)
+                    ENV_FILE_TO_INCLUDE := windows.mk
+                endif
+            endif
+        endif
+    endif
+    include ${MAKE_HELPERS_DIRECTORY}${ENV_FILE_TO_INCLUDE}
+    ENV_FILE_TO_INCLUDE :=
+
+    ifndef SHELL_COPY
+        $(error "SHELL_COPY not defined for build environment.")
+    endif
+    ifndef SHELL_COPY_TREE
+        $(error "SHELL_COPY_TREE not defined for build environment.")
+    endif
+    ifndef SHELL_DELETE_ALL
+        $(error "SHELL_DELETE_ALL not defined for build environment.")
+    endif
+    ifndef SHELL_DELETE
+        $(error "SHELL_DELETE not defined for build environment.")
+    endif
+    ifndef MAKE_PREREQ_DIR
+        $(error "MAKE_PREREQ_DIR not defined for build environment.")
+    endif
+    ifndef SHELL_REMOVE_DIR
+        $(error "SHELL_REMOVE_DIR not defined for build environment.")
+    endif
+
+endif
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
new file mode 100644
index 0000000..e59a64b
--- /dev/null
+++ b/make_helpers/build_macros.mk
@@ -0,0 +1,338 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Report an error if the eval make function is not available.
+$(eval eval_available := T)
+ifneq (${eval_available},T)
+    $(error This makefile only works with a Make program that supports $$(eval))
+endif
+
+# Some utility macros for manipulating awkward (whitespace) characters.
+blank			:=
+space			:=${blank} ${blank}
+
+# A user defined function to recursively search for a filename below a directory
+#    $1 is the directory root of the recursive search (blank for current directory).
+#    $2 is the file name to search for.
+define rwildcard
+$(strip $(foreach d,$(wildcard ${1}*),$(call rwildcard,${d}/,${2}) $(filter $(subst *,%,%${2}),${d})))
+endef
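+# For example, the platform helpers use
+#   $(call rwildcard,plat/,platform.mk)
+# to collect every platform makefile below the plat/ directory.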
+
+# This table is used in converting lower case to upper case.
+uppercase_table:=a,A b,B c,C d,D e,E f,F g,G h,H i,I j,J k,K l,L m,M n,N o,O p,P q,Q r,R s,S t,T u,U v,V w,W x,X y,Y z,Z
+
+# Internal macro used for converting lower case to upper case.
+#   $(1) = upper case table
+#   $(2) = String to convert
+define uppercase_internal
+$(if $(1),$$(subst $(firstword $(1)),$(call uppercase_internal,$(wordlist 2,$(words $(1)),$(1)),$(2))),$(2))
+endef
+
+# A macro for converting a string to upper case
+#   $(1) = String to convert
+define uppercase
+$(eval uppercase_result:=$(call uppercase_internal,$(uppercase_table),$(1)))$(uppercase_result)
+endef
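+# For example, $(call uppercase,bl2u) expands to BL2U; this is how the BL
+# stage argument is turned into the IMAGE_BL* define in the MAKE_C/MAKE_S
+# macros below.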
+
+# Convenience function for adding build definitions
+# $(eval $(call add_define,FOO)) will have:
+# -DFOO if $(FOO) is empty; -DFOO=$(FOO) otherwise
+define add_define
+    DEFINES			+=	-D$(1)$(if $(value $(1)),=$(value $(1)),)
+endef
+
+# Convenience function for adding build definitions
+# $(eval $(call add_define_val,FOO,BAR)) will have:
+# -DFOO=BAR
+define add_define_val
+    DEFINES			+=	-D$(1)=$(2)
+endef
+
+# Convenience function for verifying option has a boolean value
+# $(eval $(call assert_boolean,FOO)) will assert FOO is 0 or 1
+define assert_boolean
+    $(if $(filter-out 0 1,$($1)),$(error $1 must be boolean))
+endef
+
+0-9 := 0 1 2 3 4 5 6 7 8 9
+
+# Function to verify that a given option $(1) contains a numeric value
+define assert_numeric
+$(if $($(1)),,$(error $(1) must not be empty))
+$(eval __numeric := $($(1)))
+$(foreach d,$(0-9),$(eval __numeric := $(subst $(d),,$(__numeric))))
+$(if $(__numeric),$(error $(1) must be numeric))
+endef
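+# e.g. $(eval $(call assert_numeric,FIP_ALIGN)) fails the build unless
+# FIP_ALIGN is a non-empty string of decimal digits.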
+
+# IMG_LINKERFILE defines the linker script corresponding to a BL stage
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+define IMG_LINKERFILE
+    ${BUILD_DIR}/bl$(1).ld
+endef
+
+# IMG_MAPFILE defines the output file describing the memory map corresponding
+# to a BL stage
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+define IMG_MAPFILE
+    ${BUILD_DIR}/bl$(1).map
+endef
+
+# IMG_ELF defines the elf file corresponding to a BL stage
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+define IMG_ELF
+    ${BUILD_DIR}/bl$(1).elf
+endef
+
+# IMG_DUMP defines the symbols dump file corresponding to a BL stage
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+define IMG_DUMP
+    ${BUILD_DIR}/bl$(1).dump
+endef
+
+# IMG_BIN defines the default image file corresponding to a BL stage
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+define IMG_BIN
+    ${BUILD_PLAT}/bl$(1).bin
+endef
+
+# FIP_ADD_PAYLOAD appends the command line arguments required by fiptool
+# to package a new payload. Optionally, it adds the dependency on this payload
+#   $(1) = payload filename (i.e. bl31.bin)
+#   $(2) = command line option for the specified payload (i.e. --bl31)
+#   $(3) = fip target dependency (optional) (i.e. bl31)
+define FIP_ADD_PAYLOAD
+    $(eval FIP_ARGS += $(2) $(1))
+    $(eval $(if $(3),FIP_DEPS += $(3)))
+endef
+
+# CERT_ADD_CMD_OPT adds a new command line option to the cert_create invocation
+#   $(1) = parameter filename
+#   $(2) = cert_create command line option for the specified parameter
+#   $(3) = input parameter (false if empty)
+define CERT_ADD_CMD_OPT
+    $(eval $(if $(3),CRT_DEPS += $(1)))
+    $(eval CRT_ARGS += $(2) $(1))
+endef
+
+# FIP_ADD_IMG allows the platform to specify an image to be packed in the FIP
+# using a build option. It also adds a dependency on the image file, aborting
+# the build if the file does not exist.
+#   $(1) = build option to specify the image filename (SCP_BL2, BL33, etc)
+#   $(2) = command line option for fiptool (scp_bl2, bl33, etc)
+# Example:
+#   $(eval $(call FIP_ADD_IMG,BL33,--bl33))
+define FIP_ADD_IMG
+    CRT_DEPS += check_$(1)
+    FIP_DEPS += check_$(1)
+    $(call FIP_ADD_PAYLOAD,$(value $(1)),$(2))
+
+check_$(1):
+	$$(if $(value $(1)),,$$(error "Platform '${PLAT}' requires $(1). Please set $(1) to point to the right file"))
+endef
+
+# FWU_FIP_ADD_PAYLOAD appends the command line arguments required by fiptool
+# to package a new FWU payload. Optionally, it adds the dependency on this payload
+#   $(1) = payload filename (e.g. ns_bl2u.bin)
+#   $(2) = command line option for the specified payload (e.g. --fwu)
+#   $(3) = fip target dependency (optional) (e.g. ns_bl2u)
+define FWU_FIP_ADD_PAYLOAD
+    $(eval $(if $(3),FWU_FIP_DEPS += $(3)))
+    $(eval FWU_FIP_ARGS += $(2) $(1))
+endef
+
+# FWU_CERT_ADD_CMD_OPT adds a new command line option to the cert_create invocation
+#   $(1) = parameter filename
+#   $(2) = cert_create command line option for the specified parameter
+#   $(3) = input parameter (false if empty)
+define FWU_CERT_ADD_CMD_OPT
+    $(eval $(if $(3),FWU_CRT_DEPS += $(1)))
+    $(eval FWU_CRT_ARGS += $(2) $(1))
+endef
+
+# FWU_FIP_ADD_IMG allows the platform to pack a binary image in the FWU FIP
+#   $(1) build option to specify the image filename (BL2U, NS_BL2U, etc)
+#   $(2) command line option for fiptool (bl2u, ns_bl2u, etc)
+# Example:
+#   $(eval $(call FWU_FIP_ADD_IMG,BL2U,--bl2u))
+define FWU_FIP_ADD_IMG
+    FWU_CRT_DEPS += check_$(1)
+    FWU_FIP_DEPS += check_$(1)
+    $(call FWU_FIP_ADD_PAYLOAD,$(value $(1)),$(2))
+
+check_$(1):
+	$$(if $(value $(1)),,$$(error "Platform '${PLAT}' requires $(1). Please set $(1) to point to the right file"))
+endef
+
+################################################################################
+# Auxiliary macros to build TF images from sources
+################################################################################
+
+MAKE_DEP = -Wp,-MD,$(DEP) -MT $$@ -MP
+
+# MAKE_C builds a C source file and generates the dependency file
+#   $(1) = output directory
+#   $(2) = source file (%.c)
+#   $(3) = BL stage (2, 2u, 30, 31, 32, 33)
+define MAKE_C
+
+$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
+$(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
+$(eval IMAGE := IMAGE_BL$(call uppercase,$(3)))
+
+$(OBJ): $(2) | bl$(3)_dirs
+	@echo "  CC      $$<"
+	$$(Q)$$(CC) $$(TF_CFLAGS) $$(CFLAGS) -D$(IMAGE) $(MAKE_DEP) -c $$< -o $$@
+
+-include $(DEP)
+
+endef
+
+
+# MAKE_S builds an assembly source file and generates the dependency file
+#   $(1) = output directory
+#   $(2) = assembly file (%.S)
+#   $(3) = BL stage (2, 2u, 30, 31, 32, 33)
+define MAKE_S
+
+$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
+$(eval DEP := $(patsubst %.o,%.d,$(OBJ)))
+$(eval IMAGE := IMAGE_BL$(call uppercase,$(3)))
+
+$(OBJ): $(2) | bl$(3)_dirs
+	@echo "  AS      $$<"
+	$$(Q)$$(AS) $$(ASFLAGS) -D$(IMAGE) $(MAKE_DEP) -c $$< -o $$@
+
+-include $(DEP)
+
+endef
+
+
+# MAKE_LD generates the linker script using the C preprocessor
+#   $(1) = output linker script
+#   $(2) = input template
+#   $(3) = BL stage (2, 2u, 30, 31, 32, 33)
+define MAKE_LD
+
+$(eval DEP := $(1).d)
+
+$(1): $(2) | bl$(3)_dirs
+	@echo "  PP      $$<"
+	$$(Q)$$(CPP) $$(CPPFLAGS) -P -D__ASSEMBLY__ -D__LINKER__ $(MAKE_DEP) -o $$@ $$<
+
+-include $(DEP)
+
+endef
+
+
+# MAKE_OBJS builds both C and assembly source files
+#   $(1) = output directory
+#   $(2) = list of source files (both C and assembly)
+#   $(3) = BL stage (2, 30, 31, 32, 33)
+define MAKE_OBJS
+        $(eval C_OBJS := $(filter %.c,$(2)))
+        $(eval REMAIN := $(filter-out %.c,$(2)))
+        $(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3))))
+
+        $(eval S_OBJS := $(filter %.S,$(REMAIN)))
+        $(eval REMAIN := $(filter-out %.S,$(REMAIN)))
+        $(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3))))
+
+        $(and $(REMAIN),$(error Unexpected source files present: $(REMAIN)))
+endef
+
+
+# NOTE: The line continuation '\' is required in the next define, otherwise we
+# end up with a line-feed character at the end of the last C filename.
+# Also bear this issue in mind if extending the list of supported filetypes.
+define SOURCES_TO_OBJS
+        $(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \
+        $(notdir $(patsubst %.S,%.o,$(filter %.S,$(1))))
+endef
+
+
+# MAKE_TOOL_ARGS macro defines the command line arguments for fiptool for
+# each BL image. Arguments:
+#   $(1) = BL stage (2, 30, 31, 32, 33)
+#   $(2) = Binary file
+#   $(3) = FIP command line option (if empty, image will not be included in the FIP)
+define MAKE_TOOL_ARGS
+        $(if $(3),$(eval $(call FIP_ADD_PAYLOAD,$(2),--$(3),bl$(1))))
+endef
+
+# Allow overriding the timestamp, for example for reproducible builds, or to
+# synchronize timestamps across multiple projects.
+# This must be set to a C string (including quotes where applicable).
+BUILD_MESSAGE_TIMESTAMP ?= __TIME__", "__DATE__
+
+# MAKE_BL macro defines the targets and options to build each BL image.
+# Arguments:
+#   $(1) = BL stage (2, 2u, 30, 31, 32, 33)
+#   $(2) = FIP command line option (if empty, image will not be included in the FIP)
+define MAKE_BL
+        $(eval BUILD_DIR  := ${BUILD_PLAT}/bl$(1))
+        $(eval BL_SOURCES := $(BL$(call uppercase,$(1))_SOURCES))
+        $(eval SOURCES    := $(BL_SOURCES) $(BL_COMMON_SOURCES) $(PLAT_BL_COMMON_SOURCES))
+        $(eval OBJS       := $(addprefix $(BUILD_DIR)/,$(call SOURCES_TO_OBJS,$(SOURCES))))
+        $(eval LINKERFILE := $(call IMG_LINKERFILE,$(1)))
+        $(eval MAPFILE    := $(call IMG_MAPFILE,$(1)))
+        $(eval ELF        := $(call IMG_ELF,$(1)))
+        $(eval DUMP       := $(call IMG_DUMP,$(1)))
+        $(eval BIN        := $(call IMG_BIN,$(1)))
+        $(eval BL_LINKERFILE := $(BL$(call uppercase,$(1))_LINKERFILE))
+        # We use sort only to get a list of unique object directory names.
+        # Ordering is not relevant, but sort removes duplicates.
+        $(eval TEMP_OBJ_DIRS := $(sort $(dir ${OBJS} ${LINKERFILE})))
+        # The $(dir ) function leaves a trailing / on the directory names.
+        # Rip off the / to match directory names with make rule targets.
+        $(eval OBJ_DIRS   := $(patsubst %/,%,$(TEMP_OBJ_DIRS)))
+
+# Create generators for object directory structure
+
+$(eval $(call MAKE_PREREQ_DIR,${BUILD_DIR},))
+
+$(eval $(foreach objd,${OBJ_DIRS},$(call MAKE_PREREQ_DIR,${objd},${BUILD_DIR})))
+
+.PHONY : bl${1}_dirs
+
+# We use order-only prerequisites to ensure that directories are created,
+# but do not cause re-builds every time a file is written.
+bl${1}_dirs: | ${OBJ_DIRS}
+
+$(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),$(1)))
+$(eval $(call MAKE_LD,$(LINKERFILE),$(BL_LINKERFILE),$(1)))
+
+$(ELF): $(OBJS) $(LINKERFILE) | bl$(1)_dirs
+	@echo "  LD      $$@"
+ifdef MAKE_BUILD_STRINGS
+	$(call MAKE_BUILD_STRINGS, $(BUILD_DIR)/build_message.o)
+else
+	@echo 'const char build_message[] = "Built : "$(BUILD_MESSAGE_TIMESTAMP); \
+	       const char version_string[] = "${VERSION_STRING}";' | \
+		$$(CC) $$(TF_CFLAGS) $$(CFLAGS) -xc -c - -o $(BUILD_DIR)/build_message.o
+endif
+	$$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) -Map=$(MAPFILE) \
+		--script $(LINKERFILE) $(BUILD_DIR)/build_message.o $(OBJS) $(LDLIBS)
+
+$(DUMP): $(ELF)
+	@echo "  OD      $$@"
+	$${Q}$${OD} -dx $$< > $$@
+
+$(BIN): $(ELF)
+	@echo "  BIN     $$@"
+	$$(Q)$$(OC) -O binary $$< $$@
+	@${ECHO_BLANK_LINE}
+	@echo "Built $$@ successfully"
+	@${ECHO_BLANK_LINE}
+
+.PHONY: bl$(1)
+bl$(1): $(BIN) $(DUMP)
+
+all: bl$(1)
+
+$(eval $(call MAKE_TOOL_ARGS,$(1),$(BIN),$(2)))
+
+endef
+
diff --git a/make_helpers/cygwin.mk b/make_helpers/cygwin.mk
new file mode 100644
index 0000000..04a963f
--- /dev/null
+++ b/make_helpers/cygwin.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+# OS specific definitions for builds in a Cygwin environment.
+# Cygwin allows us to use unix style commands on a windows platform.
+
+ifndef CYGWIN_MK
+    CYGWIN_MK := $(lastword $(MAKEFILE_LIST))
+
+    include ${MAKE_HELPERS_DIRECTORY}unix.mk
+
+    # In cygwin executable files have the Windows .exe extension type.
+    BIN_EXT := .exe
+
+endif
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
new file mode 100644
index 0000000..412c3b7
--- /dev/null
+++ b/make_helpers/defaults.mk
@@ -0,0 +1,158 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Default, static values for build variables, listed in alphabetic order.
+# Dependencies between build options, if any, are handled in the top-level
+# Makefile, after this file is included. This ensures that the former is better
+# poised to handle dependencies, as all build variables would have a default
+# value by then.
+
+# The AArch32 Secure Payload to be built as BL32 image
+AARCH32_SP			:= none
+
+# The Target build architecture. Supported values are: aarch64, aarch32.
+ARCH				:= aarch64
+
+# ARM Architecture major and minor versions: 8.0 by default.
+ARM_ARCH_MAJOR			:= 8
+ARM_ARCH_MINOR			:= 0
+
+# Determine the version of ARM GIC architecture to use for interrupt management
+# in EL3. The platform port can change this value if needed.
+ARM_GIC_ARCH			:= 2
+
+# Flag used to indicate if ASM_ASSERTION should be enabled for the build.
+ASM_ASSERTION			:= 0
+
+# Base commit to perform code check on
+BASE_COMMIT			:= origin/master
+
+# By default, consider that the platform may release several CPUs out of reset.
+# The platform Makefile is free to override this value.
+COLD_BOOT_SINGLE_CPU		:= 0
+
+# For Chain of Trust
+CREATE_KEYS			:= 1
+
+# Build flag to include AArch32 registers in cpu context save and restore during
+# world switch. This flag must be set to 0 for AArch64-only platforms.
+CTX_INCLUDE_AARCH32_REGS	:= 1
+
+# Include FP registers in cpu context
+CTX_INCLUDE_FPREGS		:= 0
+
+# Debug build
+DEBUG				:= 0
+
+# Build platform
+DEFAULT_PLAT			:= fvp
+
+# Flag to enable Performance Measurement Framework
+ENABLE_PMF			:= 0
+
+# Flag to enable PSCI STATs functionality
+ENABLE_PSCI_STAT		:= 0
+
+# Flag to enable runtime instrumentation using PMF
+ENABLE_RUNTIME_INSTRUMENTATION	:= 0
+
+# Flag to enable stack corruption protection
+ENABLE_STACK_PROTECTOR		:= 0
+
+# Build flag to treat usage of deprecated platform and framework APIs as an error.
+ERROR_DEPRECATED		:= 0
+
+# Byte alignment that each component in FIP is aligned to
+FIP_ALIGN			:= 0
+
+# Default FIP file name
+FIP_NAME			:= fip.bin
+
+# Default FWU_FIP file name
+FWU_FIP_NAME			:= fwu_fip.bin
+
+# For Chain of Trust
+GENERATE_COT			:= 0
+
+# Hint platform interrupt control layer that Group 0 interrupts are for EL3. By
+# default, they are for Secure EL1.
+GICV2_G0_FOR_EL3		:= 0
+
+# Whether system coherency is managed in hardware, without explicit software
+# operations.
+HW_ASSISTED_COHERENCY		:= 0
+
+# Set the default algorithm for the generation of Trusted Board Boot keys
+KEY_ALG				:= rsa
+
+# Flag to enable new version of image loading
+LOAD_IMAGE_V2			:= 0
+
+# NS timer register save and restore
+NS_TIMER_SWITCH			:= 0
+
+# Build PL011 UART driver in minimal generic UART mode
+PL011_GENERIC_UART		:= 0
+
+# By default, consider that the platform's reset address is not programmable.
+# The platform Makefile is free to override this value.
+PROGRAMMABLE_RESET_ADDRESS	:= 0
+
+# Flag used to choose the power state format viz Extended State-ID or the
+# Original format.
+PSCI_EXTENDED_STATE_ID		:= 0
+
+# By default, BL1 acts as the reset handler, not BL31
+RESET_TO_BL31			:= 0
+
+# For Chain of Trust
+SAVE_KEYS			:= 0
+
+# Whether code and read-only data should be put on separate memory pages. The
+# platform Makefile is free to override this value.
+SEPARATE_CODE_AND_RODATA	:= 0
+
+# SPD choice
+SPD				:= none
+
+# Flag to introduce an infinite loop in BL1 just before it exits into the next
+# image. This is meant to help debugging the post-BL2 phase.
+SPIN_ON_BL1_EXIT		:= 0
+
+# Flags to build TF with Trusted Boot support
+TRUSTED_BOARD_BOOT		:= 0
+
+# Build option to choose whether Trusted Firmware uses coherent memory or not.
+USE_COHERENT_MEM		:= 1
+
+# Use tbbr_oid.h instead of platform_oid.h
+USE_TBBR_DEFS			= $(ERROR_DEPRECATED)
+
+# Build verbosity
+V				:= 0
+
+# Whether to enable D-Cache early during warm boot. This is usually
+# applicable to platforms wherein interconnect programming is not
+# required to enable cache coherency after warm reset (e.g. single-cluster
+# platforms).
+WARMBOOT_ENABLE_DCACHE_EARLY	:= 0
+
+# By default, enable Statistical Profiling Extensions.
+# The top level Makefile will disable this feature depending on
+# the target architecture and version number.
+ENABLE_SPE_FOR_LOWER_ELS	:= 1
+
+# SPE is enabled by default but only supported on AArch64 8.2 onwards.
+# Disable it in all other cases.
+ifeq (${ARCH},aarch32)
+    override ENABLE_SPE_FOR_LOWER_ELS := 0
+else
+    ifeq (${ARM_ARCH_MAJOR},8)
+        ifeq ($(ARM_ARCH_MINOR),$(filter $(ARM_ARCH_MINOR),0 1))
+            ENABLE_SPE_FOR_LOWER_ELS := 0
+        endif
+    endif
+endif
diff --git a/make_helpers/msys.mk b/make_helpers/msys.mk
new file mode 100644
index 0000000..7e60d57
--- /dev/null
+++ b/make_helpers/msys.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+# OS specific definitions for builds in a Mingw32 MSYS environment.
+# Mingw32 allows us to use some unix style commands on a windows platform.
+
+ifndef MSYS_MK
+    MSYS_MK := $(lastword $(MAKEFILE_LIST))
+
+    include ${MAKE_HELPERS_DIRECTORY}unix.mk
+
+    # In MSYS executable files have the Windows .exe extension type.
+    BIN_EXT := .exe
+
+endif
+
diff --git a/make_helpers/plat_helpers.mk b/make_helpers/plat_helpers.mk
new file mode 100644
index 0000000..a2f383a
--- /dev/null
+++ b/make_helpers/plat_helpers.mk
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+################################################################################
+# Helpers for finding and referencing platform directories
+################################################################################
+
+ifndef PLAT_HELPERS_MK
+    PLAT_HELPERS_MK := $(lastword $(MAKEFILE_LIST))
+
+    ifeq (${PLAT},)
+        $(error "Error: Unknown platform. Please use PLAT=<platform name> to specify the platform")
+    endif
+
+    # PLATFORM_ROOT can be overridden for when building tools directly
+    PLATFORM_ROOT               ?= plat/
+    PLAT_MAKEFILE               := platform.mk
+
+    # Generate the platforms list by recursively searching for all directories
+    # under /plat containing a PLAT_MAKEFILE. The platform names are then
+    # joined with a '|' character to build the list reported in error messages.
+    ALL_PLATFORM_MK_FILES       := $(call rwildcard,${PLATFORM_ROOT},${PLAT_MAKEFILE})
+    ALL_PLATFORM_DIRS           := $(patsubst %/,%,$(dir ${ALL_PLATFORM_MK_FILES}))
+    ALL_PLATFORMS               := $(sort $(notdir ${ALL_PLATFORM_DIRS}))
+
+    PLAT_MAKEFILE_FULL          := $(filter %/${PLAT}/${PLAT_MAKEFILE},${ALL_PLATFORM_MK_FILES})
+    PLATFORM_LIST               := $(subst ${space},|,${ALL_PLATFORMS})
+    ifeq ($(PLAT_MAKEFILE_FULL),)
+        $(error "Error: Invalid platform. The following platforms are available: ${PLATFORM_LIST}")
+    endif
+
+    # Record the directory where the platform make file was found.
+    PLAT_DIR                    := $(dir ${PLAT_MAKEFILE_FULL})
+
+endif
diff --git a/make_helpers/tbbr/tbbr_tools.mk b/make_helpers/tbbr/tbbr_tools.mk
new file mode 100644
index 0000000..712fa6f
--- /dev/null
+++ b/make_helpers/tbbr/tbbr_tools.mk
@@ -0,0 +1,128 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# This file defines the keys and certificates that must be created to establish
+# a Chain of Trust following the TBBR document. These definitions include the
+# command line options passed to the cert_create and fiptool commands.
+#
+# Expected environment:
+#
+#   BUILD_PLAT: output directory
+#   NEED_BL32: indicates whether BL32 is needed by the platform
+#   BL2: image filename (optional). Default is IMG_BIN(2) (see macro IMG_BIN)
+#   SCP_BL2: image filename (optional). Default is IMG_BIN(30)
+#   BL31: image filename (optional). Default is IMG_BIN(31)
+#   BL32: image filename (optional). Default is IMG_BIN(32)
+#   BL33: image filename (optional). Default is IMG_BIN(33)
+#
+# Build options added by this file:
+#
+#   KEY_ALG
+#   ROT_KEY
+#   TRUSTED_WORLD_KEY
+#   NON_TRUSTED_WORLD_KEY
+#   SCP_BL2_KEY
+#   BL31_KEY
+#   BL32_KEY
+#   BL33_KEY
+#
+
+# Certificate generation tool default parameters
+TRUSTED_KEY_CERT	:=	${BUILD_PLAT}/trusted_key.crt
+FWU_CERT		:=	${BUILD_PLAT}/fwu_cert.crt
+
+# Default non-volatile counter values (overridable by the platform)
+TFW_NVCTR_VAL		?=	0
+NTFW_NVCTR_VAL		?=	0
+
+# Pass the non-volatile counters to the cert_create tool
+$(eval $(call CERT_ADD_CMD_OPT,${TFW_NVCTR_VAL},--tfw-nvctr))
+$(eval $(call CERT_ADD_CMD_OPT,${NTFW_NVCTR_VAL},--ntfw-nvctr))
+
+# Add Trusted Key certificate to the fiptool and cert_create command line options
+$(eval $(call FIP_ADD_PAYLOAD,${TRUSTED_KEY_CERT},--trusted-key-cert))
+$(eval $(call CERT_ADD_CMD_OPT,${TRUSTED_KEY_CERT},--trusted-key-cert))
+
+# Add fwu certificate to the fiptool and cert_create command line options
+$(eval $(call FWU_FIP_ADD_PAYLOAD,${FWU_CERT},--fwu-cert))
+$(eval $(call FWU_CERT_ADD_CMD_OPT,${FWU_CERT},--fwu-cert))
+
+# Add the keys to the cert_create command line options (private keys are NOT
+# packed in the FIP). Developers can use their own keys by specifying the proper
+# build option in the command line when building the Trusted Firmware
+$(if ${KEY_ALG},$(eval $(call CERT_ADD_CMD_OPT,${KEY_ALG},--key-alg)))
+$(if ${ROT_KEY},$(eval $(call CERT_ADD_CMD_OPT,${ROT_KEY},--rot-key)))
+$(if ${ROT_KEY},$(eval $(call FWU_CERT_ADD_CMD_OPT,${ROT_KEY},--rot-key)))
+$(if ${TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${TRUSTED_WORLD_KEY},--trusted-world-key)))
+$(if ${NON_TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${NON_TRUSTED_WORLD_KEY},--non-trusted-world-key)))
+
+# Add the BL2 CoT (image cert + image)
+$(if ${BL2},$(eval $(call CERT_ADD_CMD_OPT,${BL2},--tb-fw,true)),\
+            $(eval $(call CERT_ADD_CMD_OPT,$(call IMG_BIN,2),--tb-fw,true)))
+$(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert))
+$(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert))
+
+# Add the SCP_BL2 CoT (key cert + img cert + image)
+ifneq (${SCP_BL2},)
+    $(eval $(call CERT_ADD_CMD_OPT,${SCP_BL2},--scp-fw,true))
+    $(if ${SCP_BL2_KEY},$(eval $(call CERT_ADD_CMD_OPT,${SCP_BL2_KEY},--scp-fw-key)))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/scp_fw_content.crt,--scp-fw-cert))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/scp_fw_key.crt,--scp-fw-key-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/scp_fw_content.crt,--scp-fw-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/scp_fw_key.crt,--scp-fw-key-cert))
+endif
+
+ifeq (${ARCH},aarch64)
+# Add the BL31 CoT (key cert + img cert + image)
+$(if ${BL31},$(eval $(call CERT_ADD_CMD_OPT,${BL31},--soc-fw,true)),\
+             $(eval $(call CERT_ADD_CMD_OPT,$(call IMG_BIN,31),--soc-fw,true)))
+$(if ${BL31_KEY},$(eval $(call CERT_ADD_CMD_OPT,${BL31_KEY},--soc-fw-key)))
+$(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/soc_fw_content.crt,--soc-fw-cert))
+$(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/soc_fw_key.crt,--soc-fw-key-cert))
+$(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/soc_fw_content.crt,--soc-fw-cert))
+$(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/soc_fw_key.crt,--soc-fw-key-cert))
+endif
+
+# Add the BL32 CoT (key cert + img cert + image)
+ifeq (${NEED_BL32},yes)
+    $(if ${BL32},$(eval $(call CERT_ADD_CMD_OPT,${BL32},--tos-fw,true)),\
+                 $(if ${BL32_SOURCES},$(eval $(call CERT_ADD_CMD_OPT,$(call IMG_BIN,32),--tos-fw,true))))
+    $(if ${BL32_KEY},$(eval $(call CERT_ADD_CMD_OPT,${BL32_KEY},--tos-fw-key)))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/tos_fw_content.crt,--tos-fw-cert))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/tos_fw_key.crt,--tos-fw-key-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/tos_fw_content.crt,--tos-fw-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/tos_fw_key.crt,--tos-fw-key-cert))
+ifneq (${BL32_EXTRA1},)
+    $(eval $(call CERT_ADD_CMD_OPT,${BL32_EXTRA1},--tos-fw-extra1,true))
+endif
+ifneq (${BL32_EXTRA2},)
+    $(eval $(call CERT_ADD_CMD_OPT,${BL32_EXTRA2},--tos-fw-extra2,true))
+endif
+endif
+
+# Add the BL33 CoT (key cert + img cert + image)
+ifneq (${BL33},)
+    $(eval $(call CERT_ADD_CMD_OPT,${BL33},--nt-fw,true))
+    $(if ${BL33_KEY},$(eval $(call CERT_ADD_CMD_OPT,${BL33_KEY},--nt-fw-key)))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/nt_fw_content.crt,--nt-fw-cert))
+    $(eval $(call CERT_ADD_CMD_OPT,${BUILD_PLAT}/nt_fw_key.crt,--nt-fw-key-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/nt_fw_content.crt,--nt-fw-cert))
+    $(eval $(call FIP_ADD_PAYLOAD,${BUILD_PLAT}/nt_fw_key.crt,--nt-fw-key-cert))
+endif
+
+# Add the BL2U image
+$(if ${BL2U},$(eval $(call FWU_CERT_ADD_CMD_OPT,${BL2U},--ap-fwu-cfg,true)),\
+     $(eval $(call FWU_CERT_ADD_CMD_OPT,$(call IMG_BIN,2u),--ap-fwu-cfg,true)))
+
+# Add the SCP_BL2U image
+ifneq (${SCP_BL2U},)
+    $(eval $(call FWU_CERT_ADD_CMD_OPT,${SCP_BL2U},--scp-fwu-cfg,true))
+endif
+
+# Add the NS_BL2U image
+ifneq (${NS_BL2U},)
+    $(eval $(call FWU_CERT_ADD_CMD_OPT,${NS_BL2U},--fwu,true))
+endif
diff --git a/make_helpers/unix.mk b/make_helpers/unix.mk
new file mode 100644
index 0000000..17f8a7c
--- /dev/null
+++ b/make_helpers/unix.mk
@@ -0,0 +1,60 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+# Trusted Firmware shell command definitions for a Unix style environment.
+
+ifndef UNIX_MK
+    UNIX_MK := $(lastword $(MAKEFILE_LIST))
+
+    ECHO_BLANK_LINE := echo
+
+    DIR_DELIM := /
+    PATH_SEP := :
+
+    # These defines provide Unix style equivalents of the shell commands
+    # required by the Trusted Firmware build environment.
+
+    # ${1} is the file to be copied.
+    # ${2} is the destination file name.
+    define SHELL_COPY
+	${Q}cp -f  "${1}"  "${2}"
+    endef
+
+    # ${1} is the directory to be copied.
+    # ${2} is the destination directory path.
+    define SHELL_COPY_TREE
+	${Q}cp -rf  "${1}"  "${2}"
+    endef
+
+    # ${1} is the file to be deleted.
+    define SHELL_DELETE
+	-${Q}rm -f  "${1}"
+    endef
+
+    # ${1} is a space delimited list of files to be deleted.
+    # Note that we do not quote ${1}, as multiple parameters may be passed.
+    define SHELL_DELETE_ALL
+	-${Q}rm -rf  ${1}
+    endef
+
+    # ${1} is the directory to be generated.
+    # ${2} is optional, and allows a prerequisite to be specified.
+    # Do nothing if $1 == $2, to ignore self dependencies.
+    define MAKE_PREREQ_DIR
+        ifneq (${1},${2})
+
+${1} : ${2}
+	${Q}mkdir -p  "${1}"
+
+        endif
+    endef
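+
+    # Illustrative use of MAKE_PREREQ_DIR (variable names below are only an
+    # example; the real call sites live elsewhere in the build system). The
+    # macro is expanded with $(eval $(call ...)) so that it generates a rule
+    # which creates the directory:
+    #
+    #   $(eval $(call MAKE_PREREQ_DIR,${SOME_BUILD_DIR},${SOME_PARENT_DIR}))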
+
+    define SHELL_REMOVE_DIR
+	-${Q}rm -rf  "${1}"
+    endef
+
+endif
diff --git a/make_helpers/windows.mk b/make_helpers/windows.mk
new file mode 100644
index 0000000..69f6a01
--- /dev/null
+++ b/make_helpers/windows.mk
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+# OS specific parts for builds in a Windows_NT environment. The environment
+# variable OS is set to Windows_NT on all modern Windows platforms.
+
+# Include generic Windows command definitions.
+
+ifndef WINDOWS_MK
+    WINDOWS_MK := $(lastword $(MAKEFILE_LIST))
+
+    ECHO_BLANK_LINE := @cmd /c echo.
+    DIR_DELIM := $(strip \)
+    BIN_EXT   := .exe
+    PATH_SEP  := ;
+
+    # Some Windows native commands have a problem with the directory delimiter:
+    # Make uses / (slash) while the commands expect \ (backslash).
+    # We have to provide a means of translating these, so we define local functions.
+
+    # ${1} is the file to be copied.
+    # ${2} is the destination file name.
+    define SHELL_COPY
+	$(eval tmp_from_file:=$(subst /,\,${1}))
+	$(eval tmp_to_file:=$(subst /,\,${2}))
+	copy "${tmp_from_file}" "${tmp_to_file}"
+    endef
+
+    # ${1} is the directory to be copied.
+    # ${2} is the destination directory path.
+    define SHELL_COPY_TREE
+	$(eval tmp_from_dir:=$(subst /,\,${1}))
+	$(eval tmp_to_dir:=$(subst /,\,${2}))
+	xcopy /HIVE "${tmp_from_dir}" "${tmp_to_dir}"
+    endef
+
+    # ${1} is the file to be deleted.
+    define SHELL_DELETE
+	$(eval tmp_del_file:=$(subst /,\,${1}))
+	-@if exist $(tmp_del_file)  del /Q $(tmp_del_file)
+    endef
+
+    # ${1} is a space delimited list of files to be deleted.
+    define SHELL_DELETE_ALL
+	$(eval $(foreach filename,$(wildcard ${1}),$(call DELETE_IF_THERE,${filename})))
+    endef
+
+    # ${1} is the directory to be generated.
+    # ${2} is optional, and allows prerequisites to be specified.
+    # Do nothing if $1 == $2, to ignore self dependencies.
+    define MAKE_PREREQ_DIR
+        ifneq (${1},${2})
+
+${1} : ${2}
+	$(eval tmp_dir:=$(subst /,\,${1}))
+	-@if not exist "$(tmp_dir)"  mkdir "${tmp_dir}"
+
+        endif
+    endef
+
+    # ${1} is the directory to be removed.
+    define SHELL_REMOVE_DIR
+	$(eval tmp_dir:=$(subst /,\,${1}))
+	-@if exist "$(tmp_dir)"  rd /Q /S "$(tmp_dir)"
+    endef
+
+endif
+
+# Because git is not available from CMD.EXE, we need to avoid
+# the BUILD_STRING generation which uses git.
+# For now we use "development build".
+# This can be overridden from the command line or environment.
+BUILD_STRING ?= development build
+
+# The DOS echo shell command does not strip ' characters from the command
+# parameters before printing. We therefore use an alternative method invoked
+# by defining the MAKE_BUILD_STRINGS macro.
+BUILT_TIME_DATE_STRING = const char build_message[] = "Built : "${BUILD_MESSAGE_TIMESTAMP};
+VERSION_STRING_MESSAGE = const char version_string[] = "${VERSION_STRING}";
+define MAKE_BUILD_STRINGS
+	@echo $$(BUILT_TIME_DATE_STRING) $$(VERSION_STRING_MESSAGE) | \
+		$$(CC) $$(TF_CFLAGS) $$(CFLAGS) -x c -c - -o $1
+endef
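+
+# Illustrative invocation of MAKE_BUILD_STRINGS (the real call site lives in
+# the common build rules, where ${1} is the object file to generate; the path
+# below is only an example):
+#
+#   $(call MAKE_BUILD_STRINGS,$(BUILD_PLAT)/build_message.o)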
+
diff --git a/plat/arm/board/common/aarch32/board_arm_helpers.S b/plat/arm/board/common/aarch32/board_arm_helpers.S
new file mode 100644
index 0000000..d64da29
--- /dev/null
+++ b/plat/arm/board/common/aarch32/board_arm_helpers.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <v2m_def.h>
+
+	.globl  plat_report_exception
+
+
+	/* -------------------------------------------------------
+	 * void plat_report_exception(unsigned int type)
+	 * Function to report an unhandled exception
+	 * with platform-specific means.
+	 * On FVP platform, it updates the LEDs
+	 * to indicate where we are.
+	 * SYS_LED[0]   - 0x0
+	 * SYS_LED[2:1] - 0x0
+	 * SYS_LED[7:3] - Exception Mode.
+	 * Clobbers: r0-r1
+	 * -------------------------------------------------------
+	 */
+func plat_report_exception
+	lsl	r0, r0, #V2M_SYS_LED_EC_SHIFT
+	ldr	r1, =V2M_SYSREGS_BASE
+	add	r1, r1, #V2M_SYS_LED
+	str	r0, [r1]
+	bx	lr
+endfunc plat_report_exception
diff --git a/plat/arm/board/common/aarch64/board_arm_helpers.S b/plat/arm/board/common/aarch64/board_arm_helpers.S
new file mode 100644
index 0000000..4385e62
--- /dev/null
+++ b/plat/arm/board/common/aarch64/board_arm_helpers.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <v2m_def.h>
+
+	.globl  plat_report_exception
+
+
+	/* ---------------------------------------------
+	 * void plat_report_exception(unsigned int type)
+	 * Function to report an unhandled exception
+	 * with platform-specific means.
+	 * On FVP platform, it updates the LEDs
+	 * to indicate where we are
+	 * ---------------------------------------------
+	 */
+func plat_report_exception
+	mrs	x1, CurrentEl
+	lsr	x1, x1, #MODE_EL_SHIFT
+	lsl	x1, x1, #V2M_SYS_LED_EL_SHIFT
+	lsl	x0, x0, #V2M_SYS_LED_EC_SHIFT
+	mov	x2, #(SECURE << V2M_SYS_LED_SS_SHIFT)
+	orr	x0, x0, x2
+	orr	x0, x0, x1
+	mov	x1, #V2M_SYSREGS_BASE
+	add	x1, x1, #V2M_SYS_LED
+	str	w0, [x1]
+	ret
+endfunc plat_report_exception
diff --git a/plat/arm/board/common/board_arm_trusted_boot.c b/plat/arm/board/common/board_arm_trusted_boot.c
new file mode 100644
index 0000000..6c03c91
--- /dev/null
+++ b/plat/arm/board/common/board_arm_trusted_boot.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <assert.h>
+#include <cassert.h>
+#include <platform.h>
+#include <stdint.h>
+#include <string.h>
+#include <tbbr_oid.h>
+
+/* SHA256 algorithm */
+#define SHA256_BYTES			32
+
+/* ROTPK locations */
+#define ARM_ROTPK_REGS_ID		1
+#define ARM_ROTPK_DEVEL_RSA_ID		2
+#define ARM_ROTPK_DEVEL_ECDSA_ID	3
+
+static const unsigned char rotpk_hash_hdr[] =		\
+		"\x30\x31\x30\x0D\x06\x09\x60\x86\x48"	\
+		"\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20";
+static const unsigned int rotpk_hash_hdr_len = sizeof(rotpk_hash_hdr) - 1;
+static unsigned char rotpk_hash_der[sizeof(rotpk_hash_hdr) - 1 + SHA256_BYTES];
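+
+/*
+ * For reference, the DER header above decodes as follows (19 bytes in total):
+ *   30 31                              - SEQUENCE, 49 bytes (DigestInfo)
+ *   30 0D                              - SEQUENCE, 13 bytes (AlgorithmIdentifier)
+ *   06 09 60 86 48 01 65 03 04 02 01   - OID 2.16.840.1.101.3.4.2.1 (id-sha256)
+ *   05 00                              - NULL (algorithm parameters)
+ *   04 20                              - OCTET STRING, 32 bytes (the SHA-256
+ *                                        digest, appended at run time below)
+ */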
+
+/* Use the cryptocell variants if Cryptocell is present */
+#if !ARM_CRYPTOCELL_INTEG
+#if !ARM_ROTPK_LOCATION_ID
+  #error "ARM_ROTPK_LOCATION_ID not defined"
+#endif
+
+/* Weak definition may be overridden in specific platform */
+#pragma weak plat_get_nv_ctr
+#pragma weak plat_set_nv_ctr
+
+#if (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_RSA_ID)
+static const unsigned char arm_devel_rotpk_hash[] =	\
+		"\xB0\xF3\x82\x09\x12\x97\xD8\x3A"	\
+		"\x37\x7A\x72\x47\x1B\xEC\x32\x73"	\
+		"\xE9\x92\x32\xE2\x49\x59\xF6\x5E"	\
+		"\x8B\x4A\x4A\x46\xD8\x22\x9A\xDA";
+#elif (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_ECDSA_ID)
+static const unsigned char arm_devel_rotpk_hash[] =	\
+		"\x2E\x40\xBF\x6E\xF9\x12\xBB\x98"	\
+		"\x31\x71\x09\x0E\x1E\x15\x3D\x0B"	\
+		"\xFD\xD1\xCC\x69\x4A\x98\xEB\x8B"	\
+		"\xA0\xB0\x20\x86\x4E\x6C\x07\x17";
+#endif
+
+/*
+ * Return the ROTPK hash in the following ASN.1 structure in DER format:
+ *
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm         OBJECT IDENTIFIER,
+ *     parameters        ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm   AlgorithmIdentifier,
+ *     digest            OCTET STRING
+ * }
+ */
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	uint8_t *dst;
+
+	assert(key_ptr != NULL);
+	assert(key_len != NULL);
+	assert(flags != NULL);
+
+	/* Copy the DER header */
+	memcpy(rotpk_hash_der, rotpk_hash_hdr, rotpk_hash_hdr_len);
+	dst = (uint8_t *)&rotpk_hash_der[rotpk_hash_hdr_len];
+
+#if (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_RSA_ID) \
+	|| (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_ECDSA_ID)
+	memcpy(dst, arm_devel_rotpk_hash, SHA256_BYTES);
+#elif (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_REGS_ID)
+	uint32_t *src, tmp;
+	unsigned int words, i;
+
+	/*
+	 * Append the hash from Trusted Root-Key Storage registers. The hash has
+	 * not been written linearly into the registers, so we have to do a bit
+	 * of byte swapping:
+	 *
+	 *     0x00    0x04    0x08    0x0C    0x10    0x14    0x18    0x1C
+	 * +---------------------------------------------------------------+
+	 * | Reg0  | Reg1  | Reg2  | Reg3  | Reg4  | Reg5  | Reg6  | Reg7  |
+	 * +---------------------------------------------------------------+
+	 *  | ...                    ... |   | ...                   ...  |
+	 *  |       +--------------------+   |                    +-------+
+	 *  |       |                        |                    |
+	 *  +----------------------------+   +----------------------------+
+	 *          |                    |                        |       |
+	 *  +-------+                    |   +--------------------+       |
+	 *  |                            |   |                            |
+	 *  v                            v   v                            v
+	 * +---------------------------------------------------------------+
+	 * |                               |                               |
+	 * +---------------------------------------------------------------+
+	 *  0                           15  16                           31
+	 *
+	 * Additionally, we have to access the registers in 32-bit words
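+	 *
+	 * For example, the first byte written to rotpk_hash_der after the
+	 * header is the most significant byte of the (little-endian) word read
+	 * from Reg3, and byte 15 is the least significant byte of Reg0; bytes
+	 * 16-31 repeat the pattern for Reg4-Reg7.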
+	 */
+	words = SHA256_BYTES >> 3;
+
+	/* Swap bytes 0-15 (first four registers) */
+	src = (uint32_t *)TZ_PUB_KEY_HASH_BASE;
+	for (i = 0 ; i < words ; i++) {
+		tmp = src[words - 1 - i];
+		/* Words are read in little endian */
+		*dst++ = (uint8_t)((tmp >> 24) & 0xFF);
+		*dst++ = (uint8_t)((tmp >> 16) & 0xFF);
+		*dst++ = (uint8_t)((tmp >> 8) & 0xFF);
+		*dst++ = (uint8_t)(tmp & 0xFF);
+	}
+
+	/* Swap bytes 16-31 (last four registers) */
+	src = (uint32_t *)(TZ_PUB_KEY_HASH_BASE + SHA256_BYTES / 2);
+	for (i = 0 ; i < words ; i++) {
+		tmp = src[words - 1 - i];
+		*dst++ = (uint8_t)((tmp >> 24) & 0xFF);
+		*dst++ = (uint8_t)((tmp >> 16) & 0xFF);
+		*dst++ = (uint8_t)((tmp >> 8) & 0xFF);
+		*dst++ = (uint8_t)(tmp & 0xFF);
+	}
+#endif /* (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_RSA_ID) \
+		  || (ARM_ROTPK_LOCATION_ID == ARM_ROTPK_DEVEL_ECDSA_ID) */
+
+	*key_ptr = (void *)rotpk_hash_der;
+	*key_len = (unsigned int)sizeof(rotpk_hash_der);
+	*flags = ROTPK_IS_HASH;
+	return 0;
+}
+
+/*
+ * Return the non-volatile counter value stored in the platform. The cookie
+ * will contain the OID of the counter in the certificate.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	const char *oid;
+	uint32_t *nv_ctr_addr;
+
+	assert(cookie != NULL);
+	assert(nv_ctr != NULL);
+
+	oid = (const char *)cookie;
+	if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = (uint32_t *)TFW_NVCTR_BASE;
+	} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = (uint32_t *)NTFW_CTR_BASE;
+	} else {
+		return 1;
+	}
+
+	*nv_ctr = (unsigned int)(*nv_ctr_addr);
+
+	return 0;
+}
+
+/*
+ * Store a new non-volatile counter value. By default on ARM development
+ * platforms, the non-volatile counters are RO and cannot be modified. We expect
+ * the values in the certificates to always match the RO values so that this
+ * function is never called.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 1;
+}
+#else /* ARM_CRYPTOCELL_INTEG */
+
+#include <nvm.h>
+#include <nvm_otp.h>
+#include <sbrom_bsv_api.h>
+
+CASSERT(HASH_RESULT_SIZE_IN_BYTES == SHA256_BYTES,
+		assert_mismatch_in_hash_result_size);
+
+/*
+ * Return the ROTPK hash in the following ASN.1 structure in DER format:
+ *
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm         OBJECT IDENTIFIER,
+ *     parameters        ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm   AlgorithmIdentifier,
+ *     digest            OCTET STRING
+ * }
+ */
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	unsigned char *dst;
+	CCError_t error;
+	uint32_t lcs;
+
+	assert(key_ptr != NULL);
+	assert(key_len != NULL);
+	assert(flags != NULL);
+
+	error = NVM_GetLCS(PLAT_CRYPTOCELL_BASE, &lcs);
+	if (error != CC_OK)
+		return 1;
+
+	/* If the lifecycle state is `SD`, return failure */
+	if (lcs == CC_BSV_SECURITY_DISABLED_LCS)
+		return 1;
+
+	/*
+	 * If the lifecycle state is `CM` or `DM`, ROTPK shouldn't be verified.
+	 * Return success after setting ROTPK_NOT_DEPLOYED flag
+	 */
+	if ((lcs == CC_BSV_CHIP_MANUFACTURE_LCS) ||
+			(lcs == CC_BSV_DEVICE_MANUFACTURE_LCS)) {
+		*key_len = 0;
+		*flags = ROTPK_NOT_DEPLOYED;
+		return 0;
+	}
+
+	/* Copy the DER header */
+	memcpy(rotpk_hash_der, rotpk_hash_hdr, rotpk_hash_hdr_len);
+	dst = &rotpk_hash_der[rotpk_hash_hdr_len];
+	error = NVM_ReadHASHPubKey(PLAT_CRYPTOCELL_BASE,
+			CC_SB_HASH_BOOT_KEY_256B,
+			(uint32_t *)dst, HASH_RESULT_SIZE_IN_WORDS);
+	if (error != CC_OK)
+		return 1;
+
+	*key_ptr = rotpk_hash_der;
+	*key_len = sizeof(rotpk_hash_der);
+	*flags = ROTPK_IS_HASH;
+	return 0;
+}
+
+/*
+ * Return the non-volatile counter value stored in the platform. The cookie
+ * specifies the OID of the counter in the certificate.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	CCError_t error = CC_FAIL;
+
+	if (strcmp(cookie, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		error = NVM_GetSwVersion(PLAT_CRYPTOCELL_BASE,
+				CC_SW_VERSION_COUNTER1, nv_ctr);
+	} else if (strcmp(cookie, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		error = NVM_GetSwVersion(PLAT_CRYPTOCELL_BASE,
+				CC_SW_VERSION_COUNTER2, nv_ctr);
+	}
+
+	return (error != CC_OK);
+}
+
+/*
+ * Store a new non-volatile counter value in the counter specified by the OID
+ * in the cookie. This function is not expected to be called if the Lifecycle
+ * state is RMA as the values in the certificate are expected to always match
+ * the nvcounter values. But if called when the LCS is RMA, the underlying
+ * helper functions will return success but without updating the counter.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	CCError_t error = CC_FAIL;
+
+	if (strcmp(cookie, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		error = NVM_SetSwVersion(PLAT_CRYPTOCELL_BASE,
+				CC_SW_VERSION_COUNTER1, nv_ctr);
+	} else if (strcmp(cookie, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		error = NVM_SetSwVersion(PLAT_CRYPTOCELL_BASE,
+				CC_SW_VERSION_COUNTER2, nv_ctr);
+	}
+
+	return (error != CC_OK);
+}
+
+#endif /* ARM_CRYPTOCELL_INTEG */
diff --git a/plat/arm/board/common/board_common.mk b/plat/arm/board/common/board_common.mk
new file mode 100644
index 0000000..d63ae9a
--- /dev/null
+++ b/plat/arm/board/common/board_common.mk
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		+=	-Iinclude/plat/arm/board/common/			\
+				-Iinclude/plat/arm/board/common/drivers
+
+PLAT_BL_COMMON_SOURCES	+=	drivers/arm/pl011/${ARCH}/pl011_console.S		\
+				plat/arm/board/common/${ARCH}/board_arm_helpers.S
+
+BL1_SOURCES		+=	plat/arm/board/common/drivers/norflash/norflash.c
+
+BL2_SOURCES		+=	lib/utils/mem_region.c					\
+				plat/arm/common/arm_nor_psci_mem_protect.c		\
+				plat/arm/board/common/drivers/norflash/norflash.c
+
+BL31_SOURCES		+=	lib/utils/mem_region.c					\
+				plat/arm/board/common/drivers/norflash/norflash.c	\
+				plat/arm/common/arm_nor_psci_mem_protect.c
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+  ifneq (${ARM_CRYPTOCELL_INTEG}, 1)
+    # ROTPK hash location
+    ifeq (${ARM_ROTPK_LOCATION}, regs)
+        ARM_ROTPK_LOCATION_ID = ARM_ROTPK_REGS_ID
+    else ifeq (${ARM_ROTPK_LOCATION}, devel_rsa)
+        KEY_ALG := rsa
+        ARM_ROTPK_LOCATION_ID = ARM_ROTPK_DEVEL_RSA_ID
+    else ifeq (${ARM_ROTPK_LOCATION}, devel_ecdsa)
+        KEY_ALG := ecdsa
+        ARM_ROTPK_LOCATION_ID = ARM_ROTPK_DEVEL_ECDSA_ID
+    else
+        $(error "Unsupported ARM_ROTPK_LOCATION value")
+    endif
+    $(eval $(call add_define,ARM_ROTPK_LOCATION_ID))
+
+    # Certificate NV-Counters. Use values corresponding to tied off values in
+    # ARM development platforms
+    TFW_NVCTR_VAL	?=	31
+    NTFW_NVCTR_VAL	?=	223
+  else
+    # Certificate NV-Counters when CryptoCell is integrated. For development
+    # platforms we set the counter to first valid value.
+    TFW_NVCTR_VAL	?=	0
+    NTFW_NVCTR_VAL	?=	0
+  endif
+    BL1_SOURCES		+=	plat/arm/board/common/board_arm_trusted_boot.c
+    BL2_SOURCES		+=	plat/arm/board/common/board_arm_trusted_boot.c
+endif
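+
+# Illustrative build invocation exercising the options above (the exact
+# command line depends on the platform and the key material in use):
+#
+#   make PLAT=fvp TRUSTED_BOARD_BOOT=1 GENERATE_COT=1 \
+#        ARM_ROTPK_LOCATION=devel_rsa \
+#        ROT_KEY=plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem all fip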
+
+# This flag controls whether memory usage needs to be optimised
+ARM_BOARD_OPTIMISE_MEM	?=	0
+
+# Process flags
+$(eval $(call assert_boolean,ARM_BOARD_OPTIMISE_MEM))
+$(eval $(call add_define,ARM_BOARD_OPTIMISE_MEM))
diff --git a/plat/arm/board/common/board_css.mk b/plat/arm/board/common/board_css.mk
new file mode 100644
index 0000000..5ac213a
--- /dev/null
+++ b/plat/arm/board/common/board_css.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/board/common/board_css_common.c
+
+include plat/arm/board/common/board_common.mk
diff --git a/plat/arm/board/common/board_css_common.c b/plat/arm/board/common/board_css_common.c
new file mode 100644
index 0000000..032ebdf
--- /dev/null
+++ b/plat/arm/board/common/board_css_common.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arm_def.h>
+#include <plat_arm.h>
+
+/*
+ * Table of memory regions for different BL stages to map using the MMU.
+ * This doesn't include Trusted SRAM as arm_setup_page_tables() already
+ * takes care of mapping it.
+ */
+#ifdef IMAGE_BL1
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_FLASH0_RO,
+	V2M_MAP_IOFPGA,
+	CSS_MAP_DEVICE,
+	SOC_CSS_MAP_DEVICE,
+#if TRUSTED_BOARD_BOOT
+	/* Map DRAM to authenticate NS_BL2U image. */
+	ARM_MAP_NS_DRAM1,
+#endif
+	{0}
+};
+#endif
+#ifdef IMAGE_BL2
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_FLASH0_RO,
+#ifdef PLAT_ARM_MEM_PROT_ADDR
+	ARM_V2M_MAP_MEM_PROTECT,
+#endif
+	V2M_MAP_IOFPGA,
+	CSS_MAP_DEVICE,
+	SOC_CSS_MAP_DEVICE,
+	ARM_MAP_NS_DRAM1,
+#ifdef AARCH64
+	ARM_MAP_DRAM2,
+#endif
+#ifdef SPD_tspd
+	ARM_MAP_TSP_SEC_MEM,
+#endif
+#ifdef SPD_opteed
+	ARM_MAP_OPTEE_CORE_MEM,
+	ARM_OPTEE_PAGEABLE_LOAD_MEM,
+#endif
+	{0}
+};
+#endif
+#ifdef IMAGE_BL2U
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	CSS_MAP_DEVICE,
+	SOC_CSS_MAP_DEVICE,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL31
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_IOFPGA,
+	CSS_MAP_DEVICE,
+#ifdef PLAT_ARM_MEM_PROT_ADDR
+	ARM_V2M_MAP_MEM_PROTECT,
+#endif
+	SOC_CSS_MAP_DEVICE,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL32
+const mmap_region_t plat_arm_mmap[] = {
+#ifdef AARCH32
+	ARM_MAP_SHARED_RAM,
+#endif
+	V2M_MAP_IOFPGA,
+	CSS_MAP_DEVICE,
+	SOC_CSS_MAP_DEVICE,
+	{0}
+};
+#endif
+
+ARM_CASSERT_MMAP
diff --git a/plat/arm/board/common/drivers/norflash/norflash.c b/plat/arm/board/common/drivers/norflash/norflash.c
new file mode 100644
index 0000000..722cf33
--- /dev/null
+++ b/plat/arm/board/common/drivers/norflash/norflash.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <mmio.h>
+#include <norflash.h>
+
+
+/*
+ * DWS ready poll retries. The number of retries in this driver has been
+ * obtained empirically from Juno. FVP implements a zero wait state NOR
+ * flash model.
+ */
+#define DWS_WORD_PROGRAM_RETRIES	1000
+#define DWS_WORD_ERASE_RETRIES		3000000
+#define DWS_WORD_LOCK_RETRIES		1000
+
+/* Helper macro to detect end of command */
+#define NOR_CMD_END (NOR_DWS | (NOR_DWS << 16L))
+
+/*
+ * This file supplies a low-level interface to the VExpress NOR flash
+ * memory of Juno and FVP. This memory is organized as two interleaved
+ * chips, each with a 16-bit word, so every 32-bit access reaches both
+ * chips. This is important to bear in mind when we send commands to, or
+ * read the status of, the chips.
+ */
+
+/* Helper macros to access two flash banks in parallel */
+#define NOR_2X16(d)			((d << 16) | (d & 0xffff))
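+
+/*
+ * For example, wrapping a 16-bit command value such as 0x0070 (the Intel
+ * command set "read status register" command) in NOR_2X16 yields the 32-bit
+ * write 0x00700070, so both interleaved chips receive the command in a single
+ * access. The exact command encodings are defined in norflash.h.
+ */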
+
+static unsigned int nor_status(uintptr_t base_addr)
+{
+	unsigned long status;
+
+	nor_send_cmd(base_addr, NOR_CMD_READ_STATUS_REG);
+	status = mmio_read_32(base_addr);
+	status |= status >> 16; /* merge status from both flash banks */
+
+	return status & 0xFFFF;
+}
+
+/*
+ * Poll Write State Machine.
+ * Return values:
+ *    0      = WSM ready
+ *    -EBUSY = WSM busy after the number of retries
+ */
+static int nor_poll_dws(uintptr_t base_addr, unsigned long int retries)
+{
+	unsigned long status;
+
+	do {
+		nor_send_cmd(base_addr, NOR_CMD_READ_STATUS_REG);
+		status = mmio_read_32(base_addr);
+		if ((status & NOR_CMD_END) == NOR_CMD_END)
+			return 0;
+	} while (retries-- > 0);
+
+	return -EBUSY;
+}
+
+/*
+ * Return values:
+ *    0      = success
+ *    -EPERM = Device protected or Block locked
+ *    -EIO   = General I/O error
+ */
+static int nor_full_status_check(uintptr_t base_addr)
+{
+	unsigned long status;
+
+	/* Full status check */
+	status = nor_status(base_addr);
+
+	if (status & (NOR_PS | NOR_BLS | NOR_ESS | NOR_PSS))
+		return -EPERM;
+	if (status & (NOR_VPPS | NOR_ES))
+		return -EIO;
+	return 0;
+}
+
+void nor_send_cmd(uintptr_t base_addr, unsigned long cmd)
+{
+	mmio_write_32(base_addr, NOR_2X16(cmd));
+}
+
+/*
+ * This function programs a word in the flash. Be aware that it can only
+ * clear bits that were previously set; it cannot set bits that were
+ * previously cleared. The resulting word is old_bits & new_bits (for
+ * example, programming 0x0F over 0xF0 leaves 0x00 in the flash).
+ * Return values:
+ *  0 = success
+ *  otherwise it returns a negative value
+ */
+int nor_word_program(uintptr_t base_addr, unsigned long data)
+{
+	uint32_t status;
+	int ret;
+
+	nor_send_cmd(base_addr, NOR_CMD_CLEAR_STATUS_REG);
+
+	/* Set the device in write word mode */
+	nor_send_cmd(base_addr, NOR_CMD_WORD_PROGRAM);
+	mmio_write_32(base_addr, data);
+
+	ret = nor_poll_dws(base_addr, DWS_WORD_PROGRAM_RETRIES);
+	if (ret == 0) {
+		/* Full status check */
+		nor_send_cmd(base_addr, NOR_CMD_READ_STATUS_REG);
+		status = mmio_read_32(base_addr);
+
+		if (status & (NOR_PS | NOR_BLS)) {
+			nor_send_cmd(base_addr, NOR_CMD_CLEAR_STATUS_REG);
+			ret = -EPERM;
+		}
+	}
+
+	if (ret == 0)
+		ret = nor_full_status_check(base_addr);
+	nor_send_cmd(base_addr, NOR_CMD_READ_ARRAY);
+
+	return ret;
+}
+
+/*
+ * Erase a full 256K block
+ * Return values:
+ *  0 = success
+ *  otherwise it returns a negative value
+ */
+int nor_erase(uintptr_t base_addr)
+{
+	int ret;
+
+	nor_send_cmd(base_addr, NOR_CMD_CLEAR_STATUS_REG);
+
+	nor_send_cmd(base_addr, NOR_CMD_BLOCK_ERASE);
+	nor_send_cmd(base_addr, NOR_CMD_BLOCK_ERASE_ACK);
+
+	ret = nor_poll_dws(base_addr, DWS_WORD_ERASE_RETRIES);
+	if (ret == 0)
+		ret = nor_full_status_check(base_addr);
+	nor_send_cmd(base_addr, NOR_CMD_READ_ARRAY);
+
+	return ret;
+}
+
+/*
+ * Lock a full 256K block
+ * Return values:
+ *  0 = success
+ *  otherwise it returns a negative value
+ */
+int nor_lock(uintptr_t base_addr)
+{
+	int ret;
+
+	nor_send_cmd(base_addr, NOR_CMD_CLEAR_STATUS_REG);
+
+	nor_send_cmd(base_addr, NOR_CMD_LOCK_UNLOCK);
+	nor_send_cmd(base_addr, NOR_LOCK_BLOCK);
+
+	ret = nor_poll_dws(base_addr, DWS_WORD_LOCK_RETRIES);
+	if (ret == 0)
+		ret = nor_full_status_check(base_addr);
+	nor_send_cmd(base_addr, NOR_CMD_READ_ARRAY);
+
+	return ret;
+}
+
+/*
+ * Unlock a full 256K block
+ * Return values:
+ *  0 = success
+ *  otherwise it returns a negative value
+ */
+int nor_unlock(uintptr_t base_addr)
+{
+	int ret;
+
+	nor_send_cmd(base_addr, NOR_CMD_CLEAR_STATUS_REG);
+
+	nor_send_cmd(base_addr, NOR_CMD_LOCK_UNLOCK);
+	nor_send_cmd(base_addr, NOR_UNLOCK_BLOCK);
+
+	ret = nor_poll_dws(base_addr, DWS_WORD_LOCK_RETRIES);
+	if (ret == 0)
+		ret = nor_full_status_check(base_addr);
+	nor_send_cmd(base_addr, NOR_CMD_READ_ARRAY);
+
+	return ret;
+}
diff --git a/plat/arm/board/common/rotpk/arm_rotpk_ecdsa.der b/plat/arm/board/common/rotpk/arm_rotpk_ecdsa.der
new file mode 100644
index 0000000..2547877
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotpk_ecdsa.der
Binary files differ
diff --git a/plat/arm/board/common/rotpk/arm_rotpk_ecdsa_sha256.bin b/plat/arm/board/common/rotpk/arm_rotpk_ecdsa_sha256.bin
new file mode 100644
index 0000000..c5e123a
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotpk_ecdsa_sha256.bin
@@ -0,0 +1 @@
+.@¿nù»Â˜1q	=ýÑÌiJ˜ë‹ ° †Nl
\ No newline at end of file
diff --git a/plat/arm/board/common/rotpk/arm_rotpk_rsa.der b/plat/arm/board/common/rotpk/arm_rotpk_rsa.der
new file mode 100644
index 0000000..661f899
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotpk_rsa.der
Binary files differ
diff --git a/plat/arm/board/common/rotpk/arm_rotpk_rsa_sha256.bin b/plat/arm/board/common/rotpk/arm_rotpk_rsa_sha256.bin
new file mode 100644
index 0000000..7653f7e
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotpk_rsa_sha256.bin
@@ -0,0 +1 @@
+°ó‚	—Ø:7zrGì2séÂ’2âIYö^‹JJFØ"šÚ
\ No newline at end of file
diff --git a/plat/arm/board/common/rotpk/arm_rotprivk_ecdsa.pem b/plat/arm/board/common/rotpk/arm_rotprivk_ecdsa.pem
new file mode 100644
index 0000000..fb328e3
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotprivk_ecdsa.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEINSaX6nvzS3teiBJA7WlTLRKJOajpy29o2cArLbUXoZBoAoGCCqGSM49
+AwEHoUQDQgAEm+ZIvTQ44aKk83DhVLsvsFpKDP/Ch9vA+4Hp+fmVfX6gDH8K1OBi
+SpRf7FJ9RGPIn2H6xst+a1OtLMWUDRqGkQ==
+-----END EC PRIVATE KEY-----
diff --git a/plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem b/plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem
new file mode 100644
index 0000000..71410ec
--- /dev/null
+++ b/plat/arm/board/common/rotpk/arm_rotprivk_rsa.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLLGDVjWPUB3l+
+xxaWvU0kTqyG5rdx48VUC+cUHL0pGsE/erYCqqs2xNk2aWziZcObsb89qFYmy/0E
+AbqsPlQyynleu7IF6gZY8nS64fSHwBkKH2YHd4SDoRzv/yhZ58NofSYgQ+tWY/M5
+MdgrUam8T9D23pXcX1vB7ZBv7CiRfhfteJD0YKfEx09Q7V0TOiErcMVhewghZTrN
+glaMekesieilSEgx2R1G5YWGmKDlwKZqvQfkkldhB499Wk3Krja5VgQQ8my+9jts
+gD6+DqNNx9R+p0nU8tK8zzCo53SPZN+8XEdozEBM+IPMy0A1BGDKs6QXnwPKHVr6
+0a8hVxDTAgMBAAECggEAfwsc8ewbhDW4TwIGqfNtDUr0rtYN13VpqohW0ki2L8G/
+HQaKUViO/wxQFqoNn/OqQO0AfHmKhXAAokTCiXngBHJ/OjF7vB7+IRhazZEE6u2/
+uoivr/OYNQbFpXyTqsQ1eFzpPju6KKcPK7BzT4Mc89ek/vloFAi8w6LdMl8lbvOg
+LBWqX+5A+UQoenPUTvYM4U22YNcEAWubkpsYAmViiWiac+a+uPRk39aKyfOedDNu
++ty9MtCwekivoUTfP/1+O+jFlDnPMJUOEkBmcBqxseYYAHu7blBpdHxYpAItC2pv
+YwJJSvsE+HLBLPk177Jahg7sOUqcP0F/X+T65yuvIQKBgQDxdjXdJT5K8j7rG2fv
+2bvF2H1GPaHaTYRk0EGI2Ql6Nn+ddfeCE6gaT7aPPgg87wAhNu93coFuYHw0p/sc
+ZkXMJ+BmlstPV555cWXmwcxZLsni0fOXrt4YxwWkZwmh74m0NVM/cSFw56PU0oj1
+yDNeq3fgmsJocmuNTe1eG9qA7QKBgQDXaAGrNA5Xel5mqqMYTHHQWI6l2uzdNtt7
+eDn3K9+Eh3ywTqrwP845MAjKDU2Lq61I6t2H89dEifHq823VIcLCHd9BF04MrAH7
+qDPzrmPP2iB9g+YFmGBKe+K0HFE1t1KrTlo9VV6ZAC6RJNLAgwD4kvfIVYNkCGwe
++hoZBdhgvwKBgBrOsPQ4ak4PzwRzKnrqhXpVqrLdrNZ7vLMkm+IBlpfG7SwiKLR8
+UjF5oB8PGAML1cvaOYPdZplGhQOjkrF4eU9NLhC1tSS96Y46FMIlyfYsx6UzAgRZ
+GbdOgUXbWqpr2bH0KaXlfXz3eqzqIuKGs41TJB//jo3iBibN/AhytzORAoGAeGov
+5KDpE4XYl9Pz8HVremjG9Xh4yQENmOwQm1fvT4rd7UFM1ZkVk2qCv1DIdLe32vdQ
+d9ucDzh+ADWsxGRnF1TTpPN+Mh9FzISu5h4qtdreJsxBHgecbIbsqHrb+wdMM29N
+itPaWfV8Eq9fETcqp8qgsWD8XkNHDdoKFMrrtskCgYAoSt/Je1D3ZE/3HEjez7bq
+fenS3J6KG2SEn2PNFn+R0R5vBo4DaV/cQysKh44GD2+sh0QDyh6nuWJufyhPzROP
+DU6DCLbwNePj/yaGuzi36oLt6bBgfPWCiJY7jIdK8DmTLW25m7fRtCC5pxZlSzgl
+KBf7R6cbaTvaFe05Y2FJXA==
+-----END PRIVATE KEY-----
diff --git a/plat/arm/board/fvp/aarch32/fvp_helpers.S b/plat/arm/board/fvp/aarch32/fvp_helpers.S
new file mode 100644
index 0000000..143972d
--- /dev/null
+++ b/plat/arm/board/fvp/aarch32/fvp_helpers.S
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include "../drivers/pwrc/fvp_pwrc.h"
+#include "../fvp_def.h"
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_get_my_entrypoint
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_arm_calc_core_pos
+
+	/* --------------------------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * For AArch32, cold-booting secondary CPUs is not yet
+	 * implemented and they panic.
+	 * --------------------------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * unsigned long plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and warm
+	 * boot. On FVP, this information can be queried from the power
+	 * controller. The Power Control SYS Status Register (PSYSR) indicates
+	 * the wake-up reason for the CPU.
+	 *
+	 * For a cold boot, return 0.
+	 * For a warm boot, read the mailbox and return the address it contains.
+	 *
+	 * TODO: PSYSR is a common register and should be
+	 * 	accessed using locks. Since it is not possible
+	 * 	to use locks immediately after a cold reset
+	 * 	we are relying on the fact that after a cold
+	 * 	reset all cpus will read the same WK field
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	/* ---------------------------------------------------------------------
+	 * When bit PSYSR.WK indicates either "Wake by PPONR" or "Wake by GIC
+	 * WakeRequest signal" then it is a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+	ldcopr	r2, MPIDR
+	ldr	r1, =PWRC_BASE
+	str	r2, [r1, #PSYSR_OFF]
+	ldr	r2, [r1, #PSYSR_OFF]
+	ubfx	r2, r2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
+	cmp	r2, #WKUP_PPONR
+	beq	warm_reset
+	cmp	r2, #WKUP_GICREQ
+	beq	warm_reset
+
+	/* Cold reset */
+	mov	r0, #0
+	bx	lr
+
+warm_reset:
+	/* ---------------------------------------------------------------------
+	 * A mailbox is maintained in the trusted SRAM. It is flushed out of the
+	 * caches after every update using normal memory so it is safe to read
+	 * it here with SO attributes.
+	 * ---------------------------------------------------------------------
+	 */
+	ldr	r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	r0, [r0]
+	cmp	r0, #0
+	beq	_panic
+	bx	lr
+
+	/* ---------------------------------------------------------------------
+	 * The power controller indicates this is a warm reset but the mailbox
+	 * is empty. This should never happen!
+	 * ---------------------------------------------------------------------
+	 */
+_panic:
+	b	_panic
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	ldcopr	r0, MPIDR
+	ldr	r1, =MPIDR_AFFINITY_MASK
+	and	r0, r1
+	cmp	r0, #FVP_PRIMARY_CPU
+	moveq	r0, #1
+	movne	r0, #0
+	bx	lr
+endfunc plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *
+	 * Function to calculate the core position on FVP.
+	 *
+	 * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER) +
+	 * (CPUId * FVP_MAX_PE_PER_CPU) +
+	 * ThreadId
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	mov	r3, r0
+
+	/*
+	 * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+	 * look as if in a multi-threaded implementation
+	 */
+	tst	r0, #MPIDR_MT_MASK
+	lsleq	r3, r0, #MPIDR_AFFINITY_BITS
+
+	/* Extract individual affinity fields from MPIDR */
+	mov	r2, #FVP_MAX_PE_PER_CPU
+	ubfx	r0, r3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	r1, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+	mla	r0, r1, r2, r0
+
+	mov	r1, #FVP_MAX_CPUS_PER_CLUSTER
+	ubfx	r2, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+	mla	r0, r1, r2, r0
+
+	bx	lr
+endfunc plat_arm_calc_core_pos
diff --git a/plat/arm/board/fvp/aarch64/fvp_helpers.S b/plat/arm/board/fvp/aarch64/fvp_helpers.S
new file mode 100644
index 0000000..6ea4585
--- /dev/null
+++ b/plat/arm/board/fvp/aarch64/fvp_helpers.S
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <gicv2.h>
+#include <gicv3.h>
+#include <platform_def.h>
+#include <v2m_def.h>
+#include "../drivers/pwrc/fvp_pwrc.h"
+#include "../fvp_def.h"
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_get_my_entrypoint
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_arm_calc_core_pos
+
+	.macro	fvp_choose_gicmmap  param1, param2, x_tmp, w_tmp, res
+	ldr	\x_tmp, =V2M_SYSREGS_BASE + V2M_SYS_ID
+	ldr	\w_tmp, [\x_tmp]
+	ubfx	\w_tmp, \w_tmp, #V2M_SYS_ID_BLD_SHIFT, #V2M_SYS_ID_BLD_LENGTH
+	cmp	\w_tmp, #BLD_GIC_VE_MMAP
+	csel	\res, \param1, \param2, eq
+	.endm
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset e.g
+	 * mark the cpu's presence, mechanism to place it in a
+	 * holding pen etc.
+	 * TODO: Should we read the PSYS register to make sure
+	 * that the request has gone through.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+#ifndef EL3_PAYLOAD_BASE
+	/* ---------------------------------------------
+	 * Power down this cpu.
+	 * TODO: Do we need to worry about powering the
+	 * cluster down as well here. That will need
+	 * locks which we won't have unless an elf-
+	 * loader zeroes out the zi section.
+	 * ---------------------------------------------
+	 */
+	mrs	x0, mpidr_el1
+	ldr	x1, =PWRC_BASE
+	str	w0, [x1, #PPOFFR_OFF]
+
+	/* ---------------------------------------------
+	 * Disable GIC bypass as well
+	 * ---------------------------------------------
+	 */
+	/* Check for GICv3 system register access */
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
+	cmp	x0, #1
+	b.ne	gicv2_bypass_disable
+
+	/* Check for SRE enable */
+	mrs	x1, ICC_SRE_EL3
+	tst	x1, #ICC_SRE_SRE_BIT
+	b.eq	gicv2_bypass_disable
+
+	mrs	x2, ICC_SRE_EL3
+	orr	x2, x2, #(ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT)
+	msr	ICC_SRE_EL3, x2
+	b	secondary_cold_boot_wait
+
+gicv2_bypass_disable:
+	ldr	x0, =VE_GICC_BASE
+	ldr	x1, =BASE_GICC_BASE
+	fvp_choose_gicmmap	x0, x1, x2, w2, x1
+	mov	w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
+	orr	w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
+	str	w0, [x1, #GICC_CTLR]
+
+secondary_cold_boot_wait:
+	/* ---------------------------------------------
+	 * There is no sane reason to come out of this
+	 * wfi so panic if we do. This cpu will be pow-
+	 * ered on and reset by the cpu_on pm api
+	 * ---------------------------------------------
+	 */
+	dsb	sy
+	wfi
+	no_ret	plat_panic_handler
+#else
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+
+	/* Wait until the entrypoint gets populated */
+poll_mailbox:
+	ldr	x1, [x0]
+	cbz	x1, 1f
+	br	x1
+1:
+	wfe
+	b	poll_mailbox
+#endif /* EL3_PAYLOAD_BASE */
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * uintptr_t plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and warm
+	 * boot. On FVP, this information can be queried from the power
+	 * controller. The Power Control SYS Status Register (PSYSR) indicates
+	 * the wake-up reason for the CPU.
+	 *
+	 * For a cold boot, return 0.
+	 * For a warm boot, read the mailbox and return the address it contains.
+	 *
+	 * TODO: PSYSR is a common register and should be
+	 * 	accessed using locks. Since it is not possible
+	 * 	to use locks immediately after a cold reset
+	 * 	we are relying on the fact that after a cold
+	 * 	reset all cpus will read the same WK field
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	/* ---------------------------------------------------------------------
+	 * When bit PSYSR.WK indicates either "Wake by PPONR" or "Wake by GIC
+	 * WakeRequest signal" then it is a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+	mrs	x2, mpidr_el1
+	ldr	x1, =PWRC_BASE
+	str	w2, [x1, #PSYSR_OFF]
+	ldr	w2, [x1, #PSYSR_OFF]
+	ubfx	w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
+	cmp	w2, #WKUP_PPONR
+	beq	warm_reset
+	cmp	w2, #WKUP_GICREQ
+	beq	warm_reset
+
+	/* Cold reset */
+	mov	x0, #0
+	ret
+
+warm_reset:
+	/* ---------------------------------------------------------------------
+	 * A mailbox is maintained in the trusted SRAM. It is flushed out of the
+	 * caches after every update using normal memory so it is safe to read
+	 * it here with SO attributes.
+	 * ---------------------------------------------------------------------
+	 */
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	x0, [x0]
+	cbz	x0, _panic_handler
+	ret
+
+	/* ---------------------------------------------------------------------
+	 * The power controller indicates this is a warm reset but the mailbox
+	 * is empty. This should never happen!
+	 * ---------------------------------------------------------------------
+	 */
+_panic_handler:
+	no_ret	plat_panic_handler
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	ldr	x1, =MPIDR_AFFINITY_MASK
+	and	x0, x0, x1
+	cmp	x0, #FVP_PRIMARY_CPU
+	cset	w0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *
+	 * Function to calculate the core position on FVP.
+	 *
+	 * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER) +
+	 * (CPUId * FVP_MAX_PE_PER_CPU) +
+	 * ThreadId
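+	 *
+	 * e.g. with FVP_MAX_CPUS_PER_CLUSTER = 4 and FVP_MAX_PE_PER_CPU = 1
+	 * (the defaults in fvp_def.h), cluster 1 / CPU 2 / thread 0 maps to
+	 * position (1 * 4) + (2 * 1) + 0 = 6.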
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	mov	x3, x0
+
+	/*
+	 * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+	 * look as if in a multi-threaded implementation.
+	 */
+	tst	x0, #MPIDR_MT_MASK
+	lsl	x3, x0, #MPIDR_AFFINITY_BITS
+	csel	x3, x3, x0, eq
+
+	/* Extract individual affinity fields from MPIDR */
+	ubfx	x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+	ubfx	x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+	/* Compute linear position */
+	mov	x4, #FVP_MAX_PE_PER_CPU
+	madd	x0, x1, x4, x0
+	mov	x5, #FVP_MAX_CPUS_PER_CLUSTER
+	madd	x0, x2, x5, x0
+	ret
+endfunc plat_arm_calc_core_pos
diff --git a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c
new file mode 100644
index 0000000..5948e14
--- /dev/null
+++ b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bakery_lock.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include "../../fvp_def.h"
+#include "../../fvp_private.h"
+#include "fvp_pwrc.h"
+
+/*
+ * TODO: Someday there will be a generic power controller api. At the moment
+ * each platform has its own pwrc so just exporting functions is fine.
+ */
+ARM_INSTANTIATE_LOCK;
+
+unsigned int fvp_pwrc_get_cpu_wkr(u_register_t mpidr)
+{
+	return PSYSR_WK(fvp_pwrc_read_psysr(mpidr));
+}
+
+unsigned int fvp_pwrc_read_psysr(u_register_t mpidr)
+{
+	unsigned int rc;
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PSYSR_OFF, (unsigned int) mpidr);
+	rc = mmio_read_32(PWRC_BASE + PSYSR_OFF);
+	arm_lock_release();
+	return rc;
+}
+
+void fvp_pwrc_write_pponr(u_register_t mpidr)
+{
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PPONR_OFF, (unsigned int) mpidr);
+	arm_lock_release();
+}
+
+void fvp_pwrc_write_ppoffr(u_register_t mpidr)
+{
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PPOFFR_OFF, (unsigned int) mpidr);
+	arm_lock_release();
+}
+
+void fvp_pwrc_set_wen(u_register_t mpidr)
+{
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PWKUPR_OFF,
+		      (unsigned int) (PWKUPR_WEN | mpidr));
+	arm_lock_release();
+}
+
+void fvp_pwrc_clr_wen(u_register_t mpidr)
+{
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PWKUPR_OFF,
+		      (unsigned int) mpidr);
+	arm_lock_release();
+}
+
+void fvp_pwrc_write_pcoffr(u_register_t mpidr)
+{
+	arm_lock_get();
+	mmio_write_32(PWRC_BASE + PCOFFR_OFF, (unsigned int) mpidr);
+	arm_lock_release();
+}
+
+/* Nothing else to do here apart from initializing the lock */
+void plat_arm_pwrc_setup(void)
+{
+	arm_lock_init();
+}
+
+
+
diff --git a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h
new file mode 100644
index 0000000..f5f2178
--- /dev/null
+++ b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FVP_PWRC_H__
+#define __FVP_PWRC_H__
+
+/* FVP Power controller register offset etc */
+#define PPOFFR_OFF		0x0
+#define PPONR_OFF		0x4
+#define PCOFFR_OFF		0x8
+#define PWKUPR_OFF		0xc
+#define PSYSR_OFF		0x10
+
+#define PWKUPR_WEN		(1ull << 31)
+
+#define PSYSR_AFF_L2		(1 << 31)
+#define PSYSR_AFF_L1		(1 << 30)
+#define PSYSR_AFF_L0		(1 << 29)
+#define PSYSR_WEN		(1 << 28)
+#define PSYSR_PC		(1 << 27)
+#define PSYSR_PP		(1 << 26)
+
+#define PSYSR_WK_SHIFT		24
+#define PSYSR_WK_WIDTH		0x2
+#define PSYSR_WK_MASK		((1 << PSYSR_WK_WIDTH) - 1)
+#define PSYSR_WK(x)		(((x) >> PSYSR_WK_SHIFT) & PSYSR_WK_MASK)
+
+#define WKUP_COLD		0x0
+#define WKUP_RESET		0x1
+#define WKUP_PPONR		0x2
+#define WKUP_GICREQ		0x3
+
+#define PSYSR_INVALID		0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void fvp_pwrc_write_pcoffr(u_register_t);
+void fvp_pwrc_write_ppoffr(u_register_t);
+void fvp_pwrc_write_pponr(u_register_t);
+void fvp_pwrc_set_wen(u_register_t);
+void fvp_pwrc_clr_wen(u_register_t);
+unsigned int fvp_pwrc_read_psysr(u_register_t);
+unsigned int fvp_pwrc_get_cpu_wkr(u_register_t);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __FVP_PWRC_H__ */
diff --git a/plat/arm/board/fvp/fvp_bl1_setup.c b/plat/arm/board/fvp/fvp_bl1_setup.c
new file mode 100644
index 0000000..c539a28
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_bl1_setup.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+#include <tbbr_img_def.h>
+#include "fvp_private.h"
+
+
+/*******************************************************************************
+ * Perform any BL1 specific platform actions.
+ ******************************************************************************/
+void bl1_early_platform_setup(void)
+{
+	arm_bl1_early_platform_setup();
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	fvp_interconnect_init();
+	/*
+	 * Enable coherency in Interconnect for the primary CPU's cluster.
+	 */
+	fvp_interconnect_enable();
+}
+
+/*******************************************************************************
+ * The following function checks whether a firmware update is needed by
+ * checking whether the TOC in the FIP image is valid.
+ ******************************************************************************/
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	if (!arm_io_is_toc_valid())
+		return NS_BL1U_IMAGE_ID;
+
+	return BL2_IMAGE_ID;
+}
+
diff --git a/plat/arm/board/fvp/fvp_bl2_setup.c b/plat/arm/board/fvp/fvp_bl2_setup.c
new file mode 100644
index 0000000..e9c4ab5
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_bl2_setup.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <sp804_delay_timer.h>
+#include <v2m_def.h>
+#include "fvp_def.h"
+#include "fvp_private.h"
+
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	arm_bl2_early_platform_setup(mem_layout);
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+}
+
+void bl2_platform_setup(void)
+{
+	arm_bl2_platform_setup();
+
+#if FVP_USE_SP804_TIMER
+	/* Enable the clock override for SP804 timer 0, which means that no
+	 * clock dividers are applied and the raw (35 MHz) clock will be used */
+	mmio_write_32(V2M_SP810_BASE, FVP_SP810_CTRL_TIM0_OV);
+
+	/* Initialize delay timer driver using SP804 dual timer 0 */
+	sp804_timer_init(V2M_SP804_TIMER0_BASE,
+			SP804_TIMER_CLKMULT, SP804_TIMER_CLKDIV);
+#else
+	generic_delay_timer_init();
+#endif /* FVP_USE_SP804_TIMER */
+}
diff --git a/plat/arm/board/fvp/fvp_bl2u_setup.c b/plat/arm/board/fvp/fvp_bl2u_setup.c
new file mode 100644
index 0000000..283829a
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_bl2u_setup.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+#include "fvp_def.h"
+#include "fvp_private.h"
+
+void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+{
+	arm_bl2u_early_platform_setup(mem_layout, plat_info);
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+}
diff --git a/plat/arm/board/fvp/fvp_bl31_setup.c b/plat/arm/board/fvp/fvp_bl31_setup.c
new file mode 100644
index 0000000..181c923
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_bl31_setup.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_config.h>
+#include <plat_arm.h>
+#include <smmu_v3.h>
+#include "fvp_private.h"
+
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2)
+#else
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+#endif
+{
+	arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+
+	/*
+	 * Initialize the correct interconnect for this cluster during cold
+	 * boot. No need for locks as no other CPU is active.
+	 */
+	fvp_interconnect_init();
+
+	/*
+	 * Enable coherency in interconnect for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * FVP PSCI code will enable coherency for other clusters.
+	 */
+	fvp_interconnect_enable();
+
+	/* On FVP RevC, initialize SMMUv3 */
+	if (arm_config.flags & ARM_CONFIG_FVP_HAS_SMMUV3)
+		smmuv3_init(PLAT_FVP_SMMUV3_BASE);
+}
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
new file mode 100644
index 0000000..57cc3d5
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_config.h>
+#include <arm_def.h>
+#include <assert.h>
+#include <cci.h>
+#include <ccn.h>
+#include <debug.h>
+#include <gicv2.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <v2m_def.h>
+#include "../fvp_def.h"
+
+/* Defines for GIC Driver build time selection */
+#define FVP_GICV2		1
+#define FVP_GICV3		2
+#define FVP_GICV3_LEGACY	3
+
+/*******************************************************************************
+ * arm_config holds the characteristics of the differences between the three FVP
+ * platforms (Base, A53_A57 & Foundation). It will be populated during cold boot
+ * at each boot stage by the primary CPU before enabling the MMU (to allow
+ * interconnect configuration) & used thereafter. Each BL will have its own copy
+ * to allow independent operation.
+ ******************************************************************************/
+arm_config_t arm_config;
+
+#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
+					DEVICE0_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#define MAP_DEVICE1	MAP_REGION_FLAT(DEVICE1_BASE,			\
+					DEVICE1_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Need to be mapped with write permissions in order to set a new non-volatile
+ * counter value.
+ */
+#define MAP_DEVICE2	MAP_REGION_FLAT(DEVICE2_BASE,			\
+					DEVICE2_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+
+/*
+ * Table of memory regions for various BL stages to map using the MMU.
+ * This doesn't include Trusted SRAM as arm_setup_page_tables() already
+ * takes care of mapping it.
+ *
+ * The flash needs to be mapped as writable in order to erase the FIP's Table of
+ * Contents in case of unrecoverable error (see plat_error_handler()).
+ */
+#ifdef IMAGE_BL1
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_FLASH0_RW,
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+#if TRUSTED_BOARD_BOOT
+	/* To access the Root of Trust Public Key registers. */
+	MAP_DEVICE2,
+	/* Map DRAM to authenticate NS_BL2U image. */
+	ARM_MAP_NS_DRAM1,
+#endif
+	{0}
+};
+#endif
+#ifdef IMAGE_BL2
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	V2M_MAP_FLASH0_RW,
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+	ARM_MAP_NS_DRAM1,
+#ifdef AARCH64
+	ARM_MAP_DRAM2,
+#endif
+#ifdef SPD_tspd
+	ARM_MAP_TSP_SEC_MEM,
+#endif
+#if TRUSTED_BOARD_BOOT
+	/* To access the Root of Trust Public Key registers. */
+	MAP_DEVICE2,
+#endif
+#if ARM_BL31_IN_DRAM
+	ARM_MAP_BL31_SEC_DRAM,
+#endif
+#ifdef SPD_opteed
+	ARM_MAP_OPTEE_CORE_MEM,
+	ARM_OPTEE_PAGEABLE_LOAD_MEM,
+#endif
+	{0}
+};
+#endif
+#ifdef IMAGE_BL2U
+const mmap_region_t plat_arm_mmap[] = {
+	MAP_DEVICE0,
+	V2M_MAP_IOFPGA,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL31
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	ARM_MAP_EL3_TZC_DRAM,
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+	ARM_V2M_MAP_MEM_PROTECT,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL32
+const mmap_region_t plat_arm_mmap[] = {
+#ifdef AARCH32
+	ARM_MAP_SHARED_RAM,
+#endif
+	V2M_MAP_IOFPGA,
+	MAP_DEVICE0,
+	MAP_DEVICE1,
+	{0}
+};
+#endif
+
+ARM_CASSERT_MMAP
+
+#if FVP_INTERCONNECT_DRIVER != FVP_CCN
+static const int fvp_cci400_map[] = {
+	PLAT_FVP_CCI400_CLUS0_SL_PORT,
+	PLAT_FVP_CCI400_CLUS1_SL_PORT,
+};
+
+static const int fvp_cci5xx_map[] = {
+	PLAT_FVP_CCI5XX_CLUS0_SL_PORT,
+	PLAT_FVP_CCI5XX_CLUS1_SL_PORT,
+};
+
+static unsigned int get_interconnect_master(void)
+{
+	unsigned int master;
+	u_register_t mpidr;
+
+	mpidr = read_mpidr_el1();
+	master = (arm_config.flags & ARM_CONFIG_FVP_SHIFTED_AFF) ?
+		MPIDR_AFFLVL2_VAL(mpidr) : MPIDR_AFFLVL1_VAL(mpidr);
+
+	assert(master < FVP_CLUSTER_COUNT);
+	return master;
+}
+#endif
+
+/*******************************************************************************
+ * A single boot loader stack is expected to work on both the Foundation FVP
+ * models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
+ * SYS_ID register provides a mechanism for detecting the differences between
+ * these platforms. This information is stored in a per-BL array to allow the
+ * code to take the correct path; it forms the per-BL platform configuration.
+ ******************************************************************************/
+void fvp_config_setup(void)
+{
+	unsigned int rev, hbi, bld, arch, sys_id;
+
+	sys_id = mmio_read_32(V2M_SYSREGS_BASE + V2M_SYS_ID);
+	rev = (sys_id >> V2M_SYS_ID_REV_SHIFT) & V2M_SYS_ID_REV_MASK;
+	hbi = (sys_id >> V2M_SYS_ID_HBI_SHIFT) & V2M_SYS_ID_HBI_MASK;
+	bld = (sys_id >> V2M_SYS_ID_BLD_SHIFT) & V2M_SYS_ID_BLD_MASK;
+	arch = (sys_id >> V2M_SYS_ID_ARCH_SHIFT) & V2M_SYS_ID_ARCH_MASK;
+
+	if (arch != ARCH_MODEL) {
+		ERROR("This firmware is for FVP models\n");
+		panic();
+	}
+
+	/*
+	 * The build field in the SYS_ID tells which variant of the GIC
+	 * memory map is implemented by the model.
+	 */
+	switch (bld) {
+	case BLD_GIC_VE_MMAP:
+		ERROR("Legacy Versatile Express memory map for GIC peripheral"
+				" is not supported\n");
+		panic();
+		break;
+	case BLD_GIC_A53A57_MMAP:
+		break;
+	default:
+		ERROR("Unsupported board build %x\n", bld);
+		panic();
+	}
+
+	/*
+	 * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
+	 * for the Foundation FVP.
+	 */
+	switch (hbi) {
+	case HBI_FOUNDATION_FVP:
+		arm_config.flags = 0;
+
+		/*
+		 * Check for supported revisions of Foundation FVP
+		 * Allow future revisions to run but emit warning diagnostic
+		 */
+		switch (rev) {
+		case REV_FOUNDATION_FVP_V2_0:
+		case REV_FOUNDATION_FVP_V2_1:
+		case REV_FOUNDATION_FVP_v9_1:
+		case REV_FOUNDATION_FVP_v9_6:
+			break;
+		default:
+			WARN("Unrecognized Foundation FVP revision %x\n", rev);
+			break;
+		}
+		break;
+	case HBI_BASE_FVP:
+		arm_config.flags |= (ARM_CONFIG_BASE_MMAP | ARM_CONFIG_HAS_TZC);
+
+		/*
+		 * Check for supported revisions
+		 * Allow future revisions to run but emit warning diagnostic
+		 */
+		switch (rev) {
+		case REV_BASE_FVP_V0:
+			arm_config.flags |= ARM_CONFIG_FVP_HAS_CCI400;
+			break;
+		case REV_BASE_FVP_REVC:
+			arm_config.flags |= (ARM_CONFIG_FVP_HAS_SMMUV3 |
+					ARM_CONFIG_FVP_HAS_CCI5XX);
+			break;
+		default:
+			WARN("Unrecognized Base FVP revision %x\n", rev);
+			break;
+		}
+		break;
+	default:
+		ERROR("Unsupported board HBI number 0x%x\n", hbi);
+		panic();
+	}
+
+	/*
+	 * We assume that the presence of MT bit, and therefore shifted
+	 * affinities, is uniform across the platform: either all CPUs, or no
+	 * CPUs implement it.
+	 */
+	if (read_mpidr_el1() & MPIDR_MT_MASK)
+		arm_config.flags |= ARM_CONFIG_FVP_SHIFTED_AFF;
+}
+
+
+void fvp_interconnect_init(void)
+{
+#if FVP_INTERCONNECT_DRIVER == FVP_CCN
+	if (ccn_get_part0_id(PLAT_ARM_CCN_BASE) != CCN_502_PART0_ID) {
+		ERROR("Unrecognized CCN variant detected. Only CCN-502"
+				" is supported");
+		panic();
+	}
+
+	plat_arm_interconnect_init();
+#else
+	uintptr_t cci_base = 0;
+	const int *cci_map = 0;
+	unsigned int map_size = 0;
+
+	if (!(arm_config.flags & (ARM_CONFIG_FVP_HAS_CCI400 |
+				ARM_CONFIG_FVP_HAS_CCI5XX))) {
+		return;
+	}
+
+	/* Initialize the right interconnect */
+	if (arm_config.flags & ARM_CONFIG_FVP_HAS_CCI5XX) {
+		cci_base = PLAT_FVP_CCI5XX_BASE;
+		cci_map = fvp_cci5xx_map;
+		map_size = ARRAY_SIZE(fvp_cci5xx_map);
+	} else if (arm_config.flags & ARM_CONFIG_FVP_HAS_CCI400) {
+		cci_base = PLAT_FVP_CCI400_BASE;
+		cci_map = fvp_cci400_map;
+		map_size = ARRAY_SIZE(fvp_cci400_map);
+	}
+
+	assert(cci_base);
+	assert(cci_map);
+	cci_init(cci_base, cci_map, map_size);
+#endif
+}
+
+void fvp_interconnect_enable(void)
+{
+#if FVP_INTERCONNECT_DRIVER == FVP_CCN
+	plat_arm_interconnect_enter_coherency();
+#else
+	unsigned int master;
+
+	if (arm_config.flags & (ARM_CONFIG_FVP_HAS_CCI400 |
+				ARM_CONFIG_FVP_HAS_CCI5XX)) {
+		master = get_interconnect_master();
+		cci_enable_snoop_dvm_reqs(master);
+	}
+#endif
+}
+
+void fvp_interconnect_disable(void)
+{
+#if FVP_INTERCONNECT_DRIVER == FVP_CCN
+	plat_arm_interconnect_exit_coherency();
+#else
+	unsigned int master;
+
+	if (arm_config.flags & (ARM_CONFIG_FVP_HAS_CCI400 |
+				ARM_CONFIG_FVP_HAS_CCI5XX)) {
+		master = get_interconnect_master();
+		cci_disable_snoop_dvm_reqs(master);
+	}
+#endif
+}
diff --git a/plat/arm/board/fvp/fvp_def.h b/plat/arm/board/fvp/fvp_def.h
new file mode 100644
index 0000000..a430bca
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_def.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FVP_DEF_H__
+#define __FVP_DEF_H__
+
+#ifndef FVP_CLUSTER_COUNT
+#define FVP_CLUSTER_COUNT		2
+#endif
+#define FVP_MAX_CPUS_PER_CLUSTER	4
+
+#ifndef FVP_MAX_PE_PER_CPU
+# define FVP_MAX_PE_PER_CPU		1
+#endif
+
+#define FVP_PRIMARY_CPU			0x0
+
+/* Defines for the Interconnect build selection */
+#define FVP_CCI			1
+#define FVP_CCN			2
+
+/*******************************************************************************
+ * FVP memory map related constants
+ ******************************************************************************/
+
+#define FLASH1_BASE			0x0c000000
+#define FLASH1_SIZE			0x04000000
+
+#define PSRAM_BASE			0x14000000
+#define PSRAM_SIZE			0x04000000
+
+#define VRAM_BASE			0x18000000
+#define VRAM_SIZE			0x02000000
+
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE			0x20000000
+#define DEVICE0_SIZE			0x0c200000
+
+/*
+ *  In case of FVP models with CCN, the CCN register space overlaps into
+ *  the NSRAM area.
+ */
+#if FVP_INTERCONNECT_DRIVER == FVP_CCN
+#define DEVICE1_BASE			0x2e000000
+#define DEVICE1_SIZE			0x1A00000
+#else
+#define DEVICE1_BASE			0x2f000000
+#define DEVICE1_SIZE			0x200000
+#define NSRAM_BASE			0x2e000000
+#define NSRAM_SIZE			0x10000
+#endif
+/* Devices in the second GB */
+#define DEVICE2_BASE			0x7fe00000
+#define DEVICE2_SIZE			0x00200000
+
+#define PCIE_EXP_BASE			0x40000000
+#define TZRNG_BASE			0x7fe60000
+
+/* Non-volatile counters */
+#define TRUSTED_NVCTR_BASE		0x7fe70000
+#define TFW_NVCTR_BASE			(TRUSTED_NVCTR_BASE + 0x0000)
+#define TFW_NVCTR_SIZE			4
+#define NTFW_CTR_BASE			(TRUSTED_NVCTR_BASE + 0x0004)
+#define NTFW_CTR_SIZE			4
+
+/* Keys */
+#define SOC_KEYS_BASE			0x7fe80000
+#define TZ_PUB_KEY_HASH_BASE		(SOC_KEYS_BASE + 0x0000)
+#define TZ_PUB_KEY_HASH_SIZE		32
+#define HU_KEY_BASE			(SOC_KEYS_BASE + 0x0020)
+#define HU_KEY_SIZE			16
+#define END_KEY_BASE			(SOC_KEYS_BASE + 0x0044)
+#define END_KEY_SIZE			32
+
+/* Constants to distinguish FVP type */
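+/*
+ * These values are compared against the HBI, REV, BLD and ARCH fields decoded
+ * from the V2M SYS_ID register in fvp_config_setup().
+ */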
+#define HBI_BASE_FVP			0x020
+#define REV_BASE_FVP_V0			0x0
+#define REV_BASE_FVP_REVC		0x2
+
+#define HBI_FOUNDATION_FVP		0x010
+#define REV_FOUNDATION_FVP_V2_0		0x0
+#define REV_FOUNDATION_FVP_V2_1		0x1
+#define REV_FOUNDATION_FVP_v9_1		0x2
+#define REV_FOUNDATION_FVP_v9_6		0x3
+
+#define BLD_GIC_VE_MMAP			0x0
+#define BLD_GIC_A53A57_MMAP		0x1
+
+#define ARCH_MODEL			0x1
+
+/* FVP Power controller base address */
+#define PWRC_BASE			0x1c100000
+
+/* FVP SP804 timer frequency is 35 MHz */
+#define SP804_TIMER_CLKMULT		1
+#define SP804_TIMER_CLKDIV		35
+
+/* SP810 controller. FVP specific flags */
+#define FVP_SP810_CTRL_TIM0_OV		(1 << 16)
+#define FVP_SP810_CTRL_TIM1_OV		(1 << 18)
+#define FVP_SP810_CTRL_TIM2_OV		(1 << 20)
+#define FVP_SP810_CTRL_TIM3_OV		(1 << 22)
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+/* VE compatible GIC memory map */
+#define VE_GICD_BASE			0x2c001000
+#define VE_GICC_BASE			0x2c002000
+#define VE_GICH_BASE			0x2c004000
+#define VE_GICV_BASE			0x2c006000
+
+/* Base FVP compatible GIC memory map */
+#define BASE_GICD_BASE			0x2f000000
+#define BASE_GICR_BASE			0x2f100000
+#define BASE_GICC_BASE			0x2c000000
+#define BASE_GICH_BASE			0x2c010000
+#define BASE_GICV_BASE			0x2c02f000
+
+#define FVP_IRQ_TZ_WDOG			56
+#define FVP_IRQ_SEC_SYS_TIMER		57
+
+
+/*******************************************************************************
+ * TrustZone address space controller related constants
+ ******************************************************************************/
+
+/* NSAIDs used by devices in TZC filter 0 on FVP */
+#define FVP_NSAID_DEFAULT		0
+#define FVP_NSAID_PCI			1
+#define FVP_NSAID_VIRTIO		8  /* from FVP v5.6 onwards */
+#define FVP_NSAID_AP			9  /* Application Processors */
+#define FVP_NSAID_VIRTIO_OLD		15 /* until FVP v5.5 */
+
+/* NSAIDs used by devices in TZC filter 2 on FVP */
+#define FVP_NSAID_HDLCD0		2
+#define FVP_NSAID_CLCD			7
+
+#endif /* __FVP_DEF_H__ */
diff --git a/plat/arm/board/fvp/fvp_err.c b/plat/arm/board/fvp/fvp_err.c
new file mode 100644
index 0000000..8d49595
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_err.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <board_arm_def.h>
+#include <debug.h>
+#include <errno.h>
+#include <norflash.h>
+#include <stdint.h>
+
+/*
+ * FVP error handler
+ */
+void plat_error_handler(int err)
+{
+	int ret;
+
+	switch (err) {
+	case -ENOENT:
+	case -EAUTH:
+		/* Image load or authentication error. Erase the ToC */
+		INFO("Erasing FIP ToC from flash...\n");
+		nor_unlock(PLAT_ARM_FIP_BASE);
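+		/* Zero the first word at the FIP base to invalidate the ToC */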
+		ret = nor_word_program(PLAT_ARM_FIP_BASE, 0);
+		if (ret) {
+			ERROR("Cannot erase ToC\n");
+		} else {
+			INFO("Done\n");
+		}
+		break;
+	default:
+		/* Unexpected error */
+		break;
+	}
+
+	/* Loop until the watchdog resets the system */
+	for (;;)
+		wfi();
+}
diff --git a/plat/arm/board/fvp/fvp_io_storage.c b/plat/arm/board/fvp/fvp_io_storage.c
new file mode 100644
index 0000000..aa2ee30
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_io_storage.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common_def.h>
+#include <debug.h>
+#include <io_driver.h>
+#include <io_semihosting.h>
+#include <io_storage.h>
+#include <plat_arm.h>
+#include <semihosting.h>	/* For FOPEN_MODE_... */
+
+/* Semihosting filenames */
+#define BL2_IMAGE_NAME			"bl2.bin"
+#define BL31_IMAGE_NAME			"bl31.bin"
+#define BL32_IMAGE_NAME			"bl32.bin"
+#define BL33_IMAGE_NAME			"bl33.bin"
+
+#if TRUSTED_BOARD_BOOT
+#define TRUSTED_BOOT_FW_CERT_NAME	"tb_fw.crt"
+#define TRUSTED_KEY_CERT_NAME		"trusted_key.crt"
+#define SOC_FW_KEY_CERT_NAME		"soc_fw_key.crt"
+#define TOS_FW_KEY_CERT_NAME		"tos_fw_key.crt"
+#define NT_FW_KEY_CERT_NAME		"nt_fw_key.crt"
+#define SOC_FW_CONTENT_CERT_NAME	"soc_fw_content.crt"
+#define TOS_FW_CONTENT_CERT_NAME	"tos_fw_content.crt"
+#define NT_FW_CONTENT_CERT_NAME		"nt_fw_content.crt"
+#endif /* TRUSTED_BOARD_BOOT */
+
+/* IO devices */
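+/*
+ * Connector and handle for the semi-hosting device. They are set up in
+ * plat_arm_io_setup() and used by plat_arm_get_alt_image_source().
+ */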
+static const io_dev_connector_t *sh_dev_con;
+static uintptr_t sh_dev_handle;
+
+static const io_file_spec_t sh_file_spec[] = {
+	[BL2_IMAGE_ID] = {
+		.path = BL2_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL31_IMAGE_ID] = {
+		.path = BL31_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL32_IMAGE_ID] = {
+		.path = BL32_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL33_IMAGE_ID] = {
+		.path = BL33_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		.path = TRUSTED_BOOT_FW_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		.path = TRUSTED_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		.path = SOC_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		.path = TOS_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		.path = NT_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		.path = SOC_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		.path = TOS_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		.path = NT_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+
+static int open_semihosting(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if the file exists on semi-hosting. */
+	result = io_dev_init(sh_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(sh_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using Semi-hosting IO\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+void plat_arm_io_setup(void)
+{
+	int io_result;
+
+	arm_io_setup();
+
+	/* Register the additional IO devices on this platform */
+	io_result = register_io_dev_sh(&sh_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(sh_dev_con, (uintptr_t)NULL, &sh_dev_handle);
+	assert(io_result == 0);
+
+	/* Ignore improbable errors in release builds */
+	(void)io_result;
+}
+
+/*
+ * FVP provides semihosting as an alternative means of loading images
+ */
+int plat_arm_get_alt_image_source(unsigned int image_id, uintptr_t *dev_handle,
+				  uintptr_t *image_spec)
+{
+	int result = open_semihosting((const uintptr_t)&sh_file_spec[image_id]);
+	if (result == 0) {
+		*dev_handle = sh_dev_handle;
+		*image_spec = (uintptr_t)&sh_file_spec[image_id];
+	}
+
+	return result;
+}
diff --git a/plat/arm/board/fvp/fvp_pm.c b/plat/arm/board/fvp/fvp_pm.c
new file mode 100644
index 0000000..faeb1b7
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_pm.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_config.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <gicv3.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <psci.h>
+#include <v2m_def.h>
+#include "drivers/pwrc/fvp_pwrc.h"
+#include "fvp_def.h"
+#include "fvp_private.h"
+
+
+#if ARM_RECOM_STATE_ID_ENC
+/*
+ *  The table storing the valid idle power states. Ensure that the
+ *  array entries are populated in ascending order of state-id to
+ *  enable us to use binary search during power state validation.
+ *  The table must be terminated by a NULL entry.
+ */
+const unsigned int arm_pm_idle_states[] = {
+	/* State-id - 0x01 */
+	arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
+			ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
+	/* State-id - 0x02 */
+	arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
+			ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
+	/* State-id - 0x22 */
+	arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
+			ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
+	/* State-id - 0x222 */
+	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
+		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
+	0,
+};
+#endif
+
+/*******************************************************************************
+ * Function which implements the common FVP specific operations to power down a
+ * cluster in response to a CPU_OFF or CPU_SUSPEND request.
+ ******************************************************************************/
+static void fvp_cluster_pwrdwn_common(void)
+{
+	uint64_t mpidr = read_mpidr_el1();
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+	/*
+	 * On power down we need to disable statistical profiling extensions
+	 * before exiting coherency.
+	 */
+	arm_disable_spe();
+#endif
+
+	/* Disable coherency if this cluster is to be turned off */
+	fvp_interconnect_disable();
+
+	/* Program the power controller to turn the cluster off */
+	fvp_pwrc_write_pcoffr(mpidr);
+}
+
+/*
+ * These empty hook implementations avoid setting the GICR_WAKER.Sleep bit
+ * on ARM GICv3 implementations on FVP. This is required because the FVP does
+ * not support SYSTEM_SUSPEND; it is `faked` in firmware. Hence, to wake up
+ * from the `fake` system suspend, the GIC must not be powered off.
+ */
+void arm_gicv3_distif_pre_save(unsigned int proc_num)
+{}
+
+void arm_gicv3_distif_post_restore(unsigned int proc_num)
+{}
+
+static void fvp_power_domain_on_finish_common(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr;
+
+	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+					ARM_LOCAL_STATE_OFF);
+
+	/* Get the mpidr for this cpu */
+	mpidr = read_mpidr_el1();
+
+	/* Perform the common cluster specific operations */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+					ARM_LOCAL_STATE_OFF) {
+		/*
+		 * This CPU might have woken up whilst the cluster was
+		 * attempting to power down. In this case the FVP power
+		 * controller will have a pending cluster power off request
+		 * which needs to be cleared by writing to the PPONR register.
+		 * This prevents the power controller from interpreting a
+		 * subsequent entry of this cpu into a simple wfi as a power
+		 * down request.
+		 */
+		fvp_pwrc_write_pponr(mpidr);
+
+		/* Enable coherency if this cluster was off */
+		fvp_interconnect_enable();
+	}
+	/* Perform the common system specific operations */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL2] ==
+						ARM_LOCAL_STATE_OFF)
+		arm_system_pwr_domain_resume();
+
+	/*
+	 * Clear PWKUPR.WEN bit to ensure interrupts do not interfere
+	 * with a cpu power down unless the bit is set again
+	 */
+	fvp_pwrc_clr_wen(mpidr);
+}
+
+
+/*******************************************************************************
+ * FVP handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+void fvp_cpu_standby(plat_local_state_t cpu_state)
+{
+
+	assert(cpu_state == ARM_LOCAL_STATE_RET);
+
+	/*
+	 * Enter standby state
+	 * dsb is good practice before using wfi to enter low power states
+	 */
+	dsb();
+	wfi();
+}
+
+/*******************************************************************************
+ * FVP handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+int fvp_pwr_domain_on(u_register_t mpidr)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int psysr;
+
+	/*
+	 * Ensure that we do not cancel an inflight power off request for the
+	 * target cpu. That would leave it in a zombie wfi. Wait for it to power
+	 * off and then program the power controller to turn that CPU on.
+	 */
+	do {
+		psysr = fvp_pwrc_read_psysr(mpidr);
+	} while (psysr & PSYSR_AFF_L0);
+
+	fvp_pwrc_write_pponr(mpidr);
+	return rc;
+}
+
+/*******************************************************************************
+ * FVP handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void fvp_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+					ARM_LOCAL_STATE_OFF);
+
+	/*
+	 * If execution reaches this stage then this power domain will be
+	 * suspended. Perform at least the cpu specific actions followed
+	 * by the cluster specific operations if applicable.
+	 */
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_arm_gic_cpuif_disable();
+
+	/* Turn redistributor off */
+	plat_arm_gic_redistif_off();
+
+	/* Program the power controller to power off this cpu. */
+	fvp_pwrc_write_ppoffr(read_mpidr_el1());
+
+	if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+					ARM_LOCAL_STATE_OFF)
+		fvp_cluster_pwrdwn_common();
+
+}
+
+/*******************************************************************************
+ * FVP handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void fvp_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr;
+
+	/*
+	 * FVP has retention only at cpu level. Just return
+	 * as nothing is to be done for retention.
+	 */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+					ARM_LOCAL_STATE_RET)
+		return;
+
+	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+					ARM_LOCAL_STATE_OFF);
+
+	/* Get the mpidr for this cpu */
+	mpidr = read_mpidr_el1();
+
+	/* Program the power controller to enable wakeup interrupts. */
+	fvp_pwrc_set_wen(mpidr);
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_arm_gic_cpuif_disable();
+
+	/*
+	 * The Redistributor is not powered off as it can potentially prevent
+	 * wake up events reaching the CPUIF and/or might lead to losing
+	 * register context.
+	 */
+
+	/* Perform the common cluster specific operations */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
+					ARM_LOCAL_STATE_OFF)
+		fvp_cluster_pwrdwn_common();
+
+	/* Perform the common system specific operations */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL2] ==
+						ARM_LOCAL_STATE_OFF)
+		arm_system_pwr_domain_save();
+
+	/* Program the power controller to power off this cpu. */
+	fvp_pwrc_write_ppoffr(read_mpidr_el1());
+}
+
+/*******************************************************************************
+ * FVP handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void fvp_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	fvp_power_domain_on_finish_common(target_state);
+
+	/* Enable the gic cpu interface */
+	plat_arm_gic_pcpu_init();
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_arm_gic_cpuif_enable();
+}
+
+/*******************************************************************************
+ * FVP handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+void fvp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	/*
+	 * Nothing to be done on waking up from retention from CPU level.
+	 */
+	if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+					ARM_LOCAL_STATE_RET)
+		return;
+
+	fvp_power_domain_on_finish_common(target_state);
+
+	/* Enable the gic cpu interface */
+	plat_arm_gic_cpuif_enable();
+}
+
+/*******************************************************************************
+ * FVP handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 fvp_system_off(void)
+{
+	/* Write the System Configuration Control Register */
+	mmio_write_32(V2M_SYSREGS_BASE + V2M_SYS_CFGCTRL,
+		V2M_CFGCTRL_START |
+		V2M_CFGCTRL_RW |
+		V2M_CFGCTRL_FUNC(V2M_FUNC_SHUTDOWN));
+	wfi();
+	ERROR("FVP System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 fvp_system_reset(void)
+{
+	/* Write the System Configuration Control Register */
+	mmio_write_32(V2M_SYSREGS_BASE + V2M_SYS_CFGCTRL,
+		V2M_CFGCTRL_START |
+		V2M_CFGCTRL_RW |
+		V2M_CFGCTRL_FUNC(V2M_FUNC_REBOOT));
+	wfi();
+	ERROR("FVP System Reset: operation not handled.\n");
+	panic();
+}
+
+static int fvp_node_hw_state(u_register_t target_cpu,
+			     unsigned int power_level)
+{
+	unsigned int psysr;
+	int ret;
+
+	/*
+	 * The format of 'power_level' is implementation-defined, but 0 must
+	 * mean a CPU. We also allow 1 to denote the cluster.
+	 */
+	if (power_level != ARM_PWR_LVL0 && power_level != ARM_PWR_LVL1)
+		return PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * Read the status of the given MPIDR from the FVP power controller. The
+	 * power controller only gives us on/off status, so map that to the
+	 * expected return values of the PSCI call.
+	 */
+	psysr = fvp_pwrc_read_psysr(target_cpu);
+	if (psysr == PSYSR_INVALID)
+		return PSCI_E_INVALID_PARAMS;
+
+	switch (power_level) {
+	case ARM_PWR_LVL0:
+		ret = (psysr & PSYSR_AFF_L0) ? HW_ON : HW_OFF;
+		break;
+	case ARM_PWR_LVL1:
+		ret = (psysr & PSYSR_AFF_L1) ? HW_ON : HW_OFF;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * The FVP doesn't truly support power management at the SYSTEM power domain
+ * level. SYSTEM_SUSPEND will be downgraded to the cluster level within the
+ * platform layer. The `fake` SYSTEM_SUSPEND allows us to validate some of the
+ * driver save and restore sequences on FVP.
+ */
+void fvp_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	unsigned int i;
+
+	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
+}
+
+/*******************************************************************************
+ * Handler to filter PSCI requests.
+ ******************************************************************************/
+/*
+ * System power domain suspend is only supported via the PSCI SYSTEM_SUSPEND
+ * API. A PSCI CPU_SUSPEND request targeting the system power domain will be
+ * downgraded to the next lower level.
+ */
+static int fvp_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int rc;
+	rc = arm_validate_power_state(power_state, req_state);
+
+	/*
+	 * Ensure that the system power domain level is never suspended
+	 * via PSCI CPU SUSPEND API. Currently system suspend is only
+	 * supported via PSCI SYSTEM SUSPEND API.
+	 */
+	req_state->pwr_domain_state[ARM_PWR_LVL2] = ARM_LOCAL_STATE_RUN;
+	return rc;
+}
+
+/*
+ * Custom `translate_power_state_by_mpidr` handler for FVP. Unlike in the
+ * `fvp_validate_power_state`, we do not downgrade the system power
+ * domain level request in `power_state` as it will be used to query the
+ * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
+ */
+static int fvp_translate_power_state_by_mpidr(u_register_t mpidr,
+		unsigned int power_state,
+		psci_power_state_t *output_state)
+{
+	return arm_validate_power_state(power_state, output_state);
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform layer will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+plat_psci_ops_t plat_arm_psci_pm_ops = {
+	.cpu_standby = fvp_cpu_standby,
+	.pwr_domain_on = fvp_pwr_domain_on,
+	.pwr_domain_off = fvp_pwr_domain_off,
+	.pwr_domain_suspend = fvp_pwr_domain_suspend,
+	.pwr_domain_on_finish = fvp_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish = fvp_pwr_domain_suspend_finish,
+	.system_off = fvp_system_off,
+	.system_reset = fvp_system_reset,
+	.validate_power_state = fvp_validate_power_state,
+	.validate_ns_entrypoint = arm_validate_ns_entrypoint,
+	.translate_power_state_by_mpidr = fvp_translate_power_state_by_mpidr,
+	.get_node_hw_state = fvp_node_hw_state,
+	.get_sys_suspend_power_state = fvp_get_sys_suspend_power_state,
+/*
+ * mem_protect is not supported in RESET_TO_BL31 and RESET_TO_SP_MIN,
+ * as that would require mapping in all of NS DRAM into BL31 or BL32.
+ */
+#if !RESET_TO_BL31 && !RESET_TO_SP_MIN
+	.mem_protect_chk	= arm_psci_mem_protect_chk,
+	.read_mem_protect	= arm_psci_read_mem_protect,
+	.write_mem_protect	= arm_nor_psci_write_mem_protect,
+#endif
+};
diff --git a/plat/arm/board/fvp/fvp_private.h b/plat/arm/board/fvp/fvp_private.h
new file mode 100644
index 0000000..c5cd07d
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FVP_PRIVATE_H__
+#define __FVP_PRIVATE_H__
+
+#include <plat_arm.h>
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+
+void fvp_config_setup(void);
+
+void fvp_interconnect_init(void);
+void fvp_interconnect_enable(void);
+void fvp_interconnect_disable(void);
+
+
+#endif /* __FVP_PRIVATE_H__ */
diff --git a/plat/arm/board/fvp/fvp_security.c b/plat/arm/board/fvp/fvp_security.c
new file mode 100644
index 0000000..4559865
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_security.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_config.h>
+#include <plat_arm.h>
+
+/*
+ * We assume that all security programming is done by the primary core.
+ */
+void plat_arm_security_setup(void)
+{
+	/*
+	 * The Base FVP has a TrustZone address space controller, the Foundation
+	 * FVP does not. Trying to program the device on the Foundation FVP will
+	 * cause an abort.
+	 *
+	 * If the platform had additional peripheral specific security
+	 * configurations, those would be configured here.
+	 */
+
+	if (get_arm_config()->flags & ARM_CONFIG_HAS_TZC)
+		arm_tzc400_setup();
+}
diff --git a/plat/arm/board/fvp/fvp_stack_protector.c b/plat/arm/board/fvp/fvp_stack_protector.c
new file mode 100644
index 0000000..1e8e301
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_stack_protector.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include <stdint.h>
+
+#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL)
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+	/*
+	 * Ideally, a random number should be returned instead of the
+	 * combination of a timer's value and a compile-time constant. As the
+	 * FVP does not have any random number generator, this is better than
+	 * nothing but not necessarily secure.
+	 */
+	return RANDOM_CANARY_VALUE ^ read_cntpct_el0();
+}
+
diff --git a/plat/arm/board/fvp/fvp_topology.c b/plat/arm/board/fvp/fvp_topology.c
new file mode 100644
index 0000000..4a007f4
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_topology.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_config.h>
+#include <cassert.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include "drivers/pwrc/fvp_pwrc.h"
+
+/* The FVP power domain tree descriptor */
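+/*
+ * 2 entries for the number of root nodes and the cluster count, plus one
+ * entry per cluster holding its CPU count.
+ */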
+unsigned char fvp_power_domain_tree_desc[FVP_CLUSTER_COUNT + 2];
+
+
+CASSERT(FVP_CLUSTER_COUNT && FVP_CLUSTER_COUNT <= 256, assert_invalid_fvp_cluster_count);
+
+/*******************************************************************************
+ * This function dynamically constructs the topology according to
+ * FVP_CLUSTER_COUNT and returns it.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	unsigned int i;
+
+	/*
+	 * The highest level is the system level. The next level is constituted
+	 * by clusters and then cores in clusters.
+	 */
+	fvp_power_domain_tree_desc[0] = 1;
+	fvp_power_domain_tree_desc[1] = FVP_CLUSTER_COUNT;
+
+	for (i = 0; i < FVP_CLUSTER_COUNT; i++)
+		fvp_power_domain_tree_desc[i + 2] = FVP_MAX_CPUS_PER_CLUSTER;
+
+
+	return fvp_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function returns the core count within the cluster corresponding to
+ * `mpidr`.
+ ******************************************************************************/
+unsigned int plat_arm_get_cluster_core_count(u_register_t mpidr)
+{
+	return FVP_MAX_CPUS_PER_CLUSTER;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int clus_id, cpu_id, thread_id;
+
+	/* Validate affinity fields */
+	if (arm_config.flags & ARM_CONFIG_FVP_SHIFTED_AFF) {
+		thread_id = MPIDR_AFFLVL0_VAL(mpidr);
+		cpu_id = MPIDR_AFFLVL1_VAL(mpidr);
+		clus_id = MPIDR_AFFLVL2_VAL(mpidr);
+	} else {
+		thread_id = 0;
+		cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+		clus_id = MPIDR_AFFLVL1_VAL(mpidr);
+	}
+
+	if (clus_id >= FVP_CLUSTER_COUNT)
+		return -1;
+	if (cpu_id >= FVP_MAX_CPUS_PER_CLUSTER)
+		return -1;
+	if (thread_id >= FVP_MAX_PE_PER_CPU)
+		return -1;
+
+	if (fvp_pwrc_read_psysr(mpidr) == PSYSR_INVALID)
+		return -1;
+
+	/*
+	 * Core position calculation for the FVP platform depends on the MT bit
+	 * in MPIDR. This function cannot assume that the supplied MPIDR has the
+	 * MT bit set even if the implementation has it. For example, PSCI
+	 * clients might supply MPIDR values without the MT bit set. Therefore,
+	 * we inject the current PE's MT bit so as to get the calculation
+	 * correct. This of course assumes that either none or all of the CPUs
+	 * on the platform have the MT bit set.
+	 */
+	mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
+	return plat_arm_calc_core_pos(mpidr);
+}
diff --git a/plat/arm/board/fvp/fvp_trusted_boot.c b/plat/arm/board/fvp/fvp_trusted_boot.c
new file mode 100644
index 0000000..d1e8b9f
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_trusted_boot.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <tbbr_oid.h>
+
+#include "fvp_def.h"
+
+/*
+ * Store a new non-volatile counter value. On some FVP versions, the
+ * non-volatile counters are RO. On these versions we expect the values in the
+ * certificates to always match the RO values so that this function is never
+ * called.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	const char *oid;
+	uint32_t *nv_ctr_addr;
+
+	assert(cookie != NULL);
+
+	oid = (const char *)cookie;
+	if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = (uint32_t *)TFW_NVCTR_BASE;
+	} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
+		nv_ctr_addr = (uint32_t *)NTFW_CTR_BASE;
+	} else {
+		return 1;
+	}
+
+	*(unsigned int *)nv_ctr_addr = nv_ctr;
+
+	/* Verify that the current value is the one we just wrote. */
+	if (nv_ctr != (unsigned int)(*nv_ctr_addr))
+		return 1;
+
+	return 0;
+}
diff --git a/plat/arm/board/fvp/include/plat.ld.S b/plat/arm/board/fvp/include/plat.ld.S
new file mode 100644
index 0000000..24c3deb
--- /dev/null
+++ b/plat/arm/board/fvp/include/plat.ld.S
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_LD_S__
+#define __PLAT_LD_S__
+
+#include <arm_common.ld.S>
+
+#endif /* __PLAT_LD_S__ */
diff --git a/plat/arm/board/fvp/include/plat_macros.S b/plat/arm/board/fvp/include/plat_macros.S
new file mode 100644
index 0000000..4dcde2d
--- /dev/null
+++ b/plat/arm/board/fvp/include/plat_macros.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <arm_macros.S>
+#include <v2m_def.h>
+#include "../fvp_def.h"
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant GIC registers whenever an
+	 * unhandled exception is taken in BL31.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	/*
+	 * Detect if we're using the base memory map or
+	 * the legacy VE memory map
+	 */
+	mov_imm	x0, (V2M_SYSREGS_BASE + V2M_SYS_ID)
+	ldr	w16, [x0]
+	/* Extract BLD (12th - 15th bits) from the SYS_ID */
+	ubfx	x16, x16, #V2M_SYS_ID_BLD_SHIFT, #4
+	/* Check if VE mmap */
+	cmp	w16, #BLD_GIC_VE_MMAP
+	b.eq	use_ve_mmap
+	/* Assume Base Cortex mmap */
+	mov_imm	x17, BASE_GICC_BASE
+	mov_imm	x16, BASE_GICD_BASE
+	b	print_gic_regs
+use_ve_mmap:
+	mov_imm	x17, VE_GICC_BASE
+	mov_imm	x16, VE_GICD_BASE
+print_gic_regs:
+	arm_print_gic_regs
+	.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
new file mode 100644
index 0000000..310db7b
--- /dev/null
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arm_def.h>
+#include <board_arm_def.h>
+#include <common_def.h>
+#include <tzc400.h>
+#include <utils_def.h>
+#include <v2m_def.h>
+#include "../fvp_def.h"
+
+/* Required platform porting definitions */
+#define PLATFORM_CORE_COUNT \
+	(FVP_CLUSTER_COUNT * FVP_MAX_CPUS_PER_CLUSTER * FVP_MAX_PE_PER_CPU)
+
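+/* One system-level power domain, plus one per cluster and one per core */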
+#define PLAT_NUM_PWR_DOMAINS		(FVP_CLUSTER_COUNT + \
+					PLATFORM_CORE_COUNT) + 1
+
+#define PLAT_MAX_PWR_LVL		ARM_PWR_LVL2
+
+/*
+ * Other platform porting definitions are provided by included headers
+ */
+
+/*
+ * Required ARM standard platform porting definitions
+ */
+#define PLAT_ARM_CLUSTER_COUNT		FVP_CLUSTER_COUNT
+
+#define PLAT_ARM_TRUSTED_ROM_BASE	0x00000000
+#define PLAT_ARM_TRUSTED_ROM_SIZE	0x04000000	/* 64 MB */
+
+#define PLAT_ARM_TRUSTED_DRAM_BASE	0x06000000
+#define PLAT_ARM_TRUSTED_DRAM_SIZE	0x02000000	/* 32 MB */
+
+/* No SCP in FVP */
+#define PLAT_ARM_SCP_TZC_DRAM1_SIZE	ULL(0x0)
+
+#define PLAT_ARM_DRAM2_SIZE		ULL(0x780000000)
+
+/*
+ * Load address of BL33 for this platform port
+ */
+#define PLAT_ARM_NS_IMAGE_OFFSET	(ARM_DRAM1_BASE + 0x8000000)
+
+
+/*
+ * PL011 related constants
+ */
+#define PLAT_ARM_BOOT_UART_BASE		V2M_IOFPGA_UART0_BASE
+#define PLAT_ARM_BOOT_UART_CLK_IN_HZ	V2M_IOFPGA_UART0_CLK_IN_HZ
+
+#define PLAT_ARM_BL31_RUN_UART_BASE		V2M_IOFPGA_UART1_BASE
+#define PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ	V2M_IOFPGA_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_SP_MIN_RUN_UART_BASE		V2M_IOFPGA_UART1_BASE
+#define PLAT_ARM_SP_MIN_RUN_UART_CLK_IN_HZ	V2M_IOFPGA_UART1_CLK_IN_HZ
+
+#define PLAT_ARM_CRASH_UART_BASE	PLAT_ARM_BL31_RUN_UART_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ	PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ
+
+#define PLAT_ARM_TSP_UART_BASE		V2M_IOFPGA_UART2_BASE
+#define PLAT_ARM_TSP_UART_CLK_IN_HZ	V2M_IOFPGA_UART2_CLK_IN_HZ
+
+#define PLAT_FVP_SMMUV3_BASE		0x2b400000
+
+/* CCI related constants */
+#define PLAT_FVP_CCI400_BASE		0x2c090000
+#define PLAT_FVP_CCI400_CLUS0_SL_PORT	3
+#define PLAT_FVP_CCI400_CLUS1_SL_PORT	4
+
+/* CCI-500/CCI-550 on Base platform */
+#define PLAT_FVP_CCI5XX_BASE		0x2a000000
+#define PLAT_FVP_CCI5XX_CLUS0_SL_PORT	5
+#define PLAT_FVP_CCI5XX_CLUS1_SL_PORT	6
+
+/* CCN related constants. Only CCN 502 is currently supported */
+#define PLAT_ARM_CCN_BASE		0x2e000000
+#define PLAT_ARM_CLUSTER_TO_CCN_ID_MAP	1, 5, 7, 11
+
+/* System timer related constants */
+#define PLAT_ARM_NSTIMER_FRAME_ID		1
+
+/* Mailbox base address */
+#define PLAT_ARM_TRUSTED_MAILBOX_BASE	ARM_TRUSTED_SRAM_BASE
+
+
+/* TrustZone controller related constants
+ *
+ * Currently only filters 0 and 2 are connected on Base FVP.
+ * Filter 0 : CPU clusters (no access to DRAM by default)
+ * Filter 1 : not connected
+ * Filter 2 : LCDs (access to VRAM allowed by default)
+ * Filter 3 : not connected
+ * Programming unconnected filters will have no effect at the moment. These
+ * filters could, however, be connected in the future, so care should be taken
+ * not to configure the unused filters.
+ *
+ * Allow only non-secure access to all DRAM to supported devices.
+ * Give access to the CPUs and Virtio. Some devices
+ * would normally use the default ID so allow that too.
+ */
+#define PLAT_ARM_TZC_BASE		0x2a4a0000
+#define PLAT_ARM_TZC_FILTERS		TZC_400_REGION_ATTR_FILTER_BIT(0)
+
+#define PLAT_ARM_TZC_NS_DEV_ACCESS	(				\
+		TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT)	|	\
+		TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI)		|	\
+		TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP)		|	\
+		TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO)	|	\
+		TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD))
+
+/*
+ * GIC related constants to cater for both GICv2 and GICv3 instances of an
+ * FVP. They could be overridden at runtime in case the FVP implements the legacy
+ * VE memory map.
+ */
+#define PLAT_ARM_GICD_BASE		BASE_GICD_BASE
+#define PLAT_ARM_GICR_BASE		BASE_GICR_BASE
+#define PLAT_ARM_GICC_BASE		BASE_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_ARM_G1S_IRQS		ARM_G1S_IRQS,			\
+					FVP_IRQ_TZ_WDOG,		\
+					FVP_IRQ_SEC_SYS_TIMER
+
+#define PLAT_ARM_G0_IRQS		ARM_G0_IRQS
+
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+	ARM_G1S_IRQ_PROPS(grp), \
+	INTR_PROP_DESC(FVP_IRQ_TZ_WDOG, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(FVP_IRQ_SEC_SYS_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+
+#define PLAT_ARM_G0_IRQ_PROPS(grp)	ARM_G0_IRQ_PROPS(grp)
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
new file mode 100644
index 0000000..1b50296
--- /dev/null
+++ b/plat/arm/board/fvp/platform.mk
@@ -0,0 +1,162 @@
+#
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Use the GICv3 driver on the FVP by default
+FVP_USE_GIC_DRIVER	:= FVP_GICV3
+
+# Use the SP804 timer instead of the generic one
+FVP_USE_SP804_TIMER	:= 0
+
+# Default cluster count for FVP
+FVP_CLUSTER_COUNT	:= 2
+
+# Default number of threads per CPU on FVP
+FVP_MAX_PE_PER_CPU	:= 1
+
+$(eval $(call assert_boolean,FVP_USE_SP804_TIMER))
+$(eval $(call add_define,FVP_USE_SP804_TIMER))
+
+# The FVP platform depends on this macro to build with the correct GIC driver.
+$(eval $(call add_define,FVP_USE_GIC_DRIVER))
+
+# Pass FVP_CLUSTER_COUNT to the build system.
+$(eval $(call add_define,FVP_CLUSTER_COUNT))
+
+# Pass FVP_MAX_PE_PER_CPU to the build system.
+$(eval $(call add_define,FVP_MAX_PE_PER_CPU))
+
+# Sanity check the cluster count: if FVP_CLUSTER_COUNT is 1 or 2,
+# choose the CCI driver, else the CCN driver
+ifeq ($(FVP_CLUSTER_COUNT), 0)
+$(error "Incorrect cluster count specified for FVP port")
+else ifeq ($(FVP_CLUSTER_COUNT),$(filter $(FVP_CLUSTER_COUNT),1 2))
+FVP_INTERCONNECT_DRIVER := FVP_CCI
+else
+FVP_INTERCONNECT_DRIVER := FVP_CCN
+endif
+
+$(eval $(call add_define,FVP_INTERCONNECT_DRIVER))
+
+FVP_GICV3_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v3/gicv3_main.c		\
+				drivers/arm/gic/v3/gicv3_helpers.c	\
+				plat/common/plat_gicv3.c		\
+				plat/arm/common/arm_gicv3.c
+
+# Choose the GIC sources depending upon how the FVP will be invoked
+ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV3)
+FVP_GIC_SOURCES		:=	${FVP_GICV3_SOURCES}			\
+				drivers/arm/gic/v3/gic500.c
+else ifeq (${FVP_USE_GIC_DRIVER},FVP_GIC600)
+FVP_GIC_SOURCES		:=	${FVP_GICV3_SOURCES}			\
+				drivers/arm/gic/v3/gic600.c
+else ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV2)
+FVP_GIC_SOURCES		:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				plat/common/plat_gicv2.c		\
+				plat/arm/common/arm_gicv2.c
+else ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV3_LEGACY)
+  ifeq (${ARCH}, aarch32)
+    $(error "GICV3 Legacy driver not supported for AArch32 build")
+  endif
+FVP_GIC_SOURCES		:=	drivers/arm/gic/arm_gic.c		\
+				drivers/arm/gic/gic_v2.c		\
+				drivers/arm/gic/gic_v3.c		\
+				plat/common/plat_gic.c			\
+				plat/arm/common/arm_gicv3_legacy.c
+else
+$(error "Incorrect GIC driver chosen on FVP port")
+endif
+
+ifeq (${FVP_INTERCONNECT_DRIVER}, FVP_CCI)
+FVP_INTERCONNECT_SOURCES	:= 	drivers/arm/cci/cci.c
+else ifeq (${FVP_INTERCONNECT_DRIVER}, FVP_CCN)
+FVP_INTERCONNECT_SOURCES	:= 	drivers/arm/ccn/ccn.c		\
+					plat/arm/common/arm_ccn.c
+else
+$(error "Incorrect CCN driver chosen on FVP port")
+endif
+
+FVP_SECURITY_SOURCES	:=	drivers/arm/tzc/tzc400.c		\
+				plat/arm/board/fvp/fvp_security.c	\
+				plat/arm/common/arm_tzc400.c
+
+
+PLAT_INCLUDES		:=	-Iplat/arm/board/fvp/include
+
+
+PLAT_BL_COMMON_SOURCES	:=	plat/arm/board/fvp/fvp_common.c
+
+FVP_CPU_LIBS		:=	lib/cpus/${ARCH}/aem_generic.S
+
+ifeq (${ARCH}, aarch64)
+FVP_CPU_LIBS		+=	lib/cpus/aarch64/cortex_a35.S			\
+				lib/cpus/aarch64/cortex_a53.S			\
+				lib/cpus/aarch64/cortex_a55.S			\
+				lib/cpus/aarch64/cortex_a57.S			\
+				lib/cpus/aarch64/cortex_a72.S			\
+				lib/cpus/aarch64/cortex_a73.S			\
+				lib/cpus/aarch64/cortex_a75.S
+else
+FVP_CPU_LIBS		+=	lib/cpus/aarch32/cortex_a32.S
+endif
+
+BL1_SOURCES		+=	drivers/io/io_semihosting.c			\
+				lib/semihosting/semihosting.c			\
+				lib/semihosting/${ARCH}/semihosting_call.S	\
+				plat/arm/board/fvp/${ARCH}/fvp_helpers.S	\
+				plat/arm/board/fvp/fvp_bl1_setup.c		\
+				plat/arm/board/fvp/fvp_err.c			\
+				plat/arm/board/fvp/fvp_io_storage.c		\
+				plat/arm/board/fvp/fvp_trusted_boot.c		\
+				${FVP_CPU_LIBS}					\
+				${FVP_INTERCONNECT_SOURCES}
+
+
+BL2_SOURCES		+=	drivers/io/io_semihosting.c			\
+				lib/semihosting/semihosting.c			\
+				lib/semihosting/${ARCH}/semihosting_call.S	\
+				plat/arm/board/fvp/fvp_bl2_setup.c		\
+				plat/arm/board/fvp/fvp_err.c			\
+				plat/arm/board/fvp/fvp_io_storage.c		\
+				plat/arm/board/fvp/fvp_trusted_boot.c		\
+				${FVP_SECURITY_SOURCES}
+
+ifeq (${FVP_USE_SP804_TIMER},1)
+BL2_SOURCES		+=	drivers/arm/sp804/sp804_delay_timer.c
+endif
+
+BL2U_SOURCES		+=	plat/arm/board/fvp/fvp_bl2u_setup.c		\
+				${FVP_SECURITY_SOURCES}
+
+BL31_SOURCES		+=	drivers/arm/smmu/smmu_v3.c			\
+				plat/arm/board/fvp/fvp_bl31_setup.c		\
+				plat/arm/board/fvp/fvp_pm.c			\
+				plat/arm/board/fvp/fvp_topology.c		\
+				plat/arm/board/fvp/aarch64/fvp_helpers.S	\
+				plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c	\
+				${FVP_CPU_LIBS}					\
+				${FVP_GIC_SOURCES}				\
+				${FVP_INTERCONNECT_SOURCES}			\
+				${FVP_SECURITY_SOURCES}
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT	:= 	0
+
+ifneq (${ENABLE_STACK_PROTECTOR},0)
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/board/fvp/fvp_stack_protector.c
+endif
+
+ifeq (${ARCH},aarch32)
+    NEED_BL32 := yes
+endif
+
+# Add support for platform supplied linker script for BL31 build
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+
+include plat/arm/board/common/board_common.mk
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
new file mode 100644
index 0000000..b924636
--- /dev/null
+++ b/plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+#include "../fvp_private.h"
+
+void sp_min_early_platform_setup(void *from_bl2,
+		void *plat_params_from_bl2)
+{
+	arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+
+	/*
+	 * Initialize the correct interconnect for this cluster during cold
+	 * boot. No need for locks as no other CPU is active.
+	 */
+	fvp_interconnect_init();
+
+	/*
+	 * Enable coherency in interconnect for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * FVP PSCI code will enable coherency for other clusters.
+	 */
+	fvp_interconnect_enable();
+}
diff --git a/plat/arm/board/fvp/sp_min/sp_min-fvp.mk b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
new file mode 100644
index 0000000..b370fd5
--- /dev/null
+++ b/plat/arm/board/fvp/sp_min/sp_min-fvp.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# SP_MIN source files specific to FVP platform
+BL32_SOURCES		+=	lib/utils/mem_region.c				\
+				plat/arm/board/fvp/aarch32/fvp_helpers.S	\
+				plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c	\
+				plat/arm/board/fvp/fvp_pm.c			\
+				plat/arm/board/fvp/fvp_topology.c		\
+				plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c	\
+				plat/arm/board/common/drivers/norflash/norflash.c	\
+				plat/arm/common/arm_nor_psci_mem_protect.c	\
+				${FVP_CPU_LIBS}					\
+				${FVP_GIC_SOURCES}				\
+				${FVP_INTERCONNECT_SOURCES}			\
+				${FVP_SECURITY_SOURCES}
+
+include plat/arm/common/sp_min/arm_sp_min.mk
diff --git a/plat/arm/board/fvp/tsp/fvp_tsp_setup.c b/plat/arm/board/fvp/tsp/fvp_tsp_setup.c
new file mode 100644
index 0000000..86d265a
--- /dev/null
+++ b/plat/arm/board/fvp/tsp/fvp_tsp_setup.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+#include "../fvp_private.h"
+
+void tsp_early_platform_setup(void)
+{
+	arm_tsp_early_platform_setup();
+
+	/* Initialize the platform config for future decision making */
+	fvp_config_setup();
+}
diff --git a/plat/arm/board/fvp/tsp/tsp-fvp.mk b/plat/arm/board/fvp/tsp/tsp-fvp.mk
new file mode 100644
index 0000000..861fe72
--- /dev/null
+++ b/plat/arm/board/fvp/tsp/tsp-fvp.mk
@@ -0,0 +1,14 @@
+#
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TSP source files specific to FVP platform
+BL32_SOURCES		+=	plat/arm/board/fvp/aarch64/fvp_helpers.S	\
+				plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c	\
+				plat/arm/board/fvp/fvp_topology.c		\
+				plat/arm/board/fvp/tsp/fvp_tsp_setup.c		\
+				${FVP_GIC_SOURCES}
+
+include plat/arm/common/tsp/arm_tsp.mk
diff --git a/plat/arm/board/juno/aarch32/juno_helpers.S b/plat/arm/board/juno/aarch32/juno_helpers.S
new file mode 100644
index 0000000..824002a
--- /dev/null
+++ b/plat/arm/board/juno/aarch32/juno_helpers.S
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <cortex_a72.h>
+#include <v2m_def.h>
+#include "../juno_def.h"
+
+
+	.globl	plat_reset_handler
+	.globl	plat_arm_calc_core_pos
+
+#define JUNO_REVISION(rev)	REV_JUNO_R##rev
+#define JUNO_HANDLER(rev)	plat_reset_handler_juno_r##rev
+#define JUMP_TO_HANDLER_IF_JUNO_R(revision)	\
+	jump_to_handler JUNO_REVISION(revision), JUNO_HANDLER(revision)
+
+	/* --------------------------------------------------------------------
+	 * Helper macro to jump to the given handler if the board revision
+	 * matches.
+	 * Expects the Juno board revision in r0.
+	 * --------------------------------------------------------------------
+	 */
+	.macro jump_to_handler _revision, _handler
+	cmp	r0, #\_revision
+	beq	\_handler
+	.endm
+
+	/* --------------------------------------------------------------------
+	 * Helper macro that reads the part number of the current CPU and jumps
+	 * to the given label if it matches the CPU MIDR provided.
+	 *
+	 * Clobbers r0 and r1.
+	 * --------------------------------------------------------------------
+	 */
+	.macro  jump_if_cpu_midr _cpu_midr, _label
+	ldcopr	r0, MIDR
+	ubfx	r0, r0, #MIDR_PN_SHIFT, #12
+	ldr	r1, =((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	cmp	r0, r1
+	beq	\_label
+	.endm
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R0.
+	 *
+	 * Juno R0 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A57 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Implement workaround for defect id 831273 by enabling an event
+	 *   stream every 65536 cycles.
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 * - Set the L2 Tag RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(0)
+	/* --------------------------------------------------------------------
+	 * Enable the event stream every 65536 cycles
+	 * --------------------------------------------------------------------
+	 */
+	mov	r0, #(0xf << EVNTI_SHIFT)
+	orr	r0, r0, #EVNTEN_BIT
+	stcopr	r0, CNTKCTL
+
+	/* --------------------------------------------------------------------
+	 * Nothing else to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A53_MIDR, 1f
+
+	/* --------------------------------------------------------------------
+	 * Cortex-A57 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	r0, #((CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+		      (CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT))
+	stcopr	r0, CORTEX_A57_L2CTLR
+1:
+	isb
+	bx	lr
+endfunc JUNO_HANDLER(0)
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R1.
+	 *
+	 * Juno R1 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A57 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 *
+	 * Note that:
+	 * - The default value for the L2 Tag RAM latency for Cortex-A57 is
+	 *   suitable.
+	 * - Defect #831273 doesn't affect Juno R1.
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(1)
+	/* --------------------------------------------------------------------
+	 * Nothing to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A57_MIDR, A57
+	bx	lr
+
+A57:
+	/* --------------------------------------------------------------------
+	 * Cortex-A57 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	r0, #(CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT)
+	stcopr	r0, CORTEX_A57_L2CTLR
+	isb
+	bx	lr
+endfunc JUNO_HANDLER(1)
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R2.
+	 *
+	 * Juno R2 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A72 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
+	 * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
+	 *
+	 * Note that:
+	 * - Defect #831273 doesn't affect Juno R2.
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(2)
+	/* --------------------------------------------------------------------
+	 * Nothing to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A72_MIDR, A72
+	bx	lr
+
+A72:
+	/* --------------------------------------------------------------------
+	 * Cortex-A72 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	r0, #((CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+		      (CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES << CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT))
+	stcopr	r0, CORTEX_A72_L2CTLR
+	isb
+	bx	lr
+endfunc JUNO_HANDLER(2)
+
+	/* --------------------------------------------------------------------
+	 * void plat_reset_handler(void);
+	 *
+	 * Determine the Juno board revision and call the appropriate reset
+	 * handler.
+	 * --------------------------------------------------------------------
+	 */
+func plat_reset_handler
+	/* Read the V2M SYS_ID register */
+	ldr	r0, =(V2M_SYSREGS_BASE + V2M_SYS_ID)
+	ldr	r1, [r0]
+	/* Extract board revision from the SYS_ID */
+	ubfx	r0, r1, #V2M_SYS_ID_REV_SHIFT, #4
+
+	JUMP_TO_HANDLER_IF_JUNO_R(0)
+	JUMP_TO_HANDLER_IF_JUNO_R(1)
+	JUMP_TO_HANDLER_IF_JUNO_R(2)
+
+	/* Board revision is not supported */
+	no_ret	plat_panic_handler
+
+endfunc plat_reset_handler
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *  Helper function to calculate the core position.
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	b	css_calc_core_pos_swap_cluster
+endfunc plat_arm_calc_core_pos
diff --git a/plat/arm/board/juno/aarch64/juno_helpers.S b/plat/arm/board/juno/aarch64/juno_helpers.S
new file mode 100644
index 0000000..29c2c0a
--- /dev/null
+++ b/plat/arm/board/juno/aarch64/juno_helpers.S
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <css_def.h>
+#include <v2m_def.h>
+#include "../juno_def.h"
+
+
+	.globl	plat_reset_handler
+	.globl	plat_arm_calc_core_pos
+#if JUNO_AARCH32_EL3_RUNTIME
+	.globl	plat_get_my_entrypoint
+	.globl	juno_reset_to_aarch32_state
+#endif
+
+#define JUNO_REVISION(rev)	REV_JUNO_R##rev
+#define JUNO_HANDLER(rev)	plat_reset_handler_juno_r##rev
+#define JUMP_TO_HANDLER_IF_JUNO_R(revision)	\
+	jump_to_handler JUNO_REVISION(revision), JUNO_HANDLER(revision)
+
+	/* --------------------------------------------------------------------
+	 * Helper macro to jump to the given handler if the board revision
+	 * matches.
+	 * Expects the Juno board revision in x0.
+	 * --------------------------------------------------------------------
+	 */
+	.macro jump_to_handler _revision, _handler
+	cmp	x0, #\_revision
+	b.eq	\_handler
+	.endm
+
+	/* --------------------------------------------------------------------
+	 * Helper macro that reads the part number of the current CPU and jumps
+	 * to the given label if it matches the CPU MIDR provided.
+	 *
+	 * Clobbers x0.
+	 * --------------------------------------------------------------------
+	 */
+	.macro  jump_if_cpu_midr _cpu_midr, _label
+	mrs	x0, midr_el1
+	ubfx	x0, x0, MIDR_PN_SHIFT, #12
+	cmp     w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	b.eq	\_label
+	.endm
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R0.
+	 *
+	 * Juno R0 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A57 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Implement workaround for defect id 831273 by enabling an event
+	 *   stream every 65536 cycles.
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 * - Set the L2 Tag RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(0)
+	/* --------------------------------------------------------------------
+	 * Enable the event stream every 65536 cycles
+	 * --------------------------------------------------------------------
+	 */
+	mov     x0, #(0xf << EVNTI_SHIFT)
+	orr     x0, x0, #EVNTEN_BIT
+	msr     CNTKCTL_EL1, x0
+
+	/* --------------------------------------------------------------------
+	 * Nothing else to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A53_MIDR, 1f
+
+	/* --------------------------------------------------------------------
+	 * Cortex-A57 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	x0, #((CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+		      (CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT))
+	msr     CORTEX_A57_L2CTLR_EL1, x0
+1:
+	isb
+	ret
+endfunc JUNO_HANDLER(0)
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R1.
+	 *
+	 * Juno R1 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A57 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57
+	 *
+	 * Note that:
+	 * - The default value for the L2 Tag RAM latency for Cortex-A57 is
+	 *   suitable.
+	 * - Defect #831273 doesn't affect Juno R1.
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(1)
+	/* --------------------------------------------------------------------
+	 * Nothing to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A57_MIDR, A57
+	ret
+
+A57:
+	/* --------------------------------------------------------------------
+	 * Cortex-A57 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	x0, #(CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT)
+	msr     CORTEX_A57_L2CTLR_EL1, x0
+	isb
+	ret
+endfunc JUNO_HANDLER(1)
+
+	/* --------------------------------------------------------------------
+	 * Platform reset handler for Juno R2.
+	 *
+	 * Juno R2 has the following topology:
+	 * - Quad core Cortex-A53 processor cluster;
+	 * - Dual core Cortex-A72 processor cluster.
+	 *
+	 * This handler does the following:
+	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
+	 * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
+	 *
+	 * Note that:
+	 * - Defect #831273 doesn't affect Juno R2.
+	 * --------------------------------------------------------------------
+	 */
+func JUNO_HANDLER(2)
+	/* --------------------------------------------------------------------
+	 * Nothing to do on Cortex-A53.
+	 * --------------------------------------------------------------------
+	 */
+	jump_if_cpu_midr CORTEX_A72_MIDR, A72
+	ret
+
+A72:
+	/* --------------------------------------------------------------------
+	 * Cortex-A72 specific settings
+	 * --------------------------------------------------------------------
+	 */
+	mov	x0, #((CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+		      (CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES << CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT))
+	msr     CORTEX_A57_L2CTLR_EL1, x0
+	isb
+	ret
+endfunc JUNO_HANDLER(2)
+
+	/* --------------------------------------------------------------------
+	 * void plat_reset_handler(void);
+	 *
+	 * Determine the Juno board revision and call the appropriate reset
+	 * handler.
+	 * --------------------------------------------------------------------
+	 */
+func plat_reset_handler
+	/* Read the V2M SYS_ID register */
+	mov_imm	x0, (V2M_SYSREGS_BASE + V2M_SYS_ID)
+	ldr	w1, [x0]
+	/* Extract board revision from the SYS_ID */
+	ubfx	x0, x1, #V2M_SYS_ID_REV_SHIFT, #4
+
+	JUMP_TO_HANDLER_IF_JUNO_R(0)
+	JUMP_TO_HANDLER_IF_JUNO_R(1)
+	JUMP_TO_HANDLER_IF_JUNO_R(2)
+
+	/* Board revision is not supported */
+	no_ret	plat_panic_handler
+
+endfunc plat_reset_handler
+
+	/* -----------------------------------------------------
+	 *  void juno_do_reset_to_aarch32_state(void);
+	 *
+	 *  Request warm reset to AArch32 mode.
+	 * -----------------------------------------------------
+	 */
+func juno_do_reset_to_aarch32_state
+	mov	x0, #RMR_EL3_RR_BIT
+	dsb	sy
+	msr	rmr_el3, x0
+	isb
+	wfi
+	b	plat_panic_handler
+endfunc juno_do_reset_to_aarch32_state
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *  Helper function to calculate the core position.
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	b	css_calc_core_pos_swap_cluster
+endfunc plat_arm_calc_core_pos
+
+#if JUNO_AARCH32_EL3_RUNTIME
+	/* ---------------------------------------------------------------------
+	 * uintptr_t plat_get_my_entrypoint (void);
+	 *
+	 * The main job of this routine is to distinguish between a cold and a warm
+	 * boot. On the Juno platform, this distinction is based on the contents of
+	 * the Trusted Mailbox. It is initialised to zero by the SCP before the
+	 * AP cores are released from reset. Therefore, a zero mailbox means
+	 * it's a cold reset. If it is a warm boot then a request to reset to
+	 * AArch32 state is issued. This is the only way to reset to AArch32
+	 * in EL3 on Juno. A trampoline located at the high vector address
+	 * has already been prepared by BL1.
+	 *
+	 * This function returns the contents of the mailbox, i.e.:
+	 *  - 0 for a cold boot;
+	 *  - for a warm boot it does not return: a warm reset into AArch32
+	 *    state is requested instead.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	x0, [x0]
+	cbz	x0, return
+	b	juno_do_reset_to_aarch32_state
+return:
+	ret
+endfunc plat_get_my_entrypoint
+
+/*
+ * Emit the A32 encoding of "movw r0, #imm16", where imm16 is the
+ * lower 16 bits of `_val`, into the register `_reg_d`.
+ */
+.macro emit_movw _reg_d, _val
+	mov_imm	\_reg_d, (0xe3000000 | \
+			((\_val & 0xfff) | \
+			((\_val & 0xf000) << 4)))
+.endm
+
+/*
+ * Emit the A32 encoding of "movt r0, #imm16", where imm16 is the
+ * upper 16 bits of `_val`, into the register `_reg_d`.
+ */
+.macro emit_movt _reg_d, _val
+	mov_imm	\_reg_d, (0xe3400000 | \
+			(((\_val & 0x0fff0000) >> 16) | \
+			((\_val & 0xf0000000) >> 12)))
+.endm
+
+/*
+ * This function writes the trampoline code at the HI-VEC address
+ * (0xFFFF0000). The trampoline loads r0 with the entry point address of
+ * BL32 (a.k.a. SP_MIN) once EL3 is running in AArch32 mode. A warm reset
+ * to AArch32 mode is then requested by writing into RMR_EL3.
+ */
+func juno_reset_to_aarch32_state
+	/*
+	 * Invalidate all caches before the warm reset to AArch32 state.
+	 * This is required on the Juno AArch32 boot flow because the L2
+	 * unified cache may contain code and data from when the processor
+	 * was still executing in AArch64 state.  This code only runs on
+	 * the primary core, all other cores are powered down.
+	 */
+	mov	x0, #DCISW
+	bl	dcsw_op_all
+
+	emit_movw	w0, BL32_BASE
+	emit_movt	w1, BL32_BASE
+	/* opcode "bx r0" to branch using r0 in AArch32 mode */
+	mov_imm	w2, 0xe12fff10
+
+	/* Write the above opcodes at HI-VECTOR location */
+	mov_imm	x3, HI_VECTOR_BASE
+	str	w0, [x3], #4
+	str	w1, [x3], #4
+	str	w2, [x3]
+
+	b	juno_do_reset_to_aarch32_state
+endfunc juno_reset_to_aarch32_state
+
+#endif /* JUNO_AARCH32_EL3_RUNTIME */
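A side note on the trampoline above: emit_movw/emit_movt hand-assemble the A32 MOVW/MOVT encodings (imm12 in bits 11:0, imm4 in bits 19:16, Rd fixed to r0). The following is a minimal host-side C sketch of the same encoding, using a made-up BL32 base address purely for illustration; it is not part of the firmware.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of emit_movw/emit_movt: split a 32-bit address into the A32
     * MOVW/MOVT immediate fields (imm12 = bits 11:0, imm4 = bits 19:16). */
    static uint32_t a32_movw_r0(uint32_t addr)
    {
        return 0xe3000000u | (addr & 0xfffu) | ((addr & 0xf000u) << 4);
    }

    static uint32_t a32_movt_r0(uint32_t addr)
    {
        return 0xe3400000u | ((addr & 0x0fff0000u) >> 16) |
               ((addr & 0xf0000000u) >> 12);
    }

    int main(void)
    {
        uint32_t bl32_base = 0x04003000u;   /* hypothetical value */

        /* The three words the code above writes at HI_VECTOR_BASE:
         * movw r0, #lo16 ; movt r0, #hi16 ; bx r0 */
        printf("%08x %08x %08x\n",
               (unsigned int)a32_movw_r0(bl32_base),
               (unsigned int)a32_movt_r0(bl32_base),
               0xe12fff10u /* bx r0 */);
        return 0;
    }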
diff --git a/plat/arm/board/juno/include/plat_macros.S b/plat/arm/board/juno/include/plat_macros.S
new file mode 100644
index 0000000..0dd96c4
--- /dev/null
+++ b/plat/arm/board/juno/include/plat_macros.S
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <cci_macros.S>
+#include <css_macros.S>
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant platform registers
+	 * whenever an unhandled exception is taken in
+	 * BL31.
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	css_print_gic_regs
+	print_cci_regs
+	.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h
new file mode 100644
index 0000000..ccc7771
--- /dev/null
+++ b/plat/arm/board/juno/include/platform_def.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arm_def.h>
+#include <board_arm_def.h>
+#include <board_css_def.h>
+#include <common_def.h>
+#include <css_def.h>
+#if TRUSTED_BOARD_BOOT
+#include <mbedtls_config.h>
+#endif
+#include <soc_css_def.h>
+#include <tzc400.h>
+#include <v2m_def.h>
+#include "../juno_def.h"
+
+/* Required platform porting definitions */
+/* Juno supports system power domain */
+#define PLAT_MAX_PWR_LVL		ARM_PWR_LVL2
+#define PLAT_NUM_PWR_DOMAINS		(ARM_SYSTEM_COUNT + \
+					JUNO_CLUSTER_COUNT + \
+					PLATFORM_CORE_COUNT)
+#define PLATFORM_CORE_COUNT		(JUNO_CLUSTER0_CORE_COUNT + \
+					JUNO_CLUSTER1_CORE_COUNT)
+
+/* Cryptocell HW Base address */
+#define PLAT_CRYPTOCELL_BASE		0x60050000
+
+/*
+ * Other platform porting definitions are provided by included headers
+ */
+
+/*
+ * Required ARM standard platform porting definitions
+ */
+#define PLAT_ARM_CLUSTER_COUNT		JUNO_CLUSTER_COUNT
+
+/* Use the bypass address */
+#define PLAT_ARM_TRUSTED_ROM_BASE	(V2M_FLASH0_BASE + BL1_ROM_BYPASS_OFFSET)
+
+/*
+ * Actual ROM size on Juno is 64 KB, but TBB currently requires at least 80 KB
+ * in debug mode. We can test TBB on Juno by bypassing the ROM and using
+ * 128 KB of flash instead.
+ */
+#if TRUSTED_BOARD_BOOT
+#define PLAT_ARM_TRUSTED_ROM_SIZE	0x00020000
+#else
+#define PLAT_ARM_TRUSTED_ROM_SIZE	0x00010000
+#endif /* TRUSTED_BOARD_BOOT */
+
+/*
+ * If ARM_BOARD_OPTIMISE_MEM=0 then Juno uses the default, unoptimised values
+ * defined for ARM development platforms.
+ */
+#if ARM_BOARD_OPTIMISE_MEM
+/*
+ * PLAT_ARM_MMAP_ENTRIES depends on the number of entries in the
+ * plat_arm_mmap array defined for each BL stage.
+ */
+#ifdef IMAGE_BL1
+# define PLAT_ARM_MMAP_ENTRIES		7
+# define MAX_XLAT_TABLES		4
+#endif
+
+#ifdef IMAGE_BL2
+#ifdef SPD_opteed
+# define PLAT_ARM_MMAP_ENTRIES		11
+# define MAX_XLAT_TABLES		5
+#else
+# define PLAT_ARM_MMAP_ENTRIES		10
+# define MAX_XLAT_TABLES		4
+#endif
+#endif
+
+#ifdef IMAGE_BL2U
+# define PLAT_ARM_MMAP_ENTRIES		4
+# define MAX_XLAT_TABLES		3
+#endif
+
+#ifdef IMAGE_BL31
+#  define PLAT_ARM_MMAP_ENTRIES		7
+#  define MAX_XLAT_TABLES		3
+#endif
+
+#ifdef IMAGE_BL32
+# define PLAT_ARM_MMAP_ENTRIES		5
+# define MAX_XLAT_TABLES		4
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#if TRUSTED_BOARD_BOOT
+# define PLAT_ARM_MAX_BL1_RW_SIZE	0xA000
+#else
+# define PLAT_ARM_MAX_BL1_RW_SIZE	0x6000
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#if TRUSTED_BOARD_BOOT
+#if TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA
+# define PLAT_ARM_MAX_BL2_SIZE		0x1E000
+#else
+# define PLAT_ARM_MAX_BL2_SIZE		0x1A000
+#endif
+#else
+# define PLAT_ARM_MAX_BL2_SIZE		0xC000
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
+ * little space for growth.
+ * SCP_BL2 image is loaded into the space BL31 -> BL1_RW_BASE.
+ * For TBB use case, PLAT_ARM_MAX_BL1_RW_SIZE has been increased and therefore
+ * PLAT_ARM_MAX_BL31_SIZE has been increased to ensure SCP_BL2 has the same
+ * space available.
+ */
+#define PLAT_ARM_MAX_BL31_SIZE		0x1E000
+
+/*
+ * Since free SRAM space is scant, enable the ASSERTION message size
+ * optimization by fixing the PLAT_LOG_LEVEL_ASSERT to LOG_LEVEL_INFO (40).
+ */
+#define PLAT_LOG_LEVEL_ASSERT		40
+
+#endif /* ARM_BOARD_OPTIMISE_MEM */
+
+/* CCI related constants */
+#define PLAT_ARM_CCI_BASE		0x2c090000
+#define PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX	4
+#define PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX	3
+
+/* System timer related constants */
+#define PLAT_ARM_NSTIMER_FRAME_ID		1
+
+/* TZC related constants */
+#define PLAT_ARM_TZC_BASE		0x2a4a0000
+#define PLAT_ARM_TZC_NS_DEV_ACCESS	(				\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CCI400)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_PCIE)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD0)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD1)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_USB)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_DMA330)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_THINLINKS)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_AP)		|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_GPU)	|	\
+		TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CORESIGHT))
+
+/*
+ * Required ARM CSS based platform porting definitions
+ */
+
+/* GIC related constants (no GICR in GIC-400) */
+#define PLAT_ARM_GICD_BASE		0x2c010000
+#define PLAT_ARM_GICC_BASE		0x2c02f000
+#define PLAT_ARM_GICH_BASE		0x2c04f000
+#define PLAT_ARM_GICV_BASE		0x2c06f000
+
+/* MHU related constants */
+#define PLAT_CSS_MHU_BASE		0x2b1f0000
+
+/*
+ * Base address of the first memory region used for communication between AP
+ * and SCP. Used by the BOM and SCPI protocols.
+ */
+#if !CSS_USE_SCMI_SDS_DRIVER
+/*
+ * Note that this is located at the same address as SCP_BOOT_CFG_ADDR, which
+ * means the SCP/AP configuration data gets overwritten when the AP initiates
+ * communication with the SCP. The configuration data is expected to be a
+ * 32-bit word on all CSS platforms. On Juno, part of this configuration is
+ * which CPU is the primary, according to the shift and mask definitions below.
+ */
+#define PLAT_CSS_SCP_COM_SHARED_MEM_BASE	(ARM_TRUSTED_SRAM_BASE + 0x80)
+#define PLAT_CSS_PRIMARY_CPU_SHIFT		8
+#define PLAT_CSS_PRIMARY_CPU_BIT_WIDTH		4
+#endif
+
+/*
+ * PLAT_CSS_MAX_SCP_BL2_SIZE is calculated using the current
+ * SCP_BL2 size plus a little space for growth.
+ */
+#define PLAT_CSS_MAX_SCP_BL2_SIZE	0x14000
+
+/*
+ * PLAT_CSS_MAX_SCP_BL2U_SIZE is calculated using the current
+ * SCP_BL2U size plus a little space for growth.
+ */
+#define PLAT_CSS_MAX_SCP_BL2U_SIZE	0x14000
+
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+	CSS_G1S_IRQ_PROPS(grp), \
+	ARM_G1S_IRQ_PROPS(grp), \
+	INTR_PROP_DESC(JUNO_IRQ_DMA_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_HDLCD0_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_HDLCD1_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_USB_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_THIN_LINKS_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_SEC_I2C, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_GPU_SMMU_1, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(JUNO_IRQ_ETR_SMMU, GIC_HIGHEST_SEC_PRIORITY, \
+		grp, GIC_INTR_CFG_LEVEL)
+
+#define PLAT_ARM_G0_IRQ_PROPS(grp)	ARM_G0_IRQ_PROPS(grp)
+
+/*
+ * Required ARM CSS SoC based platform porting definitions
+ */
+
+/* CSS SoC NIC-400 Global Programmers View (GPV) */
+#define PLAT_SOC_CSS_NIC400_BASE	0x2a000000
+
+#endif /* __PLATFORM_DEF_H__ */
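For reference, the power domain count above expands to nine nodes, assuming ARM_SYSTEM_COUNT is 1 (one system-level power domain, per the "Juno supports system power domain" comment). A small compile-time check, with the constants restated locally so the sketch stands alone:

    /* Stand-alone restatement of the Juno power-domain arithmetic; the
     * ARM_SYSTEM_COUNT value of 1 is an assumption, not taken from this diff. */
    #define ARM_SYSTEM_COUNT		1
    #define JUNO_CLUSTER_COUNT		2
    #define JUNO_CLUSTER0_CORE_COUNT	2
    #define JUNO_CLUSTER1_CORE_COUNT	4
    #define PLATFORM_CORE_COUNT	(JUNO_CLUSTER0_CORE_COUNT + JUNO_CLUSTER1_CORE_COUNT)
    #define PLAT_NUM_PWR_DOMAINS	(ARM_SYSTEM_COUNT + JUNO_CLUSTER_COUNT + PLATFORM_CORE_COUNT)

    _Static_assert(PLAT_NUM_PWR_DOMAINS == 9,
                   "1 system + 2 clusters + 6 cores on Juno");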
diff --git a/plat/arm/board/juno/juno_bl1_setup.c b/plat/arm/board/juno/juno_bl1_setup.c
new file mode 100644
index 0000000..7c026bc
--- /dev/null
+++ b/plat/arm/board/juno/juno_bl1_setup.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <errno.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <sp805.h>
+#include <tbbr_img_def.h>
+#include <v2m_def.h>
+
+#define RESET_REASON_WDOG_RESET		(0x2)
+
+void juno_reset_to_aarch32_state(void);
+
+
+/*******************************************************************************
+ * The following function checks whether a firmware update is needed, by
+ * checking whether the TOC in the FIP image is valid and whether a watchdog
+ * reset has occurred.
+ ******************************************************************************/
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	unsigned int *reset_flags_ptr = (unsigned int *)SSC_GPRETN;
+	unsigned int *nv_flags_ptr = (unsigned int *)
+			(V2M_SYSREGS_BASE + V2M_SYS_NVFLAGS);
+	/*
+	 * Check if TOC is invalid or watchdog reset happened.
+	 */
+	if ((arm_io_is_toc_valid() != 1) ||
+		((*reset_flags_ptr & RESET_REASON_WDOG_RESET) &&
+		((*nv_flags_ptr == -EAUTH) || (*nv_flags_ptr == -ENOENT))))
+		return NS_BL1U_IMAGE_ID;
+
+	return BL2_IMAGE_ID;
+}
+
+/*******************************************************************************
+ * On Juno, update arg2 with the address of the SCP_BL2U image info.
+ ******************************************************************************/
+void bl1_plat_set_ep_info(unsigned int image_id,
+		entry_point_info_t *ep_info)
+{
+	if (image_id == BL2U_IMAGE_ID) {
+		image_desc_t *image_desc = bl1_plat_get_image_desc(SCP_BL2U_IMAGE_ID);
+		ep_info->args.arg2 = (unsigned long)&image_desc->image_info;
+	}
+}
+
+/*******************************************************************************
+ * On Juno clear SYS_NVFLAGS and wait for watchdog reset.
+ ******************************************************************************/
+__dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved)
+{
+	unsigned int *nv_flags_clr = (unsigned int *)
+			(V2M_SYSREGS_BASE + V2M_SYS_NVFLAGSCLR);
+	unsigned int *nv_flags_ptr = (unsigned int *)
+			(V2M_SYSREGS_BASE + V2M_SYS_NVFLAGS);
+
+	/* Clear the NV flags register. */
+	*nv_flags_clr = *nv_flags_ptr;
+
+	while (1)
+		wfi();
+}
+
+#if JUNO_AARCH32_EL3_RUNTIME
+void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
+{
+#if !ARM_DISABLE_TRUSTED_WDOG
+	/* Disable watchdog before leaving BL1 */
+	sp805_stop(ARM_SP805_TWDG_BASE);
+#endif
+
+	juno_reset_to_aarch32_state();
+}
+#endif /* JUNO_AARCH32_EL3_RUNTIME */
diff --git a/plat/arm/board/juno/juno_bl2_setup.c b/plat/arm/board/juno/juno_bl2_setup.c
new file mode 100644
index 0000000..2771e0f
--- /dev/null
+++ b/plat/arm/board/juno/juno_bl2_setup.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <plat_arm.h>
+
+#if JUNO_AARCH32_EL3_RUNTIME
+/*******************************************************************************
+ * This function changes the SPSR for the BL32 image in order to bypass the
+ * check in the BL1 AArch64 exception handler. This is needed in the AArch32
+ * boot flow because the core comes up in AArch64, and entering the BL32
+ * image requires a warm reset into AArch32 state.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	int err = arm_bl2_handle_post_image_load(image_id);
+
+	if (!err && (image_id == BL32_IMAGE_ID)) {
+		bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+		assert(bl_mem_params);
+		bl_mem_params->ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+			DISABLE_ALL_EXCEPTIONS);
+	}
+
+	return err;
+}
+
+#if !CSS_USE_SCMI_SDS_DRIVER
+/*
+ * We need to override some of the platform functions when booting SP_MIN
+ * on Juno AArch32. This is only needed for SCPI/BOM SCP systems; with SDS,
+ * the structures remain in memory and do not need to be overwritten.
+ */
+
+static unsigned int scp_boot_config;
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	arm_bl2_early_platform_setup(mem_layout);
+
+	/* Save SCP Boot config before it gets overwritten by SCP_BL2 loading */
+	scp_boot_config = mmio_read_32(SCP_BOOT_CFG_ADDR);
+	VERBOSE("BL2: Saving SCP Boot config = 0x%x\n", scp_boot_config);
+}
+
+void bl2_platform_setup(void)
+{
+	arm_bl2_platform_setup();
+
+	mmio_write_32(SCP_BOOT_CFG_ADDR, scp_boot_config);
+	VERBOSE("BL2: Restored SCP Boot config = 0x%x\n", scp_boot_config);
+}
+#endif
+
+#endif /* JUNO_AARCH32_EL3_RUNTIME */
diff --git a/plat/arm/board/juno/juno_decl.h b/plat/arm/board/juno/juno_decl.h
new file mode 100644
index 0000000..8a3b373
--- /dev/null
+++ b/plat/arm/board/juno/juno_decl.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __JUNO_DECL_H__
+#define __JUNO_DECL_H__
+
+int juno_getentropy(void *buf, size_t len);
+
+#endif /* __JUNO_DECL_H__ */
diff --git a/plat/arm/board/juno/juno_def.h b/plat/arm/board/juno/juno_def.h
new file mode 100644
index 0000000..d2834e1
--- /dev/null
+++ b/plat/arm/board/juno/juno_def.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __JUNO_DEF_H__
+#define __JUNO_DEF_H__
+
+
+/*******************************************************************************
+ * Juno memory map related constants
+ ******************************************************************************/
+
+/* Board revisions */
+#define REV_JUNO_R0			0x1	/* Rev B */
+#define REV_JUNO_R1			0x2	/* Rev C */
+#define REV_JUNO_R2			0x3	/* Rev D */
+
+/* Bypass offset from start of NOR flash */
+#define BL1_ROM_BYPASS_OFFSET		0x03EC0000
+
+#define EMMC_BASE			0x0c000000
+#define EMMC_SIZE			0x04000000
+
+#define PSRAM_BASE			0x14000000
+#define PSRAM_SIZE			0x02000000
+
+#define JUNO_SSC_VER_PART_NUM		0x030
+
+/*******************************************************************************
+ * Juno topology related constants
+ ******************************************************************************/
+#define JUNO_CLUSTER_COUNT		2
+#define JUNO_CLUSTER0_CORE_COUNT	2
+#define JUNO_CLUSTER1_CORE_COUNT	4
+
+/*******************************************************************************
+ * TZC-400 related constants
+ ******************************************************************************/
+#define TZC400_NSAID_CCI400		0  /* Note: Same as default NSAID!! */
+#define TZC400_NSAID_PCIE		1
+#define TZC400_NSAID_HDLCD0		2
+#define TZC400_NSAID_HDLCD1		3
+#define TZC400_NSAID_USB		4
+#define TZC400_NSAID_DMA330		5
+#define TZC400_NSAID_THINLINKS		6
+#define TZC400_NSAID_AP			9
+#define TZC400_NSAID_GPU		10
+#define TZC400_NSAID_SCP		11
+#define TZC400_NSAID_CORESIGHT		12
+
+/*******************************************************************************
+ * TRNG related constants
+ ******************************************************************************/
+#define TRNG_BASE	0x7FE60000ULL
+#define TRNG_NOUTPUTS	4
+#define TRNG_STATUS	0x10
+#define TRNG_INTMASK	0x14
+#define TRNG_CONFIG	0x18
+#define TRNG_CONTROL	0x1C
+#define TRNG_NBYTES	16	/* Number of bytes generated per round. */
+
+/*******************************************************************************
+ * MMU-401 related constants
+ ******************************************************************************/
+#define MMU401_SSD_OFFSET		0x4000
+#define MMU401_DMA330_BASE		0x7fb00000
+
+/*******************************************************************************
+ * Interrupt handling constants
+ ******************************************************************************/
+#define JUNO_IRQ_DMA_SMMU		126
+#define JUNO_IRQ_HDLCD0_SMMU		128
+#define JUNO_IRQ_HDLCD1_SMMU		130
+#define JUNO_IRQ_USB_SMMU		132
+#define JUNO_IRQ_THIN_LINKS_SMMU	134
+#define JUNO_IRQ_SEC_I2C		137
+#define JUNO_IRQ_GPU_SMMU_1		73
+#define JUNO_IRQ_ETR_SMMU		75
+
+#endif /* __JUNO_DEF_H__ */
diff --git a/plat/arm/board/juno/juno_err.c b/plat/arm/board/juno/juno_err.c
new file mode 100644
index 0000000..4682895
--- /dev/null
+++ b/plat/arm/board/juno/juno_err.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <errno.h>
+#include <v2m_def.h>
+
+#define V2M_SYS_NVFLAGS_ADDR		(V2M_SYSREGS_BASE + V2M_SYS_NVFLAGS)
+
+/*
+ * Juno error handler
+ */
+void plat_error_handler(int err)
+{
+	uint32_t *flags_ptr = (uint32_t *)V2M_SYS_NVFLAGS_ADDR;
+
+	/* Propagate the err code in the NV-flags register */
+	*flags_ptr = err;
+
+	/* Loop until the watchdog resets the system */
+	for (;;)
+		wfi();
+}
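Taken together with bl1_plat_get_next_image_id() in juno_bl1_setup.c, this gives the recovery path: plat_error_handler() parks the error code (e.g. -EAUTH or -ENOENT) in the NV flags and waits for the watchdog, and after the reset BL1 sees the watchdog reason plus the stored code and enters firmware update. A hedged C sketch of just that decision follows; the inputs are passed in explicitly and the NV-flags comparison is folded into a boolean so the sketch does not depend on the firmware's private errno.h. It is illustrative only, not the platform API.

    #include <stdbool.h>

    /* Mirrors the predicate in bl1_plat_get_next_image_id(): recovery is
     * entered if the FIP TOC is invalid, or if a watchdog reset happened
     * while an authentication/load error was recorded in the NV flags. */
    static bool juno_needs_fw_update(bool toc_valid, unsigned int reset_flags,
                                     bool image_error_recorded)
    {
        const unsigned int wdog_reset = 0x2;    /* RESET_REASON_WDOG_RESET */

        if (!toc_valid)
            return true;
        return ((reset_flags & wdog_reset) != 0) && image_error_recorded;
    }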
diff --git a/plat/arm/board/juno/juno_security.c b/plat/arm/board/juno/juno_security.c
new file mode 100644
index 0000000..ce4239b
--- /dev/null
+++ b/plat/arm/board/juno/juno_security.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <nic_400.h>
+#include <plat_arm.h>
+#include <soc_css.h>
+#include "juno_def.h"
+
+
+/*******************************************************************************
+ * Set up the MMU-401 SSD tables. The power-on configuration has all stream IDs
+ * assigned to Non-Secure except some for the DMA-330. Assign those back to the
+ * Non-Secure world as well, otherwise EL1 may end up erroneously generating
+ * (untranslated) Secure transactions if it turns the SMMU on.
+ ******************************************************************************/
+static void init_mmu401(void)
+{
+	uint32_t reg = mmio_read_32(MMU401_DMA330_BASE + MMU401_SSD_OFFSET);
+	reg |= 0x1FF;
+	mmio_write_32(MMU401_DMA330_BASE + MMU401_SSD_OFFSET, reg);
+}
+
+/*******************************************************************************
+ * Program CSS-NIC400 to allow non-secure access to some CSS regions.
+ ******************************************************************************/
+static void css_init_nic400(void)
+{
+	/* Note: This is the NIC-400 device on the CSS */
+	mmio_write_32(PLAT_SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(CSS_NIC400_SLAVE_BOOTSECURE),
+		~0);
+}
+
+/*******************************************************************************
+ * Initialize debug configuration.
+ ******************************************************************************/
+static void init_debug_cfg(void)
+{
+#if !DEBUG
+	/* Set internal drive selection for SPIDEN. */
+	mmio_write_32(SSC_REG_BASE + SSC_DBGCFG_SET,
+		1U << SPIDEN_SEL_SET_SHIFT);
+
+	/* Drive SPIDEN LOW to disable invasive debug of secure state. */
+	mmio_write_32(SSC_REG_BASE + SSC_DBGCFG_CLR,
+		1U << SPIDEN_INT_CLR_SHIFT);
+#endif
+}
+
+/*******************************************************************************
+ * Initialize the secure environment.
+ ******************************************************************************/
+void plat_arm_security_setup(void)
+{
+	/* Initialize debug configuration */
+	init_debug_cfg();
+	/* Initialize the TrustZone Controller */
+	arm_tzc400_setup();
+	/* Do ARM CSS internal NIC setup */
+	css_init_nic400();
+	/* Do ARM CSS SoC security setup */
+	soc_css_security_setup();
+	/* Initialize the SMMU SSD tables */
+	init_mmu401();
+}
diff --git a/plat/arm/board/juno/juno_stack_protector.c b/plat/arm/board/juno/juno_stack_protector.c
new file mode 100644
index 0000000..ec0b1fb
--- /dev/null
+++ b/plat/arm/board/juno/juno_stack_protector.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <utils.h>
+#include "juno_decl.h"
+#include "juno_def.h"
+
+u_register_t plat_get_stack_protector_canary(void)
+{
+	u_register_t c[TRNG_NBYTES / sizeof(u_register_t)];
+	u_register_t ret = 0;
+	size_t i;
+
+	if (juno_getentropy(c, sizeof(c)) != 0) {
+		ERROR("Not enough entropy to initialize canary value\n");
+		panic();
+	}
+
+	/*
+	 * On Juno we get 128-bits of entropy in one round.
+	 * Fuse the values together to form the canary.
+	 */
+	for (i = 0; i < ARRAY_SIZE(c); i++)
+		ret ^= c[i];
+	return ret;
+}
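The XOR fold above is the entire derivation of the canary: 16 bytes of TRNG output collapsed into a single machine word. A host-side sketch of the same fold, assuming nothing beyond standard C:

    #include <stdint.h>
    #include <string.h>

    /* Fold 16 bytes of entropy into one machine word, as in
     * plat_get_stack_protector_canary(): two 64-bit XORs on AArch64,
     * four 32-bit XORs on AArch32. */
    static uintptr_t fold_canary(const uint8_t entropy[16])
    {
        uintptr_t words[16 / sizeof(uintptr_t)];
        uintptr_t canary = 0;
        size_t i;

        memcpy(words, entropy, sizeof(words));
        for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
            canary ^= words[i];
        return canary;
    }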
diff --git a/plat/arm/board/juno/juno_topology.c b/plat/arm/board/juno/juno_topology.c
new file mode 100644
index 0000000..b9412b1
--- /dev/null
+++ b/plat/arm/board/juno/juno_topology.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <plat_arm.h>
+#include "juno_def.h"
+
+/*
+ * On Juno, the system power level is the highest power level.
+ * The first entry in the power domain descriptor specifies the
+ * number of system power domains i.e. 1.
+ */
+#define JUNO_PWR_DOMAINS_AT_MAX_PWR_LVL	 ARM_SYSTEM_COUNT
+
+/*
+ * The Juno power domain tree descriptor. The cluster power domains
+ * are arranged so that when the PSCI generic code creates the power
+ * domain tree, the indices of the CPU power domain nodes it allocates
+ * match the linear indices returned by plat_core_pos_by_mpidr()
+ * i.e. CLUSTER1 CPUs are allocated indices 0 to 3, and CLUSTER0 CPUs take
+ * the higher indices (4 and 5).
+ */
+const unsigned char juno_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	JUNO_PWR_DOMAINS_AT_MAX_PWR_LVL,
+	/* No of children for the root node */
+	JUNO_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	JUNO_CLUSTER1_CORE_COUNT,
+	/* No of children for the second cluster node */
+	JUNO_CLUSTER0_CORE_COUNT
+};
+
+/*******************************************************************************
+ * This function returns the Juno topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return juno_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function returns the core count within the cluster corresponding to
+ * `mpidr`.
+ ******************************************************************************/
+unsigned int plat_arm_get_cluster_core_count(u_register_t mpidr)
+{
+	return (((mpidr) & 0x100) ? JUNO_CLUSTER1_CORE_COUNT :\
+				JUNO_CLUSTER0_CORE_COUNT);
+}
+
+/*
+ * The array mapping platform core position (implemented by plat_my_core_pos())
+ * to the SCMI power domain ID implemented by SCP.
+ */
+const uint32_t plat_css_core_pos_to_scmi_dmn_id_map[PLATFORM_CORE_COUNT] = {
+			2, 3, 4, 5, 0, 1 };
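The descriptor above and the SCMI map only line up because plat_arm_calc_core_pos() on Juno branches to css_calc_core_pos_swap_cluster (see juno_helpers.S earlier in this diff): the quad Cortex-A53 cluster (MPIDR cluster 1) takes positions 0-3 and the dual A57/A72 cluster (cluster 0) takes positions 4-5, which the map then translates to SCP power-domain IDs 2-5 and 0-1 respectively. A small C restatement of that cluster-swapped index, under the assumption that the cluster and core IDs live in MPIDR bits [15:8] and [7:0]; it is a sketch, not the library routine.

    #include <stdint.h>

    /* Cluster-swapped linear index for a two-cluster system, as computed by
     * css_calc_core_pos_swap_cluster: cluster 1 maps to positions 0-3,
     * cluster 0 to positions 4-5. */
    static unsigned int juno_core_pos(uint64_t mpidr)
    {
        unsigned int cluster = (unsigned int)((mpidr >> 8) & 0xffu);
        unsigned int core = (unsigned int)(mpidr & 0xffu);

        return ((cluster ^ 1u) * 4u) + core;
    }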
diff --git a/plat/arm/board/juno/juno_trng.c b/plat/arm/board/juno/juno_trng.c
new file mode 100644
index 0000000..124821b
--- /dev/null
+++ b/plat/arm/board/juno/juno_trng.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <mmio.h>
+#include <string.h>
+#include <utils_def.h>
+#include "juno_def.h"
+
+#define NSAMPLE_CLOCKS	1 /* min 1 cycle, max 231 cycles */
+#define NRETRIES	5
+
+static inline int output_valid(void)
+{
+	int i;
+
+	for (i = 0; i < NRETRIES; i++) {
+		uint32_t val;
+
+		val = mmio_read_32(TRNG_BASE + TRNG_STATUS);
+		if (val & 1U)
+			break;
+	}
+	if (i >= NRETRIES)
+		return 0; /* No output data available. */
+	return 1;
+}
+
+/*
+ * This function fills `buf` with `len` bytes of entropy.
+ * It uses the Trusted Entropy Source peripheral on Juno.
+ * Returns 0 when the buffer has been filled with entropy
+ * successfully and -1 otherwise.
+ */
+int juno_getentropy(void *buf, size_t len)
+{
+	uint8_t *bp = buf;
+
+	assert(buf);
+	assert(len);
+	assert(!check_uptr_overflow((uintptr_t)bp, len));
+
+	/* Disable interrupt mode. */
+	mmio_write_32(TRNG_BASE + TRNG_INTMASK, 0);
+	/* Program TRNG to sample for `NSAMPLE_CLOCKS`. */
+	mmio_write_32(TRNG_BASE + TRNG_CONFIG, NSAMPLE_CLOCKS);
+
+	while (len > 0) {
+		int i;
+
+		/* Start TRNG. */
+		mmio_write_32(TRNG_BASE + TRNG_CONTROL, 1);
+
+		/* Check if output is valid. */
+		if (!output_valid())
+			return -1;
+
+		/* Fill entropy buffer. */
+		for (i = 0; i < TRNG_NOUTPUTS; i++) {
+			size_t n;
+			uint32_t val;
+
+			val = mmio_read_32(TRNG_BASE + i * sizeof(uint32_t));
+			n = MIN(len, sizeof(uint32_t));
+			memcpy(bp, &val, n);
+			bp += n;
+			len -= n;
+			if (len == 0)
+				break;
+		}
+
+		/* Reset TRNG outputs. */
+		mmio_write_32(TRNG_BASE + TRNG_STATUS, 1);
+	}
+
+	return 0;
+}
diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk
new file mode 100644
index 0000000..5cd125b
--- /dev/null
+++ b/plat/arm/board/juno/platform.mk
@@ -0,0 +1,93 @@
+#
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+JUNO_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				plat/common/plat_gicv2.c		\
+				plat/arm/common/arm_gicv2.c
+
+JUNO_INTERCONNECT_SOURCES	:=	drivers/arm/cci/cci.c		\
+					plat/arm/common/arm_cci.c
+
+JUNO_SECURITY_SOURCES	:=	drivers/arm/tzc/tzc400.c		\
+				plat/arm/board/juno/juno_security.c	\
+				plat/arm/board/juno/juno_trng.c		\
+				plat/arm/common/arm_tzc400.c
+
+ifneq (${ENABLE_STACK_PROTECTOR}, 0)
+JUNO_SECURITY_SOURCES	+=	plat/arm/board/juno/juno_stack_protector.c
+endif
+
+PLAT_INCLUDES		:=	-Iplat/arm/board/juno/include
+
+PLAT_BL_COMMON_SOURCES	:=	plat/arm/board/juno/${ARCH}/juno_helpers.S
+
+# Flag to enable support for AArch32 state on JUNO
+JUNO_AARCH32_EL3_RUNTIME	:=	0
+$(eval $(call assert_boolean,JUNO_AARCH32_EL3_RUNTIME))
+$(eval $(call add_define,JUNO_AARCH32_EL3_RUNTIME))
+
+ifeq (${ARCH},aarch64)
+BL1_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				lib/cpus/aarch64/cortex_a72.S		\
+				plat/arm/board/juno/juno_bl1_setup.c	\
+				plat/arm/board/juno/juno_err.c		\
+				${JUNO_INTERCONNECT_SOURCES}		\
+				${JUNO_SECURITY_SOURCES}
+
+BL2_SOURCES		+=	plat/arm/board/juno/juno_err.c		\
+				plat/arm/board/juno/juno_bl2_setup.c	\
+				${JUNO_SECURITY_SOURCES}
+
+BL2U_SOURCES		+=	${JUNO_SECURITY_SOURCES}
+
+BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				lib/cpus/aarch64/cortex_a72.S		\
+				plat/arm/board/juno/juno_topology.c	\
+				${JUNO_GIC_SOURCES}			\
+				${JUNO_INTERCONNECT_SOURCES}		\
+				${JUNO_SECURITY_SOURCES}
+endif
+
+# Errata workarounds for Cortex-A53:
+ERRATA_A53_826319		:=	1
+ERRATA_A53_835769		:=	1
+ERRATA_A53_836870		:=	1
+ERRATA_A53_843419		:=	1
+ERRATA_A53_855873		:=	1
+
+# Errata workarounds for Cortex-A57:
+ERRATA_A57_806969		:=	0
+ERRATA_A57_813419		:=	1
+ERRATA_A57_813420		:=	1
+ERRATA_A57_826974		:=	1
+ERRATA_A57_826977		:=	1
+ERRATA_A57_828024		:=	1
+ERRATA_A57_829520		:=	1
+ERRATA_A57_833471		:=	1
+ERRATA_A57_859972		:=	0
+
+# Errata workarounds for Cortex-A72:
+ERRATA_A72_859971		:=	0
+
+# Enable option to skip L1 data cache flush during the Cortex-A57 cluster
+# power down sequence
+SKIP_A57_L1_FLUSH_PWR_DWN	:=	 1
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT		:= 	0
+
+# Enable memory map related constants optimisation
+ARM_BOARD_OPTIMISE_MEM		:=	1
+
+include plat/arm/board/common/board_css.mk
+include plat/arm/common/arm_common.mk
+include plat/arm/soc/common/soc_css.mk
+include plat/arm/css/common/css_common.mk
+
diff --git a/plat/arm/board/juno/sp_min/sp_min-juno.mk b/plat/arm/board/juno/sp_min/sp_min-juno.mk
new file mode 100644
index 0000000..cd1f497
--- /dev/null
+++ b/plat/arm/board/juno/sp_min/sp_min-juno.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# SP_MIN source files specific to JUNO platform
+BL32_SOURCES	+=	lib/cpus/aarch32/cortex_a53.S		\
+			lib/cpus/aarch32/cortex_a57.S		\
+			lib/cpus/aarch32/cortex_a72.S		\
+			lib/utils/mem_region.c			\
+			plat/arm/board/common/drivers/norflash/norflash.c	\
+			plat/arm/board/juno/juno_topology.c	\
+			plat/arm/common/arm_nor_psci_mem_protect.c	\
+			plat/arm/soc/common/soc_css_security.c	\
+			${JUNO_GIC_SOURCES}			\
+			${JUNO_INTERCONNECT_SOURCES}		\
+			${JUNO_SECURITY_SOURCES}
+
+include plat/arm/common/sp_min/arm_sp_min.mk
+include plat/arm/css/common/sp_min/css_sp_min.mk
diff --git a/plat/arm/board/juno/tsp/tsp-juno.mk b/plat/arm/board/juno/tsp/tsp-juno.mk
new file mode 100644
index 0000000..52461cf
--- /dev/null
+++ b/plat/arm/board/juno/tsp/tsp-juno.mk
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BL32_SOURCES		+=	plat/arm/board/juno/juno_topology.c	\
+				plat/arm/css/common/css_topology.c	\
+				${JUNO_GIC_SOURCES}
+
+include plat/arm/common/tsp/arm_tsp.mk
diff --git a/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
new file mode 100644
index 0000000..7fd42aa
--- /dev/null
+++ b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * The following descriptors provide the BL image/entry-point information
+ * that is used by BL2 to load the images; a subset of this information is
+ * also passed on to the next BL image. The image loading sequence is
+ * managed by populating the images in the required loading order. The image
+ * execution sequence is managed by populating `next_handoff_image_id` with
+ * the id of the next executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+	/* Fill SCP_BL2 related information if it exists */
+    {
+	    .image_id = SCP_BL2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = SCP_BL2_BASE,
+	    .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+#endif /* SCP_BL2_BASE */
+
+	/* Fill BL32 related information */
+    {
+	    .image_id = BL32_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = BL32_BASE,
+	    .ep_info.spsr = SPSR_MODE32(MODE32_mon, SPSR_T_ARM,
+		    SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+    },
+
+	/* Fill BL33 related information */
+    {
+	    .image_id = BL33_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+#ifdef PRELOADED_BL33_BASE
+	    .ep_info.pc = PRELOADED_BL33_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#else
+	    .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET,
+	    .image_info.image_max_size = ARM_DRAM1_SIZE,
+#endif /* PRELOADED_BL33_BASE */
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    }
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/arm/common/aarch32/arm_helpers.S b/plat/arm/common/aarch32/arm_helpers.S
new file mode 100644
index 0000000..f56b215
--- /dev/null
+++ b/plat/arm/common/aarch32/arm_helpers.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.weak	plat_arm_calc_core_pos
+	.weak	plat_my_core_pos
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_crash_console_flush
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_my_core_pos(void)
+	 *  This function uses the plat_arm_calc_core_pos()
+	 *  definition to get the index of the calling CPU.
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	ldcopr	r0, MPIDR
+	b	plat_arm_calc_core_pos
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
+	 *  Helper function to calculate the core position.
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *  				  CoreId
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	add	r0, r1, r0, LSR #6
+	bx	lr
+endfunc plat_arm_calc_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : r0 - r3
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	ldr	r0, =PLAT_ARM_CRASH_UART_BASE
+	ldr	r1, =PLAT_ARM_CRASH_UART_CLK_IN_HZ
+	ldr	r2, =ARM_CONSOLE_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : r1 - r2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	ldr	r1, =PLAT_ARM_CRASH_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_flush()
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : r0 - r1
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_flush
+	ldr	r1, =PLAT_ARM_CRASH_UART_BASE
+	b	console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
new file mode 100644
index 0000000..4376119
--- /dev/null
+++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * The following descriptors provide the BL image/entry-point information
+ * that is used by BL2 to load the images; a subset of this information is
+ * also passed on to the next BL image. The image loading sequence is
+ * managed by populating the images in the required loading order. The image
+ * execution sequence is managed by populating `next_handoff_image_id` with
+ * the id of the next executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+	/* Fill SCP_BL2 related information if it exists */
+    {
+	    .image_id = SCP_BL2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = SCP_BL2_BASE,
+	    .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+	/* Fill EL3 payload related information (BL31 is EL3 payload)*/
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = EL3_PAYLOAD_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t,
+		    IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+#else /* EL3_PAYLOAD_BASE */
+
+	/* Fill BL31 related information */
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = BL31_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+	    .ep_info.args.arg1 = ARM_BL31_PLAT_PARAM_VAL,
+#endif
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+	    .image_info.image_base = BL31_BASE,
+	    .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+	    .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+    },
+
+# ifdef BL32_BASE
+	/* Fill BL32 related information */
+    {
+	    .image_id = BL32_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+	    .ep_info.pc = BL32_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 1 related information.
+	 * A typical use for extra1 image is with OP-TEE where it is the pager image.
+	 */
+    {
+	    .image_id = BL32_EXTRA1_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 2 related information.
+	 * A typical use for extra2 image is with OP-TEE where it is the paged image.
+	 */
+    {
+	    .image_id = BL32_EXTRA2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+	    .image_info.image_base = ARM_OPTEE_PAGEABLE_LOAD_BASE,
+	    .image_info.image_max_size = ARM_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+# endif /* BL32_BASE */
+
+	/* Fill BL33 related information */
+    {
+	    .image_id = BL33_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+	    .ep_info.pc = PRELOADED_BL33_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+	    .ep_info.pc = PLAT_ARM_NS_IMAGE_OFFSET,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = PLAT_ARM_NS_IMAGE_OFFSET,
+	    .image_info.image_max_size = ARM_DRAM1_SIZE,
+# endif /* PRELOADED_BL33_BASE */
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
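As the comment block notes, load order is simply array order, while execution order follows next_handoff_image_id. A toy C sketch of walking such a chain, with made-up numeric IDs purely to illustrate the linked-list shape of the descriptors (the real walk is done by the generic desc_image_load code, not by platform files):

    #include <stdio.h>

    #define INVALID_ID	0xffffffffu

    struct desc {
        unsigned int image_id;
        unsigned int next_handoff_image_id;
    };

    int main(void)
    {
        /* Hypothetical IDs: 31 = BL31, 32 = BL32, 33 = BL33. */
        struct desc chain[] = {
            { 31, 32 },
            { 32, 33 },
            { 33, INVALID_ID },
        };
        unsigned int i;

        for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
            printf("image %u hands off to %u\n",
                   chain[i].image_id, chain[i].next_handoff_image_id);
        return 0;
    }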
diff --git a/plat/arm/common/aarch64/arm_helpers.S b/plat/arm/common/aarch64/arm_helpers.S
new file mode 100644
index 0000000..b53e60d
--- /dev/null
+++ b/plat/arm/common/aarch64/arm_helpers.S
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.weak	plat_arm_calc_core_pos
+	.weak	plat_my_core_pos
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_crash_console_flush
+	.globl	platform_mem_init
+	.globl	arm_disable_spe
+
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_my_core_pos(void)
+	 *  This function uses the plat_arm_calc_core_pos()
+	 *  definition to get the index of the calling CPU.
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	b	plat_arm_calc_core_pos
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 *  unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
+	 *  Helper function to calculate the core position.
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *  				  CoreId
+	 * -----------------------------------------------------
+	 */
+func plat_arm_calc_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc plat_arm_calc_core_pos
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0 - x4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, PLAT_ARM_CRASH_UART_BASE
+	mov_imm	x1, PLAT_ARM_CRASH_UART_CLK_IN_HZ
+	mov_imm	x2, ARM_CONSOLE_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, PLAT_ARM_CRASH_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_flush()
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0 - x1
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_flush
+	mov_imm	x1, PLAT_ARM_CRASH_UART_BASE
+	b	console_core_flush
+endfunc plat_crash_console_flush
+
+	/* ---------------------------------------------------------------------
+	 * We don't need to carry out any memory initialization on ARM
+	 * platforms. The Secure RAM is accessible straight away.
+	 * ---------------------------------------------------------------------
+	 */
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+	/* -----------------------------------------------------
+	 * void arm_disable_spe (void);
+	 * -----------------------------------------------------
+	 */
+#if ENABLE_SPE_FOR_LOWER_ELS
+func arm_disable_spe
+	/* Detect if SPE is implemented */
+	mrs	x0, id_aa64dfr0_el1
+	ubfx	x0, x0, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
+	cmp	x0, #0x1
+	b.ne	1f
+
+	/* Drain buffered data */
+	.arch	armv8.2-a+profile
+	psb	csync
+	dsb	nsh
+
+	/* Disable Profiling Buffer */
+	mrs	x0, pmblimitr_el1
+	bic	x0, x0, #1
+	msr	pmblimitr_el1, x0
+	isb
+	.arch	armv8-a
+1:
+	ret
+endfunc arm_disable_spe
+#endif
+
+/*
+ * A coherent stack is needed when ARM Cryptocell is used to authenticate
+ * images, since Cryptocell uses DMA to transfer data and that DMA traffic is
+ * not coherent with the AP CPU.
+ */
+#if ARM_CRYPTOCELL_INTEG
+#if defined(IMAGE_BL1) || defined(IMAGE_BL2)
+	.globl	plat_get_my_stack
+	.globl	plat_set_my_stack
+	.local	platform_coherent_stacks
+
+	/* -------------------------------------------------------
+	 * uintptr_t plat_get_my_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs a
+	 * stack. This function returns the stack pointer for a
+	 * stack allocated in coherent memory.
+	 * -------------------------------------------------------
+	 */
+func plat_get_my_stack
+	get_up_stack platform_coherent_stacks, PLATFORM_STACK_SIZE
+	ret
+endfunc plat_get_my_stack
+
+	/* -------------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs a
+	 * stack. This function sets the stack pointer to a stack
+	 * allocated in coherent memory.
+	 * -------------------------------------------------------
+	 */
+func plat_set_my_stack
+	get_up_stack platform_coherent_stacks, PLATFORM_STACK_SIZE
+	mov sp, x0
+	ret
+endfunc plat_set_my_stack
+
+	/* ----------------------------------------------------
+	 * Single cpu stack in coherent memory.
+	 * ----------------------------------------------------
+	 */
+declare_stack platform_coherent_stacks, tzfw_coherent_mem, \
+		PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
+
+#endif	/* defined(IMAGE_BL1) || defined(IMAGE_BL2) */
+#endif	/* ARM_CRYPTOCELL_INTEG */
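For readers less used to the shifted-register form: `add x0, x1, x0, LSR #6` works because the cluster ID sits in MPIDR bits [15:8], so shifting the masked cluster field right by 6 is the same as multiplying the cluster ID by 4. A C restatement, assuming MPIDR_CLUSTER_MASK is 0xff00 and MPIDR_CPU_MASK is 0xff (a sketch for illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* (ClusterId * 4) + CoreId, expressed the same way as the assembly. */
    static unsigned int calc_core_pos(uint64_t mpidr)
    {
        return (unsigned int)(((mpidr & 0xff00u) >> 6) + (mpidr & 0xffu));
    }

    int main(void)
    {
        assert(calc_core_pos(0x0000) == 0);    /* cluster 0, core 0 */
        assert(calc_core_pos(0x0001) == 1);    /* cluster 0, core 1 */
        assert(calc_core_pos(0x0100) == 4);    /* cluster 1, core 0 */
        assert(calc_core_pos(0x0103) == 7);    /* cluster 1, core 3 */
        return 0;
    }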
diff --git a/plat/arm/common/arm_bl1_fwu.c b/plat/arm/common/arm_bl1_fwu.c
new file mode 100644
index 0000000..1305934
--- /dev/null
+++ b/plat/arm/common/arm_bl1_fwu.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <tbbr_img_desc.h>
+#include <utils.h>
+
+/* Struct to keep track of usable memory */
+typedef struct bl1_mem_info {
+	uintptr_t mem_base;
+	unsigned int mem_size;
+} bl1_mem_info_t;
+
+bl1_mem_info_t fwu_addr_map_secure[] = {
+	{
+		.mem_base = ARM_SHARED_RAM_BASE,
+		.mem_size = ARM_SHARED_RAM_SIZE
+	},
+	{
+		.mem_size = 0
+	}
+};
+
+bl1_mem_info_t fwu_addr_map_non_secure[] = {
+	{
+		.mem_base = ARM_NS_DRAM1_BASE,
+		.mem_size = ARM_NS_DRAM1_SIZE
+	},
+	{
+		.mem_base = PLAT_ARM_NVM_BASE,
+		.mem_size = PLAT_ARM_NVM_SIZE
+	},
+	{
+		.mem_size = 0
+	}
+};
+
+int bl1_plat_mem_check(uintptr_t mem_base,
+		unsigned int mem_size,
+		unsigned int flags)
+{
+	unsigned int index = 0;
+	bl1_mem_info_t *mmap;
+
+	assert(mem_base);
+	assert(mem_size);
+	/*
+	 * The caller of this function is responsible for checking upfront that
+	 * the end address doesn't overflow. We double-check this in debug
+	 * builds.
+	 */
+	assert(!check_uptr_overflow(mem_base, mem_size - 1));
+
+	/*
+	 * Check the given image source and size.
+	 */
+	if (GET_SECURITY_STATE(flags) == SECURE)
+		mmap = fwu_addr_map_secure;
+	else
+		mmap = fwu_addr_map_non_secure;
+
+	while (mmap[index].mem_size) {
+		if ((mem_base >= mmap[index].mem_base) &&
+			((mem_base + mem_size)
+			<= (mmap[index].mem_base +
+			mmap[index].mem_size)))
+			return 0;
+
+		index++;
+	}
+
+	return -ENOMEM;
+}
+
+/*******************************************************************************
+ * This function does linear search for image_id and returns image_desc.
+ ******************************************************************************/
+image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
+{
+	unsigned int index = 0;
+
+	while (bl1_tbbr_image_descs[index].image_id != INVALID_IMAGE_ID) {
+		if (bl1_tbbr_image_descs[index].image_id == image_id)
+			return &bl1_tbbr_image_descs[index];
+		index++;
+	}
+
+	return NULL;
+}
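The range walk in bl1_plat_mem_check() is the entire FWU memory policy: an image is accepted only if [mem_base, mem_base + mem_size) fits wholly inside one of the regions listed for its security state. A compact C restatement of just that containment test, written against simplified local types rather than the platform structures:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mem_region {
        uintptr_t base;
        unsigned int size;	/* size == 0 terminates the list */
    };

    /* True if [base, base + size) lies entirely within one listed region. */
    static bool fits_in_some_region(const struct mem_region *map,
                                    uintptr_t base, unsigned int size)
    {
        size_t i;

        for (i = 0; map[i].size != 0; i++) {
            if (base >= map[i].base &&
                (base + size) <= (map[i].base + map[i].size))
                return true;
        }
        return false;
    }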
diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c
new file mode 100644
index 0000000..6860e36
--- /dev/null
+++ b/plat/arm/common/arm_bl1_setup.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_def.h>
+#include <arm_xlat_tables.h>
+#include <bl_common.h>
+#include <console.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <sp805.h>
+#include <utils.h>
+#include "../../../bl1/bl1_private.h"
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl1_early_platform_setup
+#pragma weak bl1_plat_arch_setup
+#pragma weak bl1_platform_setup
+#pragma weak bl1_plat_sec_mem_layout
+#pragma weak bl1_plat_prepare_exit
+
+
+/* Data structure which holds the extents of the trusted SRAM for BL1*/
+static meminfo_t bl1_tzram_layout;
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+/*******************************************************************************
+ * BL1 specific platform actions shared between ARM standard platforms.
+ ******************************************************************************/
+void arm_bl1_early_platform_setup(void)
+{
+
+#if !ARM_DISABLE_TRUSTED_WDOG
+	/* Enable watchdog */
+	sp805_start(ARM_SP805_TWDG_BASE, ARM_TWDG_LOAD_VAL);
+#endif
+
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = ARM_BL_RAM_BASE;
+	bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE;
+
+#if !LOAD_IMAGE_V2
+	/* Calculate how much RAM BL1 is using and how much remains free */
+	bl1_tzram_layout.free_base = ARM_BL_RAM_BASE;
+	bl1_tzram_layout.free_size = ARM_BL_RAM_SIZE;
+	reserve_mem(&bl1_tzram_layout.free_base,
+		    &bl1_tzram_layout.free_size,
+		    BL1_RAM_BASE,
+		    BL1_RAM_LIMIT - BL1_RAM_BASE);
+#endif /* LOAD_IMAGE_V2 */
+}
+
+void bl1_early_platform_setup(void)
+{
+	arm_bl1_early_platform_setup();
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_arm_interconnect_init();
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 */
+	plat_arm_interconnect_enter_coherency();
+}
+
+/******************************************************************************
+ * Perform the very early platform specific architecture setup shared between
+ * ARM standard platforms. This only does basic initialization. Later
+ * architectural setup (bl1_arch_setup()) does not do anything platform
+ * specific.
+ *****************************************************************************/
+void arm_bl1_plat_arch_setup(void)
+{
+	arm_setup_page_tables(bl1_tzram_layout.total_base,
+			      bl1_tzram_layout.total_size,
+			      BL_CODE_BASE,
+			      BL1_CODE_END,
+			      BL1_RO_DATA_BASE,
+			      BL1_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			     );
+#ifdef AARCH32
+	enable_mmu_secure(0);
+#else
+	enable_mmu_el3(0);
+#endif /* AARCH32 */
+}
+
+void bl1_plat_arch_setup(void)
+{
+	arm_bl1_plat_arch_setup();
+}
+
+/*
+ * Perform the platform specific architecture setup shared between
+ * ARM standard platforms.
+ */
+void arm_bl1_platform_setup(void)
+{
+	/* Initialise the IO layer and register platform IO devices */
+	plat_arm_io_setup();
+}
+
+void bl1_platform_setup(void)
+{
+	arm_bl1_platform_setup();
+}
+
+void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
+{
+#if !ARM_DISABLE_TRUSTED_WDOG
+	/* Disable watchdog before leaving BL1 */
+	sp805_stop(ARM_SP805_TWDG_BASE);
+#endif
+
+#ifdef EL3_PAYLOAD_BASE
+	/*
+	 * Program the EL3 payload's entry point address into the CPUs mailbox
+	 * in order to release secondary CPUs from their holding pen and make
+	 * them jump there.
+	 */
+	arm_program_trusted_mailbox(ep_info->pc);
+	dsbsy();
+	sev();
+#endif
+}
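
The #pragma weak lines above are what let an individual ARM platform replace, say, bl1_platform_setup() while still reusing the common arm_bl1_platform_setup() logic: the weak definition is used only when no strong definition is linked in. A standalone sketch of the same mechanism, using GCC's __attribute__((weak)) and a hypothetical platform_setup() symbol (illustrative only, not TF code):

#include <stdio.h>

/* Weak default; any strong platform_setup() in another object file wins. */
__attribute__((weak)) void platform_setup(void)
{
	puts("common default platform setup");
}

int main(void)
{
	/*
	 * With only this file linked, the weak default runs. Linking an
	 * object that defines a non-weak platform_setup() silently
	 * replaces it at link time.
	 */
	platform_setup();
	return 0;
}
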
diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c
new file mode 100644
index 0000000..5d83118
--- /dev/null
+++ b/plat/arm/common/arm_bl2_setup.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_def.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <generic_delay_timer.h>
+#ifdef SPD_opteed
+#include <optee_utils.h>
+#endif
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <utils.h>
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl2_early_platform_setup
+#pragma weak bl2_platform_setup
+#pragma weak bl2_plat_arch_setup
+#pragma weak bl2_plat_sec_mem_layout
+
+#if LOAD_IMAGE_V2
+
+#pragma weak bl2_plat_handle_post_image_load
+
+#else /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * This structure represents the superset of information passed to BL31 when
+ * BL2 hands over control to it: the bl31_params structure plus other
+ * platform-specific parameters.
+ ******************************************************************************/
+typedef struct bl2_to_bl31_params_mem {
+	bl31_params_t bl31_params;
+	image_info_t bl31_image_info;
+	image_info_t bl32_image_info;
+	image_info_t bl33_image_info;
+	entry_point_info_t bl33_ep_info;
+	entry_point_info_t bl32_ep_info;
+	entry_point_info_t bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl2_plat_get_bl31_params
+#pragma weak bl2_plat_get_bl31_ep_info
+#pragma weak bl2_plat_flush_bl31_params
+#pragma weak bl2_plat_set_bl31_ep_info
+#pragma weak bl2_plat_get_scp_bl2_meminfo
+#pragma weak bl2_plat_get_bl32_meminfo
+#pragma weak bl2_plat_set_bl32_ep_info
+#pragma weak bl2_plat_get_bl33_meminfo
+#pragma weak bl2_plat_set_bl33_ep_info
+
+#if ARM_BL31_IN_DRAM
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	static meminfo_t bl2_dram_layout
+		__aligned(CACHE_WRITEBACK_GRANULE) = {
+		.total_base = BL31_BASE,
+		.total_size = (ARM_AP_TZC_DRAM1_BASE +
+				ARM_AP_TZC_DRAM1_SIZE) - BL31_BASE,
+		.free_base = BL31_BASE,
+		.free_size = (ARM_AP_TZC_DRAM1_BASE +
+				ARM_AP_TZC_DRAM1_SIZE) - BL31_BASE
+	};
+
+	return &bl2_dram_layout;
+}
+#else
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	return &bl2_tzram_layout;
+}
+#endif /* ARM_BL31_IN_DRAM */
+
+/*******************************************************************************
+ * This function returns a pointer to the memory that the platform has set
+ * aside to pass platform-specific and Trusted Firmware related information
+ * to BL31. This memory is provided by the statically allocated
+ * bl2_to_bl31_params_mem_t structure, which is a superset of all the
+ * structures whose information is passed to BL31.
+ * NOTE: This function should be called only once, before generating the
+ * params for BL31.
+ ******************************************************************************/
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+	bl31_params_t *bl2_to_bl31_params;
+
+	/*
+	 * Initialise the memory for all the arguments that need to
+	 * be passed to BL31
+	 */
+	zeromem(&bl31_params_mem, sizeof(bl2_to_bl31_params_mem_t));
+
+	/* Assign memory for TF related information */
+	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+	/* Fill BL31 related information */
+	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	/* Fill BL32 related information if it exists */
+#ifdef BL32_BASE
+	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+		VERSION_1, 0);
+	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+#endif /* BL32_BASE */
+
+	/* Fill BL33 related information */
+	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+		PARAM_EP, VERSION_1, 0);
+
+	/* BL33 expects to receive the primary CPU MPID (through x0) */
+	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	return bl2_to_bl31_params;
+}
+
+/* Flush the TF params and the TF plat params */
+void bl2_plat_flush_bl31_params(void)
+{
+	flush_dcache_range((unsigned long)&bl31_params_mem,
+			sizeof(bl2_to_bl31_params_mem_t));
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the shared memory that the platform
+ * has reserved to hold the BL31 entry point information populated by BL2
+ ******************************************************************************/
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+	bl31_params_mem.bl31_ep_info.args.arg1 = ARM_BL31_PLAT_PARAM_VAL;
+#endif
+
+	return &bl31_params_mem.bl31_ep_info;
+}
+#endif /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
+ * in x0. This memory layout is sitting at the base of the free trusted SRAM.
+ * Copy it to a safe location before it is reclaimed by later BL2 functionality.
+ ******************************************************************************/
+void arm_bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+
+	/* Setup the BL2 memory layout */
+	bl2_tzram_layout = *mem_layout;
+
+	/* Initialise the IO layer and register platform IO devices */
+	plat_arm_io_setup();
+}
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	arm_bl2_early_platform_setup(mem_layout);
+	generic_delay_timer_init();
+}
+
+/*
+ * Perform ARM standard platform setup.
+ */
+void arm_bl2_platform_setup(void)
+{
+	/* Initialize the secure environment */
+	plat_arm_security_setup();
+
+#if defined(PLAT_ARM_MEM_PROT_ADDR)
+	arm_nor_psci_do_mem_protect();
+#endif
+}
+
+void bl2_platform_setup(void)
+{
+	arm_bl2_platform_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void arm_bl2_plat_arch_setup(void)
+{
+	arm_setup_page_tables(bl2_tzram_layout.total_base,
+			      bl2_tzram_layout.total_size,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+
+#ifdef AARCH32
+	enable_mmu_secure(0);
+#else
+	enable_mmu_el1(0);
+#endif
+}
+
+void bl2_plat_arch_setup(void)
+{
+	arm_bl2_plat_arch_setup();
+}
+
+#if LOAD_IMAGE_V2
+int arm_bl2_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+#ifdef SPD_opteed
+	bl_mem_params_node_t *pager_mem_params = NULL;
+	bl_mem_params_node_t *paged_mem_params = NULL;
+#endif
+	assert(bl_mem_params);
+
+	switch (image_id) {
+#ifdef AARCH64
+	case BL32_IMAGE_ID:
+#ifdef SPD_opteed
+		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+		assert(pager_mem_params);
+
+		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+		assert(paged_mem_params);
+
+		err = parse_optee_header(&bl_mem_params->ep_info,
+				&pager_mem_params->image_info,
+				&paged_mem_params->image_info);
+		if (err != 0) {
+			WARN("OPTEE header parse error.\n");
+		}
+#endif
+		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry();
+		break;
+#endif
+
+	case BL33_IMAGE_ID:
+		/* BL33 expects to receive the primary CPU MPID (through r0) */
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry();
+		break;
+
+#ifdef SCP_BL2_BASE
+	case SCP_BL2_IMAGE_ID:
+		/* The subsequent handling of SCP_BL2 is platform specific */
+		err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
+		if (err) {
+			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
+		}
+		break;
+#endif
+	}
+
+	return err;
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	return arm_bl2_handle_post_image_load(image_id);
+}
+
+#else /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading SCP_BL2 (if used),
+ * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
+ ******************************************************************************/
+void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
+{
+	*scp_bl2_meminfo = bl2_tzram_layout;
+}
+
+/*******************************************************************************
+ * Before calling this function BL31 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL31 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
+					entry_point_info_t *bl31_ep_info)
+{
+	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS);
+}
+
+
+/*******************************************************************************
+ * Before calling this function BL32 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL32 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+#ifdef BL32_BASE
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+					entry_point_info_t *bl32_ep_info)
+{
+	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+	bl32_ep_info->spsr = arm_get_spsr_for_bl32_entry();
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL32
+ ******************************************************************************/
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+	/*
+	 * Populate the extents of memory available for loading BL32.
+	 */
+	bl32_meminfo->total_base = BL32_BASE;
+	bl32_meminfo->free_base = BL32_BASE;
+	bl32_meminfo->total_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+	bl32_meminfo->free_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+}
+#endif /* BL32_BASE */
+
+/*******************************************************************************
+ * Before calling this function BL33 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL33 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+					entry_point_info_t *bl33_ep_info)
+{
+	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+	bl33_ep_info->spsr = arm_get_spsr_for_bl33_entry();
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL33
+ ******************************************************************************/
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+	bl33_meminfo->total_base = ARM_NS_DRAM1_BASE;
+	bl33_meminfo->total_size = ARM_NS_DRAM1_SIZE;
+	bl33_meminfo->free_base = ARM_NS_DRAM1_BASE;
+	bl33_meminfo->free_size = ARM_NS_DRAM1_SIZE;
+}
+
+#endif /* LOAD_IMAGE_V2 */
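
The bl2_to_bl31_params_mem_t "superset" structure above bundles the header and every descriptor it points to into one static block, so zeroing it, wiring the internal pointers and flushing the whole hand-over to memory each touch a single object. A much-reduced standalone sketch of that pattern, with simplified stand-in types instead of the real image_info_t/entry_point_info_t (not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uintptr_t pc; uint64_t arg0; } ep_info_t;
typedef struct { uintptr_t image_base; uint32_t image_size; } img_info_t;

/* Header whose pointers refer to storage in the same block. */
typedef struct {
	img_info_t *bl33_image;
	ep_info_t  *bl33_ep;
} bl31_args_t;

typedef struct {
	bl31_args_t args;	/* header */
	img_info_t bl33_image;	/* storage the header points at */
	ep_info_t  bl33_ep;
} params_mem_t;

static params_mem_t params_mem;

static bl31_args_t *get_bl31_args(void)
{
	memset(&params_mem, 0, sizeof(params_mem));
	/*
	 * Wire the header's pointers to the storage inside the same block,
	 * so one cache flush of &params_mem would cover the whole hand-over.
	 */
	params_mem.args.bl33_image = &params_mem.bl33_image;
	params_mem.args.bl33_ep = &params_mem.bl33_ep;
	return &params_mem.args;
}

int main(void)
{
	bl31_args_t *p = get_bl31_args();
	p->bl33_ep->pc = 0x80000000u;
	printf("BL33 pc: 0x%lx\n", (unsigned long)p->bl33_ep->pc);
	return 0;
}
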
diff --git a/plat/arm/common/arm_bl2u_setup.c b/plat/arm/common/arm_bl2u_setup.c
new file mode 100644
index 0000000..03d908b
--- /dev/null
+++ b/plat/arm/common/arm_bl2u_setup.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_def.h>
+#include <bl_common.h>
+#include <console.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <string.h>
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl2u_platform_setup
+#pragma weak bl2u_early_platform_setup
+#pragma weak bl2u_plat_arch_setup
+
+/*
+ * Perform ARM standard platform setup for BL2U
+ */
+void arm_bl2u_platform_setup(void)
+{
+	/* Initialize the secure environment */
+	plat_arm_security_setup();
+}
+
+void bl2u_platform_setup(void)
+{
+	arm_bl2u_platform_setup();
+}
+
+void arm_bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+}
+
+/*******************************************************************************
+ * BL1 can pass platform dependent information to BL2U in x1.
+ * In case of ARM CSS platforms x1 contains SCP_BL2U image info.
+ * In case of ARM FVP platforms x1 is not used.
+ * In both cases, x0 contains the extents of the memory available to BL2U
+ ******************************************************************************/
+void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+{
+	arm_bl2u_early_platform_setup(mem_layout, plat_info);
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ * Only the memory that is used by BL2U is mapped.
+ ******************************************************************************/
+void arm_bl2u_plat_arch_setup(void)
+{
+	arm_setup_page_tables(BL2U_BASE,
+			      BL31_LIMIT,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      ,
+			      BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+		);
+#ifdef AARCH32
+	enable_mmu_secure(0);
+#else
+	enable_mmu_el1(0);
+#endif
+}
+
+void bl2u_plat_arch_setup(void)
+{
+	arm_bl2u_plat_arch_setup();
+}
diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c
new file mode 100644
index 0000000..8fba80b
--- /dev/null
+++ b/plat/arm/common/arm_bl31_setup.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_def.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+
+#define BL31_END (uintptr_t)(&__BL31_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl31_early_platform_setup
+#pragma weak bl31_platform_setup
+#pragma weak bl31_plat_arch_setup
+#pragma weak bl31_plat_get_next_image_ep_info
+
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for the
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	assert(sec_state_is_valid(type));
+	next_image_info = (type == NON_SECURE)
+			? &bl33_image_ep_info : &bl32_image_ep_info;
+	/*
+	 * None of the images on the ARM development platforms can have 0x0
+	 * as the entrypoint
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL31 early platform setup common to ARM standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & S-EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+void arm_bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2)
+#else
+void arm_bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+#endif
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+
+#if RESET_TO_BL31
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+#ifdef BL32_BASE
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();
+#endif /* BL32_BASE */
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	/*
+	 * Tell BL31 where the non-trusted software image
+	 * is located and provide its entry state information
+	 */
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+
+	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else /* RESET_TO_BL31 */
+
+	/*
+	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
+	 * to verify platform parameters from BL2 to BL31.
+	 * In release builds, it's not used.
+	 */
+	assert(((unsigned long long)plat_params_from_bl2) ==
+		ARM_BL31_PLAT_PARAM_VAL);
+
+# if LOAD_IMAGE_V2
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy BL33 and BL32 (if present), entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params) {
+		if (bl_params->image_id == BL32_IMAGE_ID)
+			bl32_image_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_image_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_image_ep_info.pc == 0)
+		panic();
+
+# else /* LOAD_IMAGE_V2 */
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	assert(from_bl2 != NULL);
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+
+	/*
+	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	if (from_bl2->bl32_ep_info)
+		bl32_image_ep_info = *from_bl2->bl32_ep_info;
+	bl33_image_ep_info = *from_bl2->bl33_ep_info;
+
+# endif /* LOAD_IMAGE_V2 */
+#endif /* RESET_TO_BL31 */
+}
+
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2)
+#else
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+#endif
+{
+	arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_arm_interconnect_init();
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * Platform specific PSCI code will enable coherency for other
+	 * clusters.
+	 */
+	plat_arm_interconnect_enter_coherency();
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform setup common to ARM standard platforms
+ ******************************************************************************/
+void arm_bl31_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+
+#if RESET_TO_BL31
+	/*
+	 * Do initial security configuration to allow DRAM/device access
+	 * (if earlier BL has not already done so).
+	 */
+	plat_arm_security_setup();
+
+#endif /* RESET_TO_BL31 */
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+
+	/* Allow access to the System counter timer module */
+	arm_configure_sys_timer();
+
+	/* Initialize power controller before setting up topology */
+	plat_arm_pwrc_setup();
+}
+
+/*******************************************************************************
+ * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
+ * standard platforms
+ ******************************************************************************/
+void arm_bl31_plat_runtime_setup(void)
+{
+	/* Initialize the runtime console */
+	console_init(PLAT_ARM_BL31_RUN_UART_BASE, PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+}
+
+void bl31_platform_setup(void)
+{
+	arm_bl31_platform_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	arm_bl31_plat_runtime_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * ARM standard platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ ******************************************************************************/
+void arm_bl31_plat_arch_setup(void)
+{
+	arm_setup_page_tables(BL31_BASE,
+			      BL31_END - BL31_BASE,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+	enable_mmu_el3(0);
+}
+
+void bl31_plat_arch_setup(void)
+{
+	arm_bl31_plat_arch_setup();
+}
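
With LOAD_IMAGE_V2, arm_bl31_early_platform_setup() above walks the bl_params list handed over by BL2 and copies out the BL32/BL33 entry point descriptors before that memory may be reused. A standalone sketch of that traversal, with simplified types and purely illustrative image IDs rather than the real TF definitions (not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define BL32_IMAGE_ID 4	/* illustrative IDs, not the real TF values */
#define BL33_IMAGE_ID 5

typedef struct { uintptr_t pc; uint32_t spsr; } ep_info_t;

typedef struct bl_params_node {
	unsigned int image_id;
	ep_info_t *ep_info;
	struct bl_params_node *next_params_info;
} bl_params_node_t;

static ep_info_t bl32_ep, bl33_ep;

/* Copy the entry point info of BL32/BL33 out of the caller's list. */
static void copy_ep_info(bl_params_node_t *head)
{
	for (bl_params_node_t *n = head; n != NULL; n = n->next_params_info) {
		if (n->image_id == BL32_IMAGE_ID)
			bl32_ep = *n->ep_info;
		if (n->image_id == BL33_IMAGE_ID)
			bl33_ep = *n->ep_info;
	}
}

int main(void)
{
	ep_info_t e32 = { 0x0e100000u, 0 }, e33 = { 0x80000000u, 0 };
	bl_params_node_t n33 = { BL33_IMAGE_ID, &e33, NULL };
	bl_params_node_t n32 = { BL32_IMAGE_ID, &e32, &n33 };

	copy_ep_info(&n32);
	printf("BL32 pc 0x%lx, BL33 pc 0x%lx\n",
	       (unsigned long)bl32_ep.pc, (unsigned long)bl33_ep.pc);
	return 0;
}
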
diff --git a/plat/arm/common/arm_cci.c b/plat/arm/common/arm_cci.c
new file mode 100644
index 0000000..fc24cc3
--- /dev/null
+++ b/plat/arm/common/arm_cci.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <cci.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <utils.h>
+
+static const int cci_map[] = {
+	PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX,
+	PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way ARM CCI driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_arm_interconnect_init
+#pragma weak plat_arm_interconnect_enter_coherency
+#pragma weak plat_arm_interconnect_exit_coherency
+
+
+/******************************************************************************
+ * Helper function to initialize ARM CCI driver.
+ *****************************************************************************/
+void plat_arm_interconnect_init(void)
+{
+	cci_init(PLAT_ARM_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+}
+
+/******************************************************************************
+ * Helper function to place current master into coherency
+ *****************************************************************************/
+void plat_arm_interconnect_enter_coherency(void)
+{
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/******************************************************************************
+ * Helper function to remove current master from coherency
+ *****************************************************************************/
+void plat_arm_interconnect_exit_coherency(void)
+{
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
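
plat_arm_interconnect_enter_coherency() above indexes the CCI slave-interface map with MPIDR affinity level 1, i.e. the cluster number. As a standalone reminder of where that field lives (bits [15:8] of MPIDR, ignoring the multithreaded layout), and of what MPIDR_AFFLVL1_VAL() effectively computes:

#include <stdint.h>
#include <stdio.h>

/* Affinity level 1 (the cluster ID on these platforms) is MPIDR[15:8]. */
static unsigned int mpidr_afflvl1(uint64_t mpidr)
{
	return (unsigned int)((mpidr >> 8) & 0xff);
}

int main(void)
{
	uint64_t mpidr = (1u << 8) | 2u;	/* cluster 1, core 2 */
	printf("cluster %u\n", mpidr_afflvl1(mpidr));	/* prints 1 */
	return 0;
}
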
diff --git a/plat/arm/common/arm_ccn.c b/plat/arm/common/arm_ccn.c
new file mode 100644
index 0000000..84a529f
--- /dev/null
+++ b/plat/arm/common/arm_ccn.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <ccn.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+
+static const unsigned char master_to_rn_id_map[] = {
+	PLAT_ARM_CLUSTER_TO_CCN_ID_MAP
+};
+
+static const ccn_desc_t arm_ccn_desc = {
+	.periphbase = PLAT_ARM_CCN_BASE,
+	.num_masters = ARRAY_SIZE(master_to_rn_id_map),
+	.master_to_rn_id_map = master_to_rn_id_map
+};
+
+CASSERT(PLAT_ARM_CLUSTER_COUNT == ARRAY_SIZE(master_to_rn_id_map),
+		assert_invalid_cluster_count_for_ccn_variant);
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way ARM CCN driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_arm_interconnect_init
+#pragma weak plat_arm_interconnect_enter_coherency
+#pragma weak plat_arm_interconnect_exit_coherency
+
+
+/******************************************************************************
+ * Helper function to initialize ARM CCN driver.
+ *****************************************************************************/
+void plat_arm_interconnect_init(void)
+{
+	ccn_init(&arm_ccn_desc);
+}
+
+/******************************************************************************
+ * Helper function to place current master into coherency
+ *****************************************************************************/
+void plat_arm_interconnect_enter_coherency(void)
+{
+	ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/******************************************************************************
+ * Helper function to remove current master from coherency
+ *****************************************************************************/
+void plat_arm_interconnect_exit_coherency(void)
+{
+	ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c
new file mode 100644
index 0000000..420a386
--- /dev/null
+++ b/plat/arm/common/arm_common.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_xlat_tables.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+
+extern const mmap_region_t plat_arm_mmap[];
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_arm_get_mmap
+
+/* Conditionally provide a weak definition of plat_get_syscnt_freq2 to avoid
+ * conflicts with the definition in plat/common. */
+#if ERROR_DEPRECATED
+#pragma weak plat_get_syscnt_freq2
+#endif
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void arm_setup_page_tables(uintptr_t total_base,
+			   size_t total_size,
+			   uintptr_t code_start,
+			   uintptr_t code_limit,
+			   uintptr_t rodata_start,
+			   uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+			   ,
+			   uintptr_t coh_start,
+			   uintptr_t coh_limit
+#endif
+			   )
+{
+	/*
+	 * Map the Trusted SRAM with appropriate memory attributes.
+	 * Subsequent mappings will adjust the attributes for specific regions.
+	 */
+	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+		(void *) total_base, (void *) (total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size,
+			MT_MEMORY | MT_RW | MT_SECURE);
+
+	/* Re-map the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *) code_start, (void *) code_limit);
+	mmap_add_region(code_start, code_start,
+			code_limit - code_start,
+			MT_CODE | MT_SECURE);
+
+	/* Re-map the read-only data section */
+	VERBOSE("Read-only data region: %p - %p\n",
+		(void *) rodata_start, (void *) rodata_limit);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_limit - rodata_start,
+			MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+	/* Re-map the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *) coh_start, (void *) coh_limit);
+	mmap_add_region(coh_start, coh_start,
+			coh_limit - coh_start,
+			MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+	/* Now (re-)map the platform-specific memory regions */
+	mmap_add(plat_arm_get_mmap());
+
+	/* Create the page tables to reflect the above mappings */
+	init_xlat_tables();
+}
+
+uintptr_t plat_get_ns_image_entrypoint(void)
+{
+#ifdef PRELOADED_BL33_BASE
+	return PRELOADED_BL33_BASE;
+#else
+	return PLAT_ARM_NS_IMAGE_OFFSET;
+#endif
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+uint32_t arm_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+#ifndef AARCH32
+uint32_t arm_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#else
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+uint32_t arm_get_spsr_for_bl33_entry(void)
+{
+	unsigned int hyp_status, mode, spsr;
+
+	hyp_status = GET_VIRT_EXT(read_id_pfr1());
+
+	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#endif /* AARCH32 */
+
+/*******************************************************************************
+ * Configures access to the system counter timer module.
+ ******************************************************************************/
+#ifdef ARM_SYS_TIMCTL_BASE
+void arm_configure_sys_timer(void)
+{
+	unsigned int reg_val;
+
+#if ARM_CONFIG_CNTACR
+	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
+	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
+	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
+	mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTACR_BASE(PLAT_ARM_NSTIMER_FRAME_ID), reg_val);
+#endif /* ARM_CONFIG_CNTACR */
+
+	reg_val = (1 << CNTNSAR_NS_SHIFT(PLAT_ARM_NSTIMER_FRAME_ID));
+	mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTNSAR, reg_val);
+}
+#endif /* ARM_SYS_TIMCTL_BASE */
+
+/*******************************************************************************
+ * Returns ARM platform specific memory map regions.
+ ******************************************************************************/
+const mmap_region_t *plat_arm_get_mmap(void)
+{
+	return plat_arm_mmap;
+}
+
+#ifdef ARM_SYS_CNTCTL_BASE
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	unsigned int counter_base_frequency;
+
+	/* Read the frequency from Frequency modes table */
+	counter_base_frequency = mmio_read_32(ARM_SYS_CNTCTL_BASE + CNTFID_OFF);
+
+	/* The first entry of the frequency modes table must not be 0 */
+	if (counter_base_frequency == 0)
+		panic();
+
+	return counter_base_frequency;
+}
+
+#endif /* ARM_SYS_CNTCTL_BASE */
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
new file mode 100644
index 0000000..44eb43f
--- /dev/null
+++ b/plat/arm/common/arm_common.mk
@@ -0,0 +1,221 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH}, aarch64)
+  # On ARM standard platforms, the TSP can execute from Trusted SRAM, Trusted
+  # DRAM (if available) or the TZC secured area of DRAM.
+  # Trusted SRAM is the default.
+
+  ifneq (${TRUSTED_BOARD_BOOT},0)
+    ARM_TSP_RAM_LOCATION	?=	dram
+  else
+    ARM_TSP_RAM_LOCATION	?=	tsram
+  endif
+
+  ifeq (${ARM_TSP_RAM_LOCATION}, tsram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
+  else ifeq (${ARM_TSP_RAM_LOCATION}, tdram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID
+  else ifeq (${ARM_TSP_RAM_LOCATION}, dram)
+    ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID
+  else
+    $(error "Unsupported ARM_TSP_RAM_LOCATION value")
+  endif
+
+  # Process flags
+  # Process ARM_BL31_IN_DRAM flag
+  ARM_BL31_IN_DRAM		:=	0
+  $(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
+  $(eval $(call add_define,ARM_BL31_IN_DRAM))
+else
+  ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
+endif
+
+$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
+
+
+# For the original power-state parameter format, the State-ID can be encoded
+# according to the recommended encoding or left as zero. This flag determines
+# which State-ID encoding is to be parsed.
+ARM_RECOM_STATE_ID_ENC := 0
+
+# If PSCI_EXTENDED_STATE_ID is set, then ARM_RECOM_STATE_ID_ENC needs to be
+# set as well; otherwise a build error is raised.
+ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
+  ifeq (${ARM_RECOM_STATE_ID_ENC}, 0)
+    $(error Build option ARM_RECOM_STATE_ID_ENC needs to be set if \
+            PSCI_EXTENDED_STATE_ID is set for ARM platforms)
+  endif
+endif
+
+# Process ARM_RECOM_STATE_ID_ENC flag
+$(eval $(call assert_boolean,ARM_RECOM_STATE_ID_ENC))
+$(eval $(call add_define,ARM_RECOM_STATE_ID_ENC))
+
+# Process ARM_DISABLE_TRUSTED_WDOG flag
+# By default, Trusted Watchdog is always enabled unless SPIN_ON_BL1_EXIT is set
+ARM_DISABLE_TRUSTED_WDOG	:=	0
+ifeq (${SPIN_ON_BL1_EXIT}, 1)
+ARM_DISABLE_TRUSTED_WDOG	:=	1
+endif
+$(eval $(call assert_boolean,ARM_DISABLE_TRUSTED_WDOG))
+$(eval $(call add_define,ARM_DISABLE_TRUSTED_WDOG))
+
+# Process ARM_CONFIG_CNTACR
+ARM_CONFIG_CNTACR		:=	1
+$(eval $(call assert_boolean,ARM_CONFIG_CNTACR))
+$(eval $(call add_define,ARM_CONFIG_CNTACR))
+
+# Process ARM_BL31_IN_DRAM flag
+ARM_BL31_IN_DRAM		:=	0
+$(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
+$(eval $(call add_define,ARM_BL31_IN_DRAM))
+
+# Process ARM_PLAT_MT flag
+ARM_PLAT_MT			:=	0
+$(eval $(call assert_boolean,ARM_PLAT_MT))
+$(eval $(call add_define,ARM_PLAT_MT))
+
+# Use translation tables library v2 by default
+ARM_XLAT_TABLES_LIB_V1		:=	0
+$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
+$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
+
+# Use an implementation of SHA-256 with a smaller memory footprint but reduced
+# speed.
+$(eval $(call add_define,MBEDTLS_SHA256_SMALLER))
+
+# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
+# in the FIP if the platform requires.
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
+
+# Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms
+ENABLE_PSCI_STAT		:=	1
+ENABLE_PMF			:=	1
+
+# On ARM platforms, separate the code and read-only data sections to allow
+# mapping the former as executable and the latter as execute-never.
+SEPARATE_CODE_AND_RODATA	:=	1
+
+# Enable new version of image loading on ARM platforms
+LOAD_IMAGE_V2			:=	1
+
+# Use generic OID definition (tbbr_oid.h)
+USE_TBBR_DEFS			:=	1
+
+# Disable ARM Cryptocell by default
+ARM_CRYPTOCELL_INTEG		:=	0
+$(eval $(call assert_boolean,ARM_CRYPTOCELL_INTEG))
+$(eval $(call add_define,ARM_CRYPTOCELL_INTEG))
+
+PLAT_INCLUDES		+=	-Iinclude/common/tbbr				\
+				-Iinclude/plat/arm/common
+
+ifeq (${ARCH}, aarch64)
+PLAT_INCLUDES		+=	-Iinclude/plat/arm/common/aarch64
+endif
+
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/common/${ARCH}/arm_helpers.S		\
+				plat/arm/common/arm_common.c
+
+ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
+PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c		\
+				lib/xlat_tables/${ARCH}/xlat_tables.c
+else
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}
+endif
+
+BL1_SOURCES		+=	drivers/arm/sp805/sp805.c			\
+				drivers/io/io_fip.c				\
+				drivers/io/io_memmap.c				\
+				drivers/io/io_storage.c				\
+				plat/arm/common/arm_bl1_setup.c			\
+				plat/arm/common/arm_io_storage.c
+ifdef EL3_PAYLOAD_BASE
+# Need the arm_program_trusted_mailbox() function to release secondary CPUs from
+# their holding pen
+BL1_SOURCES		+=	plat/arm/common/arm_pm.c
+endif
+
+BL2_SOURCES		+=	drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				drivers/io/io_fip.c				\
+				drivers/io/io_memmap.c				\
+				drivers/io/io_storage.c				\
+				plat/arm/common/arm_bl2_setup.c			\
+				plat/arm/common/arm_io_storage.c
+ifeq (${LOAD_IMAGE_V2},1)
+# Because BL1/BL2 execute in AArch64 mode but BL32 in AArch32, we need to use
+# the AArch32 descriptors.
+ifeq (${JUNO_AARCH32_EL3_RUNTIME},1)
+BL2_SOURCES		+=	plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
+else
+BL2_SOURCES		+=	plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c
+endif
+BL2_SOURCES		+=	plat/arm/common/arm_image_load.c		\
+				common/desc_image_load.c
+ifeq (${SPD},opteed)
+BL2_SOURCES		+=	lib/optee/optee_utils.c
+endif
+endif
+
+BL2U_SOURCES		+=	drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				plat/arm/common/arm_bl2u_setup.c
+
+BL31_SOURCES		+=	plat/arm/common/arm_bl31_setup.c		\
+				plat/arm/common/arm_pm.c			\
+				plat/arm/common/arm_topology.c			\
+				plat/arm/common/execution_state_switch.c	\
+				plat/common/plat_psci_common.c
+
+ifeq (${ENABLE_PMF}, 1)
+BL31_SOURCES		+=	plat/arm/common/arm_sip_svc.c			\
+				lib/pmf/pmf_smc.c
+endif
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+    # Include common TBB sources
+    AUTH_SOURCES	:=	drivers/auth/auth_mod.c				\
+				drivers/auth/crypto_mod.c			\
+				drivers/auth/img_parser_mod.c			\
+				drivers/auth/tbbr/tbbr_cot.c			\
+
+    PLAT_INCLUDES	+=	-Iinclude/bl1/tbbr
+
+    BL1_SOURCES		+=	${AUTH_SOURCES}					\
+				bl1/tbbr/tbbr_img_desc.c			\
+				plat/arm/common/arm_bl1_fwu.c			\
+				plat/common/tbbr/plat_tbbr.c
+
+    BL2_SOURCES		+=	${AUTH_SOURCES}					\
+				plat/common/tbbr/plat_tbbr.c
+
+    $(eval $(call FWU_FIP_ADD_IMG,NS_BL2U,--fwu))
+
+    # We expect to locate the *.mk files under the directories specified below
+ifeq (${ARM_CRYPTOCELL_INTEG},0)
+    CRYPTO_LIB_MK := drivers/auth/mbedtls/mbedtls_crypto.mk
+else
+    CRYPTO_LIB_MK := drivers/auth/cryptocell/cryptocell_crypto.mk
+endif
+    IMG_PARSER_LIB_MK := drivers/auth/mbedtls/mbedtls_x509.mk
+
+    $(info Including ${CRYPTO_LIB_MK})
+    include ${CRYPTO_LIB_MK}
+
+    $(info Including ${IMG_PARSER_LIB_MK})
+    include ${IMG_PARSER_LIB_MK}
+
+endif
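
The assert_boolean/add_define calls above turn each build option into a -D<NAME>=<value> compiler flag, which the C sources then test with the preprocessor. A trivial standalone illustration of that flow, using a hypothetical FEATURE_X flag (compile with e.g. gcc -DFEATURE_X=1 demo.c):

#include <stdio.h>

#ifndef FEATURE_X
#define FEATURE_X 0	/* default when the build system does not define it */
#endif

int main(void)
{
#if FEATURE_X
	puts("feature X enabled at build time");
#else
	puts("feature X disabled at build time");
#endif
	return 0;
}
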
diff --git a/plat/arm/common/arm_gicv2.c b/plat/arm/common/arm_gicv2.c
new file mode 100644
index 0000000..b081fa8
--- /dev/null
+++ b/plat/arm/common/arm_gicv2.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <gicv2.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv2 driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_arm_gic_driver_init
+#pragma weak plat_arm_gic_init
+#pragma weak plat_arm_gic_cpuif_enable
+#pragma weak plat_arm_gic_cpuif_disable
+#pragma weak plat_arm_gic_pcpu_init
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+static const interrupt_prop_t arm_interrupt_props[] = {
+	PLAT_ARM_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+	PLAT_ARM_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t arm_gic_data = {
+	.gicd_base = PLAT_ARM_GICD_BASE,
+	.gicc_base = PLAT_ARM_GICC_BASE,
+	.interrupt_props = arm_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(arm_interrupt_props),
+	.target_masks = target_mask_array,
+	.target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/******************************************************************************
+ * ARM common helper to initialize the GICv2 only driver.
+ *****************************************************************************/
+void plat_arm_gic_driver_init(void)
+{
+	gicv2_driver_init(&arm_gic_data);
+}
+
+void plat_arm_gic_init(void)
+{
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * ARM common helper to enable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_enable(void)
+{
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * ARM common helper to disable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_disable(void)
+{
+	gicv2_cpuif_disable();
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the per cpu distributor interface in GICv2
+ *****************************************************************************/
+void plat_arm_gic_pcpu_init(void)
+{
+	gicv2_pcpu_distif_init();
+	gicv2_set_pe_target_mask(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Stubs for Redistributor power management. Although GICv2 doesn't have a
+ * Redistributor interface, these are provided for the sake of a uniform GIC API.
+ *****************************************************************************/
+void plat_arm_gic_redistif_on(void)
+{
+	return;
+}
+
+void plat_arm_gic_redistif_off(void)
+{
+	return;
+}
+
+
+/******************************************************************************
+ * ARM common helper to save & restore the GICv2 on resume from system suspend.
+ * The normal world currently takes care of saving and restoring the GICv2
+ * registers due to legacy reasons. Hence we just initialize the Distributor
+ * on resume from system suspend.
+ *****************************************************************************/
+void plat_arm_gic_save(void)
+{
+	return;
+}
+
+void plat_arm_gic_resume(void)
+{
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+}
diff --git a/plat/arm/common/arm_gicv3.c b/plat/arm/common/arm_gicv3.c
new file mode 100644
index 0000000..e273b77
--- /dev/null
+++ b/plat/arm/common/arm_gicv3.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <gicv3.h>
+#include <interrupt_props.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv3 driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_arm_gic_driver_init
+#pragma weak plat_arm_gic_init
+#pragma weak plat_arm_gic_cpuif_enable
+#pragma weak plat_arm_gic_cpuif_disable
+#pragma weak plat_arm_gic_pcpu_init
+#pragma weak plat_arm_gic_redistif_on
+#pragma weak plat_arm_gic_redistif_off
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t arm_interrupt_props[] = {
+	PLAT_ARM_G1S_IRQ_PROPS(INTR_GROUP1S),
+	PLAT_ARM_G0_IRQ_PROPS(INTR_GROUP0)
+};
+
+/*
+ * We save and restore the GICv3 context on system suspend. Allocate the
+ * data in the designated EL3 Secure carve-out memory
+ */
+gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram");
+gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram");
+
+/*
+ * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
+ * to core position.
+ *
+ * Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity
+ * values read from GICR_TYPER don't have an MT field. To reuse the same
+ * translation used for CPUs, we insert MT bit read from the PE's MPIDR into
+ * that read from GICR_TYPER.
+ *
+ * Assumptions:
+ *
+ *   - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
+ *   - No CPUs implemented in the system use affinity level 3.
+ */
+static unsigned int arm_gicv3_mpidr_hash(u_register_t mpidr)
+{
+	mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
+	return plat_arm_calc_core_pos(mpidr);
+}
+
+const gicv3_driver_data_t arm_gic_data = {
+	.gicd_base = PLAT_ARM_GICD_BASE,
+	.gicr_base = PLAT_ARM_GICR_BASE,
+	.interrupt_props = arm_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(arm_interrupt_props),
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = rdistif_base_addrs,
+	.mpidr_to_core_pos = arm_gicv3_mpidr_hash
+};
+
+void plat_arm_gic_driver_init(void)
+{
+	/*
+	 * The GICv3 driver is initialized in EL3 and does not need
+	 * to be initialized again in SEL1. This is because the S-EL1
+	 * can use GIC system registers to manage interrupts and does
+	 * not need GIC interface base addresses to be configured.
+	 */
+#if (defined(AARCH32) && defined(IMAGE_BL32)) || \
+	(defined(IMAGE_BL31) && !defined(AARCH32))
+	gicv3_driver_init(&arm_gic_data);
+#endif
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the GIC. Only invoked by BL31
+ *****************************************************************************/
+void plat_arm_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to enable the GIC CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to disable the GIC CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the per-cpu redistributor interface in GICv3
+ *****************************************************************************/
+void plat_arm_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helpers to power GIC redistributor interface
+ *****************************************************************************/
+void plat_arm_gic_redistif_on(void)
+{
+	gicv3_rdistif_on(plat_my_core_pos());
+}
+
+void plat_arm_gic_redistif_off(void)
+{
+	gicv3_rdistif_off(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * ARM common helper to save & restore the GICv3 on resume from system suspend
+ *****************************************************************************/
+void plat_arm_gic_save(void)
+{
+
+	/*
+	 * If an ITS is available, save its context before
+	 * the Redistributor using:
+	 * gicv3_its_save_disable(gits_base, &its_ctx[i])
+	 * Additionally, an implementation-defined sequence may
+	 * be required to save the whole ITS state.
+	 */
+
+	/*
+	 * Save the GIC Redistributors and ITS contexts before the
+	 * Distributor context. As we only handle SYSTEM SUSPEND API,
+	 * we only need to save the context of the CPU that is issuing
+	 * the SYSTEM SUSPEND call, i.e. the current CPU.
+	 */
+	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
+
+	/* Save the GIC Distributor context */
+	gicv3_distif_save(&dist_ctx);
+
+	/*
+	 * From here, all the components of the GIC can be safely powered down
+	 * as long as there is an alternate way to handle wakeup interrupt
+	 * sources.
+	 */
+}
+
+void plat_arm_gic_resume(void)
+{
+	/* Restore the GIC Distributor context */
+	gicv3_distif_init_restore(&dist_ctx);
+
+	/*
+	 * Restore the GIC Redistributor and ITS contexts after the
+	 * Distributor context. As we only handle SYSTEM SUSPEND API,
+	 * we only need to restore the context of the CPU that issued
+	 * the SYSTEM SUSPEND call.
+	 */
+	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
+
+	/*
+	 * If an ITS is available, restore its context after
+	 * the Redistributor using:
+	 * gicv3_its_restore(gits_base, &its_ctx[i])
+	 * An implementation-defined sequence may be required to
+	 * restore the whole ITS state. The ITS must also be
+	 * re-enabled after this sequence has been executed.
+	 */
+}
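
arm_gicv3_mpidr_hash() above exists because affinity values read back from GICR_TYPER carry no MT bit, so the PE's own MT bit is OR-ed in before reusing the normal core-position translation. A standalone sketch of that bit manipulation, with a deliberately simplified stand-in for plat_arm_calc_core_pos() (assuming 4 cores per cluster and ignoring the MT-shifted affinity layout; not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define MPIDR_MT_MASK (1ull << 24)	/* MT bit of MPIDR_EL1 */

/* Stand-in for plat_arm_calc_core_pos(): ignores the MT bit and the
 * real topology math, purely for illustration. */
static unsigned int calc_core_pos(uint64_t mpidr)
{
	unsigned int cluster = (unsigned int)((mpidr >> 8) & 0xff);
	unsigned int cpu = (unsigned int)(mpidr & 0xff);
	return cluster * 4u + cpu;
}

static unsigned int gic_mpidr_hash(uint64_t typer_aff, uint64_t local_mpidr)
{
	/*
	 * GICR_TYPER affinity has no MT bit; borrow it from the local MPIDR
	 * so the same core-position translation can be reused. (In this
	 * simplified stand-in the MT bit does not change the result.)
	 */
	typer_aff |= (local_mpidr & MPIDR_MT_MASK);
	return calc_core_pos(typer_aff);
}

int main(void)
{
	uint64_t local = MPIDR_MT_MASK | (1u << 8) | 3u;	/* MT set, cluster 1, core 3 */
	printf("core pos %u\n", gic_mpidr_hash((1u << 8) | 3u, local));	/* prints 7 */
	return 0;
}
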
diff --git a/plat/arm/common/arm_gicv3_legacy.c b/plat/arm/common/arm_gicv3_legacy.c
new file mode 100644
index 0000000..e19799a
--- /dev/null
+++ b/plat/arm/common/arm_gicv3_legacy.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <arm_gic.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the Legacy GICv3 driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_arm_gic_driver_init
+#pragma weak plat_arm_gic_init
+#pragma weak plat_arm_gic_cpuif_enable
+#pragma weak plat_arm_gic_cpuif_disable
+#pragma weak plat_arm_gic_pcpu_init
+
+/*
+ * In the GICv3 Legacy mode, the Group 1 secure interrupts are treated as Group
+ * 0 interrupts.
+ */
+static const unsigned int irq_sec_array[] = {
+	PLAT_ARM_G0_IRQS,
+	PLAT_ARM_G1S_IRQS
+};
+
+void plat_arm_gic_driver_init(void)
+{
+	arm_gic_init(PLAT_ARM_GICC_BASE,
+		     PLAT_ARM_GICD_BASE,
+		     PLAT_ARM_GICR_BASE,
+		     irq_sec_array,
+		     ARRAY_SIZE(irq_sec_array));
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the GIC.
+ *****************************************************************************/
+void plat_arm_gic_init(void)
+{
+	arm_gic_setup();
+}
+
+/******************************************************************************
+ * ARM common helper to enable the GIC CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_enable(void)
+{
+	arm_gic_cpuif_setup();
+}
+
+/******************************************************************************
+ * ARM common helper to disable the GIC CPU interface
+ *****************************************************************************/
+void plat_arm_gic_cpuif_disable(void)
+{
+	arm_gic_cpuif_deactivate();
+}
+
+/******************************************************************************
+ * ARM common helper to initialize the per-cpu distributor interface in GICv2
+ * or the redistributor interface in GICv3.
+ *****************************************************************************/
+void plat_arm_gic_pcpu_init(void)
+{
+	arm_gic_pcpu_distif_setup();
+}
+
+/******************************************************************************
+ * Stubs for Redistributor power management. Redistributor power management
+ * isn't supported in the legacy configuration; these stubs are provided only
+ * to keep the GIC API uniform.
+ *****************************************************************************/
+void plat_arm_gic_redistif_on(void)
+{
+	return;
+}
+
+void plat_arm_gic_redistif_off(void)
+{
+	return;
+}
+
+/******************************************************************************
+ * ARM common helpers to save & restore the GIC context across system suspend.
+ *****************************************************************************/
+void plat_arm_gic_save(void)
+{
+	return;
+}
+
+void plat_arm_gic_resume(void)
+{
+	arm_gic_setup();
+}
diff --git a/plat/arm/common/arm_image_load.c b/plat/arm/common/arm_image_load.c
new file mode 100644
index 0000000..03e4b4f
--- /dev/null
+++ b/plat/arm/common/arm_image_load.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+
+
+#pragma weak plat_flush_next_bl_params
+#pragma weak plat_get_bl_image_load_info
+#pragma weak plat_get_next_bl_params
+
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/arm/common/arm_io_storage.c b/plat/arm/common/arm_io_storage.c
new file mode 100644
index 0000000..794ef61
--- /dev/null
+++ b/plat/arm/common/arm_io_storage.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <platform_def.h>
+#include <string.h>
+#include <utils.h>
+
+/* IO devices */
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+
+static const io_block_spec_t fip_block_spec = {
+	.offset = PLAT_ARM_FIP_BASE,
+	.length = PLAT_ARM_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl32_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl32_extra1_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+};
+
+static const io_uuid_spec_t bl32_extra2_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
+};
+
+static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_KEY_CERT,
+};
+
+static const io_uuid_spec_t scp_fw_key_cert_uuid_spec = {
+	.uuid = UUID_SCP_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t scp_fw_cert_uuid_spec = {
+	.uuid = UUID_SCP_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+};
+#endif /* TRUSTED_BOARD_BOOT */
+
+
+static int open_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+/* By default, ARM platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&memmap_dev_handle,
+		(uintptr_t)&fip_block_spec,
+		open_memmap
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		open_fip
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&scp_bl2_uuid_spec,
+		open_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		open_fip
+	},
+	[BL32_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_uuid_spec,
+		open_fip
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra1_uuid_spec,
+		open_fip
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra2_uuid_spec,
+		open_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		open_fip
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tb_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&trusted_key_cert_uuid_spec,
+		open_fip
+	},
+	[SCP_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&scp_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[SCP_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&scp_fw_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_cert_uuid_spec,
+		open_fip
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_arm_io_setup
+#pragma weak plat_arm_get_alt_image_source
+
+
+static int open_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+
+static int open_memmap(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(memmap_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using Memmap\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+
+void arm_io_setup(void)
+{
+	int io_result;
+
+	io_result = register_io_dev_fip(&fip_dev_con);
+	assert(io_result == 0);
+
+	io_result = register_io_dev_memmap(&memmap_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
+				&fip_dev_handle);
+	assert(io_result == 0);
+
+	io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+				&memmap_dev_handle);
+	assert(io_result == 0);
+
+	/* Ignore improbable errors in release builds */
+	(void)io_result;
+}
+
+void plat_arm_io_setup(void)
+{
+	arm_io_setup();
+}
+
+int plat_arm_get_alt_image_source(
+	unsigned int image_id __unused,
+	uintptr_t *dev_handle __unused,
+	uintptr_t *image_spec __unused)
+{
+	/* By default do not try an alternative */
+	return -ENOENT;
+}
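+
+/*
+ * Illustrative sketch only: a platform that keeps a recovery FIP in a second
+ * flash bank could override the weak hook above and retry the lookup from a
+ * backup policy table. The backup_policies array shown here is hypothetical.
+ */
+#if 0
+int plat_arm_get_alt_image_source(unsigned int image_id,
+				  uintptr_t *dev_handle,
+				  uintptr_t *image_spec)
+{
+	const struct plat_io_policy *policy;
+
+	if (image_id >= ARRAY_SIZE(backup_policies))
+		return -ENOENT;
+
+	policy = &backup_policies[image_id];
+	if (policy->check(policy->image_spec) != 0)
+		return -ENOENT;
+
+	*image_spec = policy->image_spec;
+	*dev_handle = *(policy->dev_handle);
+	return 0;
+}
+#endif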
+
+/* Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	int result;
+	const struct plat_io_policy *policy;
+
+	assert(image_id < ARRAY_SIZE(policies));
+
+	policy = &policies[image_id];
+	result = policy->check(policy->image_spec);
+	if (result == 0) {
+		*image_spec = policy->image_spec;
+		*dev_handle = *(policy->dev_handle);
+	} else {
+		VERBOSE("Trying alternative IO\n");
+		result = plat_arm_get_alt_image_source(image_id, dev_handle,
+						       image_spec);
+	}
+
+	return result;
+}
+
+/*
+ * See if a Firmware Image Package is available
+ * by checking whether its TOC is valid.
+ */
+int arm_io_is_toc_valid(void)
+{
+	int result;
+
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+
+	return (result == 0);
+}
+
diff --git a/plat/arm/common/arm_nor_psci_mem_protect.c b/plat/arm/common/arm_nor_psci_mem_protect.c
new file mode 100644
index 0000000..c5263fd
--- /dev/null
+++ b/plat/arm/common/arm_nor_psci_mem_protect.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <norflash.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <utils.h>
+
+mem_region_t arm_ram_ranges[] = {
+	{ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_SIZE},
+#ifdef AARCH64
+	{ARM_DRAM2_BASE, ARM_DRAM2_SIZE},
+#endif
+};
+
+/*******************************************************************************
+ * Function that reads the content of the memory protect variable that
+ * enables clearing of non secure memory when system boots. This variable
+ * should be stored in a secure NVRAM.
+ ******************************************************************************/
+int arm_psci_read_mem_protect(int *enabled)
+{
+	int tmp;
+
+	tmp = *(int *) PLAT_ARM_MEM_PROT_ADDR;
+	*enabled = (tmp == 1);
+	return 0;
+}
+
+/*******************************************************************************
+ * Function that writes the content of the memory protect variable that
+ * enables overwriting of non-secure memory when the system boots.
+ ******************************************************************************/
+int arm_nor_psci_write_mem_protect(int val)
+{
+	int enable = (val != 0);
+
+	if (nor_unlock(PLAT_ARM_MEM_PROT_ADDR) != 0) {
+		ERROR("unlocking memory protect variable\n");
+		return -1;
+	}
+
+	if (enable) {
+		/*
+		 * If we want to write a value other than 0
+		 * then we have to erase the full block because
+		 * otherwise we cannot ensure that the value programmed
+		 * into the flash is going to be the same as the value
+		 * requested by the caller.
+		 */
+		if (nor_erase(PLAT_ARM_MEM_PROT_ADDR) != 0) {
+			ERROR("erasing block containing memory protect variable\n");
+			return -1;
+		}
+	}
+
+	if (nor_word_program(PLAT_ARM_MEM_PROT_ADDR, enable) != 0) {
+		ERROR("programming memory protection variable\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*******************************************************************************
+ * Function used for required psci operations performed when
+ * system boots
+ ******************************************************************************/
+void arm_nor_psci_do_mem_protect(void)
+{
+	int enable;
+
+	arm_psci_read_mem_protect(&enable);
+	if (!enable)
+		return;
+	INFO("PSCI: Overwriting non-secure memory\n");
+	clear_mem_regions(arm_ram_ranges, ARRAY_SIZE(arm_ram_ranges));
+	arm_nor_psci_write_mem_protect(0);
+}
+
+/*******************************************************************************
+ * Function that checks if a region is protected by the memory protect
+ * mechanism
+ ******************************************************************************/
+int arm_psci_mem_protect_chk(uintptr_t base, u_register_t length)
+{
+	return mem_region_in_array_chk(arm_ram_ranges,
+				       ARRAY_SIZE(arm_ram_ranges),
+				       base, length);
+}
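+
+/*
+ * Illustrative sketch only: these helpers are typically plugged into the
+ * platform's plat_psci_ops_t so that the PSCI MEM_PROTECT calls reach the
+ * NOR-backed variable. The surrounding ops structure is platform specific
+ * and abbreviated here.
+ */
+#if 0
+plat_psci_ops_t plat_arm_psci_pm_ops = {
+	/* ... other PSCI handlers ... */
+	.mem_protect_chk	= arm_psci_mem_protect_chk,
+	.read_mem_protect	= arm_psci_read_mem_protect,
+	.write_mem_protect	= arm_nor_psci_write_mem_protect,
+};
+#endif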
diff --git a/plat/arm/common/arm_pm.c b/plat/arm/common/arm_pm.c
new file mode 100644
index 0000000..5e7e047
--- /dev/null
+++ b/plat/arm/common/arm_pm.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_def.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <console.h>
+#include <errno.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/* Allow ARM Standard platforms to override this function */
+#pragma weak plat_arm_psci_override_pm_ops
+
+/* Standard ARM platforms are expected to export plat_arm_psci_pm_ops */
+extern plat_psci_ops_t plat_arm_psci_pm_ops;
+
+#if ARM_RECOM_STATE_ID_ENC
+extern unsigned int arm_pm_idle_states[];
+#endif /* ARM_RECOM_STATE_ID_ENC */
+
+#if !ARM_RECOM_STATE_ID_ENC
+/*******************************************************************************
+ * ARM standard platform handler called to check the validity of the power state
+ * parameter.
+ ******************************************************************************/
+int arm_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It's possible to enter standby only on power level 0.
+		 * Ignore any other power level.
+		 */
+		if (pwr_lvl != ARM_PWR_LVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[ARM_PWR_LVL0] =
+					ARM_LOCAL_STATE_RET;
+	} else {
+		for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					ARM_LOCAL_STATE_OFF;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+#else
+/*******************************************************************************
+ * ARM standard platform handler called to check the validity of the power
+ * state parameter. The power state parameter has to be a composite power
+ * state.
+ ******************************************************************************/
+int arm_validate_power_state(unsigned int power_state,
+				psci_power_state_t *req_state)
+{
+	unsigned int state_id;
+	int i;
+
+	assert(req_state);
+
+	/*
+	 *  Currently we are using a linear search for finding the matching
+	 *  entry in the idle power state array. This can be made a binary
+	 *  search if the number of entries justify the additional complexity.
+	 *  search if the number of entries justifies the additional complexity.
+	for (i = 0; !!arm_pm_idle_states[i]; i++) {
+		if (power_state == arm_pm_idle_states[i])
+			break;
+	}
+
+	/* Return error if entry not found in the idle state array */
+	if (!arm_pm_idle_states[i])
+		return PSCI_E_INVALID_PARAMS;
+
+	i = 0;
+	state_id = psci_get_pstate_id(power_state);
+
+	/* Parse the State ID and populate the state info parameter */
+	while (state_id) {
+		req_state->pwr_domain_state[i++] = state_id &
+						ARM_LOCAL_PSTATE_MASK;
+		state_id >>= ARM_LOCAL_PSTATE_WIDTH;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+#endif /* ARM_RECOM_STATE_ID_ENC */
+
+/*******************************************************************************
+ * ARM standard platform handler called to check the validity of the non secure
+ * entrypoint.
+ ******************************************************************************/
+int arm_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non secure entrypoint lies within the non
+	 * secure DRAM.
+	 */
+	if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint <
+			(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE)))
+		return PSCI_E_SUCCESS;
+#ifndef AARCH32
+	if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
+			(ARM_DRAM2_BASE + ARM_DRAM2_SIZE)))
+		return PSCI_E_SUCCESS;
+#endif
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+/******************************************************************************
+ * Default definition on ARM standard platforms to override the plat_psci_ops.
+ *****************************************************************************/
+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops)
+{
+	return ops;
+}
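+
+/*
+ * Illustrative sketch only: a platform that cannot suspend the system power
+ * domain could override the weak hook above and strip the corresponding
+ * handler from the generic ops. The platform_supports_system_suspend()
+ * query is hypothetical.
+ */
+#if 0
+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops)
+{
+	if (!platform_supports_system_suspend())
+		ops->get_sys_suspend_power_state = NULL;
+
+	return ops;
+}
+#endif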
+
+/******************************************************************************
+ * Helper function to save the platform state before a system suspend. Save the
+ * state of the system components which are not in the Always ON power domain.
+ *****************************************************************************/
+void arm_system_pwr_domain_save(void)
+{
+	/* Assert system power domain is available on the platform */
+	assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2);
+
+	plat_arm_gic_save();
+
+	/*
+	 * All the other peripherals which are configured by ARM TF are
+	 * re-initialized on resume from system suspend. Hence we
+	 * don't save their state here.
+	 */
+}
+
+/******************************************************************************
+ * Helper function to resume the platform from system suspend. Reinitialize
+ * the system components which are not in the Always ON power domain.
+ * TODO: Unify the platform setup when waking up from cold boot and system
+ * resume in arm_bl31_platform_setup().
+ *****************************************************************************/
+void arm_system_pwr_domain_resume(void)
+{
+	console_init(PLAT_ARM_BL31_RUN_UART_BASE, PLAT_ARM_BL31_RUN_UART_CLK_IN_HZ,
+						ARM_CONSOLE_BAUDRATE);
+
+	/* Assert system power domain is available on the platform */
+	assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2);
+
+	plat_arm_gic_resume();
+
+	plat_arm_security_setup();
+	arm_configure_sys_timer();
+}
+
+/*******************************************************************************
+ * Private function to program the mailbox for a cpu before it is released
+ * from reset. This function assumes that the Trusted mail box base is within
+ * the ARM_SHARED_RAM region
+ ******************************************************************************/
+void arm_program_trusted_mailbox(uintptr_t address)
+{
+	uintptr_t *mailbox = (void *) PLAT_ARM_TRUSTED_MAILBOX_BASE;
+
+	*mailbox = address;
+
+	/*
+	 * Ensure that the PLAT_ARM_TRUSTED_MAILBOX_BASE is within
+	 * ARM_SHARED_RAM region.
+	 */
+	assert((PLAT_ARM_TRUSTED_MAILBOX_BASE >= ARM_SHARED_RAM_BASE) &&
+		((PLAT_ARM_TRUSTED_MAILBOX_BASE + sizeof(*mailbox)) <= \
+				(ARM_SHARED_RAM_BASE + ARM_SHARED_RAM_SIZE)));
+}
+
+/*******************************************************************************
+ * The ARM Standard platform definition of platform porting API
+ * `plat_setup_psci_ops`.
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+				const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = plat_arm_psci_override_pm_ops(&plat_arm_psci_pm_ops);
+
+	/* Setup mailbox with entry point. */
+	arm_program_trusted_mailbox(sec_entrypoint);
+	return 0;
+}
diff --git a/plat/arm/common/arm_sip_svc.c b/plat/arm/common/arm_sip_svc.c
new file mode 100644
index 0000000..7fe6101
--- /dev/null
+++ b/plat/arm/common/arm_sip_svc.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_sip_svc.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include <pmf.h>
+#include <runtime_svc.h>
+#include <stdint.h>
+#include <uuid.h>
+
+
+/* ARM SiP Service UUID */
+DEFINE_SVC_UUID(arm_sip_svc_uid,
+		0xe2756d55, 0x3360, 0x4bb5, 0xbf, 0xf3,
+		0x62, 0x79, 0xfd, 0x11, 0x37, 0xff);
+
+static int arm_sip_setup(void)
+{
+	if (pmf_setup() != 0)
+		return 1;
+	return 0;
+}
+
+/*
+ * This function handles ARM defined SiP Calls
+ */
+static uintptr_t arm_sip_handler(unsigned int smc_fid,
+			u_register_t x1,
+			u_register_t x2,
+			u_register_t x3,
+			u_register_t x4,
+			void *cookie,
+			void *handle,
+			u_register_t flags)
+{
+	int call_count = 0;
+
+	/*
+	 * Dispatch PMF calls to PMF SMC handler and return its return
+	 * value
+	 */
+	if (is_pmf_fid(smc_fid)) {
+		return pmf_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+				handle, flags);
+	}
+
+	switch (smc_fid) {
+	case ARM_SIP_SVC_EXE_STATE_SWITCH: {
+		u_register_t pc;
+
+		/* Allow calls from non-secure only */
+		if (!is_caller_non_secure(flags))
+			SMC_RET1(handle, STATE_SW_E_DENIED);
+
+		/* Validate supplied entry point */
+		pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
+		if (arm_validate_ns_entrypoint(pc))
+			SMC_RET1(handle, STATE_SW_E_PARAM);
+
+		/*
+		 * Pointers used in execution state switch are all 32 bits wide
+		 */
+		return arm_execution_state_switch(smc_fid, (uint32_t) x1,
+				(uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
+				handle);
+		}
+
+	case ARM_SIP_SVC_CALL_COUNT:
+		/* PMF calls */
+		call_count += PMF_NUM_SMC_CALLS;
+
+		/* State switch call */
+		call_count += 1;
+
+		SMC_RET1(handle, call_count);
+
+	case ARM_SIP_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, arm_sip_svc_uid);
+
+	case ARM_SIP_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, ARM_SIP_SVC_VERSION_MAJOR, ARM_SIP_SVC_VERSION_MINOR);
+
+	default:
+		WARN("Unimplemented ARM SiP Service Call: 0x%x \n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+}
+
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	arm_sip_svc,
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	arm_sip_setup,
+	arm_sip_handler
+);
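+
+/*
+ * Illustrative sketch only: a non-secure AArch64 caller could probe this
+ * service as shown below. The smc64() helper is hypothetical; real callers
+ * use their own SMC Calling Convention wrappers.
+ */
+#if 0
+static uint64_t smc64(uint64_t fid, uint64_t a1, uint64_t a2, uint64_t a3)
+{
+	register uint64_t x0 __asm__("x0") = fid;
+	register uint64_t x1 __asm__("x1") = a1;
+	register uint64_t x2 __asm__("x2") = a2;
+	register uint64_t x3 __asm__("x3") = a3;
+
+	__asm__ volatile("smc #0"
+			 : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
+			 : : "x4", "x5", "x6", "x7", "memory");
+	return x0;
+}
+
+static void probe_arm_sip_svc(void)
+{
+	/* arm_sip_handler reports PMF_NUM_SMC_CALLS + 1 implemented calls */
+	uint64_t count = smc64(ARM_SIP_SVC_CALL_COUNT, 0, 0, 0);
+	uint64_t version = smc64(ARM_SIP_SVC_VERSION, 0, 0, 0);
+
+	(void)count;
+	(void)version;
+}
+#endif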
diff --git a/plat/arm/common/arm_topology.c b/plat/arm/common/arm_topology.c
new file mode 100644
index 0000000..c6d12da
--- /dev/null
+++ b/plat/arm/common/arm_topology.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * This function validates an MPIDR by checking whether it falls within the
+ * acceptable bounds. An error code (-1) is returned if an incorrect mpidr
+ * is passed.
+ ******************************************************************************/
+int arm_check_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+	uint64_t valid_mask;
+
+#if ARM_PLAT_MT
+	unsigned int pe_id;
+
+	valid_mask = ~(MPIDR_AFFLVL_MASK |
+			(MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) |
+			(MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT));
+	cluster_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	pe_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+#else
+	valid_mask = ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK);
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+#endif /* ARM_PLAT_MT */
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+	if (mpidr & valid_mask)
+		return -1;
+
+	if (cluster_id >= PLAT_ARM_CLUSTER_COUNT)
+		return -1;
+
+	/* Validate cpu_id by checking whether it represents a CPU in
+	   one of the clusters present on the platform. */
+	if (cpu_id >= plat_arm_get_cluster_core_count(mpidr))
+		return -1;
+
+#if ARM_PLAT_MT
+	if (pe_id >= plat_arm_get_cpu_pe_count(mpidr))
+		return -1;
+#endif /* ARM_PLAT_MT */
+
+	return 0;
+}
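+
+/*
+ * Illustrative sketch only: platform ports typically gate their MPIDR to
+ * linear core index conversion on the check above, for example:
+ */
+#if 0
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	if (arm_check_mpidr(mpidr) == -1)
+		return -1;
+
+	return plat_arm_calc_core_pos(mpidr);
+}
+#endif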
diff --git a/plat/arm/common/arm_tzc400.c b/plat/arm/common/arm_tzc400.c
new file mode 100644
index 0000000..e19ca67
--- /dev/null
+++ b/plat/arm/common/arm_tzc400.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <tzc400.h>
+
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_arm_security_setup
+
+
+/*******************************************************************************
+ * Initialize the TrustZone Controller for ARM standard platforms.
+ * Configure:
+ *   - Region 0 with no access;
+ *   - Region 1 with secure access only;
+ *   - the remaining DRAM regions with access for the given Non-Secure masters.
+ *
+ * When booting an EL3 payload, this is simplified: we configure region 0 with
+ * secure access only and do not enable any other region.
+ ******************************************************************************/
+void arm_tzc400_setup(void)
+{
+	INFO("Configuring TrustZone Controller\n");
+
+	tzc400_init(PLAT_ARM_TZC_BASE);
+
+	/* Disable filters. */
+	tzc400_disable_filters();
+
+#ifndef EL3_PAYLOAD_BASE
+
+	/* Region 0 set to no access by default */
+	tzc400_configure_region0(TZC_REGION_S_NONE, 0);
+
+	/* Region 1 set to cover Secure part of DRAM */
+	tzc400_configure_region(PLAT_ARM_TZC_FILTERS, 1,
+			ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END,
+			TZC_REGION_S_RDWR,
+			0);
+
+	/* Region 2 set to cover Non-Secure access to 1st DRAM address range.
+	 * Apply the same configuration to the given filters in the TZC. */
+	tzc400_configure_region(PLAT_ARM_TZC_FILTERS, 2,
+			ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END,
+			ARM_TZC_NS_DRAM_S_ACCESS,
+			PLAT_ARM_TZC_NS_DEV_ACCESS);
+
+	/* Region 3 set to cover Non-Secure access to 2nd DRAM address range */
+	tzc400_configure_region(PLAT_ARM_TZC_FILTERS, 3,
+			ARM_DRAM2_BASE, ARM_DRAM2_END,
+			ARM_TZC_NS_DRAM_S_ACCESS,
+			PLAT_ARM_TZC_NS_DEV_ACCESS);
+#else
+	/* Allow secure access only to DRAM for EL3 payloads. */
+	tzc400_configure_region0(TZC_REGION_S_RDWR, 0);
+#endif /* EL3_PAYLOAD_BASE */
+
+	/*
+	 * Raise an exception if a NS device tries to access secure memory
+	 * TODO: Add interrupt handling support.
+	 */
+	tzc400_set_action(TZC_ACTION_ERR);
+
+	/* Enable filters. */
+	tzc400_enable_filters();
+}
+
+void plat_arm_security_setup(void)
+{
+	arm_tzc400_setup();
+}
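+
+/*
+ * Illustrative sketch only: a platform needing an additional protected region
+ * could override the weak plat_arm_security_setup() and append one on top of
+ * the common configuration. The carveout macros below are hypothetical.
+ */
+#if 0
+void plat_arm_security_setup(void)
+{
+	arm_tzc400_setup();
+
+	/* Add a secure-only carveout for a platform specific peripheral */
+	tzc400_disable_filters();
+	tzc400_configure_region(PLAT_ARM_TZC_FILTERS, 4,
+			PLAT_SECURE_CARVEOUT_BASE, PLAT_SECURE_CARVEOUT_END,
+			TZC_REGION_S_RDWR, 0);
+	tzc400_enable_filters();
+}
+#endif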
diff --git a/plat/arm/common/arm_tzc_dmc500.c b/plat/arm/common/arm_tzc_dmc500.c
new file mode 100644
index 0000000..8e41391
--- /dev/null
+++ b/plat/arm/common/arm_tzc_dmc500.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <tzc_dmc500.h>
+
+/*******************************************************************************
+ * Initialize the DMC500-TrustZone Controller for ARM standard platforms.
+ * Configure both interfaces: Region 0 with no access, Region 1 with
+ * secure access only, and the remaining DRAM regions with access for the
+ * given Non-Secure masters.
+ *
+ * When booting an EL3 payload, this is simplified: we configure region 0 with
+ * secure access only and do not enable any other region.
+ ******************************************************************************/
+void arm_tzc_dmc500_setup(tzc_dmc500_driver_data_t *plat_driver_data)
+{
+	assert(plat_driver_data);
+
+	INFO("Configuring DMC-500 TZ Settings\n");
+
+	tzc_dmc500_driver_init(plat_driver_data);
+
+#ifndef EL3_PAYLOAD_BASE
+	/* Region 0 set to no access by default */
+	tzc_dmc500_configure_region0(TZC_REGION_S_NONE, 0);
+
+	/* Region 1 set to cover Secure part of DRAM */
+	tzc_dmc500_configure_region(1, ARM_AP_TZC_DRAM1_BASE,
+		ARM_EL3_TZC_DRAM1_END,
+		TZC_REGION_S_RDWR,
+		0);
+
+	/* Region 2 set to cover Non-Secure access to 1st DRAM address range.*/
+	tzc_dmc500_configure_region(2,
+		ARM_NS_DRAM1_BASE,
+		ARM_NS_DRAM1_END,
+		ARM_TZC_NS_DRAM_S_ACCESS,
+		PLAT_ARM_TZC_NS_DEV_ACCESS);
+
+	/* Region 3 set to cover Non-Secure access to 2nd DRAM address range */
+	tzc_dmc500_configure_region(3,
+		ARM_DRAM2_BASE,
+		ARM_DRAM2_END,
+		ARM_TZC_NS_DRAM_S_ACCESS,
+		PLAT_ARM_TZC_NS_DEV_ACCESS);
+#else
+	/* Allow secure access only to DRAM for EL3 payloads */
+	tzc_dmc500_configure_region0(TZC_REGION_S_RDWR, 0);
+#endif
+	/*
+	 * Raise an exception if a NS device tries to access secure memory
+	 * TODO: Add interrupt handling support.
+	 */
+	tzc_dmc500_set_action(TZC_ACTION_RV_LOWERR);
+
+	/*
+	 * Flush the configuration settings for them to take effect. Validate
+	 * the flush by checking that FILTER_EN is set in the region 1
+	 * attributes register.
+	 */
+	tzc_dmc500_config_complete();
+
+	/*
+	 * Wait for the flush to complete.
+	 * TODO: Have a timeout for this loop
+	 */
+	while (tzc_dmc500_verify_complete())
+		;
+}
diff --git a/plat/arm/common/execution_state_switch.c b/plat/arm/common/execution_state_switch.c
new file mode 100644
index 0000000..8499db0
--- /dev/null
+++ b/plat/arm/common/execution_state_switch.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_sip_svc.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <plat_arm.h>
+#include <psci.h>
+#include <smcc_helpers.h>
+#include <string.h>
+#include <utils.h>
+
+/*
+ * Handle SMC from a lower exception level to switch its execution state
+ * (either from AArch64 to AArch32, or vice versa).
+ *
+ * smc_fid:
+ *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
+ *	ARM_SIP_SVC_STATE_SWITCH_32.
+ * pc_hi, pc_lo:
+ *	PC upon re-entry to the calling exception level; width dependent on the
+ *	calling exception level.
+ * cookie_hi, cookie_lo:
+ *	Opaque pointer pair received from the caller, to be passed back upon
+ *	re-entry.
+ * handle:
+ *	Handle to saved context.
+ */
+int arm_execution_state_switch(unsigned int smc_fid,
+		uint32_t pc_hi,
+		uint32_t pc_lo,
+		uint32_t cookie_hi,
+		uint32_t cookie_lo,
+		void *handle)
+{
+	/* Execution state can be switched only if EL3 is AArch64 */
+#ifdef AARCH64
+	int caller_64, from_el2, el, endianness, thumb = 0;
+	u_register_t spsr, pc, scr, sctlr;
+	entry_point_info_t ep;
+	cpu_context_t *ctx = (cpu_context_t *) handle;
+	el3_state_t *el3_ctx = get_el3state_ctx(ctx);
+
+	/* That the SMC originated from NS is already validated by the caller */
+
+	/*
+	 * Disallow state switch if any of the secondaries have been brought up.
+	 */
+	if (psci_secondaries_brought_up())
+		goto exec_denied;
+
+	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
+	caller_64 = (GET_RW(spsr) == MODE_RW_64);
+
+	if (caller_64) {
+		/*
+		 * If the call originated from AArch64, expect 32-bit pointers when
+		 * switching to AArch32.
+		 */
+		if ((pc_hi != 0) || (cookie_hi != 0))
+			goto invalid_param;
+
+		pc = pc_lo;
+
+		/* Instruction state when entering AArch32 */
+		thumb = pc & 1;
+	} else {
+		/* Construct AArch64 PC */
+		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
+	}
+
+	/* Make sure PC is 4-byte aligned, except for Thumb */
+	if ((pc & 0x3) && !thumb)
+		goto invalid_param;
+
+	/*
+	 * EL3 controls register width of the immediate lower EL only. Expect
+	 * this request from EL2/Hyp unless:
+	 *
+	 * - EL2 is not implemented;
+	 * - EL2 is implemented, but was disabled. This can be inferred from
+	 *   SCR_EL3.HCE.
+	 */
+	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
+		(GET_M32(spsr) == MODE32_hyp);
+	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
+	if (!from_el2) {
+		/* The call is from NS privilege level other than HYP */
+
+		/*
+		 * Disallow switching state if there's a Hypervisor in place;
+		 * this request must be taken up with the Hypervisor instead.
+		 */
+		if (scr & SCR_HCE_BIT)
+			goto exec_denied;
+	}
+
+	/*
+	 * Return to the caller using the same endianness. Extract
+	 * endianness bit from the respective system control register
+	 * directly.
+	 */
+	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
+	endianness = !!(sctlr & SCTLR_EE_BIT);
+
+	/* Construct SPSR for the exception state we're about to switch to */
+	if (caller_64) {
+		int impl;
+
+		/*
+		 * Switching from AArch64 to AArch32. Ensure this CPU implements
+		 * the target EL in AArch32.
+		 */
+		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
+		if (impl != EL_IMPL_A64_A32)
+			goto exec_denied;
+
+		/* Return to the equivalent AArch32 privilege level */
+		el = from_el2 ? MODE32_hyp : MODE32_svc;
+		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
+				endianness, DISABLE_ALL_EXCEPTIONS);
+	} else {
+		/*
+		 * Switching from AArch32 to AArch64. Since it's not possible to
+		 * implement an EL as AArch32-only (from which this call was
+		 * raised), it's safe to assume AArch64 is also implemented.
+		 */
+		el = from_el2 ? MODE_EL2 : MODE_EL1;
+		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	}
+
+	/*
+	 * Use the context management library to re-initialize the existing
+	 * context with the execution state flipped. Since the library takes
+	 * entry_point_info_t pointer as the argument, construct a dummy one
+	 * with PC, state width, endianness, security etc. appropriately set.
+	 * Other entries in the entry point structure are irrelevant for this
+	 * purpose.
+	 */
+	zeromem(&ep, sizeof(ep));
+	ep.pc = pc;
+	ep.spsr = spsr;
+	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
+			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
+			 EP_ST_DISABLE));
+
+	/*
+	 * Re-initialize the system register context, and exit EL3 as if for the
+	 * first time. State switch is effectively a soft reset of the
+	 * calling EL.
+	 */
+	cm_init_my_context(&ep);
+	cm_prepare_el3_exit(NON_SECURE);
+
+	/*
+	 * State switch success. The caller of SMC wouldn't see the SMC
+	 * returning. Instead, execution starts at the supplied entry point,
+	 * with context pointers populated in registers 0 and 1.
+	 */
+	SMC_RET2(handle, cookie_hi, cookie_lo);
+
+invalid_param:
+	SMC_RET1(handle, STATE_SW_E_PARAM);
+
+exec_denied:
+#endif
+	/* State switch denied */
+	SMC_RET1(handle, STATE_SW_E_DENIED);
+}
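+
+/*
+ * Illustrative example only: an AArch64 EL2/EL1 caller switching itself to
+ * AArch32 would issue something like
+ *
+ *	SMC(ARM_SIP_SVC_STATE_SWITCH_64,
+ *	    0,			   - pc_hi must be 0 for an AArch64 caller
+ *	    aarch32_entry,	   - pc_lo: 32-bit entry point
+ *	    cookie_hi, cookie_lo);
+ *
+ * On success the SMC does not return to the call site; execution resumes at
+ * aarch32_entry in AArch32 state with the two cookie words in r0 and r1.
+ */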
diff --git a/plat/arm/common/sp_min/arm_sp_min.mk b/plat/arm/common/sp_min/arm_sp_min.mk
new file mode 100644
index 0000000..e6792ca
--- /dev/null
+++ b/plat/arm/common/sp_min/arm_sp_min.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# SP MIN source files common to ARM standard platforms
+BL32_SOURCES		+=	plat/arm/common/arm_pm.c			\
+				plat/arm/common/arm_topology.c			\
+				plat/arm/common/sp_min/arm_sp_min_setup.c	\
+				plat/common/aarch32/platform_mp_stack.S		\
+				plat/common/plat_psci_common.c
+
diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c
new file mode 100644
index 0000000..c5408c8
--- /dev/null
+++ b/plat/arm/common/sp_min/arm_sp_min_setup.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <platform_sp_min.h>
+
+#define BL32_END (uintptr_t)(&__BL32_END__)
+
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak sp_min_early_platform_setup
+#pragma weak sp_min_platform_setup
+#pragma weak sp_min_plat_arch_setup
+
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for the
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *sp_min_plat_get_bl33_ep_info(void)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = &bl33_image_ep_info;
+
+	/*
+	 * None of the images on the ARM development platforms can have 0x0
+	 * as the entrypoint
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform early platform setup.
+ ******************************************************************************/
+void arm_sp_min_early_platform_setup(void *from_bl2,
+		void *plat_params_from_bl2)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_ARM_BOOT_UART_BASE, PLAT_ARM_BOOT_UART_CLK_IN_HZ,
+				ARM_CONSOLE_BAUDRATE);
+
+#if RESET_TO_SP_MIN
+	/* There are no parameters from BL2 if SP_MIN is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	/*
+	 * Tell SP_MIN where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else /* RESET_TO_SP_MIN */
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy BL33 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params) {
+		if (bl_params->image_id == BL33_IMAGE_ID) {
+			bl33_image_ep_info = *bl_params->ep_info;
+			break;
+		}
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_image_ep_info.pc == 0)
+		panic();
+
+#endif /* RESET_TO_SP_MIN */
+
+}
+
+void sp_min_early_platform_setup(void *from_bl2,
+		void *plat_params_from_bl2)
+{
+	arm_sp_min_early_platform_setup(from_bl2, plat_params_from_bl2);
+
+	/*
+	 * Initialize Interconnect for this cluster during cold boot.
+	 * No need for locks as no other CPU is active.
+	 */
+	plat_arm_interconnect_init();
+
+	/*
+	 * Enable Interconnect coherency for the primary CPU's cluster.
+	 * Earlier bootloader stages might already do this (e.g. Trusted
+	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
+	 * executing this code twice anyway.
+	 * Platform specific PSCI code will enable coherency for other
+	 * clusters.
+	 */
+	plat_arm_interconnect_enter_coherency();
+}
+
+/*******************************************************************************
+ * Perform any SP_MIN platform runtime setup prior to SP_MIN exit.
+ * Common to ARM standard platforms.
+ ******************************************************************************/
+void arm_sp_min_plat_runtime_setup(void)
+{
+	/* Initialize the runtime console */
+	console_init(PLAT_ARM_SP_MIN_RUN_UART_BASE,
+		PLAT_ARM_SP_MIN_RUN_UART_CLK_IN_HZ, ARM_CONSOLE_BAUDRATE);
+}
+
+/*******************************************************************************
+ * Perform platform specific setup for SP_MIN
+ ******************************************************************************/
+void sp_min_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access
+	 * (if earlier BL has not already done so).
+	 */
+#if RESET_TO_SP_MIN
+	plat_arm_security_setup();
+#endif
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+
+	/* Allow access to the System counter timer module */
+	arm_configure_sys_timer();
+
+	/* Initialize power controller before setting up topology */
+	plat_arm_pwrc_setup();
+}
+
+void sp_min_plat_runtime_setup(void)
+{
+	arm_sp_min_plat_runtime_setup();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU
+ ******************************************************************************/
+void sp_min_plat_arch_setup(void)
+{
+
+	arm_setup_page_tables(BL32_BASE,
+			      (BL32_END - BL32_BASE),
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+
+	enable_mmu_secure(0);
+}
diff --git a/plat/arm/common/tsp/arm_tsp.mk b/plat/arm/common/tsp/arm_tsp.mk
new file mode 100644
index 0000000..4ad77c6
--- /dev/null
+++ b/plat/arm/common/tsp/arm_tsp.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TSP source files common to ARM standard platforms
+BL32_SOURCES		+=	plat/arm/common/arm_topology.c			\
+				plat/arm/common/tsp/arm_tsp_setup.c		\
+				plat/common/aarch64/platform_mp_stack.S
diff --git a/plat/arm/common/tsp/arm_tsp_setup.c b/plat/arm/common/tsp/arm_tsp_setup.c
new file mode 100644
index 0000000..abeaea0
--- /dev/null
+++ b/plat/arm/common/tsp/arm_tsp_setup.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_def.h>
+#include <bl_common.h>
+#include <console.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include <platform_tsp.h>
+
+#define BL32_END (unsigned long)(&__BL32_END__)
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak tsp_early_platform_setup
+#pragma weak tsp_platform_setup
+#pragma weak tsp_plat_arch_setup
+
+
+/*******************************************************************************
+ * Initialize the UART
+ ******************************************************************************/
+void arm_tsp_early_platform_setup(void)
+{
+	/*
+	 * Initialize a console different from the one already in use to
+	 * display messages from the TSP
+	 */
+	console_init(PLAT_ARM_TSP_UART_BASE, PLAT_ARM_TSP_UART_CLK_IN_HZ,
+			ARM_CONSOLE_BAUDRATE);
+}
+
+void tsp_early_platform_setup(void)
+{
+	arm_tsp_early_platform_setup();
+}
+
+/*******************************************************************************
+ * Placeholder for performing any platform specific setup
+ ******************************************************************************/
+void tsp_platform_setup(void)
+{
+	plat_arm_gic_driver_init();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU
+ ******************************************************************************/
+void tsp_plat_arch_setup(void)
+{
+	arm_setup_page_tables(BL32_BASE,
+			      (BL32_END - BL32_BASE),
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END
+#if USE_COHERENT_MEM
+			      , BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+#endif
+			      );
+	enable_mmu_el1(0);
+}
diff --git a/plat/arm/css/common/aarch32/css_helpers.S b/plat/arm/css/common/aarch32/css_helpers.S
new file mode 100644
index 0000000..80aa24c
--- /dev/null
+++ b/plat/arm/css/common/aarch32/css_helpers.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <css_def.h>
+
+	.weak	plat_secondary_cold_boot_setup
+	.weak	plat_get_my_entrypoint
+	.globl	css_calc_core_pos_swap_cluster
+	.weak	plat_is_my_cpu_primary
+
+	/* ---------------------------------------------------------------------
+	 * void plat_secondary_cold_boot_setup(void);
+	 * In the normal boot flow, cold-booting secondary
+	 * CPUs is not yet implemented and they panic.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	/* TODO: Implement secondary CPU cold boot setup on CSS platforms */
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * uintptr_t plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and a warm
+	 * boot. On CSS platforms, this distinction is based on the contents of
+	 * the Trusted Mailbox. It is initialised to zero by the SCP before the
+	 * AP cores are released from reset. Therefore, a zero mailbox means
+	 * it's a cold reset.
+	 *
+	 * This function returns the contents of the mailbox, i.e.:
+	 *  - 0 for a cold boot;
+	 *  - the warm boot entrypoint for a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	ldr	r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	r0, [r0]
+	bx	lr
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------------
+	 * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr)
+	 * Utility function to calculate the core position by
+	 * swapping the cluster order. This is necessary in order to
+	 * match the format of the boot information passed by the SCP
+	 * and read in plat_is_my_cpu_primary below.
+	 * -----------------------------------------------------------
+	 */
+func css_calc_core_pos_swap_cluster
+	and	r1, r0, #MPIDR_CPU_MASK
+	and	r0, r0, #MPIDR_CLUSTER_MASK
+	eor	r0, r0, #(1 << MPIDR_AFFINITY_BITS)  // swap cluster order
+	add	r0, r1, r0, LSR #6
+	bx	lr
+endfunc css_calc_core_pos_swap_cluster
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu (applicable only after a cold boot)
+	 * -----------------------------------------------------
+	 */
+#if CSS_USE_SCMI_SDS_DRIVER
+func plat_is_my_cpu_primary
+	mov	r10, lr
+	bl	plat_my_core_pos
+	mov	r4, r0
+	bl	sds_get_primary_cpu_id
+	/* Check for error */
+	mov	r1, #0xffffffff
+	cmp	r0, r1
+	beq	1f
+	cmp	r0, r4
+	moveq	r0, #1
+	movne	r0, #0
+	bx	r10
+1:
+	no_ret	plat_panic_handler
+endfunc plat_is_my_cpu_primary
+#else
+func plat_is_my_cpu_primary
+	mov	r10, lr
+	bl	plat_my_core_pos
+	ldr	r1, =SCP_BOOT_CFG_ADDR
+	ldr	r1, [r1]
+	ubfx	r1, r1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \
+			#PLAT_CSS_PRIMARY_CPU_BIT_WIDTH
+	cmp	r0, r1
+	moveq	r0, #1
+	movne	r0, #0
+	bx	r10
+endfunc plat_is_my_cpu_primary
+#endif
diff --git a/plat/arm/css/common/aarch64/css_helpers.S b/plat/arm/css/common/aarch64/css_helpers.S
new file mode 100644
index 0000000..59d9206
--- /dev/null
+++ b/plat/arm/css/common/aarch64/css_helpers.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <css_def.h>
+
+	.weak	plat_secondary_cold_boot_setup
+	.weak	plat_get_my_entrypoint
+	.globl	css_calc_core_pos_swap_cluster
+	.weak	plat_is_my_cpu_primary
+
+	/* ---------------------------------------------------------------------
+	 * void plat_secondary_cold_boot_setup(void);
+	 *
+	 * In the normal boot flow, cold-booting secondary CPUs is not yet
+	 * implemented and they panic.
+	 *
+	 * When booting an EL3 payload, secondary CPUs are placed in a holding
+	 * pen, waiting for their mailbox to be populated. Note that all CPUs
+	 * share the same mailbox; therefore, populating it will release all
+	 * CPUs from their holding pen. If finer-grained control is needed then
+	 * this should be handled in the code that secondary CPUs jump to.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+#ifndef EL3_PAYLOAD_BASE
+	/* TODO: Implement secondary CPU cold boot setup on CSS platforms */
+cb_panic:
+	b	cb_panic
+#else
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+
+	/* Wait until the mailbox gets populated */
+poll_mailbox:
+	ldr	x1, [x0]
+	cbz	x1, 1f
+	br	x1
+1:
+	wfe
+	b	poll_mailbox
+#endif /* EL3_PAYLOAD_BASE */
+endfunc plat_secondary_cold_boot_setup
+
+	/* ---------------------------------------------------------------------
+	 * uintptr_t plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between a cold and a warm
+	 * boot. On CSS platforms, this distinction is based on the contents of
+	 * the Trusted Mailbox. It is initialised to zero by the SCP before the
+	 * AP cores are released from reset. Therefore, a zero mailbox means
+	 * it's a cold reset.
+	 *
+	 * This function returns the contents of the mailbox, i.e.:
+	 *  - 0 for a cold boot;
+	 *  - the warm boot entrypoint for a warm boot.
+	 * ---------------------------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	mov_imm	x0, PLAT_ARM_TRUSTED_MAILBOX_BASE
+	ldr	x0, [x0]
+	ret
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------------
+	 * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr)
+	 * Utility function to calculate the core position by
+	 * swapping the cluster order. This is necessary in order to
+	 * match the format of the boot information passed by the SCP
+	 * and read in plat_is_my_cpu_primary below.
+	 * -----------------------------------------------------------
+	 */
+func css_calc_core_pos_swap_cluster
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	eor	x0, x0, #(1 << MPIDR_AFFINITY_BITS)  // swap cluster order
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc css_calc_core_pos_swap_cluster
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu (applicable only after a cold boot)
+	 * -----------------------------------------------------
+	 */
+#if CSS_USE_SCMI_SDS_DRIVER
+func plat_is_my_cpu_primary
+	mov	x9, x30
+	bl	plat_my_core_pos
+	mov	x4, x0
+	bl	sds_get_primary_cpu_id
+	/* Check for error */
+	mov	x1, #0xffffffff
+	cmp	x0, x1
+	b.eq	1f
+	cmp	x0, x4
+	cset	w0, eq
+	ret	x9
+1:
+	no_ret	plat_panic_handler
+endfunc plat_is_my_cpu_primary
+#else
+func plat_is_my_cpu_primary
+	mov	x9, x30
+	bl	plat_my_core_pos
+	ldr	x1, =SCP_BOOT_CFG_ADDR
+	ldr	x1, [x1]
+	ubfx	x1, x1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \
+			#PLAT_CSS_PRIMARY_CPU_BIT_WIDTH
+	cmp	x0, x1
+	cset	w0, eq
+	ret	x9
+endfunc plat_is_my_cpu_primary
+#endif
diff --git a/plat/arm/css/common/css_bl1_setup.c b/plat/arm/css/common/css_bl1_setup.c
new file mode 100644
index 0000000..7a2a6ff
--- /dev/null
+++ b/plat/arm/css/common/css_bl1_setup.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include <soc_css.h>
+
+void bl1_platform_setup(void)
+{
+	arm_bl1_platform_setup();
+	/*
+	 * Do ARM CSS SoC security setup.
+	 * BL1 needs to enable normal world access to memory.
+	 */
+	soc_css_security_setup();
+}
+
diff --git a/plat/arm/css/common/css_bl2_setup.c b/plat/arm/css/common/css_bl2_setup.c
new file mode 100644
index 0000000..9b4800e
--- /dev/null
+++ b/plat/arm/css/common/css_bl2_setup.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <css_def.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <string.h>
+#include <utils.h>
+#include "../drivers/scp/css_scp.h"
+
+/* Weak definition may be overridden in specific CSS based platform */
+#if LOAD_IMAGE_V2
+#pragma weak plat_arm_bl2_handle_scp_bl2
+#else
+#pragma weak bl2_plat_handle_scp_bl2
+#endif
+
+/*******************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#else
+int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#endif
+{
+	int ret;
+
+	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
+
+	ret = css_scp_boot_image_xfer((void *)scp_bl2_image_info->image_base,
+		scp_bl2_image_info->image_size);
+
+	if (ret == 0)
+		ret = css_scp_boot_ready();
+
+	if (ret == 0)
+		INFO("BL2: SCP_BL2 transferred to SCP\n");
+	else
+		ERROR("BL2: SCP_BL2 transfer failure\n");
+
+	return ret;
+}
+
+#if !CSS_USE_SCMI_SDS_DRIVER
+# ifdef EL3_PAYLOAD_BASE
+
+/*
+ * We need to override some of the platform functions when booting an EL3
+ * payload. This only needs to be done for SCPI/BOM SCP systems as,
+ * in the case of SDS, the structures remain in memory and don't need to be
+ * overwritten.
+ */
+
+static unsigned int scp_boot_config;
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	arm_bl2_early_platform_setup(mem_layout);
+
+	/* Save SCP Boot config before it gets overwritten by SCP_BL2 loading */
+	scp_boot_config = mmio_read_32(SCP_BOOT_CFG_ADDR);
+	VERBOSE("BL2: Saved SCP Boot config = 0x%x\n", scp_boot_config);
+}
+
+void bl2_platform_setup(void)
+{
+	arm_bl2_platform_setup();
+
+	/*
+	 * Before releasing the AP cores out of reset, the SCP writes some data
+	 * at the beginning of the Trusted SRAM. It is overwritten before
+	 * reaching this function. We need to restore this data, as if the
+	 * target had just come out of reset. This implies:
+	 *  - zeroing the first 128 bytes of Trusted SRAM using zeromem instead
+	 *    of zero_normalmem since this is device memory.
+	 *  - restoring the SCP boot configuration.
+	 */
+	VERBOSE("BL2: Restoring SCP reset data in Trusted SRAM\n");
+	zeromem((void *) ARM_SHARED_RAM_BASE, 128);
+	mmio_write_32(SCP_BOOT_CFG_ADDR, scp_boot_config);
+}
+
+# endif /* EL3_PAYLOAD_BASE */
+
+#endif /* CSS_USE_SCMI_SDS_DRIVER */
diff --git a/plat/arm/css/common/css_bl2u_setup.c b/plat/arm/css/common/css_bl2u_setup.c
new file mode 100644
index 0000000..d225151
--- /dev/null
+++ b/plat/arm/css/common/css_bl2u_setup.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include "../drivers/scp/css_scp.h"
+
+/* Weak definition may be overridden in specific CSS based platform */
+#pragma weak bl2u_plat_handle_scp_bl2u
+
+/* Data structure which holds the SCP_BL2U image info for BL2U */
+static image_info_t scp_bl2u_image_info;
+
+/*******************************************************************************
+ * BL1 can pass platform dependent information to BL2U in x1.
+ * On ARM CSS platforms, x1 contains the SCP_BL2U image info.
+ * On ARM FVP platforms, x1 is not used.
+ * In both cases, x0 contains the extents of the memory available to BL2U.
+ ******************************************************************************/
+void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info)
+{
+	if (!plat_info)
+		panic();
+
+	arm_bl2u_early_platform_setup(mem_layout, plat_info);
+
+	scp_bl2u_image_info = *(image_info_t *)plat_info;
+}
+
+/*******************************************************************************
+ * Transfer SCP_BL2U from Trusted RAM using the SCP Download protocol.
+ ******************************************************************************/
+int bl2u_plat_handle_scp_bl2u(void)
+{
+	int ret;
+
+	INFO("BL2U: Initiating SCP_BL2U transfer to SCP\n");
+
+	ret = css_scp_boot_image_xfer((void *)scp_bl2u_image_info.image_base,
+		scp_bl2u_image_info.image_size);
+
+	if (ret == 0)
+		ret = css_scp_boot_ready();
+
+	if (ret == 0)
+		INFO("BL2U: SCP_BL2U transferred to SCP\n");
+	else
+		ERROR("BL2U: SCP_BL2U transfer failure\n");
+
+	return ret;
+}
diff --git a/plat/arm/css/common/css_common.mk b/plat/arm/css/common/css_common.mk
new file mode 100644
index 0000000..63e3059
--- /dev/null
+++ b/plat/arm/css/common/css_common.mk
@@ -0,0 +1,85 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+
+# By default, SCP images are needed by CSS platforms.
+CSS_LOAD_SCP_IMAGES	?=	1
+
+# By default, SCMI driver is disabled for CSS platforms
+CSS_USE_SCMI_SDS_DRIVER	?=	0
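+# For example (illustrative invocation; the platform name is an assumption),
+# a CSS platform build can enable the SCMI/SDS support from the command line:
+#   make PLAT=juno CSS_USE_SCMI_SDS_DRIVER=1 all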
+
+PLAT_INCLUDES		+=	-Iinclude/plat/arm/css/common			\
+				-Iinclude/plat/arm/css/common/aarch64
+
+
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/css/common/${ARCH}/css_helpers.S
+
+BL1_SOURCES		+=	plat/arm/css/common/css_bl1_setup.c
+
+BL2_SOURCES		+=	plat/arm/css/common/css_bl2_setup.c
+
+BL2U_SOURCES		+=	plat/arm/css/common/css_bl2u_setup.c
+
+BL31_SOURCES		+=	plat/arm/css/common/css_pm.c			\
+				plat/arm/css/common/css_topology.c
+
+ifeq (${CSS_USE_SCMI_SDS_DRIVER},0)
+BL31_SOURCES		+=	plat/arm/css/drivers/scp/css_pm_scpi.c		\
+				plat/arm/css/drivers/scpi/css_mhu.c		\
+				plat/arm/css/drivers/scpi/css_scpi.c
+else
+BL31_SOURCES		+=	plat/arm/css/drivers/scp/css_pm_scmi.c		\
+				plat/arm/css/drivers/scmi/scmi_common.c		\
+				plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c	\
+				plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c
+endif
+
+ifneq (${RESET_TO_BL31},0)
+  $(error "Using BL31 as the reset vector is not supported on CSS platforms. \
+  Please set RESET_TO_BL31 to 0.")
+endif
+
+# Process CSS_LOAD_SCP_IMAGES flag
+$(eval $(call assert_boolean,CSS_LOAD_SCP_IMAGES))
+$(eval $(call add_define,CSS_LOAD_SCP_IMAGES))
+
+ifeq (${CSS_LOAD_SCP_IMAGES},1)
+  NEED_SCP_BL2 := yes
+  ifneq (${TRUSTED_BOARD_BOOT},0)
+    $(eval $(call FWU_FIP_ADD_IMG,SCP_BL2U,--scp-fwu-cfg))
+  endif
+
+  ifeq (${CSS_USE_SCMI_SDS_DRIVER},1)
+    BL2U_SOURCES	+=	plat/arm/css/drivers/scp/css_sds.c	\
+				plat/arm/css/drivers/sds/sds.c
+
+    BL2_SOURCES		+=	plat/arm/css/drivers/scp/css_sds.c	\
+				plat/arm/css/drivers/sds/sds.c
+  else
+    BL2U_SOURCES	+=	plat/arm/css/drivers/scp/css_bom_bootloader.c	\
+				plat/arm/css/drivers/scpi/css_mhu.c		\
+				plat/arm/css/drivers/scpi/css_scpi.c
+
+    BL2_SOURCES		+=	plat/arm/css/drivers/scp/css_bom_bootloader.c	\
+				plat/arm/css/drivers/scpi/css_mhu.c		\
+				plat/arm/css/drivers/scpi/css_scpi.c
+    # Enable option to detect whether the SCP ROM firmware in use predates version
+    # 1.7.0 and is therefore incompatible.
+    CSS_DETECT_PRE_1_7_0_SCP	:=	1
+
+    # Process CSS_DETECT_PRE_1_7_0_SCP flag
+    $(eval $(call assert_boolean,CSS_DETECT_PRE_1_7_0_SCP))
+    $(eval $(call add_define,CSS_DETECT_PRE_1_7_0_SCP))
+  endif
+endif
+
+ifeq (${CSS_USE_SCMI_SDS_DRIVER},1)
+  PLAT_BL_COMMON_SOURCES	+=	plat/arm/css/drivers/sds/${ARCH}/sds_helpers.S
+endif
+
+# Process CSS_USE_SCMI_SDS_DRIVER flag
+$(eval $(call assert_boolean,CSS_USE_SCMI_SDS_DRIVER))
+$(eval $(call add_define,CSS_USE_SCMI_SDS_DRIVER))
diff --git a/plat/arm/css/common/css_pm.c b/plat/arm/css/common/css_pm.c
new file mode 100644
index 0000000..4104dd7
--- /dev/null
+++ b/plat/arm/css/common/css_pm.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <css_pm.h>
+#include <debug.h>
+#include <errno.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include "../drivers/scp/css_scp.h"
+
+/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
+#pragma weak plat_arm_psci_pm_ops
+
+#if ARM_RECOM_STATE_ID_ENC
+/*
+ *  The table storing the valid idle power states. Ensure that the
+ *  array entries are populated in ascending order of state-id to
+ *  enable us to use binary search during power state validation.
+ *  The table must be terminated by a NULL entry.
+ */
+const unsigned int arm_pm_idle_states[] = {
+	/* State-id - 0x001 */
+	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
+		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
+	/* State-id - 0x002 */
+	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
+		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
+	/* State-id - 0x022 */
+	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
+		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
+#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
+	/* State-id - 0x222 */
+	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
+		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
+#endif
+	0,
+};
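+/*
+ * Illustrative reading of the state-ids above (assuming the ARM standard
+ * 4-bit local state fields with RUN = 0, RET = 1 and OFF = 2): state-id 0x022
+ * packs OFF (0x2) for the core in bits [3:0], OFF (0x2) for the cluster in
+ * bits [7:4] and RUN (0x0) for the system level in bits [11:8].
+ */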
+#endif /* ARM_RECOM_STATE_ID_ENC */
+
+/*
+ * All the power management helpers in this file assume at least cluster power
+ * level is supported.
+ */
+CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
+		assert_max_pwr_lvl_supported_mismatch);
+
+/*
+ * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
+ * assumed by the CSS layer.
+ */
+CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
+		assert_max_pwr_lvl_higher_than_css_sys_lvl);
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned on. The
+ * level and mpidr determine the affinity instance.
+ ******************************************************************************/
+int css_pwr_domain_on(u_register_t mpidr)
+{
+	css_scp_on(mpidr);
+
+	return PSCI_E_SUCCESS;
+}
+
+static void css_pwr_domain_on_finisher_common(
+		const psci_power_state_t *target_state)
+{
+	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
+
+	/* Enable the gic cpu interface */
+	plat_arm_gic_cpuif_enable();
+
+	/*
+	 * Perform the common cluster specific operations i.e enable coherency
+	 * if this cluster was off.
+	 */
+	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+		plat_arm_interconnect_enter_coherency();
+}
+
+/*******************************************************************************
+ * Handler called when a power level has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from. This handler would never be invoked with
+ * the system power domain uninitialized as either the primary would have taken
+ * care of it as part of cold boot or the first core awakened from system
+ * suspend would have already initialized it.
+ ******************************************************************************/
+void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	/* Assert that the system power domain need not be initialized */
+	assert(CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_RUN);
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_arm_gic_pcpu_init();
+
+	css_pwr_domain_on_finisher_common(target_state);
+}
+
+/*******************************************************************************
+ * Common function called while turning a cpu off or suspending it. It is called
+ * from css_off() or css_suspend() when these functions in turn are called for
+ * power domain at the highest power level which will be powered down. It
+ * performs the actions common to the OFF and SUSPEND calls.
+ ******************************************************************************/
+static void css_power_down_common(const psci_power_state_t *target_state)
+{
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_arm_gic_cpuif_disable();
+
+	/* Cluster is to be turned off, so disable coherency */
+	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+		plat_arm_interconnect_exit_coherency();
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void css_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
+	css_power_down_common(target_state);
+	css_scp_off(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void css_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	/*
+	 * CSS currently supports retention only at cpu level. Just return
+	 * as nothing is to be done for retention.
+	 */
+	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
+		return;
+
+	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
+	css_power_down_common(target_state);
+
+	/* Perform system domain state saving if issuing system suspend */
+	if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
+		arm_system_pwr_domain_save();
+
+		/* Power off the Redistributor after having saved its context */
+		plat_arm_gic_redistif_off();
+	}
+
+	css_scp_suspend(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+void css_pwr_domain_suspend_finish(
+				const psci_power_state_t *target_state)
+{
+	/* Return as nothing is to be done on waking up from retention. */
+	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
+		return;
+
+	/* Perform system domain restore if woken up from system suspend */
+	if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+		/*
+		 * At this point, the Distributor must be powered on to be ready
+		 * to have its state restored. The Redistributor will be powered
+		 * on as part of gicv3_rdistif_init_restore.
+		 */
+		arm_system_pwr_domain_resume();
+
+	css_pwr_domain_on_finisher_common(target_state);
+}
+
+/*******************************************************************************
+ * Handlers to shutdown/reboot the system
+ ******************************************************************************/
+void __dead2 css_system_off(void)
+{
+	css_scp_sys_shutdown();
+}
+
+void __dead2 css_system_reset(void)
+{
+	css_scp_sys_reboot();
+}
+
+/*******************************************************************************
+ * Handler called when the CPU power domain is about to enter standby.
+ ******************************************************************************/
+void css_cpu_standby(plat_local_state_t cpu_state)
+{
+	unsigned int scr;
+
+	assert(cpu_state == ARM_LOCAL_STATE_RET);
+
+	scr = read_scr_el3();
+	/*
+	 * Enable the Non secure interrupt to wake the CPU.
+	 * In GICv3 affinity routing mode, the non secure group1 interrupts use
+	 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
+	 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
+	 * routing mode.
+	 */
+	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+	isb();
+	dsb();
+	wfi();
+
+	/*
+	 * Restore SCR to the original value. The synchronisation of scr_el3 is
+	 * done by the eret in el3_exit, which saves some execution cycles.
+	 */
+	write_scr_el3(scr);
+}
+
+/*******************************************************************************
+ * Handler called to return the 'req_state' for system suspend.
+ ******************************************************************************/
+void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	unsigned int i;
+
+	/*
+	 * System Suspend is supported only if the system power domain node
+	 * is implemented.
+	 */
+	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);
+
+	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
+}
+
+/*******************************************************************************
+ * Handler to query CPU/cluster power states from SCP
+ ******************************************************************************/
+int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
+{
+	return css_scp_get_power_state(mpidr, power_level);
+}
+
+/*
+ * System power domain suspend is only supported via the PSCI SYSTEM_SUSPEND
+ * API. A PSCI CPU_SUSPEND request targeting the system power domain will be
+ * downgraded to the next lower level.
+ */
+static int css_validate_power_state(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int rc;
+	rc = arm_validate_power_state(power_state, req_state);
+
+	/*
+	 * Ensure that the system power domain level is never suspended
+	 * via PSCI CPU SUSPEND API. Currently system suspend is only
+	 * supported via PSCI SYSTEM SUSPEND API.
+	 */
+	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] = ARM_LOCAL_STATE_RUN;
+	return rc;
+}
+
+/*
+ * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike in the
+ * `css_validate_power_state`, we do not downgrade the system power
+ * domain level request in `power_state` as it will be used to query the
+ * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
+ */
+static int css_translate_power_state_by_mpidr(u_register_t mpidr,
+		unsigned int power_state,
+		psci_power_state_t *output_state)
+{
+	return arm_validate_power_state(power_state, output_state);
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+plat_psci_ops_t plat_arm_psci_pm_ops = {
+	.pwr_domain_on		= css_pwr_domain_on,
+	.pwr_domain_on_finish	= css_pwr_domain_on_finish,
+	.pwr_domain_off		= css_pwr_domain_off,
+	.cpu_standby		= css_cpu_standby,
+	.pwr_domain_suspend	= css_pwr_domain_suspend,
+	.pwr_domain_suspend_finish	= css_pwr_domain_suspend_finish,
+	.system_off		= css_system_off,
+	.system_reset		= css_system_reset,
+	.validate_power_state	= css_validate_power_state,
+	.validate_ns_entrypoint = arm_validate_ns_entrypoint,
+	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
+	.get_node_hw_state	= css_node_hw_state,
+	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,
+/*
+ * mem_protect is not supported in RESET_TO_BL31 and RESET_TO_SP_MIN,
+ * as that would require mapping in all of NS DRAM into BL31 or BL32.
+ */
+#if defined(PLAT_ARM_MEM_PROT_ADDR) && !RESET_TO_BL31 && !RESET_TO_SP_MIN
+	.mem_protect_chk	= arm_psci_mem_protect_chk,
+	.read_mem_protect	= arm_psci_read_mem_protect,
+	.write_mem_protect	= arm_nor_psci_write_mem_protect,
+#endif
+#if CSS_USE_SCMI_SDS_DRIVER
+	.system_reset2		= css_system_reset2,
+#endif
+};
diff --git a/plat/arm/css/common/css_topology.c b/plat/arm/css/common/css_topology.c
new file mode 100644
index 0000000..9b114e2
--- /dev/null
+++ b/plat/arm/css/common/css_topology.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+
+#if ARM_PLAT_MT
+#pragma weak plat_arm_get_cpu_pe_count
+#endif
+
+/******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is
+ * returned in case the MPIDR is invalid.
+ *****************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	if (arm_check_mpidr(mpidr) == 0)
+		return plat_arm_calc_core_pos(mpidr);
+
+	return -1;
+}
+
+#if ARM_PLAT_MT
+/******************************************************************************
+ * This function returns the PE count within the physical cpu corresponding to
+ * `mpidr`. Currently each CPU has only one thread, so just return 1.
+ *****************************************************************************/
+unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr)
+{
+	return 1;
+}
+#endif /* ARM_PLAT_MT */
diff --git a/plat/arm/css/common/sp_min/css_sp_min.mk b/plat/arm/css/common/sp_min/css_sp_min.mk
new file mode 100644
index 0000000..28eb2db
--- /dev/null
+++ b/plat/arm/css/common/sp_min/css_sp_min.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# SP MIN source files common to CSS platforms
+BL32_SOURCES		+=	plat/arm/css/common/css_pm.c			\
+				plat/arm/css/common/css_topology.c
+
+ifeq (${CSS_USE_SCMI_SDS_DRIVER},0)
+BL32_SOURCES		+=	plat/arm/css/drivers/scp/css_pm_scpi.c		\
+				plat/arm/css/drivers/scpi/css_mhu.c		\
+				plat/arm/css/drivers/scpi/css_scpi.c
+else
+BL32_SOURCES		+=	plat/arm/css/drivers/scp/css_pm_scmi.c		\
+				plat/arm/css/drivers/scmi/scmi_common.c		\
+				plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c	\
+				plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c
+endif
diff --git a/plat/arm/css/drivers/scmi/scmi.h b/plat/arm/css/drivers/scmi/scmi.h
new file mode 100644
index 0000000..850402a
--- /dev/null
+++ b/plat/arm/css/drivers/scmi/scmi.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_SCMI_H__
+#define __CSS_SCMI_H__
+
+#include <bakery_lock.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Supported SCMI Protocol Versions */
+#define SCMI_PWR_DMN_PROTO_VER			MAKE_SCMI_VERSION(1, 0)
+#define SCMI_SYS_PWR_PROTO_VER			MAKE_SCMI_VERSION(1, 0)
+
+#define GET_SCMI_MAJOR_VER(ver)			(((ver) >> 16) & 0xffff)
+#define GET_SCMI_MINOR_VER(ver)			((ver) & 0xffff)
+
+#define MAKE_SCMI_VERSION(maj, min)	\
+			((((maj) & 0xffff) << 16) | ((min) & 0xffff))
+
+/* Macro to check if the driver is compatible with the SCMI version reported */
+#define is_scmi_version_compatible(drv, scmi)				\
+	((GET_SCMI_MAJOR_VER(drv) == GET_SCMI_MAJOR_VER(scmi)) &&	\
+	(GET_SCMI_MINOR_VER(drv) <= GET_SCMI_MINOR_VER(scmi)))
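+/*
+ * Illustrative example (not used by the driver): with a driver version of
+ * MAKE_SCMI_VERSION(1, 0), a platform reporting version 1.1 is considered
+ * compatible (same major version, equal or higher minor version), whereas a
+ * platform reporting version 2.0 is not.
+ */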
+
+/* SCMI Protocol identifiers */
+#define SCMI_PWR_DMN_PROTO_ID			0x11
+#define SCMI_SYS_PWR_PROTO_ID			0x12
+
+/* Mandatory messages IDs for all SCMI protocols */
+#define SCMI_PROTO_VERSION_MSG			0x0
+#define SCMI_PROTO_ATTR_MSG			0x1
+#define SCMI_PROTO_MSG_ATTR_MSG			0x2
+
+/* SCMI power domain management protocol message IDs */
+#define SCMI_PWR_STATE_SET_MSG			0x4
+#define SCMI_PWR_STATE_GET_MSG			0x5
+
+/* SCMI system power management protocol message IDs */
+#define SCMI_SYS_PWR_STATE_SET_MSG		0x3
+#define SCMI_SYS_PWR_STATE_GET_MSG		0x4
+
+/* Helper macros for system power management protocol commands */
+
+/*
+ * Macros to describe the bit-fields of the `attribute` of system power domain
+ * protocol PROTOCOL_MSG_ATTRIBUTE message.
+ */
+#define SYS_PWR_ATTR_WARM_RESET_SHIFT		31
+#define SCMI_SYS_PWR_WARM_RESET_SUPPORTED	(1U << SYS_PWR_ATTR_WARM_RESET_SHIFT)
+
+#define SYS_PWR_ATTR_SUSPEND_SHIFT		30
+#define SCMI_SYS_PWR_SUSPEND_SUPPORTED		(1 << SYS_PWR_ATTR_SUSPEND_SHIFT)
+
+/*
+ * Macros to describe the bit-fields of the `flags` parameter of system power
+ * domain protocol SYSTEM_POWER_STATE_SET message.
+ */
+#define SYS_PWR_SET_GRACEFUL_REQ_SHIFT		0
+#define SCMI_SYS_PWR_GRACEFUL_REQ		(1 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT)
+#define SCMI_SYS_PWR_FORCEFUL_REQ		(0 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT)
+
+/*
+ * Macros to describe the `system_state` parameter of system power
+ * domain protocol SYSTEM_POWER_STATE_SET message.
+ */
+#define SCMI_SYS_PWR_SHUTDOWN			0x0
+#define SCMI_SYS_PWR_COLD_RESET			0x1
+#define SCMI_SYS_PWR_WARM_RESET			0x2
+#define SCMI_SYS_PWR_POWER_UP			0x3
+#define SCMI_SYS_PWR_SUSPEND			0x4
+
+/* SCMI Error code definitions */
+#define SCMI_E_QUEUED			1
+#define SCMI_E_SUCCESS			0
+#define SCMI_E_NOT_SUPPORTED		-1
+#define SCMI_E_INVALID_PARAM		-2
+#define SCMI_E_DENIED			-3
+#define SCMI_E_NOT_FOUND		-4
+#define SCMI_E_OUT_OF_RANGE		-5
+#define SCMI_E_BUSY			-6
+
+/*
+ * SCMI driver platform information. The details of the doorbell mechanism
+ * can be found in the SCMI specification.
+ */
+typedef struct scmi_channel_plat_info {
+	/* SCMI mailbox memory */
+	uintptr_t scmi_mbx_mem;
+	/* The doorbell register address */
+	uintptr_t db_reg_addr;
+	/* The bit mask that needs to be preserved when ringing the doorbell */
+	uint32_t db_preserve_mask;
+	/* The bit mask that needs to be set to ring the doorbell */
+	uint32_t db_modify_mask;
+} scmi_channel_plat_info_t;
+
+/*
+ * Structure to represent an SCMI channel.
+ */
+typedef struct scmi_channel {
+	scmi_channel_plat_info_t *info;
+	/* The lock for channel access */
+	bakery_lock_t *lock;
+	/* Indicate whether the channel is initialized */
+	int is_initialized;
+} scmi_channel_t;
+
+/* External Common API */
+void *scmi_init(scmi_channel_t *ch);
+int scmi_proto_msg_attr(void *p, uint32_t proto_id, uint32_t command_id,
+						uint32_t *attr);
+int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version);
+
+/*
+ * Power domain protocol commands. Refer to the SCMI specification for more
+ * details on these commands.
+ */
+int scmi_pwr_state_set(void *p, uint32_t domain_id, uint32_t scmi_pwr_state);
+int scmi_pwr_state_get(void *p, uint32_t domain_id, uint32_t *scmi_pwr_state);
+
+/*
+ * System power management protocol commands. Refer to the SCMI specification
+ * for more details on these commands.
+ */
+int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state);
+int scmi_sys_pwr_state_get(void *p, uint32_t *system_state);
+
+#endif	/* __CSS_SCMI_H__ */
diff --git a/plat/arm/css/drivers/scmi/scmi_common.c b/plat/arm/css/drivers/scmi/scmi_common.c
new file mode 100644
index 0000000..d0051c7
--- /dev/null
+++ b/plat/arm/css/drivers/scmi/scmi_common.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include "scmi.h"
+#include "scmi_private.h"
+
+/*
+ * Private helper function to get exclusive access to SCMI channel.
+ */
+void scmi_get_channel(scmi_channel_t *ch)
+{
+	assert(ch->lock);
+	bakery_lock_get(ch->lock);
+
+	/* Make sure any previous command has finished */
+	assert(SCMI_IS_CHANNEL_FREE(
+			((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status));
+}
+
+/*
+ * Private helper function to transfer ownership of channel from AP to SCP.
+ */
+void scmi_send_sync_command(scmi_channel_t *ch)
+{
+	mailbox_mem_t *mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+
+	SCMI_MARK_CHANNEL_BUSY(mbx_mem->status);
+
+	/*
+	 * Ensure that any write to the SCMI payload area is seen by SCP before
+	 * we write to the doorbell register. If these 2 writes were reordered
+	 * by the CPU then SCP would read stale payload data
+	 */
+	dmbst();
+
+	SCMI_RING_DOORBELL(ch->info->db_reg_addr, ch->info->db_modify_mask,
+					ch->info->db_preserve_mask);
+
+	/*
+	 * Ensure that the write to the doorbell register is ordered prior to
+	 * checking whether the channel is free.
+	 */
+	dmbsy();
+
+	/* Wait for channel to be free */
+	while (!SCMI_IS_CHANNEL_FREE(mbx_mem->status))
+		;
+
+	/*
+	 * Ensure that any read to the SCMI payload area is done after reading
+	 * mailbox status. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+}
+
+/*
+ * Private helper function to release exclusive access to SCMI channel.
+ */
+void scmi_put_channel(scmi_channel_t *ch)
+{
+	/* Make sure any previous command has finished */
+	assert(SCMI_IS_CHANNEL_FREE(
+			((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status));
+
+	assert(ch->lock);
+	bakery_lock_release(ch->lock);
+}
+
+/*
+ * API to query the SCMI protocol version.
+ */
+int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id, SCMI_PROTO_VERSION_MSG,
+							token);
+	mbx_mem->len = SCMI_PROTO_VERSION_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *version);
+	assert(mbx_mem->len == SCMI_PROTO_VERSION_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
+
+/*
+ * API to query the protocol message attributes for a SCMI protocol.
+ */
+int scmi_proto_msg_attr(void *p, uint32_t proto_id,
+		uint32_t command_id, uint32_t *attr)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id,
+				SCMI_PROTO_MSG_ATTR_MSG, token);
+	mbx_mem->len = SCMI_PROTO_MSG_ATTR_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+	SCMI_PAYLOAD_ARG1(mbx_mem->payload, command_id);
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *attr);
+	assert(mbx_mem->len == SCMI_PROTO_MSG_ATTR_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
+
+/*
+ * SCMI Driver initialization API. Returns initialized channel on success
+ * or NULL on error. The return type is an opaque void pointer.
+ */
+void *scmi_init(scmi_channel_t *ch)
+{
+	uint32_t version;
+	int ret;
+
+	assert(ch && ch->info);
+	assert(ch->info->db_reg_addr);
+	assert(ch->info->db_modify_mask);
+	assert(ch->info->db_preserve_mask);
+
+	assert(ch->lock);
+
+	bakery_lock_init(ch->lock);
+
+	ch->is_initialized = 1;
+
+	ret = scmi_proto_version(ch, SCMI_PWR_DMN_PROTO_ID, &version);
+	if (ret != SCMI_E_SUCCESS) {
+		WARN("SCMI power domain protocol version message failed\n");
+		goto error;
+	}
+
+	if (!is_scmi_version_compatible(SCMI_PWR_DMN_PROTO_VER, version)) {
+		WARN("SCMI power domain protocol version 0x%x incompatible with driver version 0x%x\n",
+			version, SCMI_PWR_DMN_PROTO_VER);
+		goto error;
+	}
+
+	VERBOSE("SCMI power domain protocol version 0x%x detected\n", version);
+
+	ret = scmi_proto_version(ch, SCMI_SYS_PWR_PROTO_ID, &version);
+	if (ret != SCMI_E_SUCCESS) {
+		WARN("SCMI system power protocol version message failed\n");
+		goto error;
+	}
+
+	if (!is_scmi_version_compatible(SCMI_SYS_PWR_PROTO_VER, version)) {
+		WARN("SCMI system power management protocol version 0x%x incompatible with driver version 0x%x\n",
+			version, SCMI_SYS_PWR_PROTO_VER);
+		goto error;
+	}
+
+	VERBOSE("SCMI system power management protocol version 0x%x detected\n",
+						version);
+
+	INFO("SCMI driver initialized\n");
+
+	return (void *)ch;
+
+error:
+	ch->is_initialized = 0;
+	return NULL;
+}
diff --git a/plat/arm/css/drivers/scmi/scmi_private.h b/plat/arm/css/drivers/scmi/scmi_private.h
new file mode 100644
index 0000000..20e1e9b
--- /dev/null
+++ b/plat/arm/css/drivers/scmi/scmi_private.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_SCMI_PRIVATE_H__
+#define __CSS_SCMI_PRIVATE_H__
+
+/*
+ * SCMI power domain management protocol message and response lengths. It is
+ * calculated as sum of length in bytes of the message header (4) and payload
+ * area (the number of bytes of parameters or return values in the payload).
+ */
+#define SCMI_PROTO_VERSION_MSG_LEN		4
+#define SCMI_PROTO_VERSION_RESP_LEN		12
+
+#define SCMI_PROTO_MSG_ATTR_MSG_LEN		8
+#define SCMI_PROTO_MSG_ATTR_RESP_LEN		12
+
+#define SCMI_PWR_STATE_SET_MSG_LEN		16
+#define SCMI_PWR_STATE_SET_RESP_LEN		8
+
+#define SCMI_PWR_STATE_GET_MSG_LEN		8
+#define SCMI_PWR_STATE_GET_RESP_LEN		12
+
+#define SCMI_SYS_PWR_STATE_SET_MSG_LEN		12
+#define SCMI_SYS_PWR_STATE_SET_RESP_LEN		8
+
+#define SCMI_SYS_PWR_STATE_GET_MSG_LEN		4
+#define SCMI_SYS_PWR_STATE_GET_RESP_LEN		12
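+
+/*
+ * Worked example of the length encoding above (illustrative only): the
+ * POWER_STATE_SET message carries three 32-bit parameters (flags, domain_id,
+ * power_state), so SCMI_PWR_STATE_SET_MSG_LEN = 4 (header) + 3 * 4 = 16,
+ * while its response carries a single 32-bit status value, so
+ * SCMI_PWR_STATE_SET_RESP_LEN = 4 + 4 = 8.
+ */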
+
+/* SCMI message header format bit field */
+#define SCMI_MSG_ID_SHIFT		0
+#define SCMI_MSG_ID_WIDTH		8
+#define SCMI_MSG_ID_MASK		((1 << SCMI_MSG_ID_WIDTH) - 1)
+
+#define SCMI_MSG_TYPE_SHIFT		8
+#define SCMI_MSG_TYPE_WIDTH		2
+#define SCMI_MSG_TYPE_MASK		((1 << SCMI_MSG_TYPE_WIDTH) - 1)
+
+#define SCMI_MSG_PROTO_ID_SHIFT		10
+#define SCMI_MSG_PROTO_ID_WIDTH		8
+#define SCMI_MSG_PROTO_ID_MASK		((1 << SCMI_MSG_PROTO_ID_WIDTH) - 1)
+
+#define SCMI_MSG_TOKEN_SHIFT		18
+#define SCMI_MSG_TOKEN_WIDTH		10
+#define SCMI_MSG_TOKEN_MASK		((1 << SCMI_MSG_TOKEN_WIDTH) - 1)
+
+
+/* SCMI mailbox flags */
+#define SCMI_FLAG_RESP_POLL	0
+#define SCMI_FLAG_RESP_INT	1
+
+/* SCMI power domain protocol `POWER_STATE_SET` message flags */
+#define SCMI_PWR_STATE_SET_FLAG_SYNC	0
+#define SCMI_PWR_STATE_SET_FLAG_ASYNC	1
+
+/*
+ * Helper macro to create an SCMI message header given protocol, message id
+ * and token.
+ */
+#define SCMI_MSG_CREATE(protocol, msg_id, token)				\
+	((((protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) |	\
+	(((msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) |			\
+	(((token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))
+
+/* Helper macro to get the token from a SCMI message header */
+#define SCMI_MSG_GET_TOKEN(msg)				\
+	(((msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)
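+
+/*
+ * Illustrative example (not used by the driver): SCMI_MSG_CREATE() with the
+ * power domain protocol (0x11), the POWER_STATE_SET message (0x4) and a zero
+ * token yields (0x11 << 10) | (0x4 << 0) | (0 << 18) = 0x4404, and
+ * SCMI_MSG_GET_TOKEN(0x4404) recovers the zero token.
+ */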
+
+/* SCMI Channel Status bit fields */
+#define SCMI_CH_STATUS_RES0_MASK	0xFFFFFFFE
+#define SCMI_CH_STATUS_FREE_SHIFT	0
+#define SCMI_CH_STATUS_FREE_WIDTH	1
+#define SCMI_CH_STATUS_FREE_MASK	((1 << SCMI_CH_STATUS_FREE_WIDTH) - 1)
+
+/* Helper macros to check and write the channel status */
+#define SCMI_IS_CHANNEL_FREE(status)					\
+	(!!(((status) >> SCMI_CH_STATUS_FREE_SHIFT) & SCMI_CH_STATUS_FREE_MASK))
+
+#define SCMI_MARK_CHANNEL_BUSY(status)	do {				\
+		assert(SCMI_IS_CHANNEL_FREE(status));			\
+		(status) &= ~(SCMI_CH_STATUS_FREE_MASK <<		\
+				SCMI_CH_STATUS_FREE_SHIFT);		\
+	} while (0)
+
+/* Helper macros to copy arguments to the mailbox payload */
+#define SCMI_PAYLOAD_ARG1(payld_arr, arg1)				\
+		mmio_write_32((uintptr_t)&payld_arr[0], arg1)
+
+#define SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2)	do {		\
+		SCMI_PAYLOAD_ARG1(payld_arr, arg1);			\
+		mmio_write_32((uintptr_t)&payld_arr[1], arg2);		\
+	} while (0)
+
+#define SCMI_PAYLOAD_ARG3(payld_arr, arg1, arg2, arg3)	do {		\
+		SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2);		\
+		mmio_write_32((uintptr_t)&payld_arr[2], arg3);		\
+	} while (0)
+
+/* Helper macros to read return values from the mailbox payload */
+#define SCMI_PAYLOAD_RET_VAL1(payld_arr, val1)				\
+		(val1) = mmio_read_32((uintptr_t)&payld_arr[0])
+
+#define SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2)	do {		\
+		SCMI_PAYLOAD_RET_VAL1(payld_arr, val1);			\
+		(val2) = mmio_read_32((uintptr_t)&payld_arr[1]);	\
+	} while (0)
+
+#define SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3)	do {	\
+		SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2);		\
+		(val3) = mmio_read_32((uintptr_t)&payld_arr[2]);	\
+	} while (0)
+
+/* Helper macro to ring doorbell */
+#define SCMI_RING_DOORBELL(addr, modify_mask, preserve_mask)	do {	\
+		uint32_t db = mmio_read_32(addr) & (preserve_mask);	\
+		mmio_write_32(addr, db | (modify_mask));		\
+	} while (0)
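+
+/*
+ * Illustrative example (mask values taken from a typical CSS configuration):
+ * with a preserve mask of 0xfffffffe and a modify mask of 0x1, the macro
+ * reads the doorbell register, keeps all bits except bit 0 and writes the
+ * value back with bit 0 set, ringing the doorbell without disturbing the
+ * remaining bits.
+ */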
+
+/*
+ * Private data structure representing the mailbox memory layout. Refer to
+ * the SCMI specification for more details.
+ */
+typedef struct mailbox_mem {
+	uint32_t res_a; /* Reserved */
+	volatile uint32_t status;
+	uint64_t res_b; /* Reserved */
+	uint32_t flags;
+	volatile uint32_t len;
+	uint32_t msg_header;
+	uint32_t payload[];
+} mailbox_mem_t;
+
+
+/* Private APIs for use within SCMI driver */
+void scmi_get_channel(scmi_channel_t *ch);
+void scmi_send_sync_command(scmi_channel_t *ch);
+void scmi_put_channel(scmi_channel_t *ch);
+
+static inline void validate_scmi_channel(scmi_channel_t *ch)
+{
+	assert(ch && ch->is_initialized);
+	assert(ch->info && ch->info->scmi_mbx_mem);
+}
+
+#endif	/* __CSS_SCMI_PRIVATE_H__ */
diff --git a/plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c b/plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c
new file mode 100644
index 0000000..90c5d6b
--- /dev/null
+++ b/plat/arm/css/drivers/scmi/scmi_pwr_dmn_proto.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include "scmi.h"
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI power domain power state.
+ */
+int scmi_pwr_state_set(void *p, uint32_t domain_id,
+					uint32_t scmi_pwr_state)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+
+	/*
+	 * Only asynchronous mode of `set power state` command is allowed on
+	 * application processors.
+	 */
+	uint32_t pwr_state_set_msg_flag = SCMI_PWR_STATE_SET_FLAG_ASYNC;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
+			SCMI_PWR_STATE_SET_MSG, token);
+	mbx_mem->len = SCMI_PWR_STATE_SET_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+	SCMI_PAYLOAD_ARG3(mbx_mem->payload, pwr_state_set_msg_flag,
+						domain_id, scmi_pwr_state);
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+	assert(mbx_mem->len == SCMI_PWR_STATE_SET_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
+
+/*
+ * API to get the SCMI power domain power state.
+ */
+int scmi_pwr_state_get(void *p, uint32_t domain_id,
+					uint32_t *scmi_pwr_state)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
+			SCMI_PWR_STATE_GET_MSG, token);
+	mbx_mem->len = SCMI_PWR_STATE_GET_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+	SCMI_PAYLOAD_ARG1(mbx_mem->payload, domain_id);
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *scmi_pwr_state);
+	assert(mbx_mem->len == SCMI_PWR_STATE_GET_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
diff --git a/plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c b/plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c
new file mode 100644
index 0000000..f6da394
--- /dev/null
+++ b/plat/arm/css/drivers/scmi/scmi_sys_pwr_proto.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include "scmi.h"
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI system power state
+ */
+int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
+			SCMI_SYS_PWR_STATE_SET_MSG, token);
+	mbx_mem->len = SCMI_SYS_PWR_STATE_SET_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+	SCMI_PAYLOAD_ARG2(mbx_mem->payload, flags, system_state);
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+	assert(mbx_mem->len == SCMI_SYS_PWR_STATE_SET_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
+
+/*
+ * API to get the SCMI system power state
+ */
+int scmi_sys_pwr_state_get(void *p, uint32_t *system_state)
+{
+	mailbox_mem_t *mbx_mem;
+	int token = 0, ret;
+	scmi_channel_t *ch = (scmi_channel_t *)p;
+
+	validate_scmi_channel(ch);
+
+	scmi_get_channel(ch);
+
+	mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+	mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
+			SCMI_SYS_PWR_STATE_GET_MSG, token);
+	mbx_mem->len = SCMI_SYS_PWR_STATE_GET_MSG_LEN;
+	mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+	scmi_send_sync_command(ch);
+
+	/* Get the return values */
+	SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *system_state);
+	assert(mbx_mem->len == SCMI_SYS_PWR_STATE_GET_RESP_LEN);
+	assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+	scmi_put_channel(ch);
+
+	return ret;
+}
diff --git a/plat/arm/css/drivers/scp/css_bom_bootloader.c b/plat/arm/css/drivers/scp/css_bom_bootloader.c
new file mode 100644
index 0000000..a92ce6b
--- /dev/null
+++ b/plat/arm/css/drivers/scp/css_bom_bootloader.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <css_def.h>
+#include <debug.h>
+#include <platform.h>
+#include <stdint.h>
+#include "../scpi/css_mhu.h"
+#include "../scpi/css_scpi.h"
+
+/* ID of the MHU slot used for the BOM protocol */
+#define BOM_MHU_SLOT_ID		0
+
+/* Boot commands sent from AP -> SCP */
+#define BOOT_CMD_INFO	0x00
+#define BOOT_CMD_DATA	0x01
+
+/* BOM command header */
+typedef struct {
+	uint32_t id : 8;
+	uint32_t reserved : 24;
+} bom_cmd_t;
+
+typedef struct {
+	uint32_t image_size;
+	uint32_t checksum;
+} cmd_info_payload_t;
+
+/*
+ * Unlike the SCPI protocol, the boot protocol uses the same memory region
+ * for both AP -> SCP and SCP -> AP transfers; define the address of this...
+ */
+#define BOM_SHARED_MEM		PLAT_CSS_SCP_COM_SHARED_MEM_BASE
+#define BOM_CMD_HEADER		((bom_cmd_t *) BOM_SHARED_MEM)
+#define BOM_CMD_PAYLOAD		((void *) (BOM_SHARED_MEM + sizeof(bom_cmd_t)))
+
+typedef struct {
+	/* Offset from the base address of the Trusted RAM */
+	uint32_t offset;
+	uint32_t block_size;
+} cmd_data_payload_t;
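+
+/*
+ * Summary of the transfer sequence implemented below: the AP sends a
+ * BOOT_CMD_INFO command describing the image size and checksum and waits for
+ * the SCP response, then sends a BOOT_CMD_DATA command giving the offset and
+ * size of the image in Trusted RAM and waits for the response. Finally,
+ * css_scp_boot_ready() waits for the SCP to signal, over SCPI, that it is
+ * ready.
+ */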
+
+/*
+ * All CSS platforms load SCP_BL2/SCP_BL2U just below BL1 rw-data and above
+ * BL2/BL2U (this is where BL31 usually resides, except when ARM_BL31_IN_DRAM
+ * is set). Ensure that SCP_BL2/SCP_BL2U does not overflow into BL1 rw-data or
+ * BL2/BL2U.
+ */
+CASSERT(SCP_BL2_LIMIT <= BL1_RW_BASE, assert_scp_bl2_overwrite_bl1);
+CASSERT(SCP_BL2U_LIMIT <= BL1_RW_BASE, assert_scp_bl2u_overwrite_bl1);
+
+CASSERT(SCP_BL2_BASE >= BL2_LIMIT, assert_scp_bl2_overwrite_bl2);
+CASSERT(SCP_BL2U_BASE >= BL2U_LIMIT, assert_scp_bl2u_overwrite_bl2u);
+
+static void scp_boot_message_start(void)
+{
+	mhu_secure_message_start(BOM_MHU_SLOT_ID);
+}
+
+static void scp_boot_message_send(size_t payload_size)
+{
+	/* Ensure that any write to the BOM payload area is seen by SCP before
+	 * we write to the MHU register. If these 2 writes were reordered by
+	 * the CPU then SCP would read stale payload data */
+	dmbst();
+
+	/* Send command to SCP */
+	mhu_secure_message_send(BOM_MHU_SLOT_ID);
+}
+
+static uint32_t scp_boot_message_wait(size_t size)
+{
+	uint32_t mhu_status;
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCP Boot Protocol message, reject any other protocol */
+	if (mhu_status != (1 << BOM_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+			mhu_status);
+		panic();
+	}
+
+	/* Ensure that any read to the BOM payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data */
+	dmbld();
+
+	return *(uint32_t *) BOM_SHARED_MEM;
+}
+
+static void scp_boot_message_end(void)
+{
+	mhu_secure_message_end(BOM_MHU_SLOT_ID);
+}
+
+int css_scp_boot_image_xfer(void *image, unsigned int image_size)
+{
+	uint32_t response;
+	uint32_t checksum;
+	cmd_info_payload_t *cmd_info_payload;
+	cmd_data_payload_t *cmd_data_payload;
+
+	assert((uintptr_t) image == SCP_BL2_BASE);
+
+	if ((image_size == 0) || (image_size % 4 != 0)) {
+		ERROR("Invalid size for the SCP_BL2 image. Must be a multiple of "
+			"4 bytes and not zero (current size = 0x%x)\n",
+			image_size);
+		return -1;
+	}
+
+	/* Extract the checksum from the image */
+	checksum = *(uint32_t *) image;
+	image = (char *) image + sizeof(checksum);
+	image_size -= sizeof(checksum);
+
+	mhu_secure_init();
+
+	VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");
+
+	/*
+	 * Send information about the SCP firmware image about to be transferred
+	 * to SCP
+	 */
+	scp_boot_message_start();
+
+	BOM_CMD_HEADER->id = BOOT_CMD_INFO;
+	cmd_info_payload = BOM_CMD_PAYLOAD;
+	cmd_info_payload->image_size = image_size;
+	cmd_info_payload->checksum = checksum;
+
+	scp_boot_message_send(sizeof(*cmd_info_payload));
+#if CSS_DETECT_PRE_1_7_0_SCP
+	{
+		const uint32_t deprecated_scp_nack_cmd = 0x404;
+		uint32_t mhu_status;
+
+		VERBOSE("Detecting SCP version incompatibility\n");
+
+		mhu_status = mhu_secure_message_wait();
+		if (mhu_status == deprecated_scp_nack_cmd) {
+			ERROR("Detected an incompatible version of the SCP firmware.\n");
+			ERROR("Only versions from v1.7.0 onwards are supported.\n");
+			ERROR("Please update the SCP firmware.\n");
+			return -1;
+		}
+
+		VERBOSE("SCP version looks OK\n");
+	}
+#endif /* CSS_DETECT_PRE_1_7_0_SCP */
+	response = scp_boot_message_wait(sizeof(response));
+	scp_boot_message_end();
+
+	if (response != 0) {
+		ERROR("SCP BOOT_CMD_INFO returned error %u\n", response);
+		return -1;
+	}
+
+	VERBOSE("Transferring SCP_BL2 image to SCP\n");
+
+	/* Transfer SCP_BL2 image to SCP */
+	scp_boot_message_start();
+
+	BOM_CMD_HEADER->id = BOOT_CMD_DATA;
+	cmd_data_payload = BOM_CMD_PAYLOAD;
+	cmd_data_payload->offset = (uintptr_t) image - ARM_TRUSTED_SRAM_BASE;
+	cmd_data_payload->block_size = image_size;
+
+	scp_boot_message_send(sizeof(*cmd_data_payload));
+	response = scp_boot_message_wait(sizeof(response));
+	scp_boot_message_end();
+
+	if (response != 0) {
+		ERROR("SCP BOOT_CMD_DATA returned error %u\n", response);
+		return -1;
+	}
+
+	return 0;
+}
+
+int css_scp_boot_ready(void)
+{
+	VERBOSE("Waiting for SCP to signal it is ready to go on\n");
+
+	/* Wait for SCP to signal it's ready */
+	return scpi_wait_ready();
+}
diff --git a/plat/arm/css/drivers/scp/css_pm_scmi.c b/plat/arm/css/drivers/scp/css_pm_scmi.c
new file mode 100644
index 0000000..e29cd86
--- /dev/null
+++ b/plat/arm/css/drivers/scp/css_pm_scmi.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <css_def.h>
+#include <css_pm.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <string.h>
+#include "../scmi/scmi.h"
+#include "css_scp.h"
+
+/*
+ * This file implements the SCP helper functions using SCMI protocol.
+ */
+
+/*
+ * SCMI power state parameter bit field encoding for ARM CSS platforms.
+ *
+ * 31  20 19       16 15      12 11       8 7        4 3         0
+ * +-------------------------------------------------------------+
+ * | SBZ | Max level |  Level 3 |  Level 2 |  Level 1 |  Level 0 |
+ * |     |           |   state  |   state  |   state  |   state  |
+ * +-------------------------------------------------------------+
+ *
+ * `Max level` encodes the highest level that has a valid power state
+ * encoded in the power state.
+ */
+#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT	16
+#define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH	4
+#define SCMI_PWR_STATE_MAX_PWR_LVL_MASK		\
+				((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(pwr_state, max_lvl)		\
+		(pwr_state) |= ((max_lvl) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)	\
+				<< SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT
+#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(pwr_state)		\
+		(((pwr_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT)	\
+				& SCMI_PWR_STATE_MAX_PWR_LVL_MASK)
+
+#define SCMI_PWR_STATE_LVL_WIDTH		4
+#define SCMI_PWR_STATE_LVL_MASK			\
+				((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_LVL(pwr_state, lvl, lvl_state)		\
+		(pwr_state) |= ((lvl_state) & SCMI_PWR_STATE_LVL_MASK)	\
+				<< (SCMI_PWR_STATE_LVL_WIDTH * (lvl))
+#define SCMI_GET_PWR_STATE_LVL(pwr_state, lvl)		\
+		(((pwr_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (lvl))) &	\
+				SCMI_PWR_STATE_LVL_MASK)
+
+/*
+ * The SCMI power state enumeration for a power domain level
+ */
+typedef enum {
+	scmi_power_state_off = 0,
+	scmi_power_state_on = 1,
+	scmi_power_state_sleep = 2,
+} scmi_power_state_t;
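+
+/*
+ * Illustrative example of the encoding above (a sketch, not used directly by
+ * the code): for a request that turns off both the core (level 0) and its
+ * cluster (level 1), SCMI_SET_PWR_STATE_LVL() places scmi_power_state_off (0)
+ * in the level 0 and level 1 fields and SCMI_SET_PWR_STATE_MAX_PWR_LVL() sets
+ * the `Max level` field to 1, giving a power state value of 0x00010000.
+ */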
+
+/*
+ * This mapping array has to be exported by the platform. Each element at
+ * a given index maps that core to an SCMI power domain.
+ */
+extern uint32_t plat_css_core_pos_to_scmi_dmn_id_map[];
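+
+/*
+ * A hypothetical platform definition of the mapping array (illustrative only;
+ * the array contents are platform specific), for a platform with two clusters
+ * of four cores whose SCMI power domain IDs match the linear core positions:
+ *
+ *	uint32_t plat_css_core_pos_to_scmi_dmn_id_map[] = {
+ *		0, 1, 2, 3, 4, 5, 6, 7
+ *	};
+ */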
+
+/*
+ * The global handle for invoking the SCMI driver APIs after the driver
+ * has been initialized.
+ */
+void *scmi_handle;
+
+/* The SCMI channel global object */
+static scmi_channel_t scmi_channel;
+
+ARM_INSTANTIATE_LOCK;
+
+/*
+ * Helper function to suspend a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_suspend(const psci_power_state_t *target_state)
+{
+	int lvl, ret;
+	uint32_t scmi_pwr_state = 0;
+
+	/* At least power domain level 0 should be specified to be suspended */
+	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+						ARM_LOCAL_STATE_OFF);
+
+	/* Check if power down at system power domain level is requested */
+	if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
+		/* Issue SCMI command for SYSTEM_SUSPEND */
+		ret = scmi_sys_pwr_state_set(scmi_handle,
+				SCMI_SYS_PWR_FORCEFUL_REQ,
+				SCMI_SYS_PWR_SUSPEND);
+		if (ret != SCMI_E_SUCCESS) {
+			ERROR("SCMI system power domain suspend returned unexpected value 0x%x\n",
+					ret);
+			panic();
+		}
+		return;
+	}
+
+	/*
+	 * If we reach here, power down at the system power domain level has
+	 * not been requested; assert that the system power domain is in the
+	 * RUN state.
+	 */
+	assert(target_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] ==
+							ARM_LOCAL_STATE_RUN);
+
+	/* For level 0, specify `scmi_power_state_sleep` as the power state */
+	SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0,
+						scmi_power_state_sleep);
+
+	for (lvl = ARM_PWR_LVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
+			break;
+
+		assert(target_state->pwr_domain_state[lvl] ==
+							ARM_LOCAL_STATE_OFF);
+		/*
+		 * Specify `scmi_power_state_off` as power state for higher
+		 * levels.
+		 */
+		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+						scmi_power_state_off);
+	}
+
+	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+	ret = scmi_pwr_state_set(scmi_handle,
+		plat_css_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()],
+		scmi_pwr_state);
+
+	if (ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI set power state command returned unexpected value 0x%x\n",
+				ret);
+		panic();
+	}
+}
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_off(const psci_power_state_t *target_state)
+{
+	int lvl = 0, ret;
+	uint32_t scmi_pwr_state = 0;
+
+	/* At-least the CPU level should be specified to be OFF */
+	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+							ARM_LOCAL_STATE_OFF);
+
+	/* PSCI CPU OFF cannot be used to turn OFF system power domain */
+	assert(target_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] ==
+							ARM_LOCAL_STATE_RUN);
+
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
+			break;
+
+		assert(target_state->pwr_domain_state[lvl] ==
+							ARM_LOCAL_STATE_OFF);
+		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+				scmi_power_state_off);
+	}
+
+	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+	ret = scmi_pwr_state_set(scmi_handle,
+		plat_css_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()],
+		scmi_pwr_state);
+
+	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI set power state command returned unexpected value 0x%x\n",
+				ret);
+		panic();
+	}
+}
+
+/*
+ * Helper function to turn ON a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_on(u_register_t mpidr)
+{
+	int lvl = 0, ret, core_pos;
+	uint32_t scmi_pwr_state = 0;
+
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+				scmi_power_state_on);
+
+	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+	core_pos = plat_core_pos_by_mpidr(mpidr);
+	assert(core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT);
+
+	ret = scmi_pwr_state_set(scmi_handle,
+		plat_css_core_pos_to_scmi_dmn_id_map[core_pos],
+		scmi_pwr_state);
+
+	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI set power state command returned unexpected value 0x%x\n",
+				ret);
+		panic();
+	}
+}
+
+/*
+ * Helper function to get the power state of a power domain node as reported
+ * by the SCP.
+ */
+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
+{
+	int ret, cpu_idx;
+	uint32_t scmi_pwr_state = 0, lvl_state;
+
+	/* We don't support get power state at the system power domain level */
+	if ((power_level > PLAT_MAX_PWR_LVL) ||
+			(power_level == CSS_SYSTEM_PWR_DMN_LVL)) {
+		WARN("Invalid power level %u specified for SCMI get power state\n",
+				power_level);
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	cpu_idx = plat_core_pos_by_mpidr(mpidr);
+	assert(cpu_idx > -1);
+
+	ret = scmi_pwr_state_get(scmi_handle,
+		plat_css_core_pos_to_scmi_dmn_id_map[cpu_idx],
+		&scmi_pwr_state);
+
+	if (ret != SCMI_E_SUCCESS) {
+		WARN("SCMI get power state command returned unexpected value 0x%x\n",
+				ret);
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	/*
+	 * Find the maximum power level described in the get power state
+	 * command. If it is less than the requested power level, then assume
+	 * the requested power level is ON.
+	 */
+	if (SCMI_GET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state) < power_level)
+		return HW_ON;
+
+	lvl_state = SCMI_GET_PWR_STATE_LVL(scmi_pwr_state, power_level);
+	if (lvl_state == scmi_power_state_on)
+		return HW_ON;
+
+	assert((lvl_state == scmi_power_state_off) ||
+				(lvl_state == scmi_power_state_sleep));
+	return HW_OFF;
+}
+
+void __dead2 css_scp_system_off(int state)
+{
+	int ret;
+
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt from waking
+	 * up the AP from WFI.
+	 */
+	plat_arm_gic_cpuif_disable();
+
+	/*
+	 * Issue the SCMI system power state set command. The request is made
+	 * forcefully, matching the SCMI_SYS_PWR_FORCEFUL_REQ flag used below.
+	 */
+	ret = scmi_sys_pwr_state_set(scmi_handle,
+			SCMI_SYS_PWR_FORCEFUL_REQ,
+			state);
+
+	if (ret != SCMI_E_SUCCESS) {
+		ERROR("SCMI system power state set 0x%x returns unexpected 0x%x\n",
+			state, ret);
+		panic();
+	}
+	wfi();
+	ERROR("CSS set power state: operation not handled.\n");
+	panic();
+}
+
+/*
+ * Helper function to shut down the system via SCMI.
+ */
+void __dead2 css_scp_sys_shutdown(void)
+{
+	css_scp_system_off(SCMI_SYS_PWR_SHUTDOWN);
+}
+
+/*
+ * Helper function to reset the system via SCMI.
+ */
+void __dead2 css_scp_sys_reboot(void)
+{
+	css_scp_system_off(SCMI_SYS_PWR_COLD_RESET);
+}
+
+scmi_channel_plat_info_t plat_css_scmi_plat_info = {
+		.scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE,
+		.db_reg_addr = PLAT_CSS_MHU_BASE + CSS_SCMI_MHU_DB_REG_OFF,
+		.db_preserve_mask = 0xfffffffe,
+		.db_modify_mask = 0x1,
+};
+
+void plat_arm_pwrc_setup(void)
+{
+	scmi_channel.info = &plat_css_scmi_plat_info;
+	scmi_channel.lock = ARM_LOCK_GET_INSTANCE;
+	scmi_handle = scmi_init(&scmi_channel);
+	if (scmi_handle == NULL) {
+		ERROR("SCMI Initialization failed\n");
+		panic();
+	}
+}
+
+/******************************************************************************
+ * This function overrides the default definition for ARM platforms. Initialize
+ * the SCMI driver, query capability via SCMI and modify the PSCI capability
+ * based on that.
+ *****************************************************************************/
+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops)
+{
+	uint32_t msg_attr;
+	int ret;
+
+	assert(scmi_handle);
+
+	/* Check that power domain POWER_STATE_SET message is supported */
+	ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
+				SCMI_PWR_STATE_SET_MSG, &msg_attr);
+	if (ret != SCMI_E_SUCCESS) {
+		ERROR("Set power state command is not supported by SCMI\n");
+		panic();
+	}
+
+	/*
+	 * Don't support PSCI NODE_HW_STATE call if SCMI doesn't support
+	 * POWER_STATE_GET message.
+	 */
+	ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
+				SCMI_PWR_STATE_GET_MSG, &msg_attr);
+	if (ret != SCMI_E_SUCCESS)
+		ops->get_node_hw_state = NULL;
+
+	/* Check if the SCMI SYSTEM_POWER_STATE_SET message is supported */
+	ret = scmi_proto_msg_attr(scmi_handle, SCMI_SYS_PWR_PROTO_ID,
+				SCMI_SYS_PWR_STATE_SET_MSG, &msg_attr);
+	if (ret != SCMI_E_SUCCESS) {
+		/* System power management operations are not supported */
+		ops->system_off = NULL;
+		ops->system_reset = NULL;
+		ops->get_sys_suspend_power_state = NULL;
+	} else {
+		if (!(msg_attr & SCMI_SYS_PWR_SUSPEND_SUPPORTED)) {
+			/*
+			 * System power management protocol is available, but
+			 * it does not support SYSTEM SUSPEND.
+			 */
+			ops->get_sys_suspend_power_state = NULL;
+		}
+		if (!(msg_attr & SCMI_SYS_PWR_WARM_RESET_SUPPORTED)) {
+			/*
+			 * WARM reset is not available.
+			 */
+			ops->system_reset2 = NULL;
+		}
+	}
+
+	return ops;
+}
+
+int css_system_reset2(int is_vendor, int reset_type, u_register_t cookie)
+{
+	if (is_vendor || (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET))
+		return PSCI_E_INVALID_PARAMS;
+
+	css_scp_system_off(SCMI_SYS_PWR_WARM_RESET);
+	/*
+	 * css_scp_system_off cannot return (it is a __dead2 function),
+	 * but css_system_reset2 has to return some value, even in
+	 * this case.
+	 */
+	return 0;
+}
diff --git a/plat/arm/css/drivers/scp/css_pm_scpi.c b/plat/arm/css/drivers/scp/css_pm_scpi.c
new file mode 100644
index 0000000..545c3fb
--- /dev/null
+++ b/plat/arm/css/drivers/scp/css_pm_scpi.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <css_pm.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include "../scpi/css_scpi.h"
+#include "css_scp.h"
+
+/*
+ * This file implements the SCP power management functions using SCPI protocol.
+ */
+
+/*
+ * Helper function to inform power down state to SCP.
+ */
+void css_scp_suspend(const psci_power_state_t *target_state)
+{
+	uint32_t cluster_state = scpi_power_on;
+	uint32_t system_state = scpi_power_on;
+
+	/* Check if power down at system power domain level is requested */
+	if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+		system_state = scpi_power_retention;
+
+	/* Cluster is to be turned off, so disable coherency */
+	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+		cluster_state = scpi_power_off;
+
+	/*
+	 * Ask the SCP to power down the appropriate components depending upon
+	 * their state.
+	 */
+	scpi_set_css_power_state(read_mpidr_el1(),
+				 scpi_power_off,
+				 cluster_state,
+				 system_state);
+}
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
+ * call the suspend helper here.
+ */
+void css_scp_off(const psci_power_state_t *target_state)
+{
+	css_scp_suspend(target_state);
+}
+
+/*
+ * Helper function to turn ON a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_on(u_register_t mpidr)
+{
+	/*
+	 * The SCP takes care of powering up the parent power domains, so we
+	 * only need to care about level 0.
+	 */
+	scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
+				 scpi_power_on);
+}
+
+/*
+ * Helper function to get the power state of a power domain node as reported
+ * by the SCP.
+ */
+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
+{
+	int rc, element;
+	unsigned int cpu_state, cluster_state;
+
+	/*
+	 * The format of 'power_level' is implementation-defined, but 0 must
+	 * mean a CPU. We also allow 1 to denote the cluster
+	 */
+	if (power_level != ARM_PWR_LVL0 && power_level != ARM_PWR_LVL1)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Query SCP */
+	rc = scpi_get_css_power_state(mpidr, &cpu_state, &cluster_state);
+	if (rc != 0)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Map power states of CPU and cluster to expected PSCI return codes */
+	if (power_level == ARM_PWR_LVL0) {
+		/*
+		 * The CPU state returned by SCP is an 8-bit bit mask
+		 * corresponding to each CPU in the cluster
+		 */
+#if ARM_PLAT_MT
+		/*
+		 * The current SCPI driver only caters for single-threaded
+		 * platforms. Hence we ignore the thread ID (which is always 0)
+		 * for such platforms.
+		 */
+		element = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+#else
+		element = mpidr & MPIDR_AFFLVL_MASK;
+#endif  /* ARM_PLAT_MT */
+		return CSS_CPU_PWR_STATE(cpu_state, element) ==
+			CSS_CPU_PWR_STATE_ON ? HW_ON : HW_OFF;
+	} else {
+		assert(cluster_state == CSS_CLUSTER_PWR_STATE_ON ||
+				cluster_state == CSS_CLUSTER_PWR_STATE_OFF);
+		return cluster_state == CSS_CLUSTER_PWR_STATE_ON ? HW_ON :
+			HW_OFF;
+	}
+}
+
+/*
+ * Helper function to shutdown the system via SCPI.
+ */
+void __dead2 css_scp_sys_shutdown(void)
+{
+	uint32_t response;
+
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt
+	 * from waking up the AP from WFI.
+	 */
+	plat_arm_gic_cpuif_disable();
+
+	/* Send the power down request to the SCP */
+	response = scpi_sys_power_state(scpi_system_shutdown);
+
+	if (response != SCP_OK) {
+		ERROR("CSS System Off: SCP error %u.\n", response);
+		panic();
+	}
+	wfi();
+	ERROR("CSS System Off: operation not handled.\n");
+	panic();
+}
+
+/*
+ * Helper function to reset the system via SCPI.
+ */
+void __dead2 css_scp_sys_reboot(void)
+{
+	uint32_t response;
+
+	/*
+	 * Disable GIC CPU interface to prevent pending interrupt
+	 * from waking up the AP from WFI.
+	 */
+	plat_arm_gic_cpuif_disable();
+
+	/* Send the system reset request to the SCP */
+	response = scpi_sys_power_state(scpi_system_reboot);
+
+	if (response != SCP_OK) {
+		ERROR("CSS System Reset: SCP error %u.\n", response);
+		panic();
+	}
+	wfi();
+	ERROR("CSS System Reset: operation not handled.\n");
+	panic();
+}
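A standalone sketch of how the 8-bit CPU state mask consumed by css_scp_get_power_state() above can be read, assuming (as CSS_CPU_PWR_STATE conventionally does) that CPU n of the cluster maps to bit n; the mask value is invented for the example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cpu_state = 0x05;	/* hypothetical SCP reply: CPUs 0 and 2 ON */
		int cpu;

		for (cpu = 0; cpu < 8; cpu++)
			printf("CPU%d: %s\n", cpu,
			       ((cpu_state >> cpu) & 1) ? "HW_ON" : "HW_OFF");
		return 0;
	}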
diff --git a/plat/arm/css/drivers/scp/css_scp.h b/plat/arm/css/drivers/scp/css_scp.h
new file mode 100644
index 0000000..1f0cf8e
--- /dev/null
+++ b/plat/arm/css/drivers/scp/css_scp.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_SCP_H__
+#define __CSS_SCP_H__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <types.h>
+
+/* Forward declarations */
+struct psci_power_state;
+
+/* API for power management by SCP */
+int css_system_reset2(int is_vendor, int reset_type, u_register_t cookie);
+void css_scp_suspend(const struct psci_power_state *target_state);
+void css_scp_off(const struct psci_power_state *target_state);
+void css_scp_on(u_register_t mpidr);
+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level);
+void __dead2 css_scp_sys_shutdown(void);
+void __dead2 css_scp_sys_reboot(void);
+void __dead2 css_scp_system_off(int state);
+
+/* API for SCP Boot Image transfer. Return 0 on success, -1 on error */
+int css_scp_boot_image_xfer(void *image, unsigned int image_size);
+
+/*
+ * API to wait for the SCP to signal that it is ready after booting the
+ * transferred image.
+ */
+int css_scp_boot_ready(void);
+
+#if CSS_LOAD_SCP_IMAGES
+/*
+ * All CSS platforms load SCP_BL2/SCP_BL2U just below BL1 rw-data and above
+ * BL2/BL2U (this is where BL31 usually resides, except when ARM_BL31_IN_DRAM
+ * is set). Ensure that SCP_BL2/SCP_BL2U do not overflow into BL1 rw-data nor
+ * BL2/BL2U.
+ */
+CASSERT(SCP_BL2_LIMIT <= BL1_RW_BASE, assert_scp_bl2_limit_overwrite_bl1);
+CASSERT(SCP_BL2U_LIMIT <= BL1_RW_BASE, assert_scp_bl2u_limit_overwrite_bl1);
+
+CASSERT(SCP_BL2_BASE >= BL2_LIMIT, assert_scp_bl2_overwrite_bl2);
+CASSERT(SCP_BL2U_BASE >= BL2U_LIMIT, assert_scp_bl2u_overwrite_bl2u);
+#endif
+
+#endif	/* __CSS_SCP_H__ */
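A minimal usage sketch of the boot-image API above, from a hypothetical platform hook whose scp_image/scp_image_size arguments were filled in by the image loader (names and error handling are illustrative only):

	static int hypothetical_scp_boot(void *scp_image, unsigned int scp_image_size)
	{
		/* Hand the SCP_BL2 image metadata/contents over to the SCP */
		if (css_scp_boot_image_xfer(scp_image, scp_image_size) != 0)
			return -1;

		/* Block until the SCP RAM firmware reports that it is ready */
		return css_scp_boot_ready();
	}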
diff --git a/plat/arm/css/drivers/scp/css_sds.c b/plat/arm/css/drivers/scp/css_sds.c
new file mode 100644
index 0000000..a7a51ba
--- /dev/null
+++ b/plat/arm/css/drivers/scp/css_sds.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <css_def.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <platform.h>
+#include <stdint.h>
+#include "../sds/sds.h"
+
+int css_scp_boot_image_xfer(void *image, unsigned int image_size)
+{
+	int ret;
+	unsigned int image_offset, image_flags;
+
+	ret = sds_init();
+	if (ret != SDS_OK) {
+		ERROR("SCP SDS initialization failed\n");
+		panic();
+	}
+
+	VERBOSE("Writing SCP image metadata\n");
+	image_offset = (uintptr_t) image - ARM_TRUSTED_SRAM_BASE;
+	ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_ADDR_OFFSET,
+			&image_offset, SDS_SCP_IMG_ADDR_SIZE,
+			SDS_ACCESS_MODE_NON_CACHED);
+	if (ret != SDS_OK)
+		goto sds_fail;
+
+	ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_SIZE_OFFSET,
+			&image_size, SDS_SCP_IMG_SIZE_SIZE,
+			SDS_ACCESS_MODE_NON_CACHED);
+	if (ret != SDS_OK)
+		goto sds_fail;
+
+	VERBOSE("Marking SCP image metadata as valid\n");
+	image_flags = SDS_SCP_IMG_VALID_FLAG_BIT;
+	ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_FLAG_OFFSET,
+			&image_flags, SDS_SCP_IMG_FLAG_SIZE,
+			SDS_ACCESS_MODE_NON_CACHED);
+	if (ret != SDS_OK)
+		goto sds_fail;
+
+	return 0;
+sds_fail:
+	ERROR("SCP SDS write to SCP IMG struct failed\n");
+	panic();
+}
+
+/*
+ * API to wait for the SCP to signal that it is ready after booting the
+ * transferred image.
+ */
+int css_scp_boot_ready(void)
+{
+	uint32_t scp_feature_availability_flags;
+	int ret, retry = CSS_SCP_READY_10US_RETRIES;
+
+	VERBOSE("Waiting for SCP RAM to complete its initialization process\n");
+
+	/* Wait for the SCP RAM Firmware to complete its initialization process */
+	while (retry > 0) {
+		ret = sds_struct_read(SDS_FEATURE_AVAIL_STRUCT_ID, 0,
+				&scp_feature_availability_flags,
+				SDS_FEATURE_AVAIL_SIZE,
+				SDS_ACCESS_MODE_NON_CACHED);
+		if (ret == SDS_ERR_STRUCT_NOT_FINALIZED)
+			continue;
+
+		if (ret != SDS_OK) {
+			ERROR(" sds_struct_read failed\n");
+			panic();
+		}
+
+		if (scp_feature_availability_flags &
+				SDS_FEATURE_AVAIL_SCP_RAM_READY_BIT)
+			return 0;
+
+		udelay(10);
+		retry--;
+	}
+
+	ERROR("Timeout of %d ms expired waiting for SCP RAM Ready flag\n",
+			CSS_SCP_READY_10US_RETRIES/100);
+
+	plat_panic_handler();
+}
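For reference, an illustrative C view of the SDS structure populated by css_scp_boot_image_xfer() above, based on the SDS_SCP_IMG_* offsets defined in sds.h later in this patch (the driver writes the fields individually and never declares such a struct; <stdint.h> assumed):

	struct scp_img_metadata_view {
		uint32_t flags;		/* offset 0: bit 0 set once the entry is valid */
		uint32_t image_offset;	/* offset 4: offset from ARM_TRUSTED_SRAM_BASE */
		uint32_t image_size;	/* offset 8: image size in bytes */
	};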
diff --git a/plat/arm/css/drivers/scpi/css_mhu.c b/plat/arm/css/drivers/scpi/css_mhu.c
new file mode 100644
index 0000000..500b8df
--- /dev/null
+++ b/plat/arm/css/drivers/scpi/css_mhu.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <css_def.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform_def.h>
+#include "css_mhu.h"
+
+/* SCP MHU secure channel registers */
+#define SCP_INTR_S_STAT		0x200
+#define SCP_INTR_S_SET		0x208
+#define SCP_INTR_S_CLEAR	0x210
+
+/* CPU MHU secure channel registers */
+#define CPU_INTR_S_STAT		0x300
+#define CPU_INTR_S_SET		0x308
+#define CPU_INTR_S_CLEAR	0x310
+
+ARM_INSTANTIATE_LOCK;
+
+/* Weak definition may be overridden in specific CSS based platform */
+#pragma weak plat_arm_pwrc_setup
+
+
+/*
+ * Slot 31 is reserved because the MHU hardware uses this register bit to
+ * indicate a non-secure access attempt. The total number of available slots is
+ * therefore 31 [30:0].
+ */
+#define MHU_MAX_SLOT_ID		30
+
+void mhu_secure_message_start(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	arm_lock_get();
+
+	/* Make sure any previous command has finished */
+	while (mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id))
+		;
+}
+
+void mhu_secure_message_send(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+	assert(!(mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id)));
+
+	/* Send command to SCP */
+	mmio_write_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
+}
+
+uint32_t mhu_secure_message_wait(void)
+{
+	/* Wait for response from SCP */
+	uint32_t response;
+	while (!(response = mmio_read_32(PLAT_CSS_MHU_BASE + SCP_INTR_S_STAT)))
+		;
+
+	return response;
+}
+
+void mhu_secure_message_end(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	/*
+	 * Clear any response we got by writing a one to the relevant slot bit
+	 * of the CLEAR register
+	 */
+	mmio_write_32(PLAT_CSS_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
+
+	arm_lock_release();
+}
+
+void mhu_secure_init(void)
+{
+	arm_lock_init();
+
+	/*
+	 * The STAT register resets to zero. Ensure it is in the expected state,
+	 * as a stale or garbage value would make us think it's a message we've
+	 * already sent.
+	 */
+	assert(mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) == 0);
+}
+
+void plat_arm_pwrc_setup(void)
+{
+	mhu_secure_init();
+}
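The helpers above form a locked request/response sequence; a sketch of the typical call order, mirroring how the SCPI driver later in this patch drives slot 0 (payload handling omitted):

	mhu_secure_message_start(0);	 /* take the lock, wait for the slot to be free */
	/* ... write the command and payload to the shared memory area ... */
	mhu_secure_message_send(0);	 /* raise the doorbell towards the SCP */
	(void)mhu_secure_message_wait(); /* spin until the SCP signals a response */
	mhu_secure_message_end(0);	 /* clear the response and release the lock */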
diff --git a/plat/arm/css/drivers/scpi/css_mhu.h b/plat/arm/css/drivers/scpi/css_mhu.h
new file mode 100644
index 0000000..298eee9
--- /dev/null
+++ b/plat/arm/css/drivers/scpi/css_mhu.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_MHU_H__
+#define __CSS_MHU_H__
+
+#include <stdint.h>
+
+void mhu_secure_message_start(unsigned int slot_id);
+void mhu_secure_message_send(unsigned int slot_id);
+uint32_t mhu_secure_message_wait(void);
+void mhu_secure_message_end(unsigned int slot_id);
+
+void mhu_secure_init(void);
+
+#endif	/* __CSS_MHU_H__ */
diff --git a/plat/arm/css/drivers/scpi/css_scpi.c b/plat/arm/css/drivers/scpi/css_scpi.c
new file mode 100644
index 0000000..3e92c86
--- /dev/null
+++ b/plat/arm/css/drivers/scpi/css_scpi.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <css_def.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include <utils.h>
+#include "css_mhu.h"
+#include "css_scpi.h"
+
+#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_CSS_SCP_COM_SHARED_MEM_BASE
+#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
+								 + 0x100)
+
+/* Header and payload addresses for commands from AP to SCP */
+#define SCPI_CMD_HEADER_AP_TO_SCP		\
+	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
+#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
+	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
+
+/* Header and payload addresses for responses from SCP to AP */
+#define SCPI_RES_HEADER_SCP_TO_AP \
+	((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
+#define SCPI_RES_PAYLOAD_SCP_TO_AP \
+	((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
+
+/* ID of the MHU slot used for the SCPI protocol */
+#define SCPI_MHU_SLOT_ID		0
+
+static void scpi_secure_message_start(void)
+{
+	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_send(size_t payload_size)
+{
+	/*
+	 * Ensure that any write to the SCPI payload area is seen by SCP before
+	 * we write to the MHU register. If these 2 writes were reordered by
+	 * the CPU then SCP would read stale payload data
+	 */
+	dmbst();
+
+	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_receive(scpi_cmd_t *cmd)
+{
+	uint32_t mhu_status;
+
+	assert(cmd != NULL);
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCPI message, reject any other protocol */
+	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+			mhu_status);
+		panic();
+	}
+
+	/*
+	 * Ensure that any read from the SCPI payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+
+	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+}
+
+static void scpi_secure_message_end(void)
+{
+	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
+}
+
+int scpi_wait_ready(void)
+{
+	scpi_cmd_t scpi_cmd;
+
+	VERBOSE("Waiting for SCP_READY command...\n");
+
+	/* Get a message from the SCP */
+	scpi_secure_message_start();
+	scpi_secure_message_receive(&scpi_cmd);
+	scpi_secure_message_end();
+
+	/* We expect an 'SCP Ready' command; record the appropriate error if not */
+	scpi_status_t status = SCP_OK;
+	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
+		ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
+		      SCPI_CMD_SCP_READY, scpi_cmd.id);
+		status = SCP_E_SUPPORT;
+	} else if (scpi_cmd.size != 0) {
+		ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
+		      scpi_cmd.size);
+		status = SCP_E_SIZE;
+	}
+
+	VERBOSE("Sending response for SCP_READY command\n");
+
+	/*
+	 * Send our response back to SCP.
+	 * We are using the same SCPI header, just update the status field.
+	 */
+	scpi_cmd.status = status;
+	scpi_secure_message_start();
+	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
+	scpi_secure_message_send(0);
+	scpi_secure_message_end();
+
+	return status == SCP_OK ? 0 : -1;
+}
+
+void scpi_set_css_power_state(unsigned int mpidr,
+		scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
+		scpi_power_state_t css_state)
+{
+	scpi_cmd_t *cmd;
+	uint32_t state = 0;
+	uint32_t *payload_addr;
+
+#if ARM_PLAT_MT
+	/*
+	 * The current SCPI driver only caters for single-threaded platforms.
+	 * Hence we ignore the thread ID (which is always 0) for such platforms.
+	 */
+	state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f;	/* CPU ID */
+	state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4;	/* Cluster ID */
+#else
+	state |= mpidr & 0x0f;	/* CPU ID */
+	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
+#endif /* ARM_PLAT_MT */
+
+	state |= cpu_state << 8;
+	state |= cluster_state << 12;
+	state |= css_state << 16;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
+	cmd->set = SCPI_SET_NORMAL;
+	cmd->sender = 0;
+	cmd->size = sizeof(state);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = state;
+	scpi_secure_message_send(sizeof(state));
+	/*
+	 * SCP does not reply to this command in order to avoid MHU interrupts
+	 * from the sender, which could interfere with its power state request.
+	 */
+
+	scpi_secure_message_end();
+}
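A standalone worked example of the state word built above, using the single-threaded (non-ARM_PLAT_MT) layout; the MPIDR and requested states are invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mpidr = 0x101;	/* hypothetical: cluster 1, CPU 1 */
		unsigned int state = 0;

		state |= mpidr & 0x0f;		/* CPU ID     -> bits [3:0] */
		state |= (mpidr & 0xf00) >> 4;	/* Cluster ID -> bits [7:4] */
		state |= 3u << 8;		/* scpi_power_off for the CPU */
		state |= 3u << 12;		/* scpi_power_off for the cluster */
		state |= 0u << 16;		/* scpi_power_on for the system */

		printf("SET_CSS_POWER_STATE payload: 0x%x\n", state);	/* prints 0x3311 */
		return 0;
	}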
+
+/*
+ * Query and obtain CSS power state from SCP.
+ *
+ * In response to the query, SCP returns power states of all CPUs in all
+ * clusters of the system. The returned response is then filtered based on the
+ * supplied MPIDR. Power states of the requested cluster and of the CPUs within
+ * it are returned via the supplied non-NULL pointer arguments.
+ *
+ * Returns 0 on success, or -1 on errors.
+ */
+int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+		unsigned int *cluster_state_p)
+{
+	scpi_cmd_t *cmd;
+	scpi_cmd_t response;
+	int power_state, cpu, cluster, rc = -1;
+
+	/*
+	 * Extract CPU and cluster membership of the given MPIDR. SCPI caters
+	 * for only up to 0xf clusters, and 8 CPUs per cluster
+	 */
+#if ARM_PLAT_MT
+	/*
+	 * The current SCPI driver only caters for single-threaded platforms.
+	 * Hence we ignore the thread ID (which is always 0) for such platforms.
+	 */
+	cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+#else
+	cpu = mpidr & MPIDR_AFFLVL_MASK;
+	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+#endif  /* ARM_PLAT_MT */
+	if (cpu >= 8 || cluster >= 0xf)
+		return -1;
+
+	scpi_secure_message_start();
+
+	/* Populate request headers */
+	zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;
+
+	/*
+	 * Send message and wait for SCP's response
+	 */
+	scpi_secure_message_send(0);
+	scpi_secure_message_receive(&response);
+
+	if (response.status != SCP_OK)
+		goto exit;
+
+	/* Validate SCP response */
+	if (!CHECK_RESPONSE(response, cluster))
+		goto exit;
+
+	/* Extract power states for required cluster */
+	power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
+	if (CLUSTER_ID(power_state) != cluster)
+		goto exit;
+
+	/* Update power states via the supplied pointers */
+	if (cluster_state_p)
+		*cluster_state_p = CLUSTER_POWER_STATE(power_state);
+	if (cpu_state_p)
+		*cpu_state_p = CPU_POWER_STATE(power_state);
+	rc = 0;
+
+exit:
+	scpi_secure_message_end();
+	return rc;
+}
+
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
+{
+	scpi_cmd_t *cmd;
+	uint8_t *payload_addr;
+	scpi_cmd_t response;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SYS_POWER_STATE;
+	cmd->set = 0;
+	cmd->sender = 0;
+	cmd->size = sizeof(*payload_addr);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = system_state & 0xff;
+	scpi_secure_message_send(sizeof(*payload_addr));
+
+	scpi_secure_message_receive(&response);
+
+	scpi_secure_message_end();
+
+	return response.status;
+}
diff --git a/plat/arm/css/drivers/scpi/css_scpi.h b/plat/arm/css/drivers/scpi/css_scpi.h
new file mode 100644
index 0000000..2a7e624
--- /dev/null
+++ b/plat/arm/css/drivers/scpi/css_scpi.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CSS_SCPI_H__
+#define __CSS_SCPI_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * An SCPI command consists of a header and a payload.
+ * The following structure describes the header. It is 64-bit long.
+ */
+typedef struct {
+	/* Command ID */
+	uint32_t id		: 7;
+	/* Set ID. Identifies whether this is a standard or extended command. */
+	uint32_t set		: 1;
+	/* Sender ID to match a reply. The value is sender specific. */
+	uint32_t sender		: 8;
+	/* Size of the payload in bytes (0 - 511) */
+	uint32_t size		: 9;
+	uint32_t reserved	: 7;
+	/*
+	 * Status indicating the success of a command.
+	 * See the enum below.
+	 */
+	uint32_t status;
+} scpi_cmd_t;
+
+typedef enum {
+	SCPI_SET_NORMAL = 0,	/* Normal SCPI commands */
+	SCPI_SET_EXTENDED	/* Extended SCPI commands */
+} scpi_set_t;
+
+enum {
+	SCP_OK = 0,	/* Success */
+	SCP_E_PARAM,	/* Invalid parameter(s) */
+	SCP_E_ALIGN,	/* Invalid alignment */
+	SCP_E_SIZE,	/* Invalid size */
+	SCP_E_HANDLER,	/* Invalid handler or callback */
+	SCP_E_ACCESS,	/* Invalid access or permission denied */
+	SCP_E_RANGE,	/* Value out of range */
+	SCP_E_TIMEOUT,	/* Timeout has occurred */
+	SCP_E_NOMEM,	/* Invalid memory area or pointer */
+	SCP_E_PWRSTATE,	/* Invalid power state */
+	SCP_E_SUPPORT,	/* Feature not supported or disabled */
+	SCPI_E_DEVICE,	/* Device error */
+	SCPI_E_BUSY,	/* Device is busy */
+};
+
+typedef uint32_t scpi_status_t;
+
+typedef enum {
+	SCPI_CMD_SCP_READY = 0x01,
+	SCPI_CMD_SET_CSS_POWER_STATE = 0x03,
+	SCPI_CMD_GET_CSS_POWER_STATE = 0x04,
+	SCPI_CMD_SYS_POWER_STATE = 0x05
+} scpi_command_t;
+
+/*
+ * Macros to parse SCP response to GET_CSS_POWER_STATE command
+ *
+ *   [3:0] : cluster ID
+ *   [7:4] : cluster state: 0 = on; 3 = off; rest are reserved
+ *   [15:8]: on/off state for individual CPUs in the cluster
+ *
+ * Payload is in little-endian
+ */
+#define CLUSTER_ID(_resp)		((_resp) & 0xf)
+#define CLUSTER_POWER_STATE(_resp)	(((_resp) >> 4) & 0xf)
+
+/* Result is a bit mask of CPU on/off states in the cluster */
+#define CPU_POWER_STATE(_resp)		(((_resp) >> 8) & 0xff)
+
+/*
+ * For GET_CSS_POWER_STATE, SCP returns the power states of every cluster. The
+ * size of response depends on the number of clusters in the system. The
+ * SCP-to-AP payload contains 2 bytes per cluster. Make sure the response is
+ * large enough to contain power states of a given cluster
+ */
+#define CHECK_RESPONSE(_resp, _clus) \
+	(_resp.size >= (((_clus) + 1) * 2))
+
+typedef enum {
+	scpi_power_on = 0,
+	scpi_power_retention = 1,
+	scpi_power_off = 3,
+} scpi_power_state_t;
+
+typedef enum {
+	scpi_system_shutdown = 0,
+	scpi_system_reboot = 1,
+	scpi_system_reset = 2
+} scpi_system_state_t;
+
+int scpi_wait_ready(void);
+void scpi_set_css_power_state(unsigned int mpidr,
+				scpi_power_state_t cpu_state,
+				scpi_power_state_t cluster_state,
+				scpi_power_state_t css_state);
+int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+		unsigned int *cluster_state_p);
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
+
+
+#endif	/* __CSS_SCPI_H__ */
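A standalone illustration of the response-parsing macros above applied to one 16-bit per-cluster payload entry; the entry value is invented:

	#include <stdio.h>

	#define CLUSTER_ID(_resp)		((_resp) & 0xf)
	#define CLUSTER_POWER_STATE(_resp)	(((_resp) >> 4) & 0xf)
	#define CPU_POWER_STATE(_resp)		(((_resp) >> 8) & 0xff)

	int main(void)
	{
		unsigned int entry = 0x0301;	/* hypothetical entry for cluster 1 */

		printf("cluster id   : %u\n", CLUSTER_ID(entry));		/* 1 */
		printf("cluster state: %u\n", CLUSTER_POWER_STATE(entry));	/* 0 = on */
		printf("cpu mask     : 0x%x\n", CPU_POWER_STATE(entry));	/* CPUs 0 and 1 on */
		return 0;
	}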
diff --git a/plat/arm/css/drivers/sds/aarch32/sds_helpers.S b/plat/arm/css/drivers/sds/aarch32/sds_helpers.S
new file mode 100644
index 0000000..f68cb35
--- /dev/null
+++ b/plat/arm/css/drivers/sds/aarch32/sds_helpers.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include "../sds.h"
+#include "../sds_private.h"
+
+	.globl	sds_get_primary_cpu_id
+
+	/*
+	 * int sds_get_primary_cpu_id(void);
+	 * Return the primary CPU ID from the SDS Structure
+	 * Returns CPUID on success or -1 on failure
+	 */
+func sds_get_primary_cpu_id
+	ldr	r0, =PLAT_ARM_SDS_MEM_BASE
+	ldr	r2, =SDS_REGION_SIGNATURE
+	ldr	r1, [r0]
+	ubfx	r3, r1, #0, #16
+
+	/* Check if the SDS region signature was found */
+	cmp	r2, r3
+	bne	2f
+
+	/* Get the structure count from region descriptor in r1 */
+	ubfx	r1, r1, #SDS_REGION_STRUCT_COUNT_SHIFT, #SDS_REGION_STRUCT_COUNT_WIDTH
+	cmp	r1, #0
+	beq	2f
+	add	r0, r0, #SDS_REGION_DESC_SIZE
+
+	/* Initialize the loop iterator count in r3 */
+	mov	r3, #0
+loop_begin:
+	ldrh	r2, [r0]
+	cmp	r2, #SDS_AP_CPU_INFO_STRUCT_ID
+	bne	continue_loop
+
+	/* We have found the required structure */
+	ldr	r0, [r0,#(SDS_HEADER_SIZE + SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET)]
+	bx	lr
+continue_loop:
+	/* Increment the loop counter and exit loop if counter == structure count */
+	add	r3, r3, #0x1
+	cmp	r1, r3
+	beq	2f
+
+	/* Read the 2nd word in header */
+	ldr	r2, [r0,#4]
+	/* Get the structure size from header */
+	ubfx	r2, r2, #SDS_HEADER_STRUCT_SIZE_SHIFT, #SDS_HEADER_STRUCT_SIZE_WIDTH
+	/* Add the structure size and SDS HEADER SIZE to point to next header */
+	add	r2, r2, #SDS_HEADER_SIZE
+	add	r0, r0, r2
+	b	loop_begin
+2:
+	mov	r0, #0xffffffff
+	bx	lr
+endfunc sds_get_primary_cpu_id
diff --git a/plat/arm/css/drivers/sds/aarch64/sds_helpers.S b/plat/arm/css/drivers/sds/aarch64/sds_helpers.S
new file mode 100644
index 0000000..3b9c562
--- /dev/null
+++ b/plat/arm/css/drivers/sds/aarch64/sds_helpers.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include "../sds.h"
+#include "../sds_private.h"
+
+	.globl	sds_get_primary_cpu_id
+
+	/*
+	 * int sds_get_primary_cpu_id(void);
+	 * Return the primary CPU ID from the SDS Structure
+	 * Returns CPUID on success or -1 on failure
+	 */
+func sds_get_primary_cpu_id
+	mov_imm	x0, PLAT_ARM_SDS_MEM_BASE
+	mov	w2, #SDS_REGION_SIGNATURE
+	ldr	w1, [x0]
+
+	/* Check if the SDS region signature was found */
+	cmp	w2, w1, uxth
+	b.ne	2f
+
+	/* Get the structure count from region descriptor in w1 */
+	ubfx	w1, w1, #SDS_REGION_STRUCT_COUNT_SHIFT, #SDS_REGION_STRUCT_COUNT_WIDTH
+	cbz	w1, 2f
+	add	x0, x0, #SDS_REGION_DESC_SIZE
+
+	/* Initialize the loop iterator count in w3 */
+	mov	w3, #0
+loop_begin:
+	ldrh	w2, [x0]
+	cmp	w2, #SDS_AP_CPU_INFO_STRUCT_ID
+	b.ne	continue_loop
+
+	/* We have found the required structure */
+	ldr	w0, [x0,#(SDS_HEADER_SIZE + SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET)]
+	ret
+continue_loop:
+	/* Increment the loop counter and exit loop if counter == structure count */
+	add	w3, w3, #0x1
+	cmp	w1, w3
+	b.eq	2f
+
+	/* Read the 2nd word in header */
+	ldr	w2, [x0,#4]
+	/* Get the structure size from header */
+	ubfx	x2, x2, #SDS_HEADER_STRUCT_SIZE_SHIFT, #SDS_HEADER_STRUCT_SIZE_WIDTH
+	/* Add the structure size and SDS HEADER SIZE to point to next header */
+	add	x2, x2, #SDS_HEADER_SIZE
+	add	x0, x0, x2
+	b	loop_begin
+2:
+	mov	w0, #0xffffffff
+	ret
+endfunc sds_get_primary_cpu_id
diff --git a/plat/arm/css/drivers/sds/sds.c b/plat/arm/css/drivers/sds/sds.c
new file mode 100644
index 0000000..e2fac54
--- /dev/null
+++ b/plat/arm/css/drivers/sds/sds.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <css_def.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "sds.h"
+#include "sds_private.h"
+
+/*
+ * Variables used to track and maintain the state of the memory region reserved
+ * for usage by the SDS framework.
+ */
+
+/* Pointer to the base of the SDS memory region */
+static uintptr_t sds_mem_base;
+
+/* Size of the SDS memory region in bytes */
+static size_t sds_mem_size;
+
+/*
+ * Perform some non-exhaustive tests to determine whether any of the fields
+ * within a Structure Header contain obviously invalid data.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+static int sds_struct_is_valid(uintptr_t header)
+{
+	size_t struct_size = GET_SDS_HEADER_STRUCT_SIZE(header);
+
+	/* Zero is not a valid identifier */
+	if (GET_SDS_HEADER_ID(header) == 0)
+		return SDS_ERR_FAIL;
+
+	/* Check SDS Schema version */
+	if (GET_SDS_HEADER_VERSION(header) == SDS_REGION_SCH_VERSION)
+		return SDS_ERR_FAIL;
+
+	/* The SDS Structure sizes have to be multiple of 8 */
+	if ((struct_size == 0) || ((struct_size % 8) != 0))
+		return SDS_ERR_FAIL;
+
+	if (struct_size > sds_mem_size)
+		return SDS_ERR_FAIL;
+
+	return SDS_OK;
+}
+
+/*
+ * Validate the SDS structure headers.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+static int validate_sds_struct_headers(void)
+{
+	unsigned int i, structure_count;
+	uintptr_t header;
+
+	structure_count = GET_SDS_REGION_STRUCTURE_COUNT(sds_mem_base);
+
+	if (structure_count == 0)
+		return SDS_ERR_FAIL;
+
+	header = sds_mem_base + SDS_REGION_DESC_SIZE;
+
+	/* Iterate over structure headers and validate each one */
+	for (i = 0; i < structure_count; i++) {
+		if (sds_struct_is_valid(header) != SDS_OK) {
+			WARN("SDS: Invalid structure header detected\n");
+			return SDS_ERR_FAIL;
+		}
+		header += GET_SDS_HEADER_STRUCT_SIZE(header) + SDS_HEADER_SIZE;
+	}
+	return SDS_OK;
+}
+
+/*
+ * Get the structure header pointer corresponding to the structure ID.
+ * Returns SDS_OK on success, SDS_ERR_STRUCT_NOT_FOUND on error.
+ */
+static int get_struct_header(uint32_t structure_id, struct_header_t **header)
+{
+	unsigned int i, structure_count;
+	uintptr_t current_header;
+
+	assert(header);
+
+	structure_count = GET_SDS_REGION_STRUCTURE_COUNT(sds_mem_base);
+	if (structure_count == 0)
+		return SDS_ERR_STRUCT_NOT_FOUND;
+
+	current_header = ((uintptr_t)sds_mem_base) + SDS_REGION_DESC_SIZE;
+
+	/* Iterate over structure headers to find one with a matching ID */
+	for (i = 0; i < structure_count; i++) {
+		if (GET_SDS_HEADER_ID(current_header) == structure_id) {
+			*header = (struct_header_t *)current_header;
+			return SDS_OK;
+		}
+		current_header += GET_SDS_HEADER_STRUCT_SIZE(current_header) +
+						SDS_HEADER_SIZE;
+	}
+
+	*header = NULL;
+	return SDS_ERR_STRUCT_NOT_FOUND;
+}
+
+/*
+ * Check if a structure header corresponding to the structure ID exists.
+ * Returns SDS_OK if the structure header exists, otherwise
+ * SDS_ERR_STRUCT_NOT_FOUND.
+ */
+int sds_struct_exists(unsigned int structure_id)
+{
+	struct_header_t *header = NULL;
+	int ret;
+
+	ret = get_struct_header(structure_id, &header);
+	if (ret == SDS_OK) {
+		assert(header);
+	}
+
+	return ret;
+}
+
+/*
+ * Read from field in the structure corresponding to `structure_id`.
+ * `fld_off` is the offset to the field in the structure and `mode`
+ * indicates whether cache maintenance needs to be performed prior to the read.
+ * The `data` is the pointer to store the read data of size specified by `size`.
+ * Returns SDS_OK on success or corresponding error codes on failure.
+ */
+int sds_struct_read(uint32_t structure_id, unsigned int fld_off,
+		void *data, size_t size, sds_access_mode_t mode)
+{
+	int status;
+	uintptr_t field_base;
+	struct_header_t *header = NULL;
+
+	if (!data)
+		return SDS_ERR_INVALID_PARAMS;
+
+	/* Check if a structure with this ID exists */
+	status = get_struct_header(structure_id, &header);
+	if (status != SDS_OK)
+		return status;
+
+	assert(header);
+
+	if (mode == SDS_ACCESS_MODE_CACHED)
+		inv_dcache_range((uintptr_t)header, SDS_HEADER_SIZE + size);
+
+	if (!IS_SDS_HEADER_VALID(header)) {
+		WARN("SDS: Reading from un-finalized structure 0x%x\n",
+				structure_id);
+		return SDS_ERR_STRUCT_NOT_FINALIZED;
+	}
+
+	if ((fld_off + size) > GET_SDS_HEADER_STRUCT_SIZE(header))
+		return SDS_ERR_FAIL;
+
+	field_base = (uintptr_t)header + SDS_HEADER_SIZE + fld_off;
+	if (check_uptr_overflow(field_base, size - 1))
+		return SDS_ERR_FAIL;
+
+	/* Copy the required field out of the struct */
+	memcpy(data, (void *)field_base, size);
+
+	return SDS_OK;
+}
+
+/*
+ * Write to the field in the structure corresponding to `structure_id`.
+ * `fld_off` is the offset to the field in the structure and `mode`
+ * indicates whether cache maintenance needs to be performed for the write.
+ * The `data` is the pointer to data of size specified by `size`.
+ * Returns SDS_OK on success or corresponding error codes on failure.
+ */
+int sds_struct_write(uint32_t structure_id, unsigned int fld_off,
+		void *data, size_t size, sds_access_mode_t mode)
+{
+	int status;
+	uintptr_t field_base;
+	struct_header_t *header = NULL;
+
+	if (!data)
+		return SDS_ERR_INVALID_PARAMS;
+
+	/* Check if a structure with this ID exists */
+	status = get_struct_header(structure_id, &header);
+	if (status != SDS_OK)
+		return status;
+
+	assert(header);
+
+	if (mode == SDS_ACCESS_MODE_CACHED)
+		inv_dcache_range((uintptr_t)header, SDS_HEADER_SIZE + size);
+
+	if (!IS_SDS_HEADER_VALID(header)) {
+		WARN("SDS: Writing to un-finalized structure 0x%x\n",
+				structure_id);
+		return SDS_ERR_STRUCT_NOT_FINALIZED;
+	}
+
+	if ((fld_off + size) > GET_SDS_HEADER_STRUCT_SIZE(header))
+		return SDS_ERR_FAIL;
+
+	field_base = (uintptr_t)header + SDS_HEADER_SIZE + fld_off;
+	if (check_uptr_overflow(field_base, size - 1))
+		return SDS_ERR_FAIL;
+
+	/* Copy the data into the required field of the struct */
+	memcpy((void *)field_base, data, size);
+
+	if (mode == SDS_ACCESS_MODE_CACHED)
+		flush_dcache_range((uintptr_t)field_base, size);
+
+	return SDS_OK;
+}
+
+/*
+ * Initialize the SDS driver. Also verifies the SDS schema version and the
+ * sanity of the SDS structure headers.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+int sds_init(void)
+{
+	sds_mem_base = (uintptr_t)PLAT_ARM_SDS_MEM_BASE;
+
+	if (!IS_SDS_REGION_VALID(sds_mem_base)) {
+		WARN("SDS: No valid SDS Memory Region found\n");
+		return SDS_ERR_FAIL;
+	}
+
+	if (GET_SDS_REGION_SCHEMA_VERSION(sds_mem_base)
+				!= SDS_REGION_SCH_VERSION) {
+		WARN("SDS: Unsupported SDS schema version\n");
+		return SDS_ERR_FAIL;
+	}
+
+	sds_mem_size = GET_SDS_REGION_SIZE(sds_mem_base);
+	if (sds_mem_size > PLAT_ARM_SDS_MEM_SIZE_MAX) {
+		WARN("SDS: SDS Memory Region exceeds size limit\n");
+		return SDS_ERR_FAIL;
+	}
+
+	INFO("SDS: Detected SDS Memory Region (%zu bytes)\n", sds_mem_size);
+
+	if (validate_sds_struct_headers() != SDS_OK)
+		return SDS_ERR_FAIL;
+
+	return SDS_OK;
+}
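A usage sketch of the driver above, reading the reset syndrome structure whose ID, offset and size constants come from sds.h below (purely illustrative caller code; assumes debug.h is included for INFO):

	uint32_t syndrome;

	if (sds_init() != SDS_OK)
		return;	/* hypothetical caller gives up on SDS */

	if (sds_struct_read(SDS_RESET_SYNDROME_STRUCT_ID,
			SDS_RESET_SYNDROME_OFFSET, &syndrome,
			SDS_RESET_SYNDROME_SIZE,
			SDS_ACCESS_MODE_NON_CACHED) == SDS_OK &&
			(syndrome & SDS_RESET_SYNDROME_POW_ON_RESET_BIT))
		INFO("Cold boot (power-on reset)\n");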
diff --git a/plat/arm/css/drivers/sds/sds.h b/plat/arm/css/drivers/sds/sds.h
new file mode 100644
index 0000000..ff3787d
--- /dev/null
+++ b/plat/arm/css/drivers/sds/sds.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDS_H__
+#define __SDS_H__
+
+/* SDS Structure Identifier defines */
+/* AP CPU INFO defines */
+#define SDS_AP_CPU_INFO_STRUCT_ID		1
+#define SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET	0x0
+#define SDS_AP_CPU_INFO_PRIMARY_CPUID_SIZE	0x4
+
+/* ROM Firmware Version defines */
+#define SDS_ROM_VERSION_STRUCT_ID		2
+#define SDS_ROM_VERSION_OFFSET			0x0
+#define SDS_ROM_VERSION_SIZE			0x4
+
+/* RAM Firmware version defines */
+#define SDS_RAM_VERSION_STRUCT_ID		3
+#define SDS_RAM_VERSION_OFFSET			0x0
+#define SDS_RAM_VERSION_SIZE			0x4
+
+/* Platform Identity defines */
+#define SDS_PLATFORM_IDENTITY_STRUCT_ID		4
+#define SDS_PLATFORM_IDENTITY_ID_OFFSET		0x0
+#define SDS_PLATFORM_IDENTITY_ID_SIZE		0x4
+#define SDS_PLATFORM_IDENTITY_ID_CONFIG_SHIFT	28
+#define SDS_PLATFORM_IDENTITY_ID_CONFIG_WIDTH	4
+#define SDS_PLATFORM_IDENTITY_ID_CONFIG_MASK	\
+	((1 << SDS_PLATFORM_IDENTITY_ID_CONFIG_WIDTH) - 1)
+
+#define SDS_PLATFORM_IDENTITY_PLAT_TYPE_OFFSET	0x4
+#define SDS_PLATFORM_IDENTITY_PLAT_TYPE_SIZE	0x4
+
+/* Reset Syndrome defines */
+#define SDS_RESET_SYNDROME_STRUCT_ID		5
+#define SDS_RESET_SYNDROME_OFFSET		0
+#define SDS_RESET_SYNDROME_SIZE			4
+#define SDS_RESET_SYNDROME_POW_ON_RESET_BIT	(1 << 0)
+#define SDS_RESET_SYNDROME_SCP_WD_RESET_BIT	(1 << 1)
+#define SDS_RESET_SYNDROME_AP_WD_RESET_BIT	(1 << 2)
+#define SDS_RESET_SYNDROME_SYS_RESET_REQ_BIT	(1 << 3)
+#define SDS_RESET_SYNDROME_M3_LOCKUP_BIT	(1 << 4)
+
+/* SCP Firmware Feature Availability defines */
+#define SDS_FEATURE_AVAIL_STRUCT_ID		6
+#define SDS_FEATURE_AVAIL_OFFSET		0
+#define SDS_FEATURE_AVAIL_SIZE			4
+#define SDS_FEATURE_AVAIL_SCP_RAM_READY_BIT	(1 << 0)
+#define SDS_FEATURE_AVAIL_DMC_READY_BIT		(1 << 1)
+#define SDS_FEATURE_AVAIL_MSG_IF_READY_BIT	(1 << 2)
+
+/* SCP BL2 Image Metadata defines */
+#define SDS_SCP_IMG_STRUCT_ID			9
+#define SDS_SCP_IMG_FLAG_OFFSET			0
+#define SDS_SCP_IMG_FLAG_SIZE			4
+#define SDS_SCP_IMG_VALID_FLAG_BIT		(1 << 0)
+#define SDS_SCP_IMG_ADDR_OFFSET			4
+#define SDS_SCP_IMG_ADDR_SIZE			4
+#define SDS_SCP_IMG_SIZE_OFFSET			8
+#define SDS_SCP_IMG_SIZE_SIZE			4
+
+/* SDS Driver Error Codes */
+#define SDS_OK				0
+#define SDS_ERR_FAIL			-1
+#define SDS_ERR_INVALID_PARAMS		-2
+#define SDS_ERR_STRUCT_NOT_FOUND	-3
+#define SDS_ERR_STRUCT_NOT_FINALIZED	-4
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+
+typedef enum {
+	SDS_ACCESS_MODE_NON_CACHED,
+	SDS_ACCESS_MODE_CACHED,
+} sds_access_mode_t;
+
+int sds_init(void);
+int sds_struct_exists(uint32_t structure_id);
+int sds_struct_read(uint32_t structure_id, unsigned int fld_off, void *data,
+		size_t size, sds_access_mode_t mode);
+int sds_struct_write(uint32_t structure_id, unsigned int fld_off, void *data,
+		size_t size, sds_access_mode_t mode);
+#endif /*__ASSEMBLY__ */
+#endif /* __SDS_H__ */
diff --git a/plat/arm/css/drivers/sds/sds_private.h b/plat/arm/css/drivers/sds/sds_private.h
new file mode 100644
index 0000000..649576b
--- /dev/null
+++ b/plat/arm/css/drivers/sds/sds_private.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDS_PRIVATE_H__
+#define __SDS_PRIVATE_H__
+
+/* SDS Header defines */
+#define SDS_HEADER_ID_SHIFT			0
+#define SDS_HEADER_ID_WIDTH			16
+#define SDS_HEADER_ID_MASK			((1 << SDS_HEADER_ID_WIDTH) - 1)
+
+#define SDS_HEADER_MINOR_VERSION_WIDTH		8
+#define SDS_HEADER_MINOR_VERSION_SHIFT		16
+#define SDS_HEADER_MAJOR_VERSION_WIDTH		8
+
+#define MAKE_SDS_HEADER_VERSION(major, minor)	\
+	(((((major) & 0xff) << SDS_HEADER_MINOR_VERSION_WIDTH) | ((minor) & 0xff)))
+#define SDS_HEADER_VERSION_MASK			\
+	((1 << (SDS_HEADER_MINOR_VERSION_WIDTH + SDS_HEADER_MAJOR_VERSION_WIDTH)) - 1)
+
+#define SDS_HEADER_VERSION			MAKE_SDS_HEADER_VERSION(1, 0)
+#define SDS_HEADER_STRUCT_SIZE_WIDTH		23
+#define SDS_HEADER_STRUCT_SIZE_SHIFT		1
+#define SDS_HEADER_STRUCT_SIZE_MASK		((1 << SDS_HEADER_STRUCT_SIZE_WIDTH) - 1)
+#define SDS_HEADER_VALID_MASK			0x1
+#define SDS_HEADER_VALID_SHIFT			0
+#define SDS_HEADER_SIZE				0x8
+
+/* Arbitrary, 16 bit value that indicates a valid SDS Memory Region */
+#define SDS_REGION_SIGNATURE			0xAA7A
+#define SDS_REGION_SIGNATURE_WIDTH		16
+#define SDS_REGION_SIGNATURE_SHIFT		0
+#define SDS_REGION_SIGNATURE_MASK		((1 << SDS_REGION_SIGNATURE_WIDTH) - 1)
+
+#define SDS_REGION_STRUCT_COUNT_SHIFT		16
+#define SDS_REGION_STRUCT_COUNT_WIDTH		8
+#define SDS_REGION_STRUCT_COUNT_MASK		((1 << SDS_REGION_STRUCT_COUNT_WIDTH) - 1)
+
+#define SDS_REGION_SCH_MINOR_SHIFT		24
+#define SDS_REGION_SCH_MINOR_WIDTH		4
+#define SDS_REGION_SCH_MINOR_MASK		((1 << SDS_REGION_SCH_MINOR_WIDTH) - 1)
+
+#define SDS_REGION_SCH_MAJOR_SHIFT		28
+#define SDS_REGION_SCH_MAJOR_WIDTH		4
+#define SDS_REGION_SCH_MAJOR_MASK		((1 << SDS_REGION_SCH_MAJOR_WIDTH) - 1)
+
+#define SDS_REGION_SCH_VERSION_MASK		\
+	((1 << (SDS_REGION_SCH_MINOR_WIDTH + SDS_REGION_SCH_MAJOR_WIDTH)) - 1)
+
+#define MAKE_SDS_REGION_SCH_VERSION(maj, min)	\
+	((((maj) & SDS_REGION_SCH_MAJOR_MASK) << SDS_REGION_SCH_MINOR_WIDTH) |	\
+	((min) & SDS_REGION_SCH_MINOR_MASK))
+
+#define SDS_REGION_SCH_VERSION			MAKE_SDS_REGION_SCH_VERSION(1, 0)
+#define SDS_REGION_REGIONSIZE_OFFSET		0x4
+#define SDS_REGION_DESC_SIZE			0x8
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+
+/* Header containing Shared Data Structure metadata */
+typedef struct structure_header {
+	uint32_t reg[2];
+} struct_header_t;
+
+#define GET_SDS_HEADER_ID(header)			\
+	((((struct_header_t *)(header))->reg[0]) & SDS_HEADER_ID_MASK)
+#define GET_SDS_HEADER_VERSION(header)			\
+	(((((struct_header_t *)(header))->reg[0]) >> SDS_HEADER_MINOR_VERSION_SHIFT)\
+	& SDS_HEADER_VERSION_MASK)
+#define GET_SDS_HEADER_STRUCT_SIZE(header)		\
+	(((((struct_header_t *)(header))->reg[1]) >> SDS_HEADER_STRUCT_SIZE_SHIFT)\
+	& SDS_HEADER_STRUCT_SIZE_MASK)
+#define IS_SDS_HEADER_VALID(header)			\
+	((((struct_header_t *)(header))->reg[1]) & SDS_HEADER_VALID_MASK)
+#define GET_SDS_STRUCT_FIELD(header, field_offset)	\
+	((((uint8_t *)(header)) + sizeof(struct_header_t)) + (field_offset))
+
+/* Region Descriptor describing the SDS Memory Region */
+typedef struct region_descriptor {
+	uint32_t reg[2];
+} region_desc_t;
+
+#define IS_SDS_REGION_VALID(region)			\
+	(((((region_desc_t *)(region))->reg[0]) & SDS_REGION_SIGNATURE_MASK) == SDS_REGION_SIGNATURE)
+#define GET_SDS_REGION_STRUCTURE_COUNT(region)		\
+	(((((region_desc_t *)(region))->reg[0]) >> SDS_REGION_STRUCT_COUNT_SHIFT)\
+	& SDS_REGION_STRUCT_COUNT_MASK)
+#define GET_SDS_REGION_SCHEMA_VERSION(region)		\
+	(((((region_desc_t *)(region))->reg[0]) >> SDS_REGION_SCH_MINOR_SHIFT)\
+	& SDS_REGION_SCH_VERSION_MASK)
+#define GET_SDS_REGION_SIZE(region)		((((region_desc_t *)(region))->reg[1]))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __SDS_PRIVATE_H__ */
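A standalone worked decode of the first region-descriptor word using the field layout above (signature in bits [15:0], structure count in [23:16], schema minor/major in [27:24]/[31:28]); the word is invented:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg0 = 0x1006AA7A;	/* hypothetical region descriptor word 0 */

		printf("signature : 0x%x\n", reg0 & 0xffff);		/* 0xAA7A, i.e. valid */
		printf("structures: %u\n", (reg0 >> 16) & 0xff);	/* 6 structure headers */
		printf("schema    : %u.%u\n", (reg0 >> 28) & 0xf,	/* version 1.0 */
		       (reg0 >> 24) & 0xf);
		return 0;
	}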
diff --git a/plat/arm/soc/common/soc_css.mk b/plat/arm/soc/common/soc_css.mk
new file mode 100644
index 0000000..e009467
--- /dev/null
+++ b/plat/arm/soc/common/soc_css.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		+=	-Iinclude/plat/arm/soc/common/
+
+#PLAT_BL_COMMON_SOURCES	+=
+
+BL1_SOURCES		+=	plat/arm/soc/common/soc_css_security.c
+
+BL2_SOURCES		+=	plat/arm/soc/common/soc_css_security.c
+
+BL2U_SOURCES		+=	plat/arm/soc/common/soc_css_security.c
+
+BL31_SOURCES		+=	plat/arm/soc/common/soc_css_security.c
diff --git a/plat/arm/soc/common/soc_css_security.c b/plat/arm/soc/common/soc_css_security.c
new file mode 100644
index 0000000..a8747f1
--- /dev/null
+++ b/plat/arm/soc/common/soc_css_security.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <board_css_def.h>
+#include <mmio.h>
+#include <nic_400.h>
+#include <platform_def.h>
+#include <soc_css_def.h>
+
+void soc_css_init_nic400(void)
+{
+	/*
+	 * NIC-400 Access Control Initialization
+	 *
+	 * Define access privileges by setting each corresponding bit to:
+	 *   0 = Secure access only
+	 *   1 = Non-secure access allowed
+	 */
+
+	/*
+	 * Allow non-secure access to some SOC regions, excluding UART1, which
+	 * remains secure.
+	 * Note: This is the NIC-400 device on the SOC
+	 */
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_USB_EHCI), ~0);
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_TLX_MASTER), ~0);
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_USB_OHCI), ~0);
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_PL354_SMC), ~0);
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_APB4_BRIDGE), ~0);
+	mmio_write_32(SOC_CSS_NIC400_BASE +
+		NIC400_ADDR_CTRL_SECURITY_REG(SOC_CSS_NIC400_BOOTSEC_BRIDGE),
+		~SOC_CSS_NIC400_BOOTSEC_BRIDGE_UART1);
+}
+
+
+#define PCIE_SECURE_REG		0x3000
+/* Mask uses REG and MEM access bits */
+#define PCIE_SEC_ACCESS_MASK	((1 << 0) | (1 << 1))
+
+void soc_css_init_pcie(void)
+{
+#if !PLAT_juno
+	/*
+	 * Do not initialize PCIe in an emulator environment.
+	 * The platform ID register is not supported on Juno.
+	 */
+	if (BOARD_CSS_GET_PLAT_TYPE(BOARD_CSS_PLAT_ID_REG_ADDR) ==
+			BOARD_CSS_PLAT_TYPE_EMULATOR)
+		return;
+#endif /* PLAT_juno */
+
+	/*
+	 * PCIE Root Complex Security settings to enable non-secure
+	 * access to config registers.
+	 */
+	mmio_write_32(SOC_CSS_PCIE_CONTROL_BASE + PCIE_SECURE_REG,
+			PCIE_SEC_ACCESS_MASK);
+}
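A hedged illustration of the access-control convention described above (each bit: 0 = secure-only, 1 = non-secure allowed); the region index and bit position are hypothetical and not taken from this patch:

	/* Grant non-secure access to everything in a hypothetical region 7,
	 * except the device behind bit 5, which must stay secure-only. */
	mmio_write_32(SOC_CSS_NIC400_BASE +
		NIC400_ADDR_CTRL_SECURITY_REG(7 /* hypothetical */),
		~(1U << 5));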
diff --git a/plat/common/aarch32/plat_common.c b/plat/common/aarch32/plat_common.c
new file mode 100644
index 0000000..d3799d2
--- /dev/null
+++ b/plat/common/aarch32/plat_common.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <console.h>
+#include <platform.h>
+#include <xlat_mmu_helpers.h>
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak bl32_plat_enable_mmu
+#pragma weak sp_min_plat_runtime_setup
+
+void bl32_plat_enable_mmu(uint32_t flags)
+{
+	enable_mmu_secure(flags);
+}
+
+void sp_min_plat_runtime_setup(void)
+{
+	/*
+	 * Finish the use of console driver in SP_MIN so that any runtime logs
+	 * from SP_MIN will be suppressed.
+	 */
+	console_uninit();
+}
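Because the symbols above are weak, a platform may override them; a hypothetical override that keeps the SP_MIN console alive (e.g. for debug builds) could be as simple as:

	void sp_min_plat_runtime_setup(void)
	{
		/* Deliberately skip console_uninit() so runtime logs remain visible */
	}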
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
new file mode 100644
index 0000000..61d21ab
--- /dev/null
+++ b/plat/common/aarch32/platform_helpers.S
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.weak	plat_crash_console_init
+	.weak	plat_crash_console_putc
+	.weak	plat_crash_console_flush
+	.weak	plat_reset_handler
+	.weak	plat_disable_acp
+	.weak	platform_mem_init
+	.weak	plat_panic_handler
+	.weak	bl2_plat_preload_setup
+	.weak	plat_try_next_boot_source
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_crash_console_init
+	mov	r0, #0
+	bx	lr
+endfunc plat_crash_console_init
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_crash_console_putc
+	bx	lr
+endfunc plat_crash_console_putc
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_crash_console_flush
+	mov	r0, #0
+	bx	lr
+endfunc plat_crash_console_flush
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_reset_handler
+	bx	lr
+endfunc plat_reset_handler
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_disable_acp
+	bx	lr
+endfunc plat_disable_acp
+
+	/* ---------------------------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * ---------------------------------------------------------------------
+	 */
+func platform_mem_init
+	bx	lr
+endfunc platform_mem_init
+
+	/* -----------------------------------------------------
+	 * void plat_panic_handler(void) __dead2;
+	 * Endless loop by default.
+	 * -----------------------------------------------------
+	 */
+func plat_panic_handler
+	b	plat_panic_handler
+endfunc plat_panic_handler
+
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func bl2_plat_preload_setup
+	bx	lr
+endfunc bl2_plat_preload_setup
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_try_next_boot_source
+	mov	r0, #0
+	bx	lr
+endfunc plat_try_next_boot_source
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
new file mode 100644
index 0000000..6c3d08d
--- /dev/null
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.weak	plat_get_my_stack
+	.weak	plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * uintptr_t plat_get_my_stack (u_register_t mpidr)
+	 *
+	 * For a given CPU, this function returns the stack
+	 * pointer for a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_stack
+	push	{r4, lr}
+	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	pop	{r4, pc}
+endfunc	plat_get_my_stack
+
+	/* -----------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For the current CPU, this function sets the stack
+	 * pointer to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_set_my_stack
+	mov	r4, lr
+	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov	sp, r0
+	bx	r4
+endfunc plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in normal memory. Each cpu gets a
+	 * stack of PLATFORM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
new file mode 100644
index 0000000..836c13a
--- /dev/null
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.weak	plat_get_my_stack
+	.weak	plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * unsigned long plat_get_my_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs
+	 * a stack. This function returns the stack pointer for
+	 * a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	bx	lr
+endfunc	plat_get_my_stack
+
+	/* -----------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs
+	 * a stack. This function sets the stack pointer to a
+	 * stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_set_my_stack
+	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	mov	sp, r0
+	bx	lr
+endfunc plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * Per-cpu stacks in normal memory. Each cpu gets a
+	 * stack of PLATFORM_STACK_SIZE bytes.
+	 * -----------------------------------------------------
+	 */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+		PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 90574fd..05084e1 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -1,42 +1,24 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
-
-#include <xlat_tables.h>
+#include <assert.h>
+#include <console.h>
+#include <platform.h>
+#include <xlat_mmu_helpers.h>
 
 /*
- * The following 2 platform setup functions are weakly defined. They
+ * The following platform setup functions are weakly defined. They
  * provide typical implementations that may be re-used by multiple
  * platforms but may also be overridden by a platform if required.
  */
 #pragma weak bl31_plat_enable_mmu
 #pragma weak bl32_plat_enable_mmu
+#pragma weak bl31_plat_runtime_setup
+#if !ERROR_DEPRECATED
+#pragma weak plat_get_syscnt_freq2
+#endif /* ERROR_DEPRECATED */
 
 void bl31_plat_enable_mmu(uint32_t flags)
 {
@@ -47,3 +29,38 @@
 {
 	enable_mmu_el1(flags);
 }
+
+void bl31_plat_runtime_setup(void)
+{
+	/*
+	 * Finish the use of console driver in BL31 so that any runtime logs
+	 * from BL31 will be suppressed.
+	 */
+	console_uninit();
+}
+
+#if !ENABLE_PLAT_COMPAT
+/*
+ * Helper function for platform_get_core_pos() when platform compatibility is
+ * disabled. This is to enable SPDs using the older platform API to continue
+ * to work.
+ */
+unsigned int platform_core_pos_helper(unsigned long mpidr)
+{
+	int idx = plat_core_pos_by_mpidr(mpidr);
+	assert(idx >= 0);
+	return idx;
+}
+#endif
+
+
+#if !ERROR_DEPRECATED
+unsigned int plat_get_syscnt_freq2(void)
+{
+	unsigned long long freq = plat_get_syscnt_freq();
+
+	assert(freq >> 32 == 0);
+
+	return (unsigned int)freq;
+}
+#endif /* ERROR_DEPRECATED */
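For the !ERROR_DEPRECATED compatibility path above, a hypothetical legacy platform hook and the value the wrapper would return (the frequency is illustrative):

	unsigned long long plat_get_syscnt_freq(void)
	{
		return 50000000ULL;	/* 50 MHz system counter */
	}

	/* plat_get_syscnt_freq2() then returns 50000000U; the assert rejects
	 * frequencies that do not fit in 32 bits. */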
diff --git a/plat/common/aarch64/plat_psci_common.c b/plat/common/aarch64/plat_psci_common.c
new file mode 100644
index 0000000..35afcdb
--- /dev/null
+++ b/plat/common/aarch64/plat_psci_common.c
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#if !ERROR_DEPRECATED
+#include "../plat_psci_common.c"
+#endif
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
index c236fd7..797a936 100644
--- a/plat/common/aarch64/platform_helpers.S
+++ b/plat/common/aarch64/platform_helpers.S
@@ -1,66 +1,51 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
 #include <platform_def.h>
 
-
-	.weak	platform_get_core_pos
-	.weak	platform_check_mpidr
 	.weak	plat_report_exception
 	.weak	plat_crash_console_init
 	.weak	plat_crash_console_putc
+	.weak	plat_crash_console_flush
 	.weak	plat_reset_handler
 	.weak	plat_disable_acp
+	.weak	bl1_plat_prepare_exit
+	.weak	plat_error_handler
+	.weak	plat_panic_handler
+	.weak	bl2_plat_preload_setup
+	.weak	plat_try_next_boot_source
 
-	/* -----------------------------------------------------
-	 *  int platform_get_core_pos(int mpidr);
-	 *  With this function: CorePos = (ClusterId * 4) +
-	 *  				  CoreId
-	 * -----------------------------------------------------
-	 */
-func platform_get_core_pos
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
-	ret
+#if !ENABLE_PLAT_COMPAT
+	.globl	platform_get_core_pos
 
-	/* -----------------------------------------------------
-	 * Placeholder function which should be redefined by
-	 * each platform.
-	 * -----------------------------------------------------
+#define MPIDR_RES_BIT_MASK	0xff000000
+
+	/* ------------------------------------------------------------------
+	 *  int platform_get_core_pos(int mpidr)
+	 *  Returns the CPU index of the CPU specified by mpidr. It is
+	 *  defined when platform compatibility is disabled so that Trusted
+	 *  Firmware components, such as an SPD, that still use the old
+	 *  platform API continue to work. This API is deprecated and it
+	 *  assumes that the mpidr specified is that of a valid and present
+	 *  CPU. Instead, plat_my_core_pos() should be used for the CPU
+	 *  index of the current CPU and plat_core_pos_by_mpidr() for the
+	 *  CPU index of a CPU specified by its mpidr.
+	 * ------------------------------------------------------------------
 	 */
-func platform_check_mpidr
-	mov	x0, xzr
-	ret
+func_deprecated platform_get_core_pos
+	bic	x0, x0, #MPIDR_RES_BIT_MASK
+	mrs	x1, mpidr_el1
+	bic	x1, x1, #MPIDR_RES_BIT_MASK
+	cmp	x0, x1
+	beq	plat_my_core_pos
+	b	platform_core_pos_helper
+endfunc_deprecated platform_get_core_pos
+#endif
 
 	/* -----------------------------------------------------
 	 * Placeholder function which should be redefined by
@@ -69,6 +54,7 @@
 	 */
 func plat_report_exception
 	ret
+endfunc plat_report_exception
 
 	/* -----------------------------------------------------
 	 * Placeholder function which should be redefined by
@@ -78,6 +64,7 @@
 func plat_crash_console_init
 	mov	x0, #0
 	ret
+endfunc plat_crash_console_init
 
 	/* -----------------------------------------------------
 	 * Placeholder function which should be redefined by
@@ -86,14 +73,25 @@
 	 */
 func plat_crash_console_putc
 	ret
+endfunc plat_crash_console_putc
 
 	/* -----------------------------------------------------
 	 * Placeholder function which should be redefined by
-	 * each platform. This function should preserve x10.
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_crash_console_flush
+	ret
+endfunc plat_crash_console_flush
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform. This function should preserve x19 - x29.
 	 * -----------------------------------------------------
 	 */
 func plat_reset_handler
 	ret
+endfunc plat_reset_handler
 
 	/* -----------------------------------------------------
 	 * Placeholder function which should be redefined by
@@ -103,3 +101,52 @@
 	 */
 func plat_disable_acp
 	ret
+endfunc plat_disable_acp
+
+	/* -----------------------------------------------------
+	 * void bl1_plat_prepare_exit(entry_point_info_t *ep_info);
+	 * Called before exiting BL1. Default: do nothing
+	 * -----------------------------------------------------
+	 */
+func bl1_plat_prepare_exit
+	ret
+endfunc bl1_plat_prepare_exit
+
+	/* -----------------------------------------------------
+	 * void plat_error_handler(int err) __dead2;
+	 * Endless loop by default.
+	 * -----------------------------------------------------
+	 */
+func plat_error_handler
+	wfi
+	b	plat_error_handler
+endfunc plat_error_handler
+
+	/* -----------------------------------------------------
+	 * void plat_panic_handler(void) __dead2;
+	 * Endless loop by default.
+	 * -----------------------------------------------------
+	 */
+func plat_panic_handler
+	wfi
+	b	plat_panic_handler
+endfunc plat_panic_handler
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func bl2_plat_preload_setup
+	ret
+endfunc bl2_plat_preload_setup
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform.
+	 * -----------------------------------------------------
+	 */
+func plat_try_next_boot_source
+	mov	x0, #0
+	ret
+endfunc plat_try_next_boot_source
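
All of the handlers above are weak, so a platform can supply strong definitions without touching this file. A hedged C sketch of overriding the panic/error hooks is shown below; the prototypes follow the comments above (plat_error_handler(int) and plat_panic_handler(void), both __dead2), the ERROR() messages are purely illustrative, and __dead2/ERROR()/wfi() come from the project's common headers as used elsewhere in this patch:

#include <arch_helpers.h>
#include <debug.h>

/* Sketch: strong definitions replacing the weak wfi-loop defaults above. */
void __dead2 plat_error_handler(int err)
{
	ERROR("plat_error_handler: err=%d\n", err);	/* illustrative */
	while (1)
		wfi();
}

void __dead2 plat_panic_handler(void)
{
	ERROR("plat_panic_handler\n");			/* illustrative */
	while (1)
		wfi();
}
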
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
index 8eb1aa6..10323bf 100644
--- a/plat/common/aarch64/platform_mp_stack.S
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -1,42 +1,61 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
 #include <asm_macros.S>
+#include <assert_macros.S>
 #include <platform_def.h>
 
-
 	.local	platform_normal_stacks
-	.weak	platform_set_stack
+#if ENABLE_PLAT_COMPAT
+	.globl	plat_get_my_stack
+	.globl	plat_set_my_stack
 	.weak	platform_get_stack
+	.weak	platform_set_stack
+#else
+	.weak	plat_get_my_stack
+	.weak	plat_set_my_stack
+	.globl	platform_get_stack
+	.globl	platform_set_stack
+#endif /* ENABLE_PLAT_COMPAT */
 
+#if ENABLE_PLAT_COMPAT
+	/* ---------------------------------------------------------------------
+	 * When the compatibility layer is enabled, the new platform APIs
+	 * viz plat_get_my_stack() and plat_set_my_stack() need to be
+	 * defined using the previous APIs platform_get_stack() and
+	 * platform_set_stack(). Also we need to provide weak definitions
+	 * of platform_get_stack() and platform_set_stack() for the platforms
+	 * to reuse.
+	 * --------------------------------------------------------------------
+	 */
+
+	/* -----------------------------------------------------
+	 * unsigned long plat_get_my_stack ()
+	 *
+	 * For the current CPU, this function returns the stack
+	 * pointer for a stack allocated in device memory.
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_stack
+	mrs	x0, mpidr_el1
+	b	platform_get_stack
+endfunc plat_get_my_stack
+
+	/* -----------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For the current CPU, this function sets the stack
+	 * pointer to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_set_my_stack
+	mrs	x0, mpidr_el1
+	b	platform_set_stack
+endfunc plat_set_my_stack
 
 	/* -----------------------------------------------------
 	 * unsigned long platform_get_stack (unsigned long mpidr)
@@ -49,6 +68,7 @@
 	mov x10, x30 // lr
 	get_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
 	ret x10
+endfunc platform_get_stack
 
 	/* -----------------------------------------------------
 	 * void platform_set_stack (unsigned long mpidr)
@@ -62,6 +82,86 @@
 	bl  platform_get_stack
 	mov sp, x0
 	ret x9
+endfunc platform_set_stack
+
+#else
+	/* ---------------------------------------------------------------------
+	 * When the compatibility layer is disabled, the new platform APIs
+	 * viz plat_get_my_stack() and plat_set_my_stack() are supported by
+	 * the platform directly. The previous APIs platform_get_stack() and
+	 * platform_set_stack() are defined in terms of the new APIs, making
+	 * use of the fact that they are only ever invoked for the current CPU.
+	 * This is to enable components of Trusted Firmware like SPDs using the
+	 * old platform APIs to continue to work.
+	 * --------------------------------------------------------------------
+	 */
+
+	/* -------------------------------------------------------
+	 * unsigned long platform_get_stack (unsigned long mpidr)
+	 *
+	 * For the current CPU, this function returns the stack
+	 * pointer for a stack allocated in device memory. The
+	 * 'mpidr' should correspond to that of the current CPU.
+	 * This function is deprecated and plat_get_my_stack()
+	 * should be used instead.
+	 * -------------------------------------------------------
+	 */
+func_deprecated platform_get_stack
+#if ENABLE_ASSERTIONS
+	mrs	x1, mpidr_el1
+	cmp	x0, x1
+	ASM_ASSERT(eq)
+#endif
+	b	plat_get_my_stack
+endfunc_deprecated platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack (unsigned long mpidr)
+	 *
+	 * For the current CPU, this function sets the stack pointer
+	 * to a stack allocated in normal memory. The
+	 * 'mpidr' should correspond to that of the current CPU.
+	 * This function is deprecated and plat_set_my_stack()
+	 * should be used instead.
+	 * -----------------------------------------------------
+	 */
+func_deprecated platform_set_stack
+#if ENABLE_ASSERTIONS
+	mrs	x1, mpidr_el1
+	cmp	x0, x1
+	ASM_ASSERT(eq)
+#endif
+	b	plat_set_my_stack
+endfunc_deprecated platform_set_stack
+
+	/* -----------------------------------------------------
+	 * uintptr_t plat_get_my_stack ()
+	 *
+	 * For the current CPU, this function returns the stack
+	 * pointer for a stack allocated in device memory.
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_stack
+	mov	x10, x30 // lr
+	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+	ret	x10
+endfunc plat_get_my_stack
+
+	/* -----------------------------------------------------
+	 * void plat_set_my_stack ()
+	 *
+	 * For the current CPU, this function sets the stack
+	 * pointer to a stack allocated in normal memory.
+	 * -----------------------------------------------------
+	 */
+func plat_set_my_stack
+	mov	x9, x30 // lr
+	bl 	plat_get_my_stack
+	mov	sp, x0
+	ret	x9
+endfunc plat_set_my_stack
+
+#endif /* ENABLE_PLAT_COMPAT */
 
 	/* -----------------------------------------------------
 	 * Per-cpu stacks in normal memory. Each cpu gets a
@@ -69,4 +169,5 @@
 	 * -----------------------------------------------------
 	 */
 declare_stack platform_normal_stacks, tzfw_normal_stacks, \
-		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
+		PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT, \
+		CACHE_WRITEBACK_GRANULE
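
The declare_stack invocation now also passes CACHE_WRITEBACK_GRANULE, presumably so that no two cores' stacks share a writeback granule. A rough C picture of what gets reserved (sketch only; the array name is made up and the real reservation is done by the assembler macro):

#include <platform_def.h>

/* Sketch: one PLATFORM_STACK_SIZE stack per core, each aligned to the
 * cache writeback granule, placed in the tzfw_normal_stacks section. */
static unsigned char example_normal_stacks
	[PLATFORM_CORE_COUNT][PLATFORM_STACK_SIZE]
	__attribute__((aligned(CACHE_WRITEBACK_GRANULE),
		       section("tzfw_normal_stacks")));
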
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
index 73b74b2..a99a7cc 100644
--- a/plat/common/aarch64/platform_up_stack.S
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
@@ -34,33 +10,63 @@
 
 
 	.local	platform_normal_stacks
-	.globl	platform_set_stack
-	.globl	platform_get_stack
+	.weak	plat_set_my_stack
+	.weak	plat_get_my_stack
+	.weak	platform_set_stack
+	.weak	platform_get_stack
 
 	/* -----------------------------------------------------
-	 * unsigned long platform_get_stack (unsigned long)
+	 * uintptr_t plat_get_my_stack ()
 	 *
 	 * For cold-boot BL images, only the primary CPU needs a
 	 * stack. This function returns the stack pointer for a
 	 * stack allocated in device memory.
 	 * -----------------------------------------------------
 	 */
-func platform_get_stack
+func plat_get_my_stack
 	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
 	ret
+endfunc plat_get_my_stack
 
 	/* -----------------------------------------------------
-	 * void platform_set_stack (unsigned long)
+	 * void plat_set_my_stack ()
 	 *
 	 * For cold-boot BL images, only the primary CPU needs a
 	 * stack. This function sets the stack pointer to a stack
 	 * allocated in normal memory.
 	 * -----------------------------------------------------
 	 */
-func platform_set_stack
+func plat_set_my_stack
 	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
 	mov sp, x0
 	ret
+endfunc plat_set_my_stack
+
+	/* -----------------------------------------------------
+	 * unsigned long platform_get_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs a
+	 * stack. This function returns the stack pointer for a
+	 * stack allocated in device memory. This function
+	 * is deprecated.
+	 * -----------------------------------------------------
+	 */
+func_deprecated platform_get_stack
+	b	plat_get_my_stack
+endfunc_deprecated platform_get_stack
+
+	/* -----------------------------------------------------
+	 * void platform_set_stack ()
+	 *
+	 * For cold-boot BL images, only the primary CPU needs a
+	 * stack. This function sets the stack pointer to a stack
+	 * allocated in normal memory. This function is
+	 * deprecated.
+	 * -----------------------------------------------------
+	 */
+func_deprecated platform_set_stack
+	b	plat_set_my_stack
+endfunc_deprecated platform_set_stack
 
 	/* -----------------------------------------------------
 	 * Single cpu stack in normal memory.
@@ -69,4 +75,4 @@
 	 * -----------------------------------------------------
 	 */
 declare_stack platform_normal_stacks, tzfw_normal_stacks, \
-		PLATFORM_STACK_SIZE, 1
+		PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/plat_bl1_common.c b/plat/common/plat_bl1_common.c
new file mode 100644
index 0000000..b92cf54
--- /dev/null
+++ b/plat/common/plat_bl1_common.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+
+/*
+ * The following platform functions are weakly defined. They
+ * are default implementations that allow BL1 to compile in the
+ * absence of real definitions. Platforms may override them with
+ * more complete definitions.
+ */
+#pragma weak bl1_plat_get_next_image_id
+#pragma weak bl1_plat_set_ep_info
+#pragma weak bl1_plat_get_image_desc
+#pragma weak bl1_plat_fwu_done
+
+
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	/* BL2 load will be done by default. */
+	return BL2_IMAGE_ID;
+}
+
+void bl1_plat_set_ep_info(unsigned int image_id,
+		entry_point_info_t *ep_info)
+{
+
+}
+
+/*
+ * The following is the default definition, which always
+ * returns BL2 image details.
+ */
+image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
+{
+	static image_desc_t bl2_img_desc = BL2_IMAGE_DESC;
+	return &bl2_img_desc;
+}
+
+__dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved)
+{
+	while (1)
+		wfi();
+}
+
+/*
+ * Platforms must override this with a real definition.
+ */
+#pragma weak bl1_plat_mem_check
+
+int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
+		unsigned int flags)
+{
+	assert(0);
+	return -ENOMEM;
+}
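
bl1_plat_mem_check() is the one hook here whose default can never succeed, so a platform that uses BL1's firmware-update path must supply its own. A hedged sketch, assuming a hypothetical non-secure window described by PLAT_NS_IMAGE_BASE/PLAT_NS_IMAGE_SIZE (neither is part of this patch); a real port would usually also look at the flags argument:

#include <errno.h>
#include <stdint.h>

/* Hypothetical platform-defined NS window, for illustration only. */
#define PLAT_NS_IMAGE_BASE	0x80000000UL
#define PLAT_NS_IMAGE_SIZE	0x10000000UL

/* Sketch of a platform override: accept only ranges inside the window. */
int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
		unsigned int flags)
{
	if (mem_base + mem_size < mem_base)		/* reject wrap-around */
		return -ENOMEM;
	if (mem_base < PLAT_NS_IMAGE_BASE ||
	    (mem_base + mem_size) > (PLAT_NS_IMAGE_BASE + PLAT_NS_IMAGE_SIZE))
		return -ENOMEM;
	return 0;
}
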
diff --git a/plat/common/plat_gic.c b/plat/common/plat_gic.c
index f736e55..5363c92 100644
--- a/plat/common/plat_gic.c
+++ b/plat/common/plat_gic.c
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <arm_gic.h>
 
diff --git a/plat/common/plat_gicv2.c b/plat/common/plat_gicv2.c
new file mode 100644
index 0000000..05fabca
--- /dev/null
+++ b/plat/common/plat_gicv2.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+
+/*
+ * The following platform GIC functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_get_pending_interrupt_type
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_get_interrupt_type
+#pragma weak plat_ic_end_of_interrupt
+#pragma weak plat_interrupt_type_to_line
+
+#pragma weak plat_ic_get_running_priority
+#pragma weak plat_ic_is_spi
+#pragma weak plat_ic_is_ppi
+#pragma weak plat_ic_is_sgi
+#pragma weak plat_ic_get_interrupt_active
+#pragma weak plat_ic_enable_interrupt
+#pragma weak plat_ic_disable_interrupt
+#pragma weak plat_ic_set_interrupt_priority
+#pragma weak plat_ic_set_interrupt_type
+#pragma weak plat_ic_raise_el3_sgi
+#pragma weak plat_ic_set_spi_routing
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+	unsigned int id;
+
+	id = gicv2_get_pending_interrupt_id();
+	if (id == GIC_SPURIOUS_INTERRUPT)
+		return INTR_ID_UNAVAILABLE;
+
+	return id;
+}
+
+/*
+ * This function returns the type of the highest priority pending interrupt
+ * at the Interrupt controller. In the case of GICv2, the Highest Priority
+ * Pending interrupt register (`GICC_HPPIR`) is read to determine the id of
+ * the pending interrupt. The type of interrupt depends upon the id value
+ * as follows.
+ *   1. id < PENDING_G1_INTID (1022) is reported as an S-EL1 interrupt
+ *   2. id = PENDING_G1_INTID (1022) is reported as a Non-secure interrupt.
+ *   3. id = GIC_SPURIOUS_INTERRUPT (1023) is reported as an invalid interrupt
+ *           type.
+ */
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+	unsigned int id;
+
+	id = gicv2_get_pending_interrupt_type();
+
+	/* Assume that all secure interrupts are S-EL1 interrupts */
+	if (id < PENDING_G1_INTID) {
+#if GICV2_G0_FOR_EL3
+		return INTR_TYPE_EL3;
+#else
+		return INTR_TYPE_S_EL1;
+#endif
+	}
+
+	if (id == GIC_SPURIOUS_INTERRUPT)
+		return INTR_TYPE_INVAL;
+
+	return INTR_TYPE_NS;
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+	return gicv2_acknowledge_interrupt();
+}
+
+/*
+ * This function returns the type of the interrupt `id`, depending on how
+ * the interrupt has been configured in the interrupt controller
+ */
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+	unsigned int type;
+
+	type = gicv2_get_interrupt_group(id);
+
+	/* Assume that all secure interrupts are S-EL1 interrupts */
+	return type == GICV2_INTR_GROUP1 ? INTR_TYPE_NS :
+#if GICV2_G0_FOR_EL3
+		INTR_TYPE_EL3;
+#else
+		INTR_TYPE_S_EL1;
+#endif
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+	gicv2_end_of_interrupt(id);
+}
+
+/*
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. It lets the interrupt management framework determine, for a
+ * given interrupt type and security state, which line should be used in
+ * SCR_EL3 to control its routing to EL3. The interrupt line is represented
+ * as the bit position of the IRQ or FIQ bit in the SCR_EL3.
+ */
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+				uint32_t security_state)
+{
+	assert(type == INTR_TYPE_S_EL1 ||
+		       type == INTR_TYPE_EL3 ||
+		       type == INTR_TYPE_NS);
+
+	/* Non-secure interrupts are signaled on the IRQ line always */
+	if (type == INTR_TYPE_NS)
+		return __builtin_ctz(SCR_IRQ_BIT);
+
+	/*
+	 * Secure interrupts are signaled on the FIQ line if FIQ signaling
+	 * is enabled in the GIC, otherwise on the IRQ line.
+	 */
+	return ((gicv2_is_fiq_enabled()) ? __builtin_ctz(SCR_FIQ_BIT) :
+						__builtin_ctz(SCR_IRQ_BIT));
+}
+
+unsigned int plat_ic_get_running_priority(void)
+{
+	return gicv2_get_running_priority();
+}
+
+int plat_ic_is_spi(unsigned int id)
+{
+	return (id >= MIN_SPI_ID) && (id <= MAX_SPI_ID);
+}
+
+int plat_ic_is_ppi(unsigned int id)
+{
+	return (id >= MIN_PPI_ID) && (id < MIN_SPI_ID);
+}
+
+int plat_ic_is_sgi(unsigned int id)
+{
+	return (id >= MIN_SGI_ID) && (id < MIN_PPI_ID);
+}
+
+unsigned int plat_ic_get_interrupt_active(unsigned int id)
+{
+	return gicv2_get_interrupt_active(id);
+}
+
+void plat_ic_enable_interrupt(unsigned int id)
+{
+	gicv2_enable_interrupt(id);
+}
+
+void plat_ic_disable_interrupt(unsigned int id)
+{
+	gicv2_disable_interrupt(id);
+}
+
+void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority)
+{
+	gicv2_set_interrupt_priority(id, priority);
+}
+
+int plat_ic_has_interrupt_type(unsigned int type)
+{
+	switch (type) {
+#if GICV2_G0_FOR_EL3
+	case INTR_TYPE_EL3:
+#else
+	case INTR_TYPE_S_EL1:
+#endif
+	case INTR_TYPE_NS:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+void plat_ic_set_interrupt_type(unsigned int id, unsigned int type)
+{
+	int gicv2_type = 0;
+
+	/* Map canonical interrupt type to GICv2 type */
+	switch (type) {
+#if GICV2_G0_FOR_EL3
+	case INTR_TYPE_EL3:
+#else
+	case INTR_TYPE_S_EL1:
+#endif
+		gicv2_type = GICV2_INTR_GROUP0;
+		break;
+	case INTR_TYPE_NS:
+		gicv2_type = GICV2_INTR_GROUP1;
+		break;
+	default:
+		assert(0);
+	}
+
+	gicv2_set_interrupt_type(id, gicv2_type);
+}
+
+void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target)
+{
+#if GICV2_G0_FOR_EL3
+	int id;
+
+	/* Target must be a valid MPIDR in the system */
+	id = plat_core_pos_by_mpidr(target);
+	assert(id >= 0);
+
+	/* Verify that this is a secure SGI */
+	assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_EL3);
+
+	gicv2_raise_sgi(sgi_num, id);
+#else
+	assert(0);
+#endif
+}
+
+void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
+		u_register_t mpidr)
+{
+	int proc_num = 0;
+
+	switch (routing_mode) {
+	case INTR_ROUTING_MODE_PE:
+		proc_num = plat_core_pos_by_mpidr(mpidr);
+		assert(proc_num >= 0);
+		break;
+	case INTR_ROUTING_MODE_ANY:
+		/* Bit mask selecting all 8 CPUs as candidates */
+		proc_num = -1;
+		break;
+	default:
+		assert(0);
+	}
+
+	gicv2_set_spi_routing(id, proc_num);
+}
+
+void plat_ic_set_interrupt_pending(unsigned int id)
+{
+	gicv2_set_interrupt_pending(id);
+}
+
+void plat_ic_clear_interrupt_pending(unsigned int id)
+{
+	gicv2_clear_interrupt_pending(id);
+}
+
+unsigned int plat_ic_set_priority_mask(unsigned int mask)
+{
+	return gicv2_set_pmr(mask);
+}
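
Taken together, these helpers give generic code a GIC-agnostic way to configure and route interrupts. A short sketch of how boot code could set up one secure SPI using only the functions defined above, assuming the configuration where GICV2_G0_FOR_EL3 is not set (so secure interrupts are treated as S-EL1); the interrupt id and priority are illustrative:

#include <gic_common.h>
#include <gicv2.h>
#include <interrupt_mgmt.h>
#include <platform.h>

#define EXAMPLE_SPI_ID		42	/* hypothetical platform SPI */
#define EXAMPLE_SPI_PRIORITY	0x10	/* illustrative priority value */

static void example_configure_secure_spi(void)
{
	/* Group the SPI as a secure (S-EL1) interrupt. */
	plat_ic_set_interrupt_type(EXAMPLE_SPI_ID, INTR_TYPE_S_EL1);
	/* Give it a priority, let any PE handle it, then enable it. */
	plat_ic_set_interrupt_priority(EXAMPLE_SPI_ID, EXAMPLE_SPI_PRIORITY);
	plat_ic_set_spi_routing(EXAMPLE_SPI_ID, INTR_ROUTING_MODE_ANY, 0);
	plat_ic_enable_interrupt(EXAMPLE_SPI_ID);
}
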
diff --git a/plat/common/plat_gicv3.c b/plat/common/plat_gicv3.c
new file mode 100644
index 0000000..52ceb6a
--- /dev/null
+++ b/plat/common/plat_gicv3.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <gic_common.h>
+#include <gicv3.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+
+#ifdef IMAGE_BL31
+
+/*
+ * The following platform GIC functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_get_pending_interrupt_type
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_get_interrupt_type
+#pragma weak plat_ic_end_of_interrupt
+#pragma weak plat_interrupt_type_to_line
+
+#pragma weak plat_ic_get_running_priority
+#pragma weak plat_ic_is_spi
+#pragma weak plat_ic_is_ppi
+#pragma weak plat_ic_is_sgi
+#pragma weak plat_ic_get_interrupt_active
+#pragma weak plat_ic_enable_interrupt
+#pragma weak plat_ic_disable_interrupt
+#pragma weak plat_ic_set_interrupt_priority
+#pragma weak plat_ic_set_interrupt_type
+#pragma weak plat_ic_raise_el3_sgi
+#pragma weak plat_ic_set_spi_routing
+#pragma weak plat_ic_set_interrupt_pending
+#pragma weak plat_ic_clear_interrupt_pending
+
+CASSERT((INTR_TYPE_S_EL1 == INTR_GROUP1S) &&
+	(INTR_TYPE_NS == INTR_GROUP1NS) &&
+	(INTR_TYPE_EL3 == INTR_GROUP0), assert_interrupt_type_mismatch);
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+	unsigned int irqnr;
+
+	assert(IS_IN_EL3());
+	irqnr = gicv3_get_pending_interrupt_id();
+	return (gicv3_is_intr_id_special_identifier(irqnr)) ?
+				INTR_ID_UNAVAILABLE : irqnr;
+}
+
+/*
+ * This function returns the type of the highest priority pending interrupt
+ * at the Interrupt controller. In the case of GICv3, the Highest Priority
+ * Pending interrupt system register (`ICC_HPPIR0_EL1`) is read to determine
+ * the id of the pending interrupt. The type of interrupt depends upon the
+ * id value as follows.
+ *   1. id = PENDING_G1S_INTID (1020) is reported as an S-EL1 interrupt
+ *   2. id = PENDING_G1NS_INTID (1021) is reported as a Non-secure interrupt.
+ *   3. id = GIC_SPURIOUS_INTERRUPT (1023) is reported as an invalid interrupt
+ *           type.
+ *   4. All other interrupt IDs are reported as EL3 interrupts.
+ */
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+	unsigned int irqnr;
+
+	assert(IS_IN_EL3());
+	irqnr = gicv3_get_pending_interrupt_type();
+
+	switch (irqnr) {
+	case PENDING_G1S_INTID:
+		return INTR_TYPE_S_EL1;
+	case PENDING_G1NS_INTID:
+		return INTR_TYPE_NS;
+	case GIC_SPURIOUS_INTERRUPT:
+		return INTR_TYPE_INVAL;
+	default:
+		return INTR_TYPE_EL3;
+	}
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+	assert(IS_IN_EL3());
+	return gicv3_acknowledge_interrupt();
+}
+
+/*
+ * This function returns the type of the interrupt `id`, depending on how
+ * the interrupt has been configured in the interrupt controller
+ */
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+	assert(IS_IN_EL3());
+	return gicv3_get_interrupt_type(id, plat_my_core_pos());
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+	assert(IS_IN_EL3());
+	gicv3_end_of_interrupt(id);
+}
+
+/*
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. It lets the interrupt management framework determine, for a given
+ * interrupt type and security state, which line should be used in SCR_EL3 to
+ * control its routing to EL3. The interrupt line is represented as the bit
+ * position of the IRQ or FIQ bit in the SCR_EL3.
+ */
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+				uint32_t security_state)
+{
+	assert(type == INTR_TYPE_S_EL1 ||
+	       type == INTR_TYPE_EL3 ||
+	       type == INTR_TYPE_NS);
+
+	assert(sec_state_is_valid(security_state));
+	assert(IS_IN_EL3());
+
+	switch (type) {
+	case INTR_TYPE_S_EL1:
+		/*
+		 * The S-EL1 interrupts are signaled as IRQ in S-EL0/1 contexts
+		 * and as FIQ in the NS-EL0/1/2 contexts
+		 */
+		if (security_state == SECURE)
+			return __builtin_ctz(SCR_IRQ_BIT);
+		else
+			return __builtin_ctz(SCR_FIQ_BIT);
+	case INTR_TYPE_NS:
+		/*
+		 * Non-secure interrupts are signaled as FIQ in S-EL0/1
+		 * contexts and as IRQ in the NS-EL0/1/2 contexts.
+		 */
+		if (security_state == SECURE)
+			return __builtin_ctz(SCR_FIQ_BIT);
+		else
+			return __builtin_ctz(SCR_IRQ_BIT);
+	default:
+		assert(0);
+		/* Fall through in the release build */
+	case INTR_TYPE_EL3:
+		/*
+		 * The EL3 interrupts are signaled as FIQ in both S-EL0/1 and
+		 * NS-EL0/1/2 contexts
+		 */
+		return __builtin_ctz(SCR_FIQ_BIT);
+	}
+}
+
+unsigned int plat_ic_get_running_priority(void)
+{
+	return gicv3_get_running_priority();
+}
+
+int plat_ic_is_spi(unsigned int id)
+{
+	return (id >= MIN_SPI_ID) && (id <= MAX_SPI_ID);
+}
+
+int plat_ic_is_ppi(unsigned int id)
+{
+	return (id >= MIN_PPI_ID) && (id < MIN_SPI_ID);
+}
+
+int plat_ic_is_sgi(unsigned int id)
+{
+	return (id >= MIN_SGI_ID) && (id < MIN_PPI_ID);
+}
+
+unsigned int plat_ic_get_interrupt_active(unsigned int id)
+{
+	return gicv3_get_interrupt_active(id, plat_my_core_pos());
+}
+
+void plat_ic_enable_interrupt(unsigned int id)
+{
+	gicv3_enable_interrupt(id, plat_my_core_pos());
+}
+
+void plat_ic_disable_interrupt(unsigned int id)
+{
+	gicv3_disable_interrupt(id, plat_my_core_pos());
+}
+
+void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority)
+{
+	gicv3_set_interrupt_priority(id, plat_my_core_pos(), priority);
+}
+
+int plat_ic_has_interrupt_type(unsigned int type)
+{
+	assert((type == INTR_TYPE_EL3) || (type == INTR_TYPE_S_EL1) ||
+			(type == INTR_TYPE_NS));
+	return 1;
+}
+
+void plat_ic_set_interrupt_type(unsigned int id, unsigned int type)
+{
+	gicv3_set_interrupt_type(id, plat_my_core_pos(), type);
+}
+
+void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target)
+{
+	/* Target must be a valid MPIDR in the system */
+	assert(plat_core_pos_by_mpidr(target) >= 0);
+
+	/* Verify that this is a secure EL3 SGI */
+	assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_EL3);
+
+	gicv3_raise_secure_g0_sgi(sgi_num, target);
+}
+
+void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
+		u_register_t mpidr)
+{
+	unsigned int irm = 0;
+
+	switch (routing_mode) {
+	case INTR_ROUTING_MODE_PE:
+		assert(plat_core_pos_by_mpidr(mpidr) >= 0);
+		irm = GICV3_IRM_PE;
+		break;
+	case INTR_ROUTING_MODE_ANY:
+		irm = GICV3_IRM_ANY;
+		break;
+	default:
+		assert(0);
+	}
+
+	gicv3_set_spi_routing(id, irm, mpidr);
+}
+
+void plat_ic_set_interrupt_pending(unsigned int id)
+{
+	/* Disallow setting SGIs pending */
+	assert(id >= MIN_PPI_ID);
+	gicv3_set_interrupt_pending(id, plat_my_core_pos());
+}
+
+void plat_ic_clear_interrupt_pending(unsigned int id)
+{
+	/* Disallow clearing SGI pending state */
+	assert(id >= MIN_PPI_ID);
+	gicv3_clear_interrupt_pending(id, plat_my_core_pos());
+}
+
+unsigned int plat_ic_set_priority_mask(unsigned int mask)
+{
+	return gicv3_set_pmr(mask);
+}
+#endif
+#ifdef IMAGE_BL32
+
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_end_of_interrupt
+
+/* In AArch32, the secure group1 interrupts are targeted to Secure PL1 */
+#ifdef AARCH32
+#define IS_IN_EL1()	IS_IN_SECURE()
+#endif
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+	unsigned int irqnr;
+
+	assert(IS_IN_EL1());
+	irqnr = gicv3_get_pending_interrupt_id_sel1();
+	return (irqnr == GIC_SPURIOUS_INTERRUPT) ?
+				INTR_ID_UNAVAILABLE : irqnr;
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+	assert(IS_IN_EL1());
+	return gicv3_acknowledge_interrupt_sel1();
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+	assert(IS_IN_EL1());
+	gicv3_end_of_interrupt_sel1(id);
+}
+#endif
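
For the BL32 case only three helpers are provided, which is enough for a secure payload to drain its Group1 Secure interrupts. A minimal sketch built only on those helpers; handle_secure_interrupt() is a hypothetical placeholder:

#include <stdint.h>
#include <gicv3.h>
#include <interrupt_mgmt.h>
#include <platform.h>

/* Hypothetical payload-specific handler. */
static void handle_secure_interrupt(uint32_t id)
{
	(void)id;
}

/* Sketch: acknowledge/handle/EOI loop on top of the S-EL1 helpers above. */
static void example_bl32_interrupt_loop(void)
{
	uint32_t id;

	while (plat_ic_get_pending_interrupt_id() != INTR_ID_UNAVAILABLE) {
		id = plat_ic_acknowledge_interrupt();
		handle_secure_interrupt(id);
		plat_ic_end_of_interrupt(id);
	}
}
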
diff --git a/plat/common/plat_log_common.c b/plat/common/plat_log_common.c
new file mode 100644
index 0000000..30dcb12
--- /dev/null
+++ b/plat/common/plat_log_common.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+
+/* Allow platforms to override the log prefix string */
+#pragma weak plat_log_get_prefix
+
+static const char *prefix_str[] = {
+	"ERROR:   ", "NOTICE:  ", "WARNING: ", "INFO:    ", "VERBOSE: "};
+
+const char *plat_log_get_prefix(unsigned int log_level)
+{
+	if (log_level < LOG_LEVEL_ERROR)
+		log_level = LOG_LEVEL_ERROR;
+	else if (log_level > LOG_LEVEL_VERBOSE)
+		log_level = LOG_LEVEL_VERBOSE;
+
+	return prefix_str[(log_level/10) - 1];
+}
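
For reference, with the usual log level constants from debug.h (LOG_LEVEL_ERROR = 10 up to LOG_LEVEL_VERBOSE = 50), the expression (log_level/10) - 1 indexes straight into prefix_str[]; for example LOG_LEVEL_INFO (40) selects "INFO:    ". Since the symbol is weak, a platform can replace the prefixes entirely; a hedged sketch with a purely illustrative string:

#include <debug.h>

/* Sketch: a platform override that tags every line with a fixed name. */
const char *plat_log_get_prefix(unsigned int log_level)
{
	(void)log_level;
	return "[myplat] ";	/* illustrative prefix */
}
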
diff --git a/plat/common/plat_psci_common.c b/plat/common/plat_psci_common.c
new file mode 100644
index 0000000..95adb05
--- /dev/null
+++ b/plat/common/plat_psci_common.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <platform.h>
+#include <pmf.h>
+#include <psci.h>
+
+#if ENABLE_PSCI_STAT && ENABLE_PMF
+#pragma weak plat_psci_stat_accounting_start
+#pragma weak plat_psci_stat_accounting_stop
+#pragma weak plat_psci_stat_get_residency
+
+/* Number of ticks elapsed in one second for a 1 MHz signal */
+#define MHZ_TICKS_PER_SEC 1000000
+
+/* Following are used as ID's to capture time-stamp */
+#define PSCI_STAT_ID_ENTER_LOW_PWR		0
+#define PSCI_STAT_ID_EXIT_LOW_PWR		1
+#define PSCI_STAT_TOTAL_IDS			2
+
+PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID, PSCI_STAT_TOTAL_IDS,
+	PMF_STORE_ENABLE)
+
+/*
+ * This function calculates the stat residency in microseconds,
+ * taking into account the wrap-around condition.
+ */
+static u_register_t calc_stat_residency(unsigned long long pwrupts,
+	unsigned long long pwrdnts)
+{
+	/* The divisor to use to convert raw timestamp into microseconds. */
+	u_register_t residency_div;
+	u_register_t res;
+
+	/*
+	 * Calculate divisor so that it can be directly used to
+	 * convert time-stamp into microseconds.
+	 */
+	residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
+	assert(residency_div);
+
+	if (pwrupts < pwrdnts)
+		res = UINT64_MAX - pwrdnts + pwrupts;
+	else
+		res = pwrupts - pwrdnts;
+
+	return res / residency_div;
+}
+
+/*
+ * Capture timestamp before entering a low power state.
+ * No cache maintenance is required when capturing the timestamp.
+ * Cache maintenance may be needed when reading these timestamps.
+ */
+void plat_psci_stat_accounting_start(
+	__unused const psci_power_state_t *state_info)
+{
+	assert(state_info);
+	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+		PMF_NO_CACHE_MAINT);
+}
+
+/*
+ * Capture timestamp after exiting a low power state.
+ * No cache maintenance is required when capturing the timestamp.
+ * Cache maintenance may be needed when reading these timestamps.
+ */
+void plat_psci_stat_accounting_stop(
+	__unused const psci_power_state_t *state_info)
+{
+	assert(state_info);
+	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+		PMF_NO_CACHE_MAINT);
+}
+
+/*
+ * Calculate the residency for the given level and power state
+ * information.
+ */
+u_register_t plat_psci_stat_get_residency(unsigned int lvl,
+	const psci_power_state_t *state_info,
+	int last_cpu_idx)
+{
+	plat_local_state_t state;
+	unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
+	unsigned int pmf_flags;
+
+	assert(lvl >= PSCI_CPU_PWR_LVL && lvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info);
+	assert(last_cpu_idx >= 0 && last_cpu_idx <= PLATFORM_CORE_COUNT);
+
+	if (lvl == PSCI_CPU_PWR_LVL)
+		assert(last_cpu_idx == plat_my_core_pos());
+
+	/*
+	 * If power down is requested, then timestamp capture will
+	 * be with caches OFF.  Hence we have to do cache maintenance
+	 * when reading the timestamp.
+	 */
+	state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+	if (is_local_state_off(state)) {
+		pmf_flags = PMF_CACHE_MAINT;
+	} else {
+		assert(is_local_state_retn(state));
+		pmf_flags = PMF_NO_CACHE_MAINT;
+	}
+
+	PMF_GET_TIMESTAMP_BY_INDEX(psci_svc,
+		PSCI_STAT_ID_ENTER_LOW_PWR,
+		last_cpu_idx,
+		pmf_flags,
+		pwrdn_ts);
+
+	PMF_GET_TIMESTAMP_BY_INDEX(psci_svc,
+		PSCI_STAT_ID_EXIT_LOW_PWR,
+		plat_my_core_pos(),
+		pmf_flags,
+		pwrup_ts);
+
+	return calc_stat_residency(pwrup_ts, pwrdn_ts);
+}
+#endif /* ENABLE_PSCI_STAT && ENABLE_PMF */
+
+/*
+ * The PSCI generic code uses this API to let the platform participate in state
+ * coordination during a power management operation. It compares the platform
+ * specific local power states requested by each cpu for a given power domain
+ * and returns the coordinated target power state that the domain should
+ * enter. A platform assigns a number to a local power state. This default
+ * implementation assumes that the platform assigns these numbers in order of
+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y
+ * then X represents a shallower power state than Y. As a result, the
+ * coordinated target local power state for a power domain will be the minimum
+ * of the requested local power states.
+ */
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+	assert(ncpu);
+
+	do {
+		temp = *states++;
+		if (temp < target)
+			target = temp;
+	} while (--ncpu);
+
+	return target;
+}
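
Because the default coordination simply takes the minimum of the requested local states, a power domain only enters a low-power state once every CPU in it has requested at least that state. A small sketch exercising the rule; the level value 1 stands for a hypothetical cluster level and PSCI_LOCAL_STATE_RUN is the run state from psci.h:

#include <assert.h>
#include <platform.h>
#include <platform_def.h>
#include <psci.h>

/* Sketch: three CPUs request different local states for their cluster. */
static void example_coordination(void)
{
	const plat_local_state_t states[3] = {
		PLAT_MAX_OFF_STATE,	/* CPU0 asks for power-down */
		PLAT_MAX_RET_STATE,	/* CPU1 asks for retention */
		PSCI_LOCAL_STATE_RUN,	/* CPU2 is still running */
	};

	/* The minimum request wins, so the cluster stays in the run state. */
	assert(plat_get_target_pwr_state(1, states, 3) == PSCI_LOCAL_STATE_RUN);
}
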
diff --git a/plat/common/tbbr/plat_tbbr.c b/plat/common/tbbr/plat_tbbr.c
new file mode 100644
index 0000000..f5a4f31
--- /dev/null
+++ b/plat/common/tbbr/plat_tbbr.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <auth/auth_mod.h>
+#include <platform.h>
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+#include <string.h>
+
+/*
+ * Store a new non-volatile counter value. This implementation
+ * only allows updating of the platform's Trusted NV counter when a
+ * certificate protected by the Trusted NV counter is signed with
+ * the ROT key. This avoids a compromised secondary certificate from
+ * updating the platform's Trusted NV counter, which could lead to the
+ * platform becoming unusable. The function is suitable for all TBBR
+ * compliant platforms.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr2(void *cookie, const auth_img_desc_t *img_desc,
+		unsigned int nv_ctr)
+{
+	int trusted_nv_ctr;
+
+	assert(cookie != NULL);
+	assert(img_desc != NULL);
+
+	trusted_nv_ctr = strcmp(cookie, TRUSTED_FW_NVCOUNTER_OID) == 0;
+
+	/*
+	 * Only update the Trusted NV Counter if the certificate
+	 * has been signed with the ROT key. Non Trusted NV counter
+	 * updates are unconditional.
+	 */
+	if (!trusted_nv_ctr || img_desc->parent == NULL)
+		return plat_set_nv_ctr(cookie, nv_ctr);
+
+	/*
+	 * Trusted certificates not signed with the ROT key are not
+	 * allowed to update the Trusted NV Counter.
+	 */
+	return 1;
+}
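
The policy above boils down to three cases, sketched below with hypothetical certificate descriptors (in a real chain of trust these come from the platform's TBBR image and certificate descriptions, and the exact auth_img_desc_t layout is defined by auth_mod.h); the sketch assumes USE_TBBR_DEFS so the OIDs come from tbbr_oid.h:

#include <auth/auth_mod.h>
#include <platform.h>
#include <tbbr_oid.h>

/* Sketch: the three cases handled by plat_set_nv_ctr2(). */
static void example_nv_ctr_cases(void)
{
	static auth_img_desc_t rot_signed_cert = { .parent = NULL };
	static auth_img_desc_t secondary_cert = { .parent = &rot_signed_cert };

	/* Non-trusted counter: always forwarded to plat_set_nv_ctr(). */
	plat_set_nv_ctr2((void *)NON_TRUSTED_FW_NVCOUNTER_OID,
			 &secondary_cert, 5);

	/* Trusted counter, certificate signed with the ROT key: updated. */
	plat_set_nv_ctr2((void *)TRUSTED_FW_NVCOUNTER_OID,
			 &rot_signed_cert, 5);

	/* Trusted counter, secondary certificate: rejected (returns 1). */
	plat_set_nv_ctr2((void *)TRUSTED_FW_NVCOUNTER_OID,
			 &secondary_cert, 5);
}
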
diff --git a/plat/compat/aarch64/plat_helpers_compat.S b/plat/compat/aarch64/plat_helpers_compat.S
new file mode 100644
index 0000000..0c5551b
--- /dev/null
+++ b/plat/compat/aarch64/plat_helpers_compat.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+
+	.globl	plat_my_core_pos
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_get_my_entrypoint
+	.weak	platform_get_core_pos
+
+	/* -----------------------------------------------------
+	 *  Compatibility wrappers for new platform APIs.
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	b	platform_get_core_pos
+endfunc plat_my_core_pos
+
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	b	platform_is_primary_cpu
+endfunc plat_is_my_cpu_primary
+
+func plat_get_my_entrypoint
+	mrs	x0, mpidr_el1
+	b	platform_get_entrypoint
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 *  int platform_get_core_pos(int mpidr);
+	 *  With this function: CorePos = (ClusterId * 4) +
+	 *                                CoreId
+	 * -----------------------------------------------------
+	 */
+func platform_get_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc platform_get_core_pos
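
The LSR #6 trick works because Aff1 (the cluster id) sits in MPIDR bits [15:8]; shifting the masked cluster field right by 6 multiplies the cluster id by 4 before the core id is added. The same computation in C, as a sketch:

#include <arch.h>

/* Sketch: C rendering of CorePos = (ClusterId * 4) + CoreId.
 * For MPIDR 0x101 (cluster 1, core 1): (0x100 >> 6) + 1 = 5. */
static unsigned int example_core_pos(unsigned long mpidr)
{
	unsigned long core = mpidr & MPIDR_CPU_MASK;		/* Aff0 */
	unsigned long cluster = mpidr & MPIDR_CLUSTER_MASK;	/* Aff1 << 8 */

	return (unsigned int)(core + (cluster >> 6));
}
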
diff --git a/plat/compat/plat_compat.mk b/plat/compat/plat_compat.mk
new file mode 100644
index 0000000..af88542
--- /dev/null
+++ b/plat/compat/plat_compat.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
+  $(error "PSCI Compatibility mode can be enabled only if \
+				 PSCI_EXTENDED_STATE_ID is not set")
+endif
+
+ifneq (${ARCH}, aarch64)
+  $(error "PSCI Compatibility mode is only supported for AArch64 platforms")
+endif
+
+PLAT_BL_COMMON_SOURCES	+=	plat/compat/aarch64/plat_helpers_compat.S
+
+BL31_SOURCES		+=	plat/common/plat_psci_common.c		\
+				plat/compat/plat_pm_compat.c		\
+				plat/compat/plat_topology_compat.c
diff --git a/plat/compat/plat_pm_compat.c b/plat/compat/plat_pm_compat.c
new file mode 100644
index 0000000..6e40ad6
--- /dev/null
+++ b/plat/compat/plat_pm_compat.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * The platform hooks exported by the platform using the earlier version of
+ * platform interface
+ */
+const plat_pm_ops_t *pm_ops;
+
+/*
+ * The hooks exported by the compatibility layer
+ */
+static plat_psci_ops_t compat_psci_ops;
+
+/*
+ * The secure entry point to be used on warm reset.
+ */
+static unsigned long secure_entrypoint;
+
+/*
+ * This array stores the 'power_state' requests of each CPU during
+ * CPU_SUSPEND and SYSTEM_SUSPEND to support querying of state-ID
+ * by the platform.
+ */
+unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * The PSCI compatibility helper to parse the power state and populate the
+ * 'pwr_domain_state' for each power level. It is assumed that, when in
+ * compatibility mode, the PSCI generic layer needs to know only whether the
+ * affinity level will be OFF or in RETENTION; if the platform supports
+ * multiple power-down and retention states, they are taken care of within
+ * the platform layer.
+ ******************************************************************************/
+static int parse_power_state(unsigned int power_state,
+		    psci_power_state_t *req_state)
+{
+	int i;
+	int pstate = psci_get_pstate_type(power_state);
+	int aff_lvl = psci_get_pstate_pwrlvl(power_state);
+
+	if (aff_lvl > PLATFORM_MAX_AFFLVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * Set the CPU local state as retention and ignore the higher
+		 * levels. This allows the generic PSCI layer to invoke the
+		 * plat_psci_ops 'cpu_standby' hook, and the compatibility
+		 * layer then invokes the 'affinst_standby' handler with the
+		 * correct power_state parameter, preserving the expected
+		 * behavior.
+		 */
+		req_state->pwr_domain_state[0] =
+					PLAT_MAX_RET_STATE;
+	} else {
+		for (i = 0; i <= aff_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_MAX_OFF_STATE;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper to set the 'power_state' in
+ * psci_power_state_compat[] at index corresponding to the current core.
+ ******************************************************************************/
+static void set_psci_power_state_compat(unsigned int power_state)
+{
+	unsigned int my_core_pos = plat_my_core_pos();
+
+	psci_power_state_compat[my_core_pos] = power_state;
+	flush_dcache_range((uintptr_t) &psci_power_state_compat[my_core_pos],
+			sizeof(psci_power_state_compat[my_core_pos]));
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'validate_power_state'
+ * hook.
+ ******************************************************************************/
+static int validate_power_state_compat(unsigned int power_state,
+			    psci_power_state_t *req_state)
+{
+	int rc;
+	assert(req_state);
+
+	if (pm_ops->validate_power_state) {
+		rc = pm_ops->validate_power_state(power_state);
+		if (rc != PSCI_E_SUCCESS)
+			return rc;
+	}
+
+	/* Store the 'power_state' parameter for the current CPU. */
+	set_psci_power_state_compat(power_state);
+
+	return parse_power_state(power_state, req_state);
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t
+ * 'get_sys_suspend_power_state' hook.
+ ******************************************************************************/
+void get_sys_suspend_power_state_compat(psci_power_state_t *req_state)
+{
+	unsigned int power_state;
+	assert(req_state);
+
+	power_state = pm_ops->get_sys_suspend_power_state();
+
+	/* Store the 'power_state' parameter for the current CPU. */
+	set_psci_power_state_compat(power_state);
+
+	if (parse_power_state(power_state, req_state) != PSCI_E_SUCCESS)
+		assert(0);
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'validate_ns_entrypoint'
+ * hook.
+ ******************************************************************************/
+static int validate_ns_entrypoint_compat(uintptr_t ns_entrypoint)
+{
+	return pm_ops->validate_ns_entrypoint(ns_entrypoint);
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'affinst_standby' hook.
+ ******************************************************************************/
+static void cpu_standby_compat(plat_local_state_t cpu_state)
+{
+	unsigned int powerstate = psci_get_suspend_powerstate();
+
+	assert(powerstate != PSCI_INVALID_DATA);
+
+	pm_ops->affinst_standby(powerstate);
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'affinst_on' hook.
+ ******************************************************************************/
+static int pwr_domain_on_compat(u_register_t mpidr)
+{
+	int level, rc;
+
+	/*
+	 * The new PSCI framework does not hold the locks for higher level
+	 * power domain nodes when this hook is invoked. Hence figuring out the
+	 * target state of the parent power domains does not make much sense,
+	 * so we hard-code the state as PSCI_STATE_OFF for all the levels.
+	 * We expect the platform to perform the necessary CPU_ON operations
+	 * when the 'affinst_on' is invoked only for level 0.
+	 */
+	for (level = PLATFORM_MAX_AFFLVL; level >= 0; level--) {
+		rc = pm_ops->affinst_on((unsigned long)mpidr, secure_entrypoint,
+					level, PSCI_STATE_OFF);
+		if (rc != PSCI_E_SUCCESS)
+			break;
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'affinst_off' hook.
+ ******************************************************************************/
+static void pwr_domain_off_compat(const psci_power_state_t *target_state)
+{
+	int level;
+	unsigned int plat_state;
+
+	for (level = 0; level <= PLATFORM_MAX_AFFLVL; level++) {
+		plat_state = (is_local_state_run(
+				target_state->pwr_domain_state[level]) ?
+				PSCI_STATE_ON : PSCI_STATE_OFF);
+		pm_ops->affinst_off(level, plat_state);
+	}
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'affinst_suspend' hook.
+ ******************************************************************************/
+static void pwr_domain_suspend_compat(const psci_power_state_t *target_state)
+{
+	int level;
+	unsigned int plat_state;
+
+	for (level = 0; level <= psci_get_suspend_afflvl(); level++) {
+		plat_state = (is_local_state_run(
+				target_state->pwr_domain_state[level]) ?
+				PSCI_STATE_ON : PSCI_STATE_OFF);
+		pm_ops->affinst_suspend(secure_entrypoint, level, plat_state);
+	}
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'affinst_on_finish'
+ * hook.
+ ******************************************************************************/
+static void pwr_domain_on_finish_compat(const psci_power_state_t *target_state)
+{
+	int level;
+	unsigned int plat_state;
+
+	for (level = PLATFORM_MAX_AFFLVL; level >= 0; level--) {
+		plat_state = (is_local_state_run(
+				target_state->pwr_domain_state[level]) ?
+				PSCI_STATE_ON : PSCI_STATE_OFF);
+		pm_ops->affinst_on_finish(level, plat_state);
+	}
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t
+ * 'affinst_suspend_finish' hook.
+ ******************************************************************************/
+static void pwr_domain_suspend_finish_compat(
+				const psci_power_state_t *target_state)
+{
+	int level;
+	unsigned int plat_state;
+
+	for (level = psci_get_suspend_afflvl(); level >= 0; level--) {
+		plat_state = (is_local_state_run(
+				target_state->pwr_domain_state[level]) ?
+				PSCI_STATE_ON : PSCI_STATE_OFF);
+		pm_ops->affinst_suspend_finish(level, plat_state);
+	}
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'system_off' hook.
+ ******************************************************************************/
+static void __dead2 system_off_compat(void)
+{
+	pm_ops->system_off();
+}
+
+/*******************************************************************************
+ * The PSCI compatibility helper for plat_pm_ops_t 'system_reset' hook.
+ ******************************************************************************/
+static void __dead2 system_reset_compat(void)
+{
+	pm_ops->system_reset();
+}
+
+/*******************************************************************************
+ * Export the compatibility compat_psci_ops. The assumption made is that the
+ * power domains correspond to affinity instances on the platform.
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+				const plat_psci_ops_t **psci_ops)
+{
+	platform_setup_pm(&pm_ops);
+
+	secure_entrypoint = (unsigned long) sec_entrypoint;
+
+	/*
+	 * It is compulsory for the platform ports using the new porting
+	 * interface to export a hook to validate the power state parameter
+	 */
+	compat_psci_ops.validate_power_state = validate_power_state_compat;
+
+	/*
+	 * Populate the compatibility plat_psci_ops_t hooks if available
+	 */
+	if (pm_ops->validate_ns_entrypoint)
+		compat_psci_ops.validate_ns_entrypoint =
+				validate_ns_entrypoint_compat;
+
+	if (pm_ops->affinst_standby)
+		compat_psci_ops.cpu_standby = cpu_standby_compat;
+
+	if (pm_ops->affinst_on)
+		compat_psci_ops.pwr_domain_on = pwr_domain_on_compat;
+
+	if (pm_ops->affinst_off)
+		compat_psci_ops.pwr_domain_off = pwr_domain_off_compat;
+
+	if (pm_ops->affinst_suspend)
+		compat_psci_ops.pwr_domain_suspend = pwr_domain_suspend_compat;
+
+	if (pm_ops->affinst_on_finish)
+		compat_psci_ops.pwr_domain_on_finish =
+				pwr_domain_on_finish_compat;
+
+	if (pm_ops->affinst_suspend_finish)
+		compat_psci_ops.pwr_domain_suspend_finish =
+				pwr_domain_suspend_finish_compat;
+
+	if (pm_ops->system_off)
+		compat_psci_ops.system_off = system_off_compat;
+
+	if (pm_ops->system_reset)
+		compat_psci_ops.system_reset = system_reset_compat;
+
+	if (pm_ops->get_sys_suspend_power_state)
+		compat_psci_ops.get_sys_suspend_power_state =
+				get_sys_suspend_power_state_compat;
+
+	*psci_ops = &compat_psci_ops;
+	return 0;
+}
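
The only symbol a legacy platform has to export for this glue is platform_setup_pm(), which hands back its old-style plat_pm_ops_t. A heavily hedged sketch of that legacy side: the hook name and call shape follow the pwr_domain_on_compat() usage in this file, but the exact struct layout is version specific and the body is a placeholder.

#include <platform.h>
#include <psci.h>

/* Sketch of a legacy port's power-management glue; body is a placeholder. */
static int my_affinst_on(unsigned long mpidr, unsigned long sec_entrypoint,
			 unsigned int afflvl, unsigned int state)
{
	/* Only level 0 is expected to actually power the CPU on (see above). */
	return PSCI_E_SUCCESS;
}

static const plat_pm_ops_t my_legacy_pm_ops = {
	.affinst_on = my_affinst_on,
	/* ... remaining hooks the platform supports ... */
};

int platform_setup_pm(const plat_pm_ops_t **ops)
{
	*ops = &my_legacy_pm_ops;
	return 0;
}
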
diff --git a/plat/compat/plat_topology_compat.c b/plat/compat/plat_topology_compat.c
new file mode 100644
index 0000000..48d565c
--- /dev/null
+++ b/plat/compat/plat_topology_compat.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/* The power domain tree descriptor */
+static unsigned char power_domain_tree_desc
+				[PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1];
+
+/*******************************************************************************
+ * Simple routine to set the id of an affinity instance at a given level
+ * in the mpidr. The assumption is that the affinity level and the power
+ * domain level are the same.
+ ******************************************************************************/
+unsigned long mpidr_set_aff_inst(unsigned long mpidr,
+				 unsigned char aff_inst,
+				 int aff_lvl)
+{
+	unsigned long aff_shift;
+
+	assert(aff_lvl <= MPIDR_AFFLVL3);
+
+	/*
+	 * Decide the number of bits to shift by depending upon
+	 * the power level
+	 */
+	aff_shift = get_afflvl_shift(aff_lvl);
+
+	/* Clear the existing power instance & set the new one */
+	mpidr &= ~((unsigned long)MPIDR_AFFLVL_MASK << aff_shift);
+	mpidr |= (unsigned long)aff_inst << aff_shift;
+
+	return mpidr;
+}
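
The routine boils down to clearing one 8-bit affinity field and writing a new instance id into it; get_afflvl_shift() supplies the shift in the firmware. Below is a standalone illustration of the same arithmetic (not firmware code), assuming the conventional layout for affinity levels 0-2, i.e. Aff0 at bits [7:0], Aff1 at [15:8] and Aff2 at [23:16].

#include <assert.h>
#include <stdio.h>

/* Standalone demo: 8-bit affinity fields for levels 0-2. */
static unsigned long set_aff_inst(unsigned long mpidr, unsigned char inst,
				  int lvl)
{
	unsigned long shift = (unsigned long)lvl * 8;

	mpidr &= ~(0xffUL << shift);		/* clear the existing instance */
	mpidr |= (unsigned long)inst << shift;	/* write the new instance id */
	return mpidr;
}

int main(void)
{
	unsigned long mpidr = 0;

	mpidr = set_aff_inst(mpidr, 1, 1);	/* cluster 1 */
	mpidr = set_aff_inst(mpidr, 3, 0);	/* CPU 3 within it */
	assert(mpidr == 0x103);
	printf("mpidr = 0x%lx\n", mpidr);
	return 0;
}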
+
+/******************************************************************************
+ * This function uses insertion sort to arrange a given list of mpidrs in
+ * ascending order of the index returned by platform_get_core_pos().
+ *****************************************************************************/
+void sort_mpidr_by_cpu_idx(unsigned int aff_count, unsigned long mpidr_list[])
+{
+	int i, j;
+	unsigned long temp_mpidr;
+
+	for (i = 1; i < aff_count; i++) {
+		temp_mpidr = mpidr_list[i];
+
+		for (j = i;
+			j > 0 &&
+			platform_get_core_pos(mpidr_list[j-1]) >
+			platform_get_core_pos(temp_mpidr);
+			j--)
+			mpidr_list[j] = mpidr_list[j-1];
+
+		mpidr_list[j] = temp_mpidr;
+	}
+}
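
A usage sketch with illustrative values: assuming platform_get_core_pos() maps an MPIDR to cluster * 2 + cpu, a cluster-1-first list comes back ordered by CPU index.

static void sort_example(void)
{
	unsigned long mpidrs[] = { 0x101, 0x100, 0x001, 0x000 };

	sort_mpidr_by_cpu_idx(4, mpidrs);
	/* mpidrs is now { 0x000, 0x001, 0x100, 0x101 } */
}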
+
+/*******************************************************************************
+ * The compatibility routine to construct the power domain tree description.
+ * The assumption made is that the power domains correspond to affinity
+ * instances on the platform. This routine's aim is to traverse to the target
+ * affinity level and populate the number of siblings at that level in the
+ * 'power_domain_tree_desc' array. It uses the current affinity level to keep
+ * track of how many levels from the root of the tree have been traversed.
+ * If the current affinity level != target affinity level, then the platform
+ * is asked to return the number of children that each affinity instance has
+ * at the current affinity level. Traversal is then done for each child at the
+ * next lower level, i.e. current affinity level - 1.
+ *
+ * The power domain description must be constructed so that affinity
+ * instances containing CPUs with lower CPU indices are described first.
+ * Hence, when traversing the power domain levels, the list of mpidrs at
+ * each power domain level is sorted in ascending order of CPU index
+ * before the lower levels are recursively described.
+ *
+ * CAUTION: This routine assumes that affinity instance ids are allocated
+ * monotonically from 0 at each affinity level in an mpidr. If the
+ * platform breaks this assumption then this code will have to be
+ * reworked accordingly.
+ ******************************************************************************/
+static unsigned int init_pwr_domain_tree_desc(unsigned long mpidr,
+					unsigned int affmap_idx,
+					unsigned int cur_afflvl,
+					unsigned int tgt_afflvl)
+{
+	unsigned int ctr, aff_count;
+
+	/*
+	 * Temporary list to hold the MPIDR list at a particular power domain
+	 * level so as to sort them.
+	 */
+	unsigned long mpidr_list[PLATFORM_CORE_COUNT];
+
+	assert(cur_afflvl >= tgt_afflvl);
+
+	/*
+	 * Find the number of siblings at the current power level and
+	 * assert if there are none, because that would mean we have been
+	 * invoked with an invalid mpidr.
+	 */
+	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
+	assert(aff_count);
+
+	if (tgt_afflvl < cur_afflvl) {
+		for (ctr = 0; ctr < aff_count; ctr++) {
+			mpidr_list[ctr] = mpidr_set_aff_inst(mpidr, ctr,
+						cur_afflvl);
+		}
+
+		/* Need to sort mpidr list according to CPU index */
+		sort_mpidr_by_cpu_idx(aff_count, mpidr_list);
+		for (ctr = 0; ctr < aff_count; ctr++) {
+			affmap_idx = init_pwr_domain_tree_desc(mpidr_list[ctr],
+						       affmap_idx,
+						       cur_afflvl - 1,
+						       tgt_afflvl);
+		}
+	} else {
+		power_domain_tree_desc[affmap_idx++] = aff_count;
+	}
+	return affmap_idx;
+}
+
+
+/*******************************************************************************
+ * This function constructs the topology tree description at runtime
+ * and returns it. The assumption made is that the power domains correspond
+ * to affinity instances on the platform.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	int afflvl;
+	unsigned int affmap_idx;
+
+	/*
+	 * We assume that the platform allocates affinity instance ids from
+	 * 0 onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
+	 */
+	affmap_idx = 0;
+	for (afflvl = (int) PLATFORM_MAX_AFFLVL;
+			afflvl >= (int) MPIDR_AFFLVL0; afflvl--) {
+		affmap_idx = init_pwr_domain_tree_desc(FIRST_MPIDR,
+					       affmap_idx,
+					       PLATFORM_MAX_AFFLVL,
+					       (unsigned int) afflvl);
+	}
+
+	assert(affmap_idx == (PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1));
+
+	return power_domain_tree_desc;
+}
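
As a worked example with illustrative values only: consider a hypothetical platform with PLATFORM_MAX_AFFLVL = 1, two clusters and four CPUs per cluster, so PLATFORM_CORE_COUNT = 8 and PLATFORM_NUM_AFFS = 10. The legacy hooks could look roughly as below (prototypes reconstructed from the call sites in this file); with them, the routines above fill power_domain_tree_desc[] with { 2, 4, 4 }: the cluster count first, then the CPU count of each cluster in ascending order of CPU index.

unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
{
	/* Two clusters at level 1, four CPUs under each cluster at level 0 */
	return (aff_lvl == MPIDR_AFFLVL1) ? 2 : 4;
}

unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
{
	/* Counterpart of the PSCI_AFF_ABSENT check used below */
	return PSCI_AFF_PRESENT;
}

unsigned int platform_get_core_pos(unsigned long mpidr)
{
	/* Linear CPU index: cluster * 4 + cpu */
	return (unsigned int)((((mpidr >> 8) & 0xff) * 4) + (mpidr & 0xff));
}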
+
+/******************************************************************************
+ * The compatibility helper function for plat_core_pos_by_mpidr(). It
+ * validates the 'mpidr' by making sure it is within acceptable bounds for
+ * the platform, and queries the platform layer to check whether the CPU it
+ * specifies is present. If present, it returns the index of the
+ * corresponding core; otherwise it returns -1.
+ *****************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned long shift, aff_inst;
+	int i;
+
+	/* Ignore the Reserved bits and U bit in MPIDR */
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Check if any affinity field higher than
+	 * the PLATFORM_MAX_AFFLVL is set.
+	 */
+	shift = get_afflvl_shift(PLATFORM_MAX_AFFLVL + 1);
+	if (mpidr >> shift)
+		return -1;
+
+	for (i = PLATFORM_MAX_AFFLVL; i >= 0; i--) {
+		shift = get_afflvl_shift(i);
+		aff_inst = ((mpidr &
+			((unsigned long)MPIDR_AFFLVL_MASK << shift)) >> shift);
+		if (aff_inst >= plat_get_aff_count(i, mpidr))
+			return -1;
+	}
+
+	if (plat_get_aff_state(0, mpidr) == PSCI_AFF_ABSENT)
+		return -1;
+
+	return platform_get_core_pos(mpidr);
+}
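
The validation above can be traced with a standalone toy model. The sketch below assumes an AArch64-style affinity mask (Aff3 at bits [39:32], Aff2/Aff1/Aff0 in the low 24 bits) and the same hypothetical two-cluster, four-CPU-per-cluster platform used earlier; it illustrates the checks and is not the firmware code.

#include <assert.h>

static unsigned int toy_aff_count(int lvl)
{
	return (lvl == 1) ? 2 : 4;	/* 2 clusters, 4 CPUs each */
}

static int toy_core_pos_by_mpidr(unsigned long long mpidr)
{
	int lvl;

	mpidr &= 0xff00ffffffULL;	/* keep only the affinity fields */
	if (mpidr >> 16)		/* anything above Aff1 set? */
		return -1;

	for (lvl = 1; lvl >= 0; lvl--) {
		unsigned long long inst = (mpidr >> (8 * lvl)) & 0xff;

		if (inst >= toy_aff_count(lvl))
			return -1;
	}
	return (int)((((mpidr >> 8) & 0xff) * 4) + (mpidr & 0xff));
}

int main(void)
{
	assert(toy_core_pos_by_mpidr(0x0102) == 6);	/* cluster 1, CPU 2 */
	assert(toy_core_pos_by_mpidr(0x80000001ULL) == 1); /* high bits masked off */
	assert(toy_core_pos_by_mpidr(0x0004) == -1);	/* CPU 4 does not exist */
	assert(toy_core_pos_by_mpidr(0x030000) == -1);	/* Aff2 out of range */
	return 0;
}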
diff --git a/plat/fvp/aarch64/fvp_common.c b/plat/fvp/aarch64/fvp_common.c
deleted file mode 100644
index d89e1e6..0000000
--- a/plat/fvp/aarch64/fvp_common.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <bl_common.h>
-#include <cci400.h>
-#include <debug.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <plat_config.h>
-#include <xlat_tables.h>
-#include "../fvp_def.h"
-
-/*******************************************************************************
- * plat_config holds the characteristics of the differences between the three
- * FVP platforms (Base, A53_A57 & Foundation). It will be populated during cold
- * boot at each boot stage by the primary before enabling the MMU (to allow cci
- * configuration) & used thereafter. Each BL will have its own copy to allow
- * independent operation.
- ******************************************************************************/
-plat_config_t plat_config;
-
-#define MAP_SHARED_RAM	MAP_REGION_FLAT(FVP_SHARED_MEM_BASE,		\
-					FVP_SHARED_MEM_SIZE,		\
-					MT_MEMORY | MT_RW | MT_SECURE)
-
-#define MAP_FLASH0	MAP_REGION_FLAT(FLASH0_BASE,			\
-					FLASH0_SIZE,			\
-					MT_MEMORY | MT_RO | MT_SECURE)
-
-#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
-					DEVICE0_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_DEVICE1	MAP_REGION_FLAT(DEVICE1_BASE,			\
-					DEVICE1_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_DRAM1_NS	MAP_REGION_FLAT(DRAM1_NS_BASE,			\
-					DRAM1_NS_SIZE,			\
-					MT_MEMORY | MT_RW | MT_NS)
-
-#define MAP_TSP_SEC_MEM	MAP_REGION_FLAT(TSP_SEC_MEM_BASE,		\
-					TSP_SEC_MEM_SIZE,		\
-					MT_MEMORY | MT_RW | MT_SECURE)
-
-/*
- * Table of regions for various BL stages to map using the MMU.
- * This doesn't include TZRAM as the 'mem_layout' argument passed to
- * configure_mmu_elx() will give the available subset of that,
- */
-#if IMAGE_BL1
-const mmap_region_t fvp_mmap[] = {
-	MAP_SHARED_RAM,
-	MAP_FLASH0,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	{0}
-};
-#endif
-#if IMAGE_BL2
-const mmap_region_t fvp_mmap[] = {
-	MAP_SHARED_RAM,
-	MAP_FLASH0,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	MAP_DRAM1_NS,
-	MAP_TSP_SEC_MEM,
-	{0}
-};
-#endif
-#if IMAGE_BL31
-const mmap_region_t fvp_mmap[] = {
-	MAP_SHARED_RAM,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	MAP_TSP_SEC_MEM,
-	{0}
-};
-#endif
-#if IMAGE_BL32
-const mmap_region_t fvp_mmap[] = {
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	{0}
-};
-#endif
-
-/* Array of secure interrupts to be configured by the gic driver */
-const unsigned int irq_sec_array[] = {
-	IRQ_TZ_WDOG,
-	IRQ_SEC_PHY_TIMER,
-	IRQ_SEC_SGI_0,
-	IRQ_SEC_SGI_1,
-	IRQ_SEC_SGI_2,
-	IRQ_SEC_SGI_3,
-	IRQ_SEC_SGI_4,
-	IRQ_SEC_SGI_5,
-	IRQ_SEC_SGI_6,
-	IRQ_SEC_SGI_7
-};
-
-const unsigned int num_sec_irqs = sizeof(irq_sec_array) /
-	sizeof(irq_sec_array[0]);
-
-/*******************************************************************************
- * Macro generating the code for the function setting up the pagetables as per
- * the platform memory map & initialize the mmu, for the given exception level
- ******************************************************************************/
-#if USE_COHERENT_MEM
-#define DEFINE_CONFIGURE_MMU_EL(_el)				\
-	void fvp_configure_mmu_el##_el(unsigned long total_base,	\
-				   unsigned long total_size,		\
-				   unsigned long ro_start,		\
-				   unsigned long ro_limit,		\
-				   unsigned long coh_start,		\
-				   unsigned long coh_limit)		\
-	{								\
-		mmap_add_region(total_base, total_base,			\
-				total_size,				\
-				MT_MEMORY | MT_RW | MT_SECURE);		\
-		mmap_add_region(ro_start, ro_start,			\
-				ro_limit - ro_start,			\
-				MT_MEMORY | MT_RO | MT_SECURE);		\
-		mmap_add_region(coh_start, coh_start,			\
-				coh_limit - coh_start,			\
-				MT_DEVICE | MT_RW | MT_SECURE);		\
-		mmap_add(fvp_mmap);					\
-		init_xlat_tables();					\
-									\
-		enable_mmu_el##_el(0);					\
-	}
-#else
-#define DEFINE_CONFIGURE_MMU_EL(_el)				\
-	void fvp_configure_mmu_el##_el(unsigned long total_base,	\
-				   unsigned long total_size,		\
-				   unsigned long ro_start,		\
-				   unsigned long ro_limit)		\
-	{								\
-		mmap_add_region(total_base, total_base,			\
-				total_size,				\
-				MT_MEMORY | MT_RW | MT_SECURE);		\
-		mmap_add_region(ro_start, ro_start,			\
-				ro_limit - ro_start,			\
-				MT_MEMORY | MT_RO | MT_SECURE);		\
-		mmap_add(fvp_mmap);					\
-		init_xlat_tables();					\
-									\
-		enable_mmu_el##_el(0);					\
-	}
-#endif
-
-/* Define EL1 and EL3 variants of the function initialising the MMU */
-DEFINE_CONFIGURE_MMU_EL(1)
-DEFINE_CONFIGURE_MMU_EL(3)
-
-/*******************************************************************************
- * A single boot loader stack is expected to work on both the Foundation FVP
- * models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
- * SYS_ID register provides a mechanism for detecting the differences between
- * these platforms. This information is stored in a per-BL array to allow the
- * code to take the correct path.Per BL platform configuration.
- ******************************************************************************/
-int fvp_config_setup(void)
-{
-	unsigned int rev, hbi, bld, arch, sys_id;
-
-	sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
-	rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
-	hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
-	bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
-	arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK;
-
-	if (arch != ARCH_MODEL) {
-		ERROR("This firmware is for FVP models\n");
-		panic();
-	}
-
-	/*
-	 * The build field in the SYS_ID tells which variant of the GIC
-	 * memory is implemented by the model.
-	 */
-	switch (bld) {
-	case BLD_GIC_VE_MMAP:
-		plat_config.gicd_base = VE_GICD_BASE;
-		plat_config.gicc_base = VE_GICC_BASE;
-		plat_config.gich_base = VE_GICH_BASE;
-		plat_config.gicv_base = VE_GICV_BASE;
-		break;
-	case BLD_GIC_A53A57_MMAP:
-		plat_config.gicd_base = BASE_GICD_BASE;
-		plat_config.gicc_base = BASE_GICC_BASE;
-		plat_config.gich_base = BASE_GICH_BASE;
-		plat_config.gicv_base = BASE_GICV_BASE;
-		break;
-	default:
-		ERROR("Unsupported board build %x\n", bld);
-		panic();
-	}
-
-	/*
-	 * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
-	 * for the Foundation FVP.
-	 */
-	switch (hbi) {
-	case HBI_FOUNDATION:
-		plat_config.max_aff0 = 4;
-		plat_config.max_aff1 = 1;
-		plat_config.flags = 0;
-
-		/*
-		 * Check for supported revisions of Foundation FVP
-		 * Allow future revisions to run but emit warning diagnostic
-		 */
-		switch (rev) {
-		case REV_FOUNDATION_V2_0:
-		case REV_FOUNDATION_V2_1:
-			break;
-		default:
-			WARN("Unrecognized Foundation FVP revision %x\n", rev);
-			break;
-		}
-		break;
-	case HBI_FVP_BASE:
-		plat_config.max_aff0 = 4;
-		plat_config.max_aff1 = 2;
-		plat_config.flags |= CONFIG_BASE_MMAP | CONFIG_HAS_CCI |
-			CONFIG_HAS_TZC;
-
-		/*
-		 * Check for supported revisions
-		 * Allow future revisions to run but emit warning diagnostic
-		 */
-		switch (rev) {
-		case REV_FVP_BASE_V0:
-			break;
-		default:
-			WARN("Unrecognized Base FVP revision %x\n", rev);
-			break;
-		}
-		break;
-	default:
-		ERROR("Unsupported board HBI number 0x%x\n", hbi);
-		panic();
-	}
-
-	return 0;
-}
-
-unsigned long plat_get_ns_image_entrypoint(void)
-{
-	return NS_IMAGE_OFFSET;
-}
-
-uint64_t plat_get_syscnt_freq(void)
-{
-	uint64_t counter_base_frequency;
-
-	/* Read the frequency from Frequency modes table */
-	counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
-
-	/* The first entry of the frequency modes table must not be 0 */
-	if (counter_base_frequency == 0)
-		panic();
-
-	return counter_base_frequency;
-}
-
-void fvp_cci_init(void)
-{
-	/*
-	 * Initialize CCI-400 driver
-	 */
-	if (plat_config.flags & CONFIG_HAS_CCI)
-		cci_init(CCI400_BASE,
-			CCI400_SL_IFACE3_CLUSTER_IX,
-			CCI400_SL_IFACE4_CLUSTER_IX);
-}
-
-void fvp_cci_enable(void)
-{
-	/*
-	 * Enable CCI-400 coherency for this cluster. No need
-	 * for locks as no other cpu is active at the
-	 * moment
-	 */
-	if (plat_config.flags & CONFIG_HAS_CCI)
-		cci_enable_cluster_coherency(read_mpidr());
-}
-
-void fvp_gic_init(void)
-{
-	arm_gic_init(plat_config.gicc_base,
-		plat_config.gicd_base,
-		BASE_GICR_BASE,
-		irq_sec_array,
-		num_sec_irqs);
-}
-
-
-/*******************************************************************************
- * Gets SPSR for BL32 entry
- ******************************************************************************/
-uint32_t fvp_get_spsr_for_bl32_entry(void)
-{
-	/*
-	 * The Secure Payload Dispatcher service is responsible for
-	 * setting the SPSR prior to entry into the BL32 image.
-	 */
-	return 0;
-}
-
-/*******************************************************************************
- * Gets SPSR for BL33 entry
- ******************************************************************************/
-uint32_t fvp_get_spsr_for_bl33_entry(void)
-{
-	unsigned long el_status;
-	unsigned int mode;
-	uint32_t spsr;
-
-	/* Figure out what mode we enter the non-secure world in */
-	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
-	el_status &= ID_AA64PFR0_ELX_MASK;
-
-	if (el_status)
-		mode = MODE_EL2;
-	else
-		mode = MODE_EL1;
-
-	/*
-	 * TODO: Consider the possibility of specifying the SPSR in
-	 * the FIP ToC and allowing the platform to have a say as
-	 * well.
-	 */
-	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-	return spsr;
-}
diff --git a/plat/fvp/aarch64/fvp_helpers.S b/plat/fvp/aarch64/fvp_helpers.S
deleted file mode 100644
index e678b43..0000000
--- a/plat/fvp/aarch64/fvp_helpers.S
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <bl_common.h>
-#include <gic_v2.h>
-#include <platform_def.h>
-#include <pl011.h>
-#include "../drivers/pwrc/fvp_pwrc.h"
-
-	.globl	platform_get_entrypoint
-	.globl	plat_secondary_cold_boot_setup
-	.globl	platform_mem_init
-	.globl	plat_report_exception
-	.globl	platform_is_primary_cpu
-	.globl	plat_crash_console_init
-	.globl	plat_crash_console_putc
-
-	.macro	fvp_choose_gicmmap  param1, param2, x_tmp, w_tmp, res
-	ldr	\x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
-	ldr	\w_tmp, [\x_tmp]
-	ubfx	\w_tmp, \w_tmp, #SYS_ID_BLD_SHIFT, #SYS_ID_BLD_LENGTH
-	cmp	\w_tmp, #BLD_GIC_VE_MMAP
-	csel	\res, \param1, \param2, eq
-	.endm
-
-	/* -----------------------------------------------------
-	 * void plat_secondary_cold_boot_setup (void);
-	 *
-	 * This function performs any platform specific actions
-	 * needed for a secondary cpu after a cold reset e.g
-	 * mark the cpu's presence, mechanism to place it in a
-	 * holding pen etc.
-	 * TODO: Should we read the PSYS register to make sure
-	 * that the request has gone through.
-	 * -----------------------------------------------------
-	 */
-func plat_secondary_cold_boot_setup
-	/* ---------------------------------------------
-	 * Power down this cpu.
-	 * TODO: Do we need to worry about powering the
-	 * cluster down as well here. That will need
-	 * locks which we won't have unless an elf-
-	 * loader zeroes out the zi section.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	ldr	x1, =PWRC_BASE
-	str	w0, [x1, #PPOFFR_OFF]
-
-	/* ---------------------------------------------
-	 * Deactivate the gic cpu interface as well
-	 * ---------------------------------------------
-	 */
-	ldr	x0, =VE_GICC_BASE
-	ldr	x1, =BASE_GICC_BASE
-	fvp_choose_gicmmap	x0, x1, x2, w2, x1
-	mov	w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
-	orr	w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
-	str	w0, [x1, #GICC_CTLR]
-
-	/* ---------------------------------------------
-	 * There is no sane reason to come out of this
-	 * wfi so panic if we do. This cpu will be pow-
-	 * ered on and reset by the cpu_on pm api
-	 * ---------------------------------------------
-	 */
-	dsb	sy
-	wfi
-cb_panic:
-	b	cb_panic
-
-
-	/* -----------------------------------------------------
-	 * void platform_get_entrypoint (unsigned int mpid);
-	 *
-	 * Main job of this routine is to distinguish between
-	 * a cold and warm boot.
-	 * On a cold boot the secondaries first wait for the
-	 * platform to be initialized after which they are
-	 * hotplugged in. The primary proceeds to perform the
-	 * platform initialization.
-	 * On a warm boot, each cpu jumps to the address in its
-	 * mailbox.
-	 *
-	 * TODO: Not a good idea to save lr in a temp reg
-	 * TODO: PSYSR is a common register and should be
-	 * 	accessed using locks. Since its not possible
-	 * 	to use locks immediately after a cold reset
-	 * 	we are relying on the fact that after a cold
-	 * 	reset all cpus will read the same WK field
-	 * -----------------------------------------------------
-	 */
-func platform_get_entrypoint
-	mov	x9, x30 // lr
-	mov	x2, x0
-	ldr	x1, =PWRC_BASE
-	str	w2, [x1, #PSYSR_OFF]
-	ldr	w2, [x1, #PSYSR_OFF]
-	ubfx	w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_MASK
-	cmp	w2, #WKUP_PPONR
-	beq	warm_reset
-	cmp	w2, #WKUP_GICREQ
-	beq	warm_reset
-	mov	x0, #0
-	b	exit
-warm_reset:
-	/* ---------------------------------------------
-	 * A per-cpu mailbox is maintained in the tru-
-	 * sted DRAM. Its flushed out of the caches
-	 * after every update using normal memory so
-	 * its safe to read it here with SO attributes
-	 * ---------------------------------------------
-	 */
-	ldr	x10, =MBOX_BASE
-	bl	platform_get_core_pos
-	lsl	x0, x0, #CACHE_WRITEBACK_SHIFT
-	ldr	x0, [x10, x0]
-	cbz	x0, _panic
-exit:
-	ret	x9
-_panic:	b	_panic
-
-
-	/* -----------------------------------------------------
-	 * void platform_mem_init (void);
-	 *
-	 * Zero out the mailbox registers in the shared memory.
-	 * The mmu is turned off right now and only the primary can
-	 * ever execute this code. Secondaries will read the
-	 * mailboxes using SO accesses. In short, BL31 will
-	 * update the mailboxes after mapping the tzdram as
-	 * normal memory. It will flush its copy after update.
-	 * BL1 will always read the mailboxes with the MMU off
-	 * -----------------------------------------------------
-	 */
-func platform_mem_init
-	ldr	x0, =MBOX_BASE
-	mov	w1, #PLATFORM_CORE_COUNT
-loop:
-	str	xzr, [x0], #CACHE_WRITEBACK_GRANULE
-	subs	w1, w1, #1
-	b.gt	loop
-	ret
-
-	/* ---------------------------------------------
-	 * void plat_report_exception(unsigned int type)
-	 * Function to report an unhandled exception
-	 * with platform-specific means.
-	 * On FVP platform, it updates the LEDs
-	 * to indicate where we are
-	 * ---------------------------------------------
-	 */
-func plat_report_exception
-	mrs	x1, CurrentEl
-	lsr	x1, x1, #MODE_EL_SHIFT
-	lsl	x1, x1, #SYS_LED_EL_SHIFT
-	lsl	x0, x0, #SYS_LED_EC_SHIFT
-	mov	x2, #(SECURE << SYS_LED_SS_SHIFT)
-	orr	x0, x0, x2
-	orr	x0, x0, x1
-	mov	x1, #VE_SYSREGS_BASE
-	add	x1, x1, #V2M_SYS_LED
-	str	w0, [x1]
-	ret
-
-func platform_is_primary_cpu
-	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
-	cmp	x0, #FVP_PRIMARY_CPU
-	cset	x0, eq
-	ret
-
-	/* Define a crash console for the plaform */
-#define FVP_CRASH_CONSOLE_BASE		PL011_UART1_BASE
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_init(void)
-	 * Function to initialize the crash console
-	 * without a C Runtime to print crash report.
-	 * Clobber list : x0, x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_init
-	mov_imm	x0, FVP_CRASH_CONSOLE_BASE
-	mov_imm	x1, PL011_UART1_CLK_IN_HZ
-	mov_imm	x2, PL011_BAUDRATE
-	b	console_core_init
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_putc(int c)
-	 * Function to print a character on the crash
-	 * console without a C Runtime.
-	 * Clobber list : x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_putc
-	mov_imm	x1, FVP_CRASH_CONSOLE_BASE
-	b	console_core_putc
diff --git a/plat/fvp/bl1_fvp_setup.c b/plat/fvp/bl1_fvp_setup.c
deleted file mode 100644
index 4b421d7..0000000
--- a/plat/fvp/bl1_fvp_setup.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <debug.h>
-#include <console.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include "../../bl1/bl1_private.h"
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-#if USE_COHERENT_MEM
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL1_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL1_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/* Data structure which holds the extents of the trusted SRAM for BL1*/
-static meminfo_t bl1_tzram_layout;
-
-meminfo_t *bl1_plat_sec_mem_layout(void)
-{
-	return &bl1_tzram_layout;
-}
-
-/*******************************************************************************
- * Perform any BL1 specific platform actions.
- ******************************************************************************/
-void bl1_early_platform_setup(void)
-{
-	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
-
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Allow BL1 to see the whole Trusted RAM */
-	bl1_tzram_layout.total_base = FVP_TRUSTED_SRAM_BASE;
-	bl1_tzram_layout.total_size = FVP_TRUSTED_SRAM_SIZE;
-
-	/* Calculate how much RAM BL1 is using and how much remains free */
-	bl1_tzram_layout.free_base = FVP_TRUSTED_SRAM_BASE;
-	bl1_tzram_layout.free_size = FVP_TRUSTED_SRAM_SIZE;
-	reserve_mem(&bl1_tzram_layout.free_base,
-		    &bl1_tzram_layout.free_size,
-		    BL1_RAM_BASE,
-		    bl1_size);
-
-	/* Initialize the platform config for future decision making */
-	fvp_config_setup();
-}
-
-/*******************************************************************************
- * Function which will evaluate how much of the trusted ram has been gobbled
- * up by BL1 and return the base and size of whats available for loading BL2.
- * Its called after coherency and the MMU have been turned on.
- ******************************************************************************/
-void bl1_platform_setup(void)
-{
-	/* Initialise the IO layer and register platform IO devices */
-	fvp_io_setup();
-}
-
-
-/*******************************************************************************
- * Perform the very early platform specific architecture setup here. At the
- * moment this only does basic initialization. Later architectural setup
- * (bl1_arch_setup()) does not do anything platform specific.
- ******************************************************************************/
-void bl1_plat_arch_setup(void)
-{
-	fvp_cci_init();
-	fvp_cci_enable();
-
-	fvp_configure_mmu_el3(bl1_tzram_layout.total_base,
-			      bl1_tzram_layout.total_size,
-			      BL1_RO_BASE,
-			      BL1_RO_LIMIT
-#if USE_COHERENT_MEM
-			      , BL1_COHERENT_RAM_BASE,
-			      BL1_COHERENT_RAM_LIMIT
-#endif
-			     );
-}
-
-
-/*******************************************************************************
- * Before calling this function BL2 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL2 and set SPSR and security state.
- * On FVP we are only setting the security state, entrypoint
- ******************************************************************************/
-void bl1_plat_set_bl2_ep_info(image_info_t *bl2_image,
-				entry_point_info_t *bl2_ep)
-{
-	SET_SECURITY_STATE(bl2_ep->h.attr, SECURE);
-	bl2_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-}
diff --git a/plat/fvp/bl2_fvp_setup.c b/plat/fvp/bl2_fvp_setup.c
deleted file mode 100644
index 364833f..0000000
--- a/plat/fvp/bl2_fvp_setup.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <console.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <string.h>
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 2 constants identify the extents of the code & RO data region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
- */
-#define BL2_RO_BASE (unsigned long)(&__RO_START__)
-#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/* Data structure which holds the extents of the trusted SRAM for BL2 */
-static meminfo_t bl2_tzram_layout
-__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE)));
-
-/* Assert that BL3-1 parameters fit in shared memory */
-CASSERT((PARAMS_BASE + sizeof(bl2_to_bl31_params_mem_t)) <
-	(FVP_SHARED_MEM_BASE + FVP_SHARED_MEM_SIZE),
-	assert_bl31_params_do_not_fit_in_shared_memory);
-
-/*******************************************************************************
- * Reference to structures which holds the arguments which need to be passed
- * to BL31
- ******************************************************************************/
-static bl31_params_t *bl2_to_bl31_params;
-static entry_point_info_t *bl31_ep_info;
-
-meminfo_t *bl2_plat_sec_mem_layout(void)
-{
-	return &bl2_tzram_layout;
-}
-
-/*******************************************************************************
- * This function assigns a pointer to the memory that the platform has kept
- * aside to pass platform specific and trusted firmware related information
- * to BL31. This memory is allocated by allocating memory to
- * bl2_to_bl31_params_mem_t structure which is a superset of all the
- * structure whose information is passed to BL31
- * NOTE: This function should be called only once and should be done
- * before generating params to BL31
- ******************************************************************************/
-bl31_params_t *bl2_plat_get_bl31_params(void)
-{
-	bl2_to_bl31_params_mem_t *bl31_params_mem;
-
-	/*
-	 * Allocate the memory for all the arguments that needs to
-	 * be passed to BL31
-	 */
-	bl31_params_mem = (bl2_to_bl31_params_mem_t *)PARAMS_BASE;
-	memset((void *)PARAMS_BASE, 0, sizeof(bl2_to_bl31_params_mem_t));
-
-	/* Assign memory for TF related information */
-	bl2_to_bl31_params = &bl31_params_mem->bl31_params;
-	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
-
-	/* Fill BL31 related information */
-	bl31_ep_info = &bl31_params_mem->bl31_ep_info;
-	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem->bl31_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
-						VERSION_1, 0);
-
-	/* Fill BL32 related information if it exists */
-	if (BL32_BASE) {
-		bl2_to_bl31_params->bl32_ep_info =
-					&bl31_params_mem->bl32_ep_info;
-		SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info,
-					PARAM_EP, VERSION_1, 0);
-		bl2_to_bl31_params->bl32_image_info =
-					&bl31_params_mem->bl32_image_info;
-		SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info,
-					PARAM_IMAGE_BINARY,
-					VERSION_1, 0);
-	}
-
-	/* Fill BL33 related information */
-	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem->bl33_ep_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
-					PARAM_EP, VERSION_1, 0);
-	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem->bl33_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
-					VERSION_1, 0);
-
-	return bl2_to_bl31_params;
-}
-
-
-/*******************************************************************************
- * This function returns a pointer to the shared memory that the platform
- * has kept to point to entry point information of BL31 to BL2
- ******************************************************************************/
-struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
-{
-#if DEBUG
-	bl31_ep_info->args.arg1 = FVP_BL31_PLAT_PARAM_VAL;
-#endif
-	return bl31_ep_info;
-}
-
-
-/*******************************************************************************
- * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
- * in x0. This memory layout is sitting at the base of the free trusted SRAM.
- * Copy it to a safe loaction before its reclaimed by later BL2 functionality.
- ******************************************************************************/
-void bl2_early_platform_setup(meminfo_t *mem_layout)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Setup the BL2 memory layout */
-	bl2_tzram_layout = *mem_layout;
-
-	/* Initialize the platform config for future decision making */
-	fvp_config_setup();
-
-	/* Initialise the IO layer and register platform IO devices */
-	fvp_io_setup();
-}
-
-/*******************************************************************************
- * Perform platform specific setup. For now just initialize the memory location
- * to use for passing arguments to BL31.
- ******************************************************************************/
-void bl2_platform_setup(void)
-{
-	/*
-	 * Do initial security configuration to allow DRAM/device access. On
-	 * Base FVP only DRAM security is programmable (via TrustZone), but
-	 * other platforms might have more programmable security devices
-	 * present.
-	 */
-	fvp_security_setup();
-}
-
-/* Flush the TF params and the TF plat params */
-void bl2_plat_flush_bl31_params(void)
-{
-	flush_dcache_range((unsigned long)PARAMS_BASE, \
-				sizeof(bl2_to_bl31_params_mem_t));
-}
-
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this is only intializes the mmu in a quick and dirty way.
- ******************************************************************************/
-void bl2_plat_arch_setup(void)
-{
-	fvp_configure_mmu_el1(bl2_tzram_layout.total_base,
-			      bl2_tzram_layout.total_size,
-			      BL2_RO_BASE,
-			      BL2_RO_LIMIT
-#if USE_COHERENT_MEM
-			      , BL2_COHERENT_RAM_BASE,
-			      BL2_COHERENT_RAM_LIMIT
-#endif
-			      );
-}
-
-/*******************************************************************************
- * Before calling this function BL31 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL31 and set SPSR and security state.
- * On FVP we are only setting the security state, entrypoint
- ******************************************************************************/
-void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
-					entry_point_info_t *bl31_ep_info)
-{
-	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
-	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
-					DISABLE_ALL_EXCEPTIONS);
-}
-
-
-/*******************************************************************************
- * Before calling this function BL32 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL32 and set SPSR and security state.
- * On FVP we are only setting the security state, entrypoint
- ******************************************************************************/
-void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
-					entry_point_info_t *bl32_ep_info)
-{
-	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
-	bl32_ep_info->spsr = fvp_get_spsr_for_bl32_entry();
-}
-
-/*******************************************************************************
- * Before calling this function BL33 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL33 and set SPSR and security state.
- * On FVP we are only setting the security state, entrypoint
- ******************************************************************************/
-void bl2_plat_set_bl33_ep_info(image_info_t *image,
-					entry_point_info_t *bl33_ep_info)
-{
-	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
-	bl33_ep_info->spsr = fvp_get_spsr_for_bl33_entry();
-}
-
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL32
- ******************************************************************************/
-void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
-{
-	/*
-	 * Populate the extents of memory available for loading BL32.
-	 */
-	bl32_meminfo->total_base = BL32_BASE;
-	bl32_meminfo->free_base = BL32_BASE;
-	bl32_meminfo->total_size =
-			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-	bl32_meminfo->free_size =
-			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-}
-
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL33
- ******************************************************************************/
-void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
-{
-	bl33_meminfo->total_base = DRAM1_NS_BASE;
-	bl33_meminfo->total_size = DRAM1_NS_SIZE;
-	bl33_meminfo->free_base = DRAM1_NS_BASE;
-	bl33_meminfo->free_size = DRAM1_NS_SIZE;
-}
diff --git a/plat/fvp/bl31_fvp_setup.c b/plat/fvp/bl31_fvp_setup.c
deleted file mode 100644
index 977cbb4..0000000
--- a/plat/fvp/bl31_fvp_setup.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <console.h>
-#include <mmio.h>
-#include <platform.h>
-#include <stddef.h>
-#include "drivers/pwrc/fvp_pwrc.h"
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-extern unsigned long __BL31_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 3 constants identify the extents of the code, RO data region and the
- * limit of the BL3-1 image.  These addresses are used by the MMU setup code and
- * therefore they must be page-aligned.  It is the responsibility of the linker
- * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL31_RO_BASE (unsigned long)(&__RO_START__)
-#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
-#define BL31_END (unsigned long)(&__BL31_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-#if RESET_TO_BL31
-static entry_point_info_t bl32_image_ep_info;
-static entry_point_info_t bl33_image_ep_info;
-#else
-/*******************************************************************************
- * Reference to structure which holds the arguments that have been passed to
- * BL31 from BL2.
- ******************************************************************************/
-static bl31_params_t *bl2_to_bl31_params;
-#endif
-
-/*******************************************************************************
- * Return a pointer to the 'entry_point_info' structure of the next image for the
- * security state specified. BL33 corresponds to the non-secure image type
- * while BL32 corresponds to the secure image type. A NULL pointer is returned
- * if the image does not exist.
- ******************************************************************************/
-entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
-{
-#if RESET_TO_BL31
-	assert(sec_state_is_valid(type));
-
-	if (type == NON_SECURE)
-		return &bl33_image_ep_info;
-	else
-		return &bl32_image_ep_info;
-#else
-	entry_point_info_t *next_image_info;
-
-	assert(sec_state_is_valid(type));
-
-	next_image_info = (type == NON_SECURE) ?
-		bl2_to_bl31_params->bl33_ep_info :
-		bl2_to_bl31_params->bl32_ep_info;
-
-	/* None of the images on this platform can have 0x0 as the entrypoint */
-	if (next_image_info->pc)
-		return next_image_info;
-	else
-		return NULL;
-#endif
-}
-
-/*******************************************************************************
- * Return a pointer to the 'image_info' structure of the next image for the
- * security state specified. BL33 corresponds to the non-secure image type
- * while BL32 corresponds to the secure image type. A NULL pointer is returned
- * if the image does not exist.
- ******************************************************************************/
-image_info_t *bl31_plat_get_next_image_image_info(uint32_t type)
-{
-#if RESET_TO_BL31
-	assert(sec_state_is_valid(type));
-
-	if (type == NON_SECURE)
-		return NULL;
-	else
-		return NULL;
-#else
-	image_info_t *next_image_info;
-
-	assert(sec_state_is_valid(type));
-
-	next_image_info = (type == NON_SECURE) ?
-		bl2_to_bl31_params->bl33_image_info :
-		bl2_to_bl31_params->bl32_image_info;
-
-	/* None of the images on this platform can have size 0x0 */
-	if (next_image_info->image_size)
-		return next_image_info;
-	else
-		return NULL;
-#endif
-}
-
-
-
-/*******************************************************************************
- * Perform any BL31 specific platform actions. Here is an opportunity to copy
- * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
- * are lost (potentially). This needs to be done before the MMU is initialized
- * so that the memory layout can be used while creating page tables. On the FVP
- * we know that BL2 has populated the parameters in secure DRAM. So we just use
- * the reference passed in 'from_bl2' instead of copying. The 'data' parameter
- * is not used since all the information is contained in 'from_bl2'. Also, BL2
- * has flushed this information to memory, so we are guaranteed to pick up good
- * data
- ******************************************************************************/
-void bl31_early_platform_setup(bl31_params_t *from_bl2,
-				void *plat_params_from_bl2)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Initialize the platform config for future decision making */
-	fvp_config_setup();
-
-#if RESET_TO_BL31
-	/* There are no parameters from BL2 if BL31 is a reset vector */
-	assert(from_bl2 == NULL);
-	assert(plat_params_from_bl2 == NULL);
-
-	/*
-	 * Do initial security configuration to allow DRAM/device access. On
-	 * Base FVP only DRAM security is programmable (via TrustZone), but
-	 * other platforms might have more programmable security devices
-	 * present.
-	 */
-	fvp_security_setup();
-
-	/* Populate entry point information for BL3-2 and BL3-3 */
-	SET_PARAM_HEAD(&bl32_image_ep_info,
-				PARAM_EP,
-				VERSION_1,
-				0);
-	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
-	bl32_image_ep_info.pc = BL32_BASE;
-	bl32_image_ep_info.spsr = fvp_get_spsr_for_bl32_entry();
-
-	SET_PARAM_HEAD(&bl33_image_ep_info,
-				PARAM_EP,
-				VERSION_1,
-				0);
-	/*
-	 * Tell BL31 where the non-trusted software image
-	 * is located and the entry state information
-	 */
-	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
-	bl33_image_ep_info.spsr = fvp_get_spsr_for_bl33_entry();
-	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-
-#else
-	/* Check params passed from BL2 should not be NULL,
-	 * We are not checking plat_params_from_bl2 as NULL as we are not
-	 * using it on FVP
-	 */
-	assert(from_bl2 != NULL);
-	assert(from_bl2->h.type == PARAM_BL31);
-	assert(from_bl2->h.version >= VERSION_1);
-
-	bl2_to_bl31_params = from_bl2;
-	assert(((unsigned long)plat_params_from_bl2) == FVP_BL31_PLAT_PARAM_VAL);
-#endif
-}
-
-/*******************************************************************************
- * Initialize the gic, configure the CLCD and zero out variables needed by the
- * secondaries to boot up correctly.
- ******************************************************************************/
-void bl31_platform_setup(void)
-{
-	unsigned int reg_val;
-
-	/* Initialize the gic cpu and distributor interfaces */
-	fvp_gic_init();
-	arm_gic_setup();
-
-	/*
-	 * TODO: Configure the CLCD before handing control to
-	 * linux. Need to see if a separate driver is needed
-	 * instead.
-	 */
-	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0);
-	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
-		      (1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16));
-
-	/* Enable and initialize the System level generic timer */
-	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN);
-
-	/* Allow access to the System counter timer module */
-	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
-	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
-	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
-	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
-	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
-
-	reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
-	mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
-
-	/* Intialize the power controller */
-	fvp_pwrc_setup();
-
-	/* Topologies are best known to the platform. */
-	fvp_setup_topology();
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this is only intializes the mmu in a quick and dirty way.
- ******************************************************************************/
-void bl31_plat_arch_setup(void)
-{
-	fvp_cci_init();
-#if RESET_TO_BL31
-	fvp_cci_enable();
-#endif
-	fvp_configure_mmu_el3(BL31_RO_BASE,
-			      (BL31_END - BL31_RO_BASE),
-			      BL31_RO_BASE,
-			      BL31_RO_LIMIT
-#if USE_COHERENT_MEM
-			      , BL31_COHERENT_RAM_BASE,
-			      BL31_COHERENT_RAM_LIMIT
-#endif
-			      );
-}
diff --git a/plat/fvp/drivers/pwrc/fvp_pwrc.c b/plat/fvp/drivers/pwrc/fvp_pwrc.c
deleted file mode 100644
index 0497c2b..0000000
--- a/plat/fvp/drivers/pwrc/fvp_pwrc.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <bakery_lock.h>
-#include <mmio.h>
-#include "../../fvp_def.h"
-#include "../../fvp_private.h"
-#include "fvp_pwrc.h"
-
-/*
- * TODO: Someday there will be a generic power controller api. At the moment
- * each platform has its own pwrc so just exporting functions is fine.
- */
-#if USE_COHERENT_MEM
-static bakery_lock_t pwrc_lock __attribute__ ((section("tzfw_coherent_mem")));
-#define LOCK_ARG	&pwrc_lock
-#else
-#define LOCK_ARG	FVP_PWRC_BAKERY_ID
-#endif
-
-unsigned int fvp_pwrc_get_cpu_wkr(unsigned long mpidr)
-{
-	return PSYSR_WK(fvp_pwrc_read_psysr(mpidr));
-}
-
-unsigned int fvp_pwrc_read_psysr(unsigned long mpidr)
-{
-	unsigned int rc;
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PSYSR_OFF, (unsigned int) mpidr);
-	rc = mmio_read_32(PWRC_BASE + PSYSR_OFF);
-	fvp_lock_release(LOCK_ARG);
-	return rc;
-}
-
-void fvp_pwrc_write_pponr(unsigned long mpidr)
-{
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PPONR_OFF, (unsigned int) mpidr);
-	fvp_lock_release(LOCK_ARG);
-}
-
-void fvp_pwrc_write_ppoffr(unsigned long mpidr)
-{
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PPOFFR_OFF, (unsigned int) mpidr);
-	fvp_lock_release(LOCK_ARG);
-}
-
-void fvp_pwrc_set_wen(unsigned long mpidr)
-{
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PWKUPR_OFF,
-		      (unsigned int) (PWKUPR_WEN | mpidr));
-	fvp_lock_release(LOCK_ARG);
-}
-
-void fvp_pwrc_clr_wen(unsigned long mpidr)
-{
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PWKUPR_OFF,
-		      (unsigned int) mpidr);
-	fvp_lock_release(LOCK_ARG);
-}
-
-void fvp_pwrc_write_pcoffr(unsigned long mpidr)
-{
-	fvp_lock_get(LOCK_ARG);
-	mmio_write_32(PWRC_BASE + PCOFFR_OFF, (unsigned int) mpidr);
-	fvp_lock_release(LOCK_ARG);
-}
-
-/* Nothing else to do here apart from initializing the lock */
-int fvp_pwrc_setup(void)
-{
-	fvp_lock_init(LOCK_ARG);
-
-	return 0;
-}
-
-
-
diff --git a/plat/fvp/drivers/pwrc/fvp_pwrc.h b/plat/fvp/drivers/pwrc/fvp_pwrc.h
deleted file mode 100644
index ad1ea85..0000000
--- a/plat/fvp/drivers/pwrc/fvp_pwrc.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FVP_PWRC_H__
-#define __FVP_PWRC_H__
-
-/* FVP Power controller register offset etc */
-#define PPOFFR_OFF		0x0
-#define PPONR_OFF		0x4
-#define PCOFFR_OFF		0x8
-#define PWKUPR_OFF		0xc
-#define PSYSR_OFF		0x10
-
-#define PWKUPR_WEN		(1ull << 31)
-
-#define PSYSR_AFF_L2		(1 << 31)
-#define PSYSR_AFF_L1		(1 << 30)
-#define PSYSR_AFF_L0		(1 << 29)
-#define PSYSR_WEN		(1 << 28)
-#define PSYSR_PC		(1 << 27)
-#define PSYSR_PP		(1 << 26)
-
-#define PSYSR_WK_SHIFT		24
-#define PSYSR_WK_MASK		0x3
-#define PSYSR_WK(x)		(((x) >> PSYSR_WK_SHIFT) & PSYSR_WK_MASK)
-
-#define WKUP_COLD		0x0
-#define WKUP_RESET		0x1
-#define WKUP_PPONR		0x2
-#define WKUP_GICREQ		0x3
-
-#define PSYSR_INVALID		0xffffffff
-
-#ifndef __ASSEMBLY__
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-int fvp_pwrc_setup(void);
-void fvp_pwrc_write_pcoffr(unsigned long);
-void fvp_pwrc_write_ppoffr(unsigned long);
-void fvp_pwrc_write_pponr(unsigned long);
-void fvp_pwrc_set_wen(unsigned long);
-void fvp_pwrc_clr_wen(unsigned long);
-unsigned int fvp_pwrc_read_psysr(unsigned long);
-unsigned int fvp_pwrc_get_cpu_wkr(unsigned long);
-
-#endif /*__ASSEMBLY__*/
-
-#endif /* __FVP_PWRC_H__ */
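As an illustration of how the wake-up encoding above is consumed, here is a minimal sketch (not taken from the tree; the helper name and include list are assumptions) that checks whether the calling core was woken by an explicit PPONR power-on:

#include <arch_helpers.h>	/* read_mpidr_el1() */
#include "fvp_pwrc.h"		/* fvp_pwrc_get_cpu_wkr(), WKUP_* */

/* Hypothetical helper: returns non-zero if this core was explicitly
 * powered on through a write to the PPONR register. */
static int woken_by_power_on(void)
{
	unsigned int reason = fvp_pwrc_get_cpu_wkr(read_mpidr_el1());

	return reason == WKUP_PPONR;
}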
diff --git a/plat/fvp/fvp_def.h b/plat/fvp/fvp_def.h
deleted file mode 100644
index d1d9adb..0000000
--- a/plat/fvp/fvp_def.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FVP_DEF_H__
-#define __FVP_DEF_H__
-
-/* Firmware Image Package */
-#define FIP_IMAGE_NAME			"fip.bin"
-#define FVP_PRIMARY_CPU			0x0
-
-/* Memory location options for TSP */
-#define FVP_TRUSTED_SRAM_ID		0
-#define FVP_TRUSTED_DRAM_ID		1
-#define FVP_DRAM_ID			2
-
-/*
- * Some of the definitions in this file use the 'ull' suffix in order to avoid
- * subtle integer overflow errors due to implicit integer type promotion when
- * working with 32-bit values.
- *
- * The TSP linker script includes some of these definitions to define the BL3-2
- * memory map, but the GNU LD does not support the 'ull' suffix, causing the
- * build process to fail. To solve this problem, the auxiliary macro MAKE_ULL(x)
- * will add the 'ull' suffix only when the macro __LINKER__  is not defined
- * (__LINKER__ is defined in the command line to preprocess the linker script).
- * Constants in the linker script will not have the 'ull' suffix, but this is
- * not a problem since the linker evaluates all constant expressions to 64 bit
- * (assuming the target architecture is 64 bit).
- */
-#ifndef __LINKER__
-  #define MAKE_ULL(x)			x##ull
-#else
-  #define MAKE_ULL(x)			x
-#endif
-
-/*******************************************************************************
- * FVP memory map related constants
- ******************************************************************************/
-
-#define FVP_TRUSTED_ROM_BASE	0x00000000
-#define FVP_TRUSTED_ROM_SIZE	0x04000000	/* 64 MB */
-
-/* The first 4KB of Trusted SRAM are used as shared memory */
-#define FVP_SHARED_MEM_BASE	0x04000000
-#define FVP_SHARED_MEM_SIZE	0x00001000	/* 4 KB */
-
-/* The remaining Trusted SRAM is used to load the BL images */
-#define FVP_TRUSTED_SRAM_BASE	0x04001000
-#define FVP_TRUSTED_SRAM_SIZE	0x0003F000	/* 252 KB */
-
-#define FVP_TRUSTED_DRAM_BASE	0x06000000
-#define FVP_TRUSTED_DRAM_SIZE	0x02000000	/* 32 MB */
-
-#define FLASH0_BASE		0x08000000
-#define FLASH0_SIZE		0x04000000
-
-#define FLASH1_BASE		0x0c000000
-#define FLASH1_SIZE		0x04000000
-
-#define PSRAM_BASE		0x14000000
-#define PSRAM_SIZE		0x04000000
-
-#define VRAM_BASE		0x18000000
-#define VRAM_SIZE		0x02000000
-
-/* Aggregate of all devices in the first GB */
-#define DEVICE0_BASE		0x1a000000
-#define DEVICE0_SIZE		0x12200000
-
-#define DEVICE1_BASE		0x2f000000
-#define DEVICE1_SIZE		0x200000
-
-#define NSRAM_BASE		0x2e000000
-#define NSRAM_SIZE		0x10000
-
-#define DRAM1_BASE		MAKE_ULL(0x80000000)
-#define DRAM1_SIZE		MAKE_ULL(0x80000000)
-#define DRAM1_END		(DRAM1_BASE + DRAM1_SIZE - 1)
-
-/* Define the top 16 MB of DRAM1 as secure */
-#define DRAM1_SEC_SIZE		MAKE_ULL(0x01000000)
-#define DRAM1_SEC_BASE		(DRAM1_BASE + DRAM1_SIZE - DRAM1_SEC_SIZE)
-#define DRAM1_SEC_END		(DRAM1_SEC_BASE + DRAM1_SEC_SIZE - 1)
-
-#define DRAM1_NS_BASE		DRAM1_BASE
-#define DRAM1_NS_SIZE		(DRAM1_SIZE - DRAM1_SEC_SIZE)
-#define DRAM1_NS_END		(DRAM1_NS_BASE + DRAM1_NS_SIZE - 1)
-
-#define DRAM_BASE		DRAM1_BASE
-#define DRAM_SIZE		DRAM1_SIZE
-
-#define DRAM2_BASE		MAKE_ULL(0x880000000)
-#define DRAM2_SIZE		MAKE_ULL(0x780000000)
-#define DRAM2_END		(DRAM2_BASE + DRAM2_SIZE - 1)
-
-#define PCIE_EXP_BASE		0x40000000
-#define TZRNG_BASE		0x7fe60000
-#define TZNVCTR_BASE		0x7fe70000
-#define TZROOTKEY_BASE		0x7fe80000
-
-/* Memory mapped Generic timer interfaces  */
-#define SYS_CNTCTL_BASE		0x2a430000
-#define SYS_CNTREAD_BASE	0x2a800000
-#define SYS_TIMCTL_BASE		0x2a810000
-
-/* V2M motherboard system registers & offsets */
-#define VE_SYSREGS_BASE		0x1c010000
-#define V2M_SYS_ID		0x0
-#define V2M_SYS_SWITCH		0x4
-#define V2M_SYS_LED		0x8
-#define V2M_SYS_CFGDATA		0xa0
-#define V2M_SYS_CFGCTRL		0xa4
-#define V2M_SYS_CFGSTATUS	0xa8
-
-#define CFGCTRL_START		(1 << 31)
-#define CFGCTRL_RW		(1 << 30)
-#define CFGCTRL_FUNC_SHIFT	20
-#define CFGCTRL_FUNC(fn)	(fn << CFGCTRL_FUNC_SHIFT)
-#define FUNC_CLK_GEN		0x01
-#define FUNC_TEMP		0x04
-#define FUNC_DB_RESET		0x05
-#define FUNC_SCC_CFG		0x06
-#define FUNC_SHUTDOWN		0x08
-#define FUNC_REBOOT		0x09
-
-/* Load address of BL33 in the FVP port */
-#define NS_IMAGE_OFFSET		(DRAM1_BASE + 0x8000000) /* DRAM + 128MB */
-
-/* Special value used to verify platform parameters from BL2 to BL3-1 */
-#define FVP_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
-
-/*
- * V2M sysled bit definitions. The values written to this
- * register are defined in arch.h & runtime_svc.h. Only
- * used by the primary cpu to diagnose any cold boot issues.
- *
- * SYS_LED[0]   - Security state (S=0/NS=1)
- * SYS_LED[2:1] - Exception Level (EL3-EL0)
- * SYS_LED[7:3] - Exception Class (Sync/Async & origin)
- *
- */
-#define SYS_LED_SS_SHIFT		0x0
-#define SYS_LED_EL_SHIFT		0x1
-#define SYS_LED_EC_SHIFT		0x3
-
-#define SYS_LED_SS_MASK		0x1
-#define SYS_LED_EL_MASK		0x3
-#define SYS_LED_EC_MASK		0x1f
-
-/* V2M sysid register bits */
-#define SYS_ID_REV_SHIFT	28
-#define SYS_ID_HBI_SHIFT	16
-#define SYS_ID_BLD_SHIFT	12
-#define SYS_ID_ARCH_SHIFT	8
-#define SYS_ID_FPGA_SHIFT	0
-
-#define SYS_ID_REV_MASK	0xf
-#define SYS_ID_HBI_MASK	0xfff
-#define SYS_ID_BLD_MASK	0xf
-#define SYS_ID_ARCH_MASK	0xf
-#define SYS_ID_FPGA_MASK	0xff
-
-#define SYS_ID_BLD_LENGTH	4
-
-#define HBI_FVP_BASE		0x020
-#define REV_FVP_BASE_V0		0x0
-
-#define HBI_FOUNDATION		0x010
-#define REV_FOUNDATION_V2_0	0x0
-#define REV_FOUNDATION_V2_1	0x1
-
-#define BLD_GIC_VE_MMAP	0x0
-#define BLD_GIC_A53A57_MMAP	0x1
-
-#define ARCH_MODEL		0x1
-
-/* FVP Power controller base address*/
-#define PWRC_BASE		0x1c100000
-
-
-/*******************************************************************************
- * CCI-400 related constants
- ******************************************************************************/
-#define CCI400_BASE			0x2c090000
-#define CCI400_SL_IFACE3_CLUSTER_IX	0
-#define CCI400_SL_IFACE4_CLUSTER_IX	1
-
-/*******************************************************************************
- * GIC-400 & interrupt handling related constants
- ******************************************************************************/
-/* VE compatible GIC memory map */
-#define VE_GICD_BASE			0x2c001000
-#define VE_GICC_BASE			0x2c002000
-#define VE_GICH_BASE			0x2c004000
-#define VE_GICV_BASE			0x2c006000
-
-/* Base FVP compatible GIC memory map */
-#define BASE_GICD_BASE			0x2f000000
-#define BASE_GICR_BASE			0x2f100000
-#define BASE_GICC_BASE			0x2c000000
-#define BASE_GICH_BASE			0x2c010000
-#define BASE_GICV_BASE			0x2c02f000
-
-#define IRQ_TZ_WDOG			56
-#define IRQ_SEC_PHY_TIMER		29
-#define IRQ_SEC_SGI_0			8
-#define IRQ_SEC_SGI_1			9
-#define IRQ_SEC_SGI_2			10
-#define IRQ_SEC_SGI_3			11
-#define IRQ_SEC_SGI_4			12
-#define IRQ_SEC_SGI_5			13
-#define IRQ_SEC_SGI_6			14
-#define IRQ_SEC_SGI_7			15
-
-/*******************************************************************************
- * PL011 related constants
- ******************************************************************************/
-#define PL011_UART0_BASE		0x1c090000
-#define PL011_UART1_BASE		0x1c0a0000
-#define PL011_UART2_BASE		0x1c0b0000
-#define PL011_UART3_BASE		0x1c0c0000
-
-#define PL011_BAUDRATE  115200
-
-#define PL011_UART0_CLK_IN_HZ 24000000
-#define PL011_UART1_CLK_IN_HZ 24000000
-#define PL011_UART2_CLK_IN_HZ 24000000
-#define PL011_UART3_CLK_IN_HZ 24000000
-
-/*******************************************************************************
- * TrustZone address space controller related constants
- ******************************************************************************/
-#define TZC400_BASE			0x2a4a0000
-
-/*
- * The NSAIDs for this platform as used to program the TZC400.
- */
-
-/* NSAIDs used by devices in TZC filter 0 on FVP */
-#define FVP_NSAID_DEFAULT		0
-#define FVP_NSAID_PCI			1
-#define FVP_NSAID_VIRTIO		8  /* from FVP v5.6 onwards */
-#define FVP_NSAID_AP			9  /* Application Processors */
-#define FVP_NSAID_VIRTIO_OLD		15 /* until FVP v5.5 */
-
-/* NSAIDs used by devices in TZC filter 2 on FVP */
-#define FVP_NSAID_HDLCD0		2
-#define FVP_NSAID_CLCD			7
-
-/*******************************************************************************
- *  Shared Data
- ******************************************************************************/
-
-/* Entrypoint mailboxes */
-#define MBOX_BASE		FVP_SHARED_MEM_BASE
-#define MBOX_SIZE		0x200
-
-/* Base address where parameters to BL31 are stored */
-#define PARAMS_BASE		(MBOX_BASE + MBOX_SIZE)
-
-#endif /* __FVP_DEF_H__ */
diff --git a/plat/fvp/fvp_io_storage.c b/plat/fvp/fvp_io_storage.c
deleted file mode 100644
index ec1fe58..0000000
--- a/plat/fvp/fvp_io_storage.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <io_driver.h>
-#include <io_fip.h>
-#include <io_memmap.h>
-#include <io_storage.h>
-#include <io_semihosting.h>
-#include <platform_def.h>
-#include <semihosting.h>	/* For FOPEN_MODE_... */
-#include <string.h>
-
-/* IO devices */
-static const io_dev_connector_t *sh_dev_con;
-static uintptr_t sh_dev_spec;
-static uintptr_t sh_init_params;
-static uintptr_t sh_dev_handle;
-static const io_dev_connector_t *fip_dev_con;
-static uintptr_t fip_dev_spec;
-static uintptr_t fip_dev_handle;
-static const io_dev_connector_t *memmap_dev_con;
-static uintptr_t memmap_dev_spec;
-static uintptr_t memmap_init_params;
-static uintptr_t memmap_dev_handle;
-
-static const io_block_spec_t fip_block_spec = {
-	.offset = FLASH0_BASE,
-	.length = FLASH0_SIZE
-};
-
-static const io_file_spec_t bl2_file_spec = {
-	.path = BL2_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_file_spec = {
-	.path = BL31_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_file_spec = {
-	.path = BL32_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_file_spec = {
-	.path = BL33_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-#if TRUSTED_BOARD_BOOT
-static const io_file_spec_t bl2_cert_file_spec = {
-	.path = BL2_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t trusted_key_cert_file_spec = {
-	.path = TRUSTED_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_key_cert_file_spec = {
-	.path = BL30_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_key_cert_file_spec = {
-	.path = BL31_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_key_cert_file_spec = {
-	.path = BL32_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_key_cert_file_spec = {
-	.path = BL33_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_cert_file_spec = {
-	.path = BL30_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_cert_file_spec = {
-	.path = BL31_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_cert_file_spec = {
-	.path = BL32_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_cert_file_spec = {
-	.path = BL33_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-#endif /* TRUSTED_BOARD_BOOT */
-
-static int open_fip(const uintptr_t spec);
-static int open_memmap(const uintptr_t spec);
-
-struct plat_io_policy {
-	char *image_name;
-	uintptr_t *dev_handle;
-	uintptr_t image_spec;
-	int (*check)(const uintptr_t spec);
-};
-
-static const struct plat_io_policy policies[] = {
-	{
-		FIP_IMAGE_NAME,
-		&memmap_dev_handle,
-		(uintptr_t)&fip_block_spec,
-		open_memmap
-	}, {
-		BL2_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl2_file_spec,
-		open_fip
-	}, {
-		BL31_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_file_spec,
-		open_fip
-	}, {
-		BL32_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_file_spec,
-		open_fip
-	}, {
-		BL33_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_file_spec,
-		open_fip
-	}, {
-#if TRUSTED_BOARD_BOOT
-		BL2_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl2_cert_file_spec,
-		open_fip
-	}, {
-		TRUSTED_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&trusted_key_cert_file_spec,
-		open_fip
-	}, {
-		BL30_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_key_cert_file_spec,
-		open_fip
-	}, {
-		BL31_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_key_cert_file_spec,
-		open_fip
-	}, {
-		BL32_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_key_cert_file_spec,
-		open_fip
-	}, {
-		BL33_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_key_cert_file_spec,
-		open_fip
-	}, {
-		BL30_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_cert_file_spec,
-		open_fip
-	}, {
-		BL31_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_cert_file_spec,
-		open_fip
-	}, {
-		BL32_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_cert_file_spec,
-		open_fip
-	}, {
-		BL33_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_cert_file_spec,
-		open_fip
-	}, {
-#endif /* TRUSTED_BOARD_BOOT */
-		0, 0, 0
-	}
-};
-
-
-static int open_fip(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-
-	/* See if a Firmware Image Package is available */
-	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_NAME);
-	if (result == IO_SUCCESS) {
-		VERBOSE("Using FIP\n");
-		/*TODO: Check image defined in spec is present in FIP. */
-	}
-	return result;
-}
-
-
-static int open_memmap(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t local_image_handle;
-
-	result = io_dev_init(memmap_dev_handle, memmap_init_params);
-	if (result == IO_SUCCESS) {
-		result = io_open(memmap_dev_handle, spec, &local_image_handle);
-		if (result == IO_SUCCESS) {
-			VERBOSE("Using Memmap IO\n");
-			io_close(local_image_handle);
-		}
-	}
-	return result;
-}
-
-
-static int open_semihosting(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t local_image_handle;
-
-	/* See if the file exists on semi-hosting.*/
-	result = io_dev_init(sh_dev_handle, sh_init_params);
-	if (result == IO_SUCCESS) {
-		result = io_open(sh_dev_handle, spec, &local_image_handle);
-		if (result == IO_SUCCESS) {
-			VERBOSE("Using Semi-hosting IO\n");
-			io_close(local_image_handle);
-		}
-	}
-	return result;
-}
-
-void fvp_io_setup (void)
-{
-	int io_result = IO_FAIL;
-
-	/* Register the IO devices on this platform */
-	io_result = register_io_dev_sh(&sh_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = register_io_dev_fip(&fip_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = register_io_dev_memmap(&memmap_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	/* Open connections to devices and cache the handles */
-	io_result = io_dev_open(sh_dev_con, sh_dev_spec, &sh_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = io_dev_open(fip_dev_con, fip_dev_spec, &fip_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = io_dev_open(memmap_dev_con, memmap_dev_spec,
-				&memmap_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	/* Ignore improbable errors in release builds */
-	(void)io_result;
-}
-
-
-/* Return an IO device handle and specification which can be used to access
- * an image. Use this to enforce platform load policy */
-int plat_get_image_source(const char *image_name, uintptr_t *dev_handle,
-			  uintptr_t *image_spec)
-{
-	int result = IO_FAIL;
-	const struct plat_io_policy *policy;
-
-	if ((image_name != NULL) && (dev_handle != NULL) &&
-	    (image_spec != NULL)) {
-		policy = policies;
-		while (policy->image_name != NULL) {
-			if (strcmp(policy->image_name, image_name) == 0) {
-				result = policy->check(policy->image_spec);
-				if (result == IO_SUCCESS) {
-					*image_spec = policy->image_spec;
-					*dev_handle = *(policy->dev_handle);
-					break;
-				} else {
-					result = open_semihosting(
-							policy->image_spec);
-					if (result == IO_SUCCESS) {
-						*dev_handle = sh_dev_handle;
-						*image_spec =
-							policy->image_spec;
-					}
-				}
-			}
-			policy++;
-		}
-	} else {
-		result = IO_FAIL;
-	}
-	return result;
-}
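For context, a minimal usage sketch of the policy lookup above, as it might be invoked from a generic image-loading path (the wrapper function and include list are assumptions; only plat_get_image_source(), BL31_IMAGE_NAME and IO_SUCCESS come from the sources shown here):

#include <stdint.h>
#include <io_storage.h>		/* IO_SUCCESS */
#include <platform.h>		/* plat_get_image_source() */
#include <platform_def.h>	/* BL31_IMAGE_NAME */

/* Hypothetical caller: resolve which IO device and spec to use for BL3-1.
 * The policy table routes the name to the FIP device and the matching
 * io_file_spec_t, falling back to semi-hosting when the FIP lookup fails. */
static int resolve_bl31_source(uintptr_t *dev_handle, uintptr_t *image_spec)
{
	int result;

	result = plat_get_image_source(BL31_IMAGE_NAME, dev_handle, image_spec);

	return (result == IO_SUCCESS) ? 0 : -1;
}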
diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c
deleted file mode 100644
index 9044e69..0000000
--- a/plat/fvp/fvp_pm.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bakery_lock.h>
-#include <cci400.h>
-#include <debug.h>
-#include <mmio.h>
-#include <platform.h>
-#include <plat_config.h>
-#include <platform_def.h>
-#include <psci.h>
-#include <errno.h>
-#include "drivers/pwrc/fvp_pwrc.h"
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-/*******************************************************************************
- * Private FVP function to program the mailbox for a cpu before it is released
- * from reset.
- ******************************************************************************/
-static void fvp_program_mailbox(uint64_t mpidr, uint64_t address)
-{
-	uint64_t linear_id;
-	mailbox_t *fvp_mboxes;
-
-	linear_id = platform_get_core_pos(mpidr);
-	fvp_mboxes = (mailbox_t *)MBOX_BASE;
-	fvp_mboxes[linear_id].value = address;
-	flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
-			   sizeof(unsigned long));
-}
-
-/*******************************************************************************
- * Function which implements the common FVP specific operations to power down a
- * cpu in response to a CPU_OFF or CPU_SUSPEND request.
- ******************************************************************************/
-static void fvp_cpu_pwrdwn_common()
-{
-	/* Prevent interrupts from spuriously waking up this cpu */
-	arm_gic_cpuif_deactivate();
-
-	/* Program the power controller to power off this cpu. */
-	fvp_pwrc_write_ppoffr(read_mpidr_el1());
-}
-
-/*******************************************************************************
- * Function which implements the common FVP specific operations to power down a
- * cluster in response to a CPU_OFF or CPU_SUSPEND request.
- ******************************************************************************/
-static void fvp_cluster_pwrdwn_common()
-{
-	uint64_t mpidr = read_mpidr_el1();
-
-	/* Disable coherency if this cluster is to be turned off */
-	if (get_plat_config()->flags & CONFIG_HAS_CCI)
-		cci_disable_cluster_coherency(mpidr);
-
-	/* Program the power controller to turn the cluster off */
-	fvp_pwrc_write_pcoffr(mpidr);
-}
-
-/*******************************************************************************
- * Private FVP function which is used to determine if any platform actions
- * should be performed for the specified affinity instance given its
- * state. Nothing needs to be done if the 'state' is not off or if this is not
- * the highest affinity level which will enter the 'state'.
- ******************************************************************************/
-static int32_t fvp_do_plat_actions(unsigned int afflvl, unsigned int state)
-{
-	unsigned int max_phys_off_afflvl;
-
-	assert(afflvl <= MPIDR_AFFLVL1);
-
-	if (state != PSCI_STATE_OFF)
-		return -EAGAIN;
-
-	/*
-	 * Find the highest affinity level which will be suspended and postpone
-	 * all the platform specific actions until that level is hit.
-	 */
-	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
-	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
-	if (afflvl != max_phys_off_afflvl)
-		return -EAGAIN;
-
-	return 0;
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance is about to enter standby.
- ******************************************************************************/
-void fvp_affinst_standby(unsigned int power_state)
-{
-	/*
-	 * Enter standby state
-	 * dsb is good practice before using wfi to enter low power states
-	 */
-	dsb();
-	wfi();
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance is about to be turned on. The
- * level and mpidr determine the affinity instance.
- ******************************************************************************/
-int fvp_affinst_on(unsigned long mpidr,
-		   unsigned long sec_entrypoint,
-		   unsigned int afflvl,
-		   unsigned int state)
-{
-	int rc = PSCI_E_SUCCESS;
-	unsigned int psysr;
-
-	/*
-	 * It's possible to turn on only affinity level 0 i.e. a cpu
-	 * on the FVP. Ignore any other affinity level.
-	 */
-	if (afflvl != MPIDR_AFFLVL0)
-		return rc;
-
-	/*
-	 * Ensure that we do not cancel an inflight power off request
-	 * for the target cpu. That would leave it in a zombie wfi.
-	 * Wait for it to power off, program the jump address for the
-	 * target cpu and then program the power controller to turn
-	 * that cpu on
-	 */
-	do {
-		psysr = fvp_pwrc_read_psysr(mpidr);
-	} while (psysr & PSYSR_AFF_L0);
-
-	fvp_program_mailbox(mpidr, sec_entrypoint);
-	fvp_pwrc_write_pponr(mpidr);
-
-	return rc;
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance is about to be turned off. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It would be wise to flush a write to the
- * global to prevent unpredictable results.
- ******************************************************************************/
-void fvp_affinst_off(unsigned int afflvl,
-		    unsigned int state)
-{
-	/* Determine if any platform actions need to be executed */
-	if (fvp_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/*
-	 * If execution reaches this stage then this affinity level will be
- * suspended. Perform at least the cpu specific actions followed by the
-	 * cluster specific operations if applicable.
-	 */
-	fvp_cpu_pwrdwn_common();
-
-	if (afflvl != MPIDR_AFFLVL0)
-		fvp_cluster_pwrdwn_common();
-
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance is about to be suspended. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It would be wise to flush a write to the
- * global to prevent unpredictable results.
- ******************************************************************************/
-void fvp_affinst_suspend(unsigned long sec_entrypoint,
-			unsigned int afflvl,
-			unsigned int state)
-{
-	unsigned long mpidr;
-
-	/* Determine if any platform actions need to be executed. */
-	if (fvp_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/* Get the mpidr for this cpu */
-	mpidr = read_mpidr_el1();
-
-	/* Program the jump address for this cpu */
-	fvp_program_mailbox(mpidr, sec_entrypoint);
-
-	/* Program the power controller to enable wakeup interrupts. */
-	fvp_pwrc_set_wen(mpidr);
-
-	/* Perform the common cpu specific operations */
-	fvp_cpu_pwrdwn_common();
-
-	/* Perform the common cluster specific operations */
-	if (afflvl != MPIDR_AFFLVL0)
-		fvp_cluster_pwrdwn_common();
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance has just been powered on after
- * being turned off earlier. The level and mpidr determine the affinity
- * instance. The 'state' arg. allows the platform to decide whether the cluster
- * was turned off prior to wakeup and do what's necessary to set it up
- * correctly.
- ******************************************************************************/
-void fvp_affinst_on_finish(unsigned int afflvl,
-			  unsigned int state)
-{
-	unsigned long mpidr;
-
-	/* Determine if any platform actions need to be executed. */
-	if (fvp_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/* Get the mpidr for this cpu */
-	mpidr = read_mpidr_el1();
-
-	/* Perform the common cluster specific operations */
-	if (afflvl != MPIDR_AFFLVL0) {
-		/*
-		 * This CPU might have woken up whilst the cluster was
-		 * attempting to power down. In this case the FVP power
-		 * controller will have a pending cluster power off request
-		 * which needs to be cleared by writing to the PPONR register.
-		 * This prevents the power controller from interpreting a
-		 * subsequent entry of this cpu into a simple wfi as a power
-		 * down request.
-		 */
-		fvp_pwrc_write_pponr(mpidr);
-
-		/* Enable coherency if this cluster was off */
-		fvp_cci_enable();
-	}
-
-	/*
-	 * Clear PWKUPR.WEN bit to ensure interrupts do not interfere
-	 * with a cpu power down unless the bit is set again
-	 */
-	fvp_pwrc_clr_wen(mpidr);
-
-	/* Zero the jump address in the mailbox for this cpu */
-	fvp_program_mailbox(mpidr, 0);
-
-	/* Enable the gic cpu interface */
-	arm_gic_cpuif_setup();
-
-	/* TODO: This setup is needed only after a cold boot */
-	arm_gic_pcpu_distif_setup();
-}
-
-/*******************************************************************************
- * FVP handler called when an affinity instance has just been powered on after
- * having been suspended earlier. The level and mpidr determine the affinity
- * instance.
- * TODO: At the moment we reuse the on finisher and reinitialize the secure
- * context. Need to implement a separate suspend finisher.
- ******************************************************************************/
-void fvp_affinst_suspend_finish(unsigned int afflvl,
-			       unsigned int state)
-{
-	fvp_affinst_on_finish(afflvl, state);
-}
-
-/*******************************************************************************
- * FVP handlers to shutdown/reboot the system
- ******************************************************************************/
-static void __dead2 fvp_system_off(void)
-{
-	/* Write the System Configuration Control Register */
-	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
-		CFGCTRL_START | CFGCTRL_RW | CFGCTRL_FUNC(FUNC_SHUTDOWN));
-	wfi();
-	ERROR("FVP System Off: operation not handled.\n");
-	panic();
-}
-
-static void __dead2 fvp_system_reset(void)
-{
-	/* Write the System Configuration Control Register */
-	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
-		CFGCTRL_START | CFGCTRL_RW | CFGCTRL_FUNC(FUNC_REBOOT));
-	wfi();
-	ERROR("FVP System Reset: operation not handled.\n");
-	panic();
-}
-
-/*******************************************************************************
- * FVP handler called to check the validity of the power state parameter.
- ******************************************************************************/
-int fvp_validate_power_state(unsigned int power_state)
-{
-	/* Sanity check the requested state */
-	if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
-		/*
-		 * It's possible to enter standby only on affinity level 0
-		 * i.e. a cpu on the fvp. Ignore any other affinity level.
-		 */
-		if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
-			return PSCI_E_INVALID_PARAMS;
-	}
-
-	/*
-	 * We expect the 'state id' to be zero.
-	 */
-	if (psci_get_pstate_id(power_state))
-		return PSCI_E_INVALID_PARAMS;
-
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * Export the platform handlers to enable psci to invoke them
- ******************************************************************************/
-static const plat_pm_ops_t fvp_plat_pm_ops = {
-	.affinst_standby = fvp_affinst_standby,
-	.affinst_on = fvp_affinst_on,
-	.affinst_off = fvp_affinst_off,
-	.affinst_suspend = fvp_affinst_suspend,
-	.affinst_on_finish = fvp_affinst_on_finish,
-	.affinst_suspend_finish = fvp_affinst_suspend_finish,
-	.system_off = fvp_system_off,
-	.system_reset = fvp_system_reset,
-	.validate_power_state = fvp_validate_power_state
-};
-
-/*******************************************************************************
- * Export the platform specific power ops & initialize the fvp power controller
- ******************************************************************************/
-int platform_setup_pm(const plat_pm_ops_t **plat_ops)
-{
-	*plat_ops = &fvp_plat_pm_ops;
-	return 0;
-}
diff --git a/plat/fvp/fvp_private.h b/plat/fvp/fvp_private.h
deleted file mode 100644
index 3949754..0000000
--- a/plat/fvp/fvp_private.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FVP_PRIVATE_H__
-#define __FVP_PRIVATE_H__
-
-#include <bakery_lock.h>
-#include <bl_common.h>
-#include <cpu_data.h>
-#include <platform_def.h>
-
-
-typedef volatile struct mailbox {
-	unsigned long value
-	__attribute__((__aligned__(CACHE_WRITEBACK_GRANULE)));
-} mailbox_t;
-
-/*******************************************************************************
- * This structure represents the superset of information that is passed to
- * BL31, e.g. while passing control to it from BL2, i.e. bl31_params
- * and bl31_plat_params and their elements
- ******************************************************************************/
-typedef struct bl2_to_bl31_params_mem {
-	bl31_params_t bl31_params;
-	image_info_t bl31_image_info;
-	image_info_t bl32_image_info;
-	image_info_t bl33_image_info;
-	entry_point_info_t bl33_ep_info;
-	entry_point_info_t bl32_ep_info;
-	entry_point_info_t bl31_ep_info;
-} bl2_to_bl31_params_mem_t;
-
-#if USE_COHERENT_MEM
-/*
- * These are wrapper macros to the Coherent Memory Bakery Lock API.
- */
-#define fvp_lock_init(_lock_arg)	bakery_lock_init(_lock_arg)
-#define fvp_lock_get(_lock_arg)		bakery_lock_get(_lock_arg)
-#define fvp_lock_release(_lock_arg)	bakery_lock_release(_lock_arg)
-
-#else
-
-/*******************************************************************************
- * Constants to specify how many bakery locks this platform implements. These
- * are used if the platform chooses not to use coherent memory for bakery lock
- * data structures.
- ******************************************************************************/
-#define FVP_MAX_BAKERIES	1
-#define FVP_PWRC_BAKERY_ID	0
-
-/*******************************************************************************
- * Definition of structure which holds platform specific per-cpu data. Currently
- * it holds only the bakery lock information for each cpu. Constants to
- * specify how many bakeries this platform implements and bakery ids are
- * specified in fvp_def.h
- ******************************************************************************/
-typedef struct fvp_cpu_data {
-	bakery_info_t pcpu_bakery_info[FVP_MAX_BAKERIES];
-} fvp_cpu_data_t;
-
-/* Macro to define the offset of bakery_info_t in fvp_cpu_data_t */
-#define FVP_CPU_DATA_LOCK_OFFSET	__builtin_offsetof\
-					    (fvp_cpu_data_t, pcpu_bakery_info)
-
-
-/*******************************************************************************
- * Helper macros for bakery lock api when using the above fvp_cpu_data_t for
- * bakery lock data structures. It assumes that the bakery_info is at the
- * beginning of the platform specific per-cpu data.
- ******************************************************************************/
-#define fvp_lock_init(_lock_arg)	/* No init required */
-#define fvp_lock_get(_lock_arg)		bakery_lock_get(_lock_arg,  	    \
-						CPU_DATA_PLAT_PCPU_OFFSET + \
-						FVP_CPU_DATA_LOCK_OFFSET)
-#define fvp_lock_release(_lock_arg)	bakery_lock_release(_lock_arg,	    \
-						CPU_DATA_PLAT_PCPU_OFFSET + \
-						FVP_CPU_DATA_LOCK_OFFSET)
-
-/*
- * Ensure that the size of the FVP specific per-cpu data structure and the size
- * of the memory allocated in generic per-cpu data for the platform are the same.
- */
-CASSERT(PLAT_PCPU_DATA_SIZE == sizeof(fvp_cpu_data_t),	\
-	fvp_pcpu_data_size_mismatch);
-
-#endif /* USE_COHERENT_MEM */
-
-/*******************************************************************************
- * Function and variable prototypes
- ******************************************************************************/
-void fvp_configure_mmu_el1(unsigned long total_base,
-			   unsigned long total_size,
-			   unsigned long,
-			   unsigned long
-#if USE_COHERENT_MEM
-			   , unsigned long,
-			   unsigned long
-#endif
-			   );
-void fvp_configure_mmu_el3(unsigned long total_base,
-			   unsigned long total_size,
-			   unsigned long,
-			   unsigned long
-#if USE_COHERENT_MEM
-			   , unsigned long,
-			   unsigned long
-#endif
-			   );
-
-int fvp_config_setup(void);
-
-void fvp_cci_init(void);
-void fvp_cci_enable(void);
-
-void fvp_gic_init(void);
-
-/* Declarations for fvp_topology.c */
-int fvp_setup_topology(void);
-
-/* Declarations for fvp_io_storage.c */
-void fvp_io_setup(void);
-
-/* Declarations for fvp_security.c */
-void fvp_security_setup(void);
-
-/* Gets the SPSR for BL32 entry */
-uint32_t fvp_get_spsr_for_bl32_entry(void);
-
-/* Gets the SPSR for BL33 entry */
-uint32_t fvp_get_spsr_for_bl33_entry(void);
-
-
-#endif /* __FVP_PRIVATE_H__ */
diff --git a/plat/fvp/fvp_security.c b/plat/fvp/fvp_security.c
deleted file mode 100644
index 62bde08..0000000
--- a/plat/fvp/fvp_security.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <plat_config.h>
-#include <tzc400.h>
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-/* Used to improve readability for configuring regions. */
-#define FILTER_SHIFT(filter)	(1 << filter)
-
-/*
- * For the moment we assume that all security programming is done by the
- * primary core.
- * TODO:
- * Might want to enable interrupt on violations when supported?
- */
-void fvp_security_setup(void)
-{
-	/*
-	 * The Base FVP has a TrustZone address space controller, the Foundation
-	 * FVP does not. Trying to program the device on the foundation FVP will
-	 * cause an abort.
-	 *
-	 * If the platform had additional peripheral specific security
-	 * configurations, those would be configured here.
-	 */
-
-	if (!(get_plat_config()->flags & CONFIG_HAS_TZC))
-		return;
-
-	/*
-	 * The TrustZone controller controls access to main DRAM. Give
-	 * full NS access for the moment to use with OS.
-	 */
-	INFO("Configuring TrustZone Controller\n");
-
-	/*
-	 * The driver does some error checking and will assert.
-	 * - Provide base address of device on platform.
-	 * - Provide width of ACE-Lite IDs on platform.
-	 */
-	tzc_init(TZC400_BASE);
-
-	/*
-	 * Currently only filters 0 and 2 are connected on Base FVP.
-	 * Filter 0 : CPU clusters (no access to DRAM by default)
-	 * Filter 1 : not connected
-	 * Filter 2 : LCDs (access to VRAM allowed by default)
-	 * Filter 3 : not connected
-	 * Programming unconnected filters will have no effect at the
- * moment. These filters could, however, be connected in the future.
-	 * So care should be taken not to configure the unused filters.
-	 */
-
-	/* Disable all filters before programming. */
-	tzc_disable_filters();
-
-	/*
-	 * Allow only non-secure access to all DRAM to supported devices.
-	 * Give access to the CPUs and Virtio. Some devices
-	 * would normally use the default ID so allow that too. We use
-	 * two regions to cover the blocks of physical memory in the FVPs
-	 * plus one region to reserve some memory as secure.
-	 *
-	 * Software executing in the secure state, such as a secure
-	 * boot-loader, can access the DRAM by using the NS attributes in
-	 * the MMU translation tables and descriptors.
-	 */
-
-	/* Region 1 set to cover the Non-Secure DRAM */
-	tzc_configure_region(FILTER_SHIFT(0), 1,
-			DRAM1_NS_BASE, DRAM1_NS_END,
-			TZC_REGION_S_NONE,
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));
-
-	/* Region 2 set to cover the Secure DRAM */
-	tzc_configure_region(FILTER_SHIFT(0), 2,
-			DRAM1_SEC_BASE, DRAM1_SEC_END,
-			TZC_REGION_S_RDWR,
-			0x0);
-
-	/* Region 3 set to cover the second block of DRAM */
-	tzc_configure_region(FILTER_SHIFT(0), 3,
-			DRAM2_BASE, DRAM2_END, TZC_REGION_S_NONE,
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
-			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));
-
-	/*
-	 * TODO: Interrupts are not currently supported. The only
-	 * options we have are for access errors to occur quietly or to
-	 * cause an exception. We choose to cause an exception.
-	 */
-	tzc_set_action(TZC_ACTION_ERR);
-
-	/* Enable filters. */
-	tzc_enable_filters();
-}
diff --git a/plat/fvp/fvp_topology.c b/plat/fvp/fvp_topology.c
deleted file mode 100644
index 49f7daf..0000000
--- a/plat/fvp/fvp_topology.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <platform_def.h>
-/* TODO: Reusing psci error codes & state information. Get our own! */
-#include <psci.h>
-#include "drivers/pwrc/fvp_pwrc.h"
-
-/* We treat '255' as an invalid affinity instance */
-#define AFFINST_INVAL	0xff
-
-/*******************************************************************************
- * We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
- * flavour has a different topology. The common bit is that there can be a max.
- * of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
- * a tree like data structure which caters to these maximum bounds. It simply
- * marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
- * cluster 1 on the Foundation FVP. The 'data' field is currently unused.
- ******************************************************************************/
-typedef struct affinity_info {
-	unsigned char sibling;
-	unsigned char child;
-	unsigned char state;
-	unsigned int data;
-} affinity_info_t;
-
-/*******************************************************************************
- * The following two data structures store the topology tree for the fvp. There
- * is a separate array for each affinity level i.e. cpus and clusters. The child
- * and sibling references allow traversal inside and in between the two arrays.
- ******************************************************************************/
-static affinity_info_t fvp_aff1_topology_map[PLATFORM_CLUSTER_COUNT];
-static affinity_info_t fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
-
-/* Simple global variable to safeguard us from stupidity */
-static unsigned int topology_setup_done;
-
-/*******************************************************************************
- * This function implements a part of the critical interface between the psci
- * generic layer and the platform to allow the former to detect the platform
- * topology. psci queries the platform to determine how many affinity instances
- * are present at a particular level for a given mpidr e.g. consider a dual
- * cluster platform where each cluster has 4 cpus. A call to this function with
- * (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
- * Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
- * This is because we are effectively asking how many affinity level 1 instances
- * are implemented under affinity level 2 instance 0.
- ******************************************************************************/
-unsigned int plat_get_aff_count(unsigned int aff_lvl,
-				unsigned long mpidr)
-{
-	unsigned int aff_count = 1, ctr;
-	unsigned char parent_aff_id;
-
-	assert(topology_setup_done == 1);
-
-	switch (aff_lvl) {
-	case 3:
-	case 2:
-		/*
-		 * Assert if the parent affinity instance is not 0.
-		 * This also takes care of level 3 in an obfuscated way
-		 */
-		parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
-		assert(parent_aff_id == 0);
-
-		/*
-		 * Report that we implement a single instance of
-		 * affinity levels 2 & 3 which are AFF_ABSENT
-		 */
-		break;
-	case 1:
-		/* Assert if the parent affinity instance is not 0. */
-		parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
-		assert(parent_aff_id == 0);
-
-		/* Fetch the starting index in the aff1 array */
-		for (ctr = 0;
-		     fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
-		     ctr = fvp_aff1_topology_map[ctr].sibling) {
-			aff_count++;
-		}
-
-		break;
-	case 0:
-		/* Assert if the cluster id is anything apart from 0 or 1 */
-		parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-		assert(parent_aff_id < PLATFORM_CLUSTER_COUNT);
-
-		/* Fetch the starting index in the aff0 array */
-		for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
-		     fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
-		     ctr = fvp_aff0_topology_map[ctr].sibling) {
-			aff_count++;
-		}
-
-		break;
-	default:
-		assert(0);
-	}
-
-	return aff_count;
-}
-
-/*******************************************************************************
- * This function implements a part of the critical interface between the psci
- * generic layer and the platform to allow the former to detect the state of an
- * affinity instance in the platform topology. psci queries the platform to
- * determine whether an affinity instance is present or absent. This caters for
- * topologies where an intermediate affinity level instance is missing e.g.
- * consider a platform which implements a single cluster with 4 cpus and there
- * is another cpu sitting directly on the interconnect along with the cluster.
- * The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
- * cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
- * 1 is however missing but needs to be accounted for to reach this single cpu in
- * the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
- * applicable to the FVP but depicted as an example.
- ******************************************************************************/
-unsigned int plat_get_aff_state(unsigned int aff_lvl,
-				unsigned long mpidr)
-{
-	unsigned int aff_state = PSCI_AFF_ABSENT, idx;
-	idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
-
-	assert(topology_setup_done == 1);
-
-	switch (aff_lvl) {
-	case 3:
-	case 2:
-		/* Report affinity levels 2 & 3 as absent */
-		break;
-	case 1:
-		aff_state = fvp_aff1_topology_map[idx].state;
-		break;
-	case 0:
-		/*
-		 * First get start index of the aff0 in its array & then add
-		 * to it the affinity id that we want the state of
-		 */
-		idx = fvp_aff1_topology_map[idx].child;
-		idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
-		aff_state = fvp_aff0_topology_map[idx].state;
-		break;
-	default:
-		assert(0);
-	}
-
-	return aff_state;
-}
-
-/*******************************************************************************
- * Handy optimization to prevent the psci implementation from traversing through
- * affinity levels which are not present while detecting the platform topology.
- ******************************************************************************/
-int plat_get_max_afflvl(void)
-{
-	return MPIDR_AFFLVL1;
-}
-
-/*******************************************************************************
- * This function populates the FVP specific topology information depending upon
- * the FVP flavour it is running on. We construct all the mpidrs we can handle
- * and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
- ******************************************************************************/
-int fvp_setup_topology(void)
-{
-	unsigned char aff0, aff1, aff_state, aff0_offset = 0;
-	unsigned long mpidr;
-
-	topology_setup_done = 0;
-
-	for (aff1 = 0; aff1 < PLATFORM_CLUSTER_COUNT; aff1++) {
-
-		fvp_aff1_topology_map[aff1].child = aff0_offset;
-		fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
-
-		for (aff0 = 0; aff0 < PLATFORM_MAX_CPUS_PER_CLUSTER; aff0++) {
-
-			mpidr = aff1 << MPIDR_AFF1_SHIFT;
-			mpidr |= aff0 << MPIDR_AFF0_SHIFT;
-
-			if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
-				/*
-				 * Presence of even a single aff0 indicates
-				 * presence of parent aff1 on the FVP.
-				 */
-				aff_state = PSCI_AFF_PRESENT;
-				fvp_aff1_topology_map[aff1].state =
-					PSCI_AFF_PRESENT;
-			} else {
-				aff_state = PSCI_AFF_ABSENT;
-			}
-
-			fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
-			fvp_aff0_topology_map[aff0_offset].state = aff_state;
-			fvp_aff0_topology_map[aff0_offset].sibling =
-				aff0_offset + 1;
-
-			/* Increment the absolute number of aff0s traversed */
-			aff0_offset++;
-		}
-
-		/* Tie-off the last aff0 sibling to -1 to avoid overflow */
-		fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
-	}
-
-	/* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
-	fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
-
-	topology_setup_done = 1;
-	return 0;
-}
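The child/sibling bookkeeping in the file deleted above is easiest to follow with a concrete lookup. The stand-alone sketch below is not part of the removed file: the struct layout and constants are simplified stand-ins, assuming the FVP's two-cluster, four-CPUs-per-cluster arrangement, and it only reproduces the index arithmetic of the aff_lvl == 0 branch of plat_get_aff_state().

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the deleted fvp_topology.c structures. */
typedef struct {
	unsigned char sibling;	/* next node at the same level, 0xff = end */
	unsigned char child;	/* first aff0 index owned by this cluster  */
	unsigned char state;	/* 1 = present, 0 = absent                 */
} aff_node_t;

static aff_node_t aff1_map[2] = {
	{ .sibling = 1,    .child = 0, .state = 1 },
	{ .sibling = 0xff, .child = 4, .state = 1 },
};
static aff_node_t aff0_map[8];	/* 2 clusters x 4 CPUs, laid out back to back */

/* Mirrors the aff_lvl == 0 branch of plat_get_aff_state(). */
static unsigned int aff0_state(unsigned long mpidr)
{
	unsigned int idx = (mpidr >> 8) & 0xff;		/* AFF1: cluster id */
	idx = aff1_map[idx].child + (mpidr & 0xff);	/* AFF0: cpu id     */
	return aff0_map[idx].state;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		aff0_map[i].state = 1;
	/* MPIDR 0x101 = cluster 1, cpu 1 -> aff0 index 4 + 1 = 5 */
	assert(aff0_state(0x101) == 1);
	printf("0x101 maps to aff0 index %d\n", aff1_map[1].child + 1);
	return 0;
}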
diff --git a/plat/fvp/fvp_trusted_boot.c b/plat/fvp/fvp_trusted_boot.c
deleted file mode 100644
index e7dcc01..0000000
--- a/plat/fvp/fvp_trusted_boot.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include "fvp_def.h"
-#include "fvp_private.h"
-
-/*
- * Check the validity of the key
- *
- * 0 = success, Otherwise = error
- */
-int plat_match_rotpk(const unsigned char *key_buf, unsigned int key_len)
-{
-	/* TODO: check against the ROT key stored in the platform */
-	return 0;
-}
diff --git a/plat/fvp/include/plat_macros.S b/plat/fvp/include/plat_macros.S
deleted file mode 100644
index f050261..0000000
--- a/plat/fvp/include/plat_macros.S
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include <cci400.h>
-#include <gic_v2.h>
-#include <plat_config.h>
-#include "../fvp_def.h"
-
-.section .rodata.gic_reg_name, "aS"
-gicc_regs:
-	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
-gicd_pend_reg:
-	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
-newline:
-	.asciz "\n"
-spacer:
-	.asciz ":\t\t0x"
-
-	/* ---------------------------------------------
-	 * The below macro prints out relevant GIC
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x10, x16, x17, sp
-	 * ---------------------------------------------
-	 */
-	.macro plat_print_gic_regs
-	mov_imm	x0, (VE_SYSREGS_BASE + V2M_SYS_ID)
-	ldr	w16, [x0]
-	/* Extract BLD (12th - 15th bits) from the SYS_ID */
-	ubfx	x16, x16, #SYS_ID_BLD_SHIFT, #4
-	/* Check if VE mmap */
-	cmp	w16, #BLD_GIC_VE_MMAP
-	b.eq	use_ve_mmap
-	/* Check if Cortex-A53/A57 mmap */
-	cmp	w16, #BLD_GIC_A53A57_MMAP
-	b.ne	exit_print_gic_regs
-	mov_imm	x17, BASE_GICC_BASE
-	mov_imm	x16, BASE_GICD_BASE
-	b	print_gicc_regs
-use_ve_mmap:
-	mov_imm	x17, VE_GICC_BASE
-	mov_imm	x16, VE_GICD_BASE
-print_gicc_regs:
-	/* gicc base address is now in x17 */
-	adr	x6, gicc_regs	/* Load the gicc reg list to x6 */
-	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
-	ldr	w8, [x17, #GICC_HPPIR]
-	ldr	w9, [x17, #GICC_AHPPIR]
-	ldr	w10, [x17, #GICC_CTLR]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-
-	/* Print the GICD_ISPENDR regs */
-	add	x7, x16, #GICD_ISPENDR
-	adr	x4, gicd_pend_reg
-	bl	asm_print_str
-gicd_ispendr_loop:
-	sub	x4, x7, x16
-	cmp	x4, #0x280
-	b.eq	exit_print_gic_regs
-	bl	asm_print_hex
-	adr	x4, spacer
-	bl	asm_print_str
-	ldr	x4, [x7], #8
-	bl	asm_print_hex
-	adr	x4, newline
-	bl	asm_print_str
-	b	gicd_ispendr_loop
-exit_print_gic_regs:
-	.endm
-
-.section .rodata.cci_reg_name, "aS"
-cci_iface_regs:
-	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
-
-	/* ------------------------------------------------
-	 * The below macro prints out relevant interconnect
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x9, sp
-	 * ------------------------------------------------
-	 */
-	.macro plat_print_interconnect_regs
-	adr	x6, cci_iface_regs
-	/* Store in x7 the base address of the first interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE3_OFFSET)
-	ldr	w8, [x7, #SNOOP_CTRL_REG]
-	/* Store in x7 the base address of the second interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE4_OFFSET)
-	ldr	w9, [x7, #SNOOP_CTRL_REG]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-	.endm
diff --git a/plat/fvp/include/platform_def.h b/plat/fvp/include/platform_def.h
deleted file mode 100644
index 182c150..0000000
--- a/plat/fvp/include/platform_def.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PLATFORM_DEF_H__
-#define __PLATFORM_DEF_H__
-
-#include <arch.h>
-#include "../fvp_def.h"
-
-
-/*******************************************************************************
- * Platform binary types for linking
- ******************************************************************************/
-#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
-#define PLATFORM_LINKER_ARCH            aarch64
-
-/*******************************************************************************
- * Generic platform constants
- ******************************************************************************/
-
-/* Size of cacheable stacks */
-#if DEBUG_XLAT_TABLE
-#define PLATFORM_STACK_SIZE 0x800
-#elif IMAGE_BL1
-#if TRUSTED_BOARD_BOOT
-#define PLATFORM_STACK_SIZE 0x1000
-#else
-#define PLATFORM_STACK_SIZE 0x440
-#endif
-#elif IMAGE_BL2
-#if TRUSTED_BOARD_BOOT
-#define PLATFORM_STACK_SIZE 0x1000
-#else
-#define PLATFORM_STACK_SIZE 0x400
-#endif
-#elif IMAGE_BL31
-#define PLATFORM_STACK_SIZE 0x400
-#elif IMAGE_BL32
-#define PLATFORM_STACK_SIZE 0x440
-#endif
-
-#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
-
-/* Trusted Boot Firmware BL2 */
-#define BL2_IMAGE_NAME			"bl2.bin"
-
-/* EL3 Runtime Firmware BL31 */
-#define BL31_IMAGE_NAME			"bl31.bin"
-
-/* Secure Payload BL32 (Trusted OS) */
-#define BL32_IMAGE_NAME			"bl32.bin"
-
-/* Non-Trusted Firmware BL33 */
-#define BL33_IMAGE_NAME			"bl33.bin" /* e.g. UEFI */
-
-#if TRUSTED_BOARD_BOOT
-/* Certificates */
-# define BL2_CERT_NAME			"bl2.crt"
-# define TRUSTED_KEY_CERT_NAME		"trusted_key.crt"
-
-# define BL30_KEY_CERT_NAME		"bl30_key.crt"
-# define BL31_KEY_CERT_NAME		"bl31_key.crt"
-# define BL32_KEY_CERT_NAME		"bl32_key.crt"
-# define BL33_KEY_CERT_NAME		"bl33_key.crt"
-
-# define BL30_CERT_NAME			"bl30.crt"
-# define BL31_CERT_NAME			"bl31.crt"
-# define BL32_CERT_NAME			"bl32.crt"
-# define BL33_CERT_NAME			"bl33.crt"
-#endif /* TRUSTED_BOARD_BOOT */
-
-#define PLATFORM_CACHE_LINE_SIZE	64
-#define PLATFORM_CLUSTER_COUNT		2ull
-#define PLATFORM_CLUSTER0_CORE_COUNT	4
-#define PLATFORM_CLUSTER1_CORE_COUNT	4
-#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT + \
-						PLATFORM_CLUSTER0_CORE_COUNT)
-#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
-#define PLATFORM_NUM_AFFS		(PLATFORM_CLUSTER_COUNT + \
-					 PLATFORM_CORE_COUNT)
-#define MAX_IO_DEVICES			3
-#define MAX_IO_HANDLES			4
-
-/*******************************************************************************
- * BL1 specific defines.
- * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
- * addresses.
- ******************************************************************************/
-#define BL1_RO_BASE			FVP_TRUSTED_ROM_BASE
-#define BL1_RO_LIMIT			(FVP_TRUSTED_ROM_BASE \
-					+ FVP_TRUSTED_ROM_SIZE)
-/*
- * Put BL1 RW at the top of the Trusted SRAM. BL1_RW_BASE is calculated using
- * the current BL1 RW debug size plus a little space for growth.
- */
-#if TRUSTED_BOARD_BOOT
-#define BL1_RW_BASE			(FVP_TRUSTED_SRAM_BASE \
-					+ FVP_TRUSTED_SRAM_SIZE - 0x8000)
-#else
-#define BL1_RW_BASE			(FVP_TRUSTED_SRAM_BASE \
-					+ FVP_TRUSTED_SRAM_SIZE - 0x6000)
-#endif
-#define BL1_RW_LIMIT			(FVP_TRUSTED_SRAM_BASE \
-					+ FVP_TRUSTED_SRAM_SIZE)
-
-/*******************************************************************************
- * BL2 specific defines.
- ******************************************************************************/
-/*
- * Put BL2 just below BL3-1. BL2_BASE is calculated using the current BL2 debug
- * size plus a little space for growth.
- */
-#if TRUSTED_BOARD_BOOT
-#define BL2_BASE			(BL31_BASE - 0x1C000)
-#else
-#define BL2_BASE			(BL31_BASE - 0xC000)
-#endif
-#define BL2_LIMIT			BL31_BASE
-
-/*******************************************************************************
- * BL31 specific defines.
- ******************************************************************************/
-/*
- * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
- * current BL3-1 debug size plus a little space for growth.
- */
-#define BL31_BASE			(FVP_TRUSTED_SRAM_BASE \
-					+ FVP_TRUSTED_SRAM_SIZE - 0x1D000)
-#define BL31_PROGBITS_LIMIT		BL1_RW_BASE
-#define BL31_LIMIT			(FVP_TRUSTED_SRAM_BASE \
-					+ FVP_TRUSTED_SRAM_SIZE)
-
-/*******************************************************************************
- * BL32 specific defines.
- ******************************************************************************/
-/*
- * The TSP can execute either from Trusted SRAM or Trusted DRAM.
- */
-#define BL32_SRAM_BASE			FVP_TRUSTED_SRAM_BASE
-#define BL32_SRAM_LIMIT			BL31_BASE
-#define BL32_DRAM_BASE			FVP_TRUSTED_DRAM_BASE
-#define BL32_DRAM_LIMIT			(FVP_TRUSTED_DRAM_BASE + (1 << 21))
-
-#if FVP_TSP_RAM_LOCATION_ID == FVP_IN_TRUSTED_SRAM
-# define TSP_SEC_MEM_BASE		FVP_TRUSTED_SRAM_BASE
-# define TSP_SEC_MEM_SIZE		FVP_TRUSTED_SRAM_SIZE
-# define TSP_PROGBITS_LIMIT		BL2_BASE
-# define BL32_BASE			BL32_SRAM_BASE
-# define BL32_LIMIT			BL32_SRAM_LIMIT
-#elif FVP_TSP_RAM_LOCATION_ID == FVP_IN_TRUSTED_DRAM
-# define TSP_SEC_MEM_BASE		FVP_TRUSTED_DRAM_BASE
-# define TSP_SEC_MEM_SIZE		FVP_TRUSTED_DRAM_SIZE
-# define BL32_BASE			BL32_DRAM_BASE
-# define BL32_LIMIT			BL32_DRAM_LIMIT
-#else
-# error "Unsupported FVP_TSP_RAM_LOCATION_ID value"
-#endif
-
-/*
- * ID of the secure physical generic timer interrupt used by the TSP.
- */
-#define TSP_IRQ_SEC_PHY_TIMER		IRQ_SEC_PHY_TIMER
-
-/*******************************************************************************
- * Platform specific page table and MMU setup constants
- ******************************************************************************/
-#define ADDR_SPACE_SIZE			(1ull << 32)
-
-#if IMAGE_BL1
-# define MAX_XLAT_TABLES		2
-#elif IMAGE_BL2
-# define MAX_XLAT_TABLES		3
-#elif IMAGE_BL31
-# define MAX_XLAT_TABLES		2
-#elif IMAGE_BL32
-# if FVP_TSP_RAM_LOCATION_ID == FVP_DRAM_ID
-#  define MAX_XLAT_TABLES		3
-# else
-#  define MAX_XLAT_TABLES		2
-# endif
-#endif
-
-#define MAX_MMAP_REGIONS		16
-
-/*******************************************************************************
- * Declarations and constants to access the mailboxes safely. Each mailbox is
- * aligned on the biggest cache line size in the platform. This is known only
- * to the platform as it might have a combination of integrated and external
- * caches. Such alignment ensures that two mailboxes do not sit on the same cache
- * line at any cache level. They could belong to different cpus/clusters &
- * get written while being protected by different locks causing corruption of
- * a valid mailbox address.
- ******************************************************************************/
-#define CACHE_WRITEBACK_SHIFT   6
-#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
-
-#if !USE_COHERENT_MEM
-/*******************************************************************************
- * Size of the per-cpu data in bytes that should be reserved in the generic
- * per-cpu data structure for the FVP port.
- ******************************************************************************/
-#define PLAT_PCPU_DATA_SIZE	2
-#endif
-
-#endif /* __PLATFORM_DEF_H__ */
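As a rough illustration of why CACHE_WRITEBACK_GRANULE above matters, a per-cpu mailbox would typically be padded and aligned to that granule so that no two mailboxes can ever share a cache line. The sketch below is hypothetical (the mailbox_t type, field names and NUM_CPUS are not from this tree); only the granule value comes from the header removed above.

#include <stdint.h>

#define CACHE_WRITEBACK_GRANULE	64	/* matches the value removed above */
#define NUM_CPUS		8

/* Hypothetical per-cpu mailbox, padded to a full writeback granule so
 * that two cpus never write entrypoints into the same cache line. */
typedef struct {
	uint64_t entrypoint;
	uint8_t  pad[CACHE_WRITEBACK_GRANULE - sizeof(uint64_t)];
} __attribute__((aligned(CACHE_WRITEBACK_GRANULE))) mailbox_t;

static mailbox_t mailboxes[NUM_CPUS];

int main(void)
{
	/* Each mailbox occupies exactly one writeback granule. */
	return (sizeof(mailbox_t) == CACHE_WRITEBACK_GRANULE) ? 0 : 1;
}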
diff --git a/plat/fvp/include/platform_oid.h b/plat/fvp/include/platform_oid.h
deleted file mode 100644
index 38aca12..0000000
--- a/plat/fvp/include/platform_oid.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PLATFORM_OID_H_
-#define PLATFORM_OID_H_
-
-/*
- * This is the list of the different extensions containing relevant information
- * to establish the chain of trust.
- *
- * The OIDs shown here are just an example. Real OIDs should be obtained from
- * the ITU-T.
- */
-
-/* Non-volatile counter extensions */
-#define TZ_FW_NVCOUNTER_OID		"1.2.3.1"
-#define NTZ_FW_NVCOUNTER_OID		"1.2.3.2"
-
-/* BL2 extensions */
-#define BL2_HASH_OID			"1.2.3.3"
-
-/* Trusted Key extensions */
-#define TZ_WORLD_PK_OID			"1.2.3.4"
-#define NTZ_WORLD_PK_OID		"1.2.3.5"
-
-/* BL3-1 extensions */
-#define BL31_CONTENT_CERT_PK_OID	"1.2.3.6"
-#define BL31_HASH_OID			"1.2.3.7"
-
-/* BL3-0 extensions */
-#define BL30_CONTENT_CERT_PK_OID	"1.2.3.8"
-#define BL30_HASH_OID			"1.2.3.9"
-
-/* BL3-2 extensions */
-#define BL32_CONTENT_CERT_PK_OID	"1.2.3.10"
-#define BL32_HASH_OID			"1.2.3.11"
-
-/* BL3-3 extensions */
-#define BL33_CONTENT_CERT_PK_OID	"1.2.3.12"
-#define BL33_HASH_OID			"1.2.3.13"
-
-#endif /* PLATFORM_OID_H_ */
diff --git a/plat/fvp/platform.mk b/plat/fvp/platform.mk
deleted file mode 100644
index bcee328..0000000
--- a/plat/fvp/platform.mk
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# On FVP, the TSP can execute either from Trusted SRAM or Trusted DRAM.
-# Trusted SRAM is the default.
-FVP_TSP_RAM_LOCATION	:=	tsram
-ifeq (${FVP_TSP_RAM_LOCATION}, tsram)
-  FVP_TSP_RAM_LOCATION_ID := FVP_TRUSTED_SRAM_ID
-else ifeq (${FVP_TSP_RAM_LOCATION}, tdram)
-  FVP_TSP_RAM_LOCATION_ID := FVP_TRUSTED_DRAM_ID
-else ifeq (${FVP_TSP_RAM_LOCATION}, dram)
-  FVP_TSP_RAM_LOCATION_ID := FVP_DRAM_ID
-else
-  $(error "Unsupported FVP_TSP_RAM_LOCATION value")
-endif
-
-# Process flags
-$(eval $(call add_define,FVP_TSP_RAM_LOCATION_ID))
-
-PLAT_INCLUDES		:=	-Iplat/fvp/include/
-
-PLAT_BL_COMMON_SOURCES	:=	drivers/arm/pl011/pl011_console.S		\
-				drivers/io/io_fip.c				\
-				drivers/io/io_memmap.c				\
-				drivers/io/io_semihosting.c			\
-				drivers/io/io_storage.c				\
-				lib/aarch64/xlat_tables.c			\
-				lib/semihosting/semihosting.c			\
-				lib/semihosting/aarch64/semihosting_call.S	\
-				plat/common/aarch64/plat_common.c		\
-				plat/fvp/fvp_io_storage.c
-
-BL1_SOURCES		+=	drivers/arm/cci400/cci400.c			\
-				lib/cpus/aarch64/aem_generic.S			\
-				lib/cpus/aarch64/cortex_a53.S			\
-				lib/cpus/aarch64/cortex_a57.S			\
-				plat/common/aarch64/platform_up_stack.S		\
-				plat/fvp/bl1_fvp_setup.c			\
-				plat/fvp/aarch64/fvp_common.c			\
-				plat/fvp/aarch64/fvp_helpers.S
-
-BL2_SOURCES		+=	drivers/arm/tzc400/tzc400.c			\
-				plat/common/aarch64/platform_up_stack.S		\
-				plat/fvp/bl2_fvp_setup.c			\
-				plat/fvp/fvp_security.c				\
-				plat/fvp/aarch64/fvp_common.c
-
-BL31_SOURCES		+=	drivers/arm/cci400/cci400.c			\
-				drivers/arm/gic/arm_gic.c			\
-				drivers/arm/gic/gic_v2.c			\
-				drivers/arm/gic/gic_v3.c			\
-				drivers/arm/tzc400/tzc400.c			\
-				lib/cpus/aarch64/aem_generic.S			\
-				lib/cpus/aarch64/cortex_a53.S			\
-				lib/cpus/aarch64/cortex_a57.S			\
-				plat/common/plat_gic.c				\
-				plat/common/aarch64/platform_mp_stack.S		\
-				plat/fvp/bl31_fvp_setup.c			\
-				plat/fvp/fvp_pm.c				\
-				plat/fvp/fvp_security.c				\
-				plat/fvp/fvp_topology.c				\
-				plat/fvp/aarch64/fvp_helpers.S			\
-				plat/fvp/aarch64/fvp_common.c			\
-				plat/fvp/drivers/pwrc/fvp_pwrc.c
-
-ifneq (${TRUSTED_BOARD_BOOT},0)
-  BL1_SOURCES		+=	plat/fvp/fvp_trusted_boot.c
-  BL2_SOURCES		+=	plat/fvp/fvp_trusted_boot.c
-endif
diff --git a/plat/fvp/tsp/tsp-fvp.mk b/plat/fvp/tsp/tsp-fvp.mk
deleted file mode 100644
index d2e112a..0000000
--- a/plat/fvp/tsp/tsp-fvp.mk
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# TSP source files specific to FVP platform
-BL32_SOURCES		+=	drivers/arm/gic/arm_gic.c			\
-				drivers/arm/gic/gic_v2.c			\
-				plat/common/aarch64/platform_mp_stack.S		\
-				plat/common/plat_gic.c				\
-				plat/fvp/aarch64/fvp_common.c			\
-				plat/fvp/aarch64/fvp_helpers.S			\
-				plat/fvp/tsp/tsp_fvp_setup.c
diff --git a/plat/fvp/tsp/tsp_fvp_setup.c b/plat/fvp/tsp/tsp_fvp_setup.c
deleted file mode 100644
index d8f46bd..0000000
--- a/plat/fvp/tsp/tsp_fvp_setup.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <bl_common.h>
-#include <console.h>
-#include <platform_tsp.h>
-#include "../fvp_def.h"
-#include "../fvp_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-extern unsigned long __BL32_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 3 constants identify the extents of the code & RO data region and
- * the limit of the BL3-2 image. These addresses are used by the MMU setup code
- * and therefore they must be page-aligned.  It is the responsibility of the
- * linker script to ensure that __RO_START__, __RO_END__ & __BL32_END__
- * linker symbols refer to page-aligned addresses.
- */
-#define BL32_RO_BASE (unsigned long)(&__RO_START__)
-#define BL32_RO_LIMIT (unsigned long)(&__RO_END__)
-#define BL32_END (unsigned long)(&__BL32_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL32_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL32_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/*******************************************************************************
- * Initialize the UART
- ******************************************************************************/
-void tsp_early_platform_setup(void)
-{
-	/*
-	 * Initialize a console different from the one already in use to display
-	 * messages from the TSP
-	 */
-	console_init(PL011_UART2_BASE, PL011_UART2_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Initialize the platform config for future decision making */
-	fvp_config_setup();
-}
-
-/*******************************************************************************
- * Perform platform specific setup. At the moment this just initializes the GIC.
- ******************************************************************************/
-void tsp_platform_setup(void)
-{
-	fvp_gic_init();
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only initializes the MMU
- ******************************************************************************/
-void tsp_plat_arch_setup(void)
-{
-	fvp_configure_mmu_el1(BL32_RO_BASE,
-			      (BL32_END - BL32_RO_BASE),
-			      BL32_RO_BASE,
-			      BL32_RO_LIMIT
-#if USE_COHERENT_MEM
-			      , BL32_COHERENT_RAM_BASE,
-			      BL32_COHERENT_RAM_LIMIT
-#endif
-			      );
-}
diff --git a/plat/hikey/aarch64/bl1_plat_helpers.S b/plat/hikey/aarch64/bl1_plat_helpers.S
deleted file mode 100644
index d3e6ff6..0000000
--- a/plat/hikey/aarch64/bl1_plat_helpers.S
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include "../hikey_def.h"
-
-	.globl	platform_is_primary_cpu
-	.globl	platform_get_entrypoint
-	.globl	platform_cold_boot_init
-	.globl	plat_secondary_cold_boot_setup
-
-func platform_is_primary_cpu
-	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
-	cmp	x0, #0
-	cset	x0, eq
-	ret
-
-	/*
-	 * Do we need to know whether it's a warm boot?
-	 */
-func platform_get_entrypoint
-	mov	x0, #0
-	ret
-
-func plat_secondary_cold_boot_setup
-cb_panic:
-	b	cb_panic
diff --git a/plat/hikey/aarch64/hikey_common.c b/plat/hikey/aarch64/hikey_common.c
deleted file mode 100644
index ba1313d..0000000
--- a/plat/hikey/aarch64/hikey_common.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <debug.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <xlat_tables.h>
-#include <../hikey_def.h>
-
-#define MAP_DEVICE	MAP_REGION_FLAT(DEVICE_BASE,			\
-					DEVICE_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_NS_DRAM	MAP_REGION_FLAT(DRAM_NS_BASE,			\
-					DRAM_NS_SIZE,			\
-					MT_DEVICE | MT_RW | MT_NS)
-
-#define MAP_TSP_MEM	MAP_REGION_FLAT(TSP_SEC_MEM_BASE, 		\
-					TSP_SEC_MEM_SIZE,		\
-					MT_MEMORY | MT_RW | MT_SECURE)
-
-#define MAP_ROM_PARAM	MAP_REGION_FLAT(XG2RAM0_BASE,			\
-					0x1000,				\
-					MT_DEVICE | MT_RW | MT_NS)
-
-#define MAP_SRAM	MAP_REGION_FLAT(SRAM_BASE,			\
-					SRAM_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-/*
- * Table of regions for different BL stages to map using the MMU.
- * This doesn't include Trusted RAM as the 'mem_layout' argument passed to
- * configure_mmu_elx() will give the available subset of that.
- */
-#if IMAGE_BL1
-static const mmap_region_t hikey_mmap[] = {
-	MAP_DEVICE,
-	MAP_NS_DRAM,
-	MAP_ROM_PARAM,
-	{0}
-};
-#endif
-#if IMAGE_BL2
-static const mmap_region_t hikey_mmap[] = {
-	MAP_DEVICE,
-	MAP_NS_DRAM,
-	MAP_TSP_MEM,
-	MAP_SRAM,
-	{0}
-};
-#endif
-#if IMAGE_BL31
-static const mmap_region_t hikey_mmap[] = {
-	MAP_DEVICE,
-	MAP_NS_DRAM,
-	MAP_TSP_MEM,
-	MAP_SRAM,
-	{0}
-};
-#endif
-#if IMAGE_BL32
-static const mmap_region_t hikey_mmap[] = {
-	MAP_DEVICE,
-	MAP_NS_DRAM,
-	{0}
-};
-#endif
-
-/* Array of secure interrupts to be configured by the gic driver */
-const unsigned int irq_sec_array[] = {
-	IRQ_SEC_PHY_TIMER,
-	IRQ_SEC_SGI_0,
-	IRQ_SEC_SGI_1,
-	IRQ_SEC_SGI_2,
-	IRQ_SEC_SGI_3,
-	IRQ_SEC_SGI_4,
-	IRQ_SEC_SGI_5,
-	IRQ_SEC_SGI_6,
-	IRQ_SEC_SGI_7
-};
-
-const unsigned int num_sec_irqs = sizeof(irq_sec_array) /
-	sizeof(irq_sec_array[0]);
-
-/*******************************************************************************
- * Macro generating the code for the function setting up the pagetables as per
- * the platform memory map & initializing the MMU, for the given exception level
- ******************************************************************************/
-#define DEFINE_CONFIGURE_MMU_EL(_el)				\
-	void configure_mmu_el##_el(unsigned long total_base,	\
-				  unsigned long total_size,	\
-				  unsigned long ro_start,	\
-				  unsigned long ro_limit,	\
-				  unsigned long coh_start,	\
-				  unsigned long coh_limit)	\
-	{							\
-	       mmap_add_region(total_base, total_base,		\
-			       total_size,			\
-			       MT_MEMORY | MT_RW | MT_SECURE);	\
-	       mmap_add_region(ro_start, ro_start,		\
-			       ro_limit - ro_start,		\
-			       MT_MEMORY | MT_RO | MT_SECURE);	\
-	       mmap_add_region(coh_start, coh_start,		\
-			       coh_limit - coh_start,		\
-			       MT_DEVICE | MT_RW | MT_SECURE);	\
-	       mmap_add(hikey_mmap);				\
-	       init_xlat_tables();				\
-								\
-	       enable_mmu_el##_el(0);				\
-	}
-
-/* Define EL1 and EL3 variants of the function initialising the MMU */
-DEFINE_CONFIGURE_MMU_EL(1)
-DEFINE_CONFIGURE_MMU_EL(3)
-
-unsigned long plat_get_ns_image_entrypoint(void)
-{
-	return NS_IMAGE_OFFSET;
-}
-
-uint64_t plat_get_syscnt_freq(void)
-{
-	return 1200000;
-}
-
-void plat_gic_init(void)
-{
-	arm_gic_init(GICC_BASE, GICD_BASE, 0, irq_sec_array, num_sec_irqs);
-}
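For readability, this is what the DEFINE_CONFIGURE_MMU_EL(3) instantiation above expands to once the preprocessor pastes in the exception level. It is hand-expanded from the macro in the removed file and relies on the TF xlat_tables API (mmap_add_region, mmap_add, init_xlat_tables, enable_mmu_el3) declared elsewhere, so it is a reference expansion rather than a stand-alone program.

void configure_mmu_el3(unsigned long total_base,
		       unsigned long total_size,
		       unsigned long ro_start,
		       unsigned long ro_limit,
		       unsigned long coh_start,
		       unsigned long coh_limit)
{
	/* Map all of Trusted RAM read-write... */
	mmap_add_region(total_base, total_base, total_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	/* ...then the code/RO data region read-only... */
	mmap_add_region(ro_start, ro_start, ro_limit - ro_start,
			MT_MEMORY | MT_RO | MT_SECURE);
	/* ...and the coherent region as device memory. */
	mmap_add_region(coh_start, coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
	mmap_add(hikey_mmap);		/* platform regions defined above */
	init_xlat_tables();
	enable_mmu_el3(0);
}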
diff --git a/plat/hikey/aarch64/plat_helpers.S b/plat/hikey/aarch64/plat_helpers.S
deleted file mode 100644
index 02f739c..0000000
--- a/plat/hikey/aarch64/plat_helpers.S
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <bl_common.h>
-#include <cortex_a53.h>
-#include <cpu_macros.S>
-#include <platform_def.h>
-#include "../hikey_def.h"
-
-	.globl	plat_crash_console_init
-	.globl	plat_crash_console_putc
-	.globl	plat_report_exception
-	.globl	plat_reset_handler
-	.globl	platform_get_core_pos
-	.globl	platform_mem_init
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_init(void)
-	 * Function to initialize the crash console
-	 * without a C Runtime to print crash report.
-	 * Clobber list : x0, x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_init
-	mov_imm	x0, CRASH_CONSOLE_BASE
-	mov_imm	x1, PL011_UART_CLK_IN_HZ
-	mov_imm	x2, PL011_BAUDRATE
-	b	console_core_init
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_putc(int c)
-	 * Function to print a character on the crash
-	 * console without a C Runtime.
-	 * Clobber list : x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_putc
-	mov_imm	x1, CRASH_CONSOLE_BASE
-	b	console_core_putc
-
-	/* ---------------------------------------------
-	 * void plat_report_exception(unsigned int type)
-	 * Function to report an unhandled exception
-	 * with platform-specific means.
-	 * On HIKEY platform, it updates the LEDs
-	 * to indicate where we are
-	 * ---------------------------------------------
-	 */
-func plat_report_exception
-	mov	x8, x30
-
-	/* Turn on LED according to x0 (0 -- f) */
-	/*
-	ldr	x2, =0xf7020000
-	and	x1, x0, #1
-	str	w1, [x2, #4]
-	and	x1, x0, #2
-	str	w1, [x2, #8]
-	and	x1, x0, #4
-	str	w1, [x2, #16]
-	and	x1, x0, #8
-	str	w1, [x2, #32]
-	*/
-
-	adr	x4, plat_err_str
-	bl	asm_print_str
-
-	adr	x4, esr_el3_str
-	bl	asm_print_str
-
-	mrs	x4, esr_el3
-	bl	asm_print_hex
-
-	adr	x4, elr_el3_str
-	bl	asm_print_str
-
-	mrs	x4, elr_el3
-	bl	asm_print_hex
-
-	mov	x30, x8
-	ret
-
-	/* -----------------------------------------------------
-	 * void plat_reset_handler(void);
-	 *
-	 * Implement workaround for defect id 831273 by enabling
-	 * an event stream every 65536 cycles and set the L2 RAM
-	 * latencies for Cortex-A57.
-	 * -----------------------------------------------------
-	 */
-func plat_reset_handler
-	/* On Juno this sets the L2 data and tag RAM latencies; does HiKey need the same? */
-	/* Anything that must run immediately after reset would go here; nothing yet. */
-	ret
-
-	/*
-	 * Return 0 to 7
-	 */
-func platform_get_core_pos
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
-	ret
-
-	/* -----------------------------------------------------
-	 * void platform_mem_init(void);
-	 *
-	 * We don't need to carry out any memory initialization
-	 * on HIKEY. The Secure RAM is accessible straight away.
-	 * -----------------------------------------------------
-	 */
-func platform_mem_init
-	ret
-
-.section .rodata.rev_err_str, "aS"
-plat_err_str:
-	.asciz "\nPlatform exception reporting:"
-esr_el3_str:
-	.asciz "\nESR_EL3: "
-elr_el3_str:
-	.asciz "\nELR_EL3: "
diff --git a/plat/hikey/bl1_plat_setup.c b/plat/hikey/bl1_plat_setup.c
deleted file mode 100644
index cb3b252..0000000
--- a/plat/hikey/bl1_plat_setup.c
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <cci400.h>
-#include <console.h>
-#include <ctype.h>
-#include <debug.h>
-#include <errno.h>
-#include <gpio.h>
-#include <hi6220.h>
-#include <hi6553.h>
-#include <mmio.h>
-#include <partitions.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <sp804_timer.h>
-#include <string.h>
-#include "../../bl1/bl1_private.h"
-#include "hikey_def.h"
-#include "hikey_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL1_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL1_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-
-/* Data structure which holds the extents of the trusted RAM for BL1 */
-static meminfo_t bl1_tzram_layout;
-
-static void hi6220_pmussi_init(void);
-static void hikey_gpio_init(void);
-static void hikey_hi6553_init(void);
-static int query_boot_mode(void);
-
-meminfo_t *bl1_plat_sec_mem_layout(void)
-{
-	return &bl1_tzram_layout;
-}
-
-/*******************************************************************************
- * Perform any BL1 specific platform actions.
- ******************************************************************************/
-void bl1_early_platform_setup(void)
-{
-	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
-
-	/* Initialize the console to provide early debug support */
-	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
-
-	hi6220_timer_init();
-	/*
-	 * Enable CCI-400 for this cluster. No need for locks as no other cpu is
-	 * active at the moment
-	 */
-	cci_init(CCI400_BASE,
-		 CCI400_SL_IFACE3_CLUSTER_IX,
-		 CCI400_SL_IFACE4_CLUSTER_IX);
-	cci_enable_cluster_coherency(read_mpidr());
-
-	/* Allow BL1 to see the whole Trusted RAM */
-	bl1_tzram_layout.total_base = BL1_RW_BASE;
-	bl1_tzram_layout.total_size = BL1_RW_SIZE;
-
-	/* Calculate how much RAM BL1 is using and how much remains free */
-	bl1_tzram_layout.free_base = BL1_RW_BASE;
-	bl1_tzram_layout.free_size = BL1_RW_SIZE;
-	reserve_mem(&bl1_tzram_layout.free_base,
-		    &bl1_tzram_layout.free_size,
-		    BL1_RAM_BASE,
-		    bl1_size);
-
-	INFO("BL1: 0x%lx - 0x%lx [size = %u]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
-	     bl1_size);
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architecture setup here. At the
- * moment this only does basic initialization. Later architectural setup
- * (bl1_arch_setup()) does not do anything platform specific.
- ******************************************************************************/
-void bl1_plat_arch_setup(void)
-{
-	configure_mmu_el3(bl1_tzram_layout.total_base,
-			  bl1_tzram_layout.total_size,
-			  BL1_RO_BASE,
-			  BL1_RO_LIMIT,
-			  BL1_COHERENT_RAM_BASE,
-			  BL1_COHERENT_RAM_LIMIT);
-}
-
-static int sd_card_detect(void)
-{
-	int ret;
-	/* configure GPIO8 as nopull */
-	mmio_write_32(0xf8001830, 0);
-	gpio_direction_input(8);
-	ret = gpio_get_value(8);
-	if (!ret)
-		return 1;
-	return 0;
-}
-
-static void hikey_sd_init(void)
-{
-	int ret;
-
-	/* switch pinmux to SD */
-	mmio_write_32(0xf701000c, 0);
-	mmio_write_32(0xf7010010, 0);
-	mmio_write_32(0xf7010014, 0);
-	mmio_write_32(0xf7010018, 0);
-	mmio_write_32(0xf701001c, 0);
-	mmio_write_32(0xf7010020, 0);
-
-	/* input, 16mA or 12mA */
-	mmio_write_32(0xf701080c, 0x64);
-	mmio_write_32(0xf7010810, 0x54);
-	mmio_write_32(0xf7010814, 0x54);
-	mmio_write_32(0xf7010818, 0x54);
-	mmio_write_32(0xf701081c, 0x54);
-	mmio_write_32(0xf7010820, 0x54);
-	ret = sd_card_detect();
-	if (ret)
-		INFO("SD Card has been detected.\n");
-}
-
-static void hikey_jumper_init(void)
-{
-	/* configure GPIO24 as nopull */
-	mmio_write_32(0xf7010950, 0);
-	/* configure GPIO24 as gpio */
-	mmio_write_32(0xf7010140, 0);
-	gpio_direction_input(24);
-	VERBOSE("Jumper value:%d\n", gpio_get_value(24));
-}
-
-static inline char hex2str(unsigned int data)
-{
-	data &= 0xf;
-	if ((data >= 0) && (data <= 9))
-		return (char)(data + 0x30);
-	return (char)(data - 10 + 0x41);
-}
-
-static uint64_t rand(unsigned int data)
-{
-	int64_t quotient, remainder, t;
-
-	quotient = data / 127773;
-	remainder = data % 127773;
-	t = 16807 * remainder - 2836 * quotient;
-	if (t <= 0)
-		t += RANDOM_MAX;
-	return (t % ((uint64_t)RANDOM_MAX + 1));
-}
-
-void generate_serialno(struct random_serial_num *random)
-{
-	unsigned int data, t;
-	int i;
-
-	data = mmio_read_32(AO_SC_SYSTEST_SLICER_CNT0);
-	t = rand(data);
-	random->data = ((uint64_t)t << 32) | data;
-	for (i = 0; i < 8; i++) {
-		random->serialno[i] = hex2str((t >> ((7 - i) << 2)) & 0xf);
-	}
-	for (i = 0; i < 8; i++) {
-		random->serialno[i + 8] = hex2str((data >> ((7 - i) << 2)) & 0xf);
-	}
-	random->serialno[16] = '\0';
-	random->magic = RANDOM_MAGIC;
-}
-
-int assign_serialno(char *cmdbuf, struct random_serial_num *random)
-{
-	int offset, i;
-
-	offset = 0;
-	while (*(cmdbuf + offset) == ' ')
-		offset++;
-	for (i = 0; i < 16; i++) {
-		if (isxdigit(*(cmdbuf + offset + i)))
-			continue;
-		return -EINVAL;
-	}
-	memcpy(random->serialno, cmdbuf + offset, 16);
-	random->serialno[16] = '\0';
-	random->magic = RANDOM_MAGIC;
-	return 0;
-}
-
-static void hikey_verify_serialno(struct random_serial_num *random)
-{
-	char *serialno;
-
-	serialno = load_serialno();
-	if (serialno == NULL) {
-		generate_serialno(random);
-		flush_random_serialno((unsigned long)random, sizeof(*random));
-	}
-}
-
-/*******************************************************************************
- * Function which will perform any remaining platform-specific setup that can
- * occur after the MMU and data cache have been enabled.
- ******************************************************************************/
-void bl1_platform_setup(void)
-{
-	struct random_serial_num random;
-
-	hikey_gpio_init();
-	hi6220_pmussi_init();
-	hikey_hi6553_init();
-	hi6220_pll_init();
-	hikey_sd_init();
-	hikey_jumper_init();
-
-	io_setup();
-	get_partition();
-	INFO("Hisilicon HiKey platform is initialized\n");
-	if (query_boot_mode()) {
-		NOTICE("Enter fastboot mode...\n");
-		flush_loader_image();
-		hikey_verify_serialno(&random);
-		usb_download();
-	}
-}
-
-/* Get the boot mode (normal boot/usb download/uart download) */
-static int query_boot_mode(void)
-{
-	int boot_mode;
-
-	boot_mode = mmio_read_32(ONCHIPROM_PARAM_BASE);
-	if ((boot_mode < 0) || (boot_mode > 2)) {
-		NOTICE("Invalid boot mode is found:%d\n", boot_mode);
-		panic();
-	}
-	return boot_mode;
-}
-
-/* PMU SSI is the block that maps the external PMU's registers into IO space */
-static void hi6220_pmussi_init(void)
-{
-	uint32_t data;
-
-	/*
-	 * After reset, PMUSSI is held in reset mode.
-	 * Bring it out of reset here.
-	 */
-	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
-		AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N);
-	do {
-		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
-	} while (data & AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N);
-
-	/* set PMU SSI clock latency for read operation */
-	data = mmio_read_32(AO_SC_MCU_SUBSYS_CTRL3);
-	data &= ~AO_SC_MCU_SUBSYS_CTRL3_RCLK_MASK;
-	data |= AO_SC_MCU_SUBSYS_CTRL3_RCLK_3;
-	mmio_write_32(AO_SC_MCU_SUBSYS_CTRL3, data);
-
-	/* enable PMUSSI clock */
-	data = AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_CCPU |
-	       AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_MCU;
-	mmio_write_32(AO_SC_PERIPH_CLKEN5, data);
-	data = AO_SC_PERIPH_CLKEN4_PCLK_PMUSSI;
-	mmio_write_32(AO_SC_PERIPH_CLKEN4, data);
-
-	/* output high on gpio0 */
-	gpio_direction_output(0);
-	gpio_set_value(0, 1);
-}
-
-static void hikey_hi6553_init(void)
-{
-	int data;
-
-	hi6553_write_8(PERI_EN_MARK, 0x1e);
-	hi6553_write_8(NP_REG_ADJ1, 0);
-	data = DISABLE6_XO_CLK_CONN | DISABLE6_XO_CLK_NFC |
-		DISABLE6_XO_CLK_RF1 | DISABLE6_XO_CLK_RF2;
-	hi6553_write_8(DISABLE6_XO_CLK, data);
-
-	/* configure BUCK0 & BUCK1 */
-	hi6553_write_8(BUCK01_CTRL2, 0x5e);
-	hi6553_write_8(BUCK0_CTRL7, 0x10);
-	hi6553_write_8(BUCK1_CTRL7, 0x10);
-	hi6553_write_8(BUCK0_CTRL5, 0x1e);
-	hi6553_write_8(BUCK1_CTRL5, 0x1e);
-	hi6553_write_8(BUCK0_CTRL1, 0xfc);
-	hi6553_write_8(BUCK1_CTRL1, 0xfc);
-
-	/* configure BUCK2 */
-	hi6553_write_8(BUCK2_REG1, 0x4f);
-	hi6553_write_8(BUCK2_REG5, 0x99);
-	hi6553_write_8(BUCK2_REG6, 0x45);
-	mdelay(1);
-	hi6553_write_8(VSET_BUCK2_ADJ, 0x22);
-	mdelay(1);
-
-	/* configure BUCK3 */
-	hi6553_write_8(BUCK3_REG3, 0x02);
-	hi6553_write_8(BUCK3_REG5, 0x99);
-	hi6553_write_8(BUCK3_REG6, 0x41);
-	hi6553_write_8(VSET_BUCK3_ADJ, 0x02);
-	mdelay(1);
-
-	/* configure BUCK4 */
-	hi6553_write_8(BUCK4_REG2, 0x9a);
-	hi6553_write_8(BUCK4_REG5, 0x99);
-	hi6553_write_8(BUCK4_REG6, 0x45);
-
-	/* configure LDO20 */
-	hi6553_write_8(LDO20_REG_ADJ, 0x50);
-
-	hi6553_write_8(NP_REG_CHG, 0x0f);
-	hi6553_write_8(CLK_TOP0, 0x06);
-	hi6553_write_8(CLK_TOP3, 0xc0);
-	hi6553_write_8(CLK_TOP4, 0x00);
-
-	/* configure LDO7 & LDO10 for SD slot */
-	data = hi6553_read_8(LDO7_REG_ADJ);
-	data = (data & 0xf8) | 0x2;
-	hi6553_write_8(LDO7_REG_ADJ, data);
-	mdelay(5);
-	/* enable LDO7 */
-	hi6553_write_8(ENABLE2_LDO1_8, 1 << 6);
-	mdelay(5);
-	data = hi6553_read_8(LDO10_REG_ADJ);
-	data = (data & 0xf8) | 0x5;
-	hi6553_write_8(LDO10_REG_ADJ, data);
-	mdelay(5);
-	/* enable LDO10 */
-	hi6553_write_8(ENABLE3_LDO9_16, 1 << 1);
-	mdelay(5);
-	/* enable LDO15 */
-	data = hi6553_read_8(LDO15_REG_ADJ);
-	data = (data & 0xf8) | 0x4;
-	hi6553_write_8(LDO15_REG_ADJ, data);
-	hi6553_write_8(ENABLE3_LDO9_16, 1 << 6);
-	mdelay(5);
-	/* enable LDO21 */
-	data = hi6553_read_8(LDO21_REG_ADJ);
-	data = (data & 0xf8) | 0x3;
-	hi6553_write_8(LDO21_REG_ADJ, data);
-	hi6553_write_8(ENABLE4_LDO17_22, 1 << 4);
-	mdelay(5);
-	/* enable LDO22 */
-	data = hi6553_read_8(LDO22_REG_ADJ);
-	data = (data & 0xf8) | 0x7;
-	hi6553_write_8(LDO22_REG_ADJ, data);
-	hi6553_write_8(ENABLE4_LDO17_22, 1 << 5);
-	mdelay(5);
-
-	/* select 32.764KHz */
-	hi6553_write_8(CLK19M2_600_586_EN, 0x01);
-
-	/* Disable PMIC internal interrupt */
-	data = hi6553_read_8(IRQ2_MASK);
-	data = data | 0x3;
-	hi6553_write_8(IRQ2_MASK, data);
-}
-
-static void hikey_gpio_init(void)
-{
-	gpio_register_device(GPIO0_BASE);
-	gpio_register_device(GPIO1_BASE);
-	gpio_register_device(GPIO2_BASE);
-	gpio_register_device(GPIO3_BASE);
-	gpio_register_device(GPIO4_BASE);
-	gpio_register_device(GPIO5_BASE);
-	gpio_register_device(GPIO6_BASE);
-	gpio_register_device(GPIO7_BASE);
-	gpio_register_device(GPIO8_BASE);
-	gpio_register_device(GPIO9_BASE);
-	gpio_register_device(GPIO10_BASE);
-	gpio_register_device(GPIO11_BASE);
-	gpio_register_device(GPIO12_BASE);
-	gpio_register_device(GPIO13_BASE);
-	gpio_register_device(GPIO14_BASE);
-	gpio_register_device(GPIO15_BASE);
-	gpio_register_device(GPIO16_BASE);
-	gpio_register_device(GPIO17_BASE);
-	gpio_register_device(GPIO18_BASE);
-	gpio_register_device(GPIO19_BASE);
-
-	/* Power on indicator LED (User LED0). */
-	gpio_direction_output(32);
-	gpio_set_value(32, 1);
-	gpio_direction_output(33);
-	gpio_direction_output(34);
-	gpio_direction_output(35);
-
-	/* Clear GPIO5 and GPIO6 interrupts */
-	mmio_write_32(GPIO5_BASE + 0x41C, 0xFF);
-	mmio_write_32(GPIO6_BASE + 0x41C, 0xFF);
-
-	/* Initialize PWR_HOLD GPIO */
-	gpio_set_value(0, 1);
-	gpio_direction_output(0);
-}
-
-/*******************************************************************************
- * Before calling this function BL2 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL2 and set SPSR and security state.
- * On HiKey we only set the security state and the SPSR of the entrypoint.
- ******************************************************************************/
-void bl1_plat_set_bl2_ep_info(image_info_t *bl2_image,
-			      entry_point_info_t *bl2_ep)
-{
-	SET_SECURITY_STATE(bl2_ep->h.attr, SECURE);
-	bl2_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-}
diff --git a/plat/hikey/bl2_plat_setup.c b/plat/hikey/bl2_plat_setup.c
deleted file mode 100644
index 0824394..0000000
--- a/plat/hikey/bl2_plat_setup.c
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <console.h>
-#include <debug.h>
-#include <partitions.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <string.h>
-#include <mmio.h>
-#include <hi6220.h>
-#include "hikey_def.h"
-#include "hikey_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-
-/*
- * The next 2 constants identify the extents of the code & RO data region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
- */
-#define BL2_RO_BASE (unsigned long)(&__RO_START__)
-#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
-
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-
-/* Data structure which holds the extents of the trusted RAM for BL2 */
-static meminfo_t bl2_tzram_layout
-__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
-		section("tzfw_coherent_mem")));
-
-/*******************************************************************************
- * Structure which holds the arguments which need to be passed to BL3-1
- ******************************************************************************/
-static bl2_to_bl31_params_mem_t bl31_params_mem;
-
-meminfo_t *bl2_plat_sec_mem_layout(void)
-{
-	return &bl2_tzram_layout;
-}
-
-/*******************************************************************************
- * This function assigns a pointer to the memory that the platform has kept
- * aside to pass platform specific and trusted firmware related information
- * to BL31. This memory is provided by the statically allocated
- * bl2_to_bl31_params_mem_t structure, which is a superset of all the
- * structures whose information is passed to BL31.
- * NOTE: This function should be called only once, before the params for
- * BL31 are generated.
- ******************************************************************************/
-bl31_params_t *bl2_plat_get_bl31_params(void)
-{
-	bl31_params_t *bl2_to_bl31_params;
-
-	/*
-	 * Initialise the memory for all the arguments that needs to
-	 * be passed to BL3-1
-	 */
-	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
-
-	/* Assign memory for TF related information */
-	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
-	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
-
-	/* Fill BL3-1 related information */
-	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-
-	/* Fill BL3-2 related information if it exists */
-#if BL32_BASE
-	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
-		VERSION_1, 0);
-	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-#endif
-
-	/* Fill BL3-3 related information */
-	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
-		PARAM_EP, VERSION_1, 0);
-
-	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
-	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
-
-	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-
-	return bl2_to_bl31_params;
-}
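Everything returned here points back into the single static bl31_params_mem block, so BL3-1 can simply follow the pointers. Purely as an illustration (not part of the original file), a hypothetical debug helper could dump the BL3-3 entry point and the MPID stored in arg0 above:

/* Hypothetical helper, for illustration only. */
static void dump_bl31_params(bl31_params_t *params)
{
	INFO("BL3-3 pc: 0x%lx, arg0 (MPID): 0x%lx\n",
	     (unsigned long)params->bl33_ep_info->pc,
	     (unsigned long)params->bl33_ep_info->args.arg0);
}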
-
-/*******************************************************************************
- * This function returns a pointer to the shared memory that the platform
- * has kept to point to entry point information of BL31 to BL2
- ******************************************************************************/
-struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
-{
-	return &bl31_params_mem.bl31_ep_info;
-}
-
-void init_boardid(void)
-{
-	unsigned int reg;
-
-	/* Set chip id to sram */
-	reg = read_midr_el1();
-	mmio_write_32(MEMORY_AXI_CHIP_ADDR, reg);
-	INFO("[BDID] [%x] midr: 0x%x\n", MEMORY_AXI_CHIP_ADDR, reg);
-
-	/* Set board type to sram */
-	mmio_write_32(MEMORY_AXI_BOARD_TYPE_ADDR, 0x0);
-	INFO("[BDID] [%x] board type: 0\n", MEMORY_AXI_BOARD_TYPE_ADDR);
-
-	/* Set board id to sram */
-	mmio_write_32(MEMORY_AXI_BOARD_ID_ADDR, 0x2b);
-	INFO("[BDID] [%x] board id: 0x2b\n", MEMORY_AXI_BOARD_ID_ADDR);
-
-	mmio_write_32(ACPU_ARM64_FLAGA, 0x1234);
-	mmio_write_32(ACPU_ARM64_FLAGB, 0x5678);
-	return;
-}
-
-/*******************************************************************************
- * BL1 has passed the extents of the trusted RAM that should be visible to BL2
- * in x0. This memory layout is sitting at the base of the free trusted RAM.
- * Copy it to a safe location before it is reclaimed by later BL2 functionality.
- ******************************************************************************/
-void bl2_early_platform_setup(meminfo_t *mem_layout)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Setup the BL2 memory layout */
-	bl2_tzram_layout = *mem_layout;
-
-	init_boardid();
-	init_acpu_dvfs();
-
-	io_setup();
-	get_partition();
-}
-
-/*******************************************************************************
- * Perform platform specific setup. On HiKey this currently only runs the
- * platform security setup; the IO layer and the partition table have
- * already been initialised in bl2_early_platform_setup().
- ******************************************************************************/
-void bl2_platform_setup(void)
-{
-	plat_security_setup();
-}
-
-/* Flush the TF params and the TF plat params */
-void bl2_plat_flush_bl31_params(void)
-{
-	flush_dcache_range((unsigned long)&bl31_params_mem,
-			sizeof(bl2_to_bl31_params_mem_t));
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only initializes the MMU in a quick and dirty way.
- ******************************************************************************/
-void bl2_plat_arch_setup(void)
-{
-	configure_mmu_el1(bl2_tzram_layout.total_base,
-			  bl2_tzram_layout.total_size,
-			  BL2_RO_BASE,
-			  BL2_RO_LIMIT,
-			  BL2_COHERENT_RAM_BASE,
-			  BL2_COHERENT_RAM_LIMIT);
-}
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-0, i.e. anywhere
- * in trusted RAM as long as it doesn't overwrite BL2.
- ******************************************************************************/
-void bl2_plat_get_bl30_meminfo(meminfo_t *bl30_meminfo)
-{
-	bl30_meminfo->total_base = BL30_BASE;
-	bl30_meminfo->total_size = BL30_SIZE;
-	bl30_meminfo->free_base  = BL30_BASE;
-	bl30_meminfo->free_size  = BL30_SIZE;
-}
-
-/*******************************************************************************
- * Load the BL3-0 (MCU) image from Trusted RAM into MCU SRAM and start the MCU.
- * Return 0 on success, -1 otherwise.
- ******************************************************************************/
-int bl2_plat_handle_bl30(image_info_t *bl30_image_info)
-{
-	int *buf = (int *)bl30_image_info->image_base;
-
-	INFO("%s: [%x] %x %x %x %x\n",
-	     __func__, buf, buf[0], buf[1], buf[2], buf[3]);
-
-	buf += 50;
-	INFO("%s: [%x] %x %x %x %x\n",
-	     __func__, buf, buf[0], buf[1], buf[2], buf[3]);
-
-	buf += 50;
-	INFO("%s: [%x] %x %x %x %x\n",
-	     __func__, buf, buf[0], buf[1], buf[2], buf[3]);
-
-	buf  = (int *)(bl30_image_info->image_base +
-		       bl30_image_info->image_size);
-	buf -= 4;
-	INFO("%s: [%x] %x %x %x %x\n",
-	     __func__, buf, buf[0], buf[1], buf[2], buf[3]);
-
-	/* enable mcu sram */
-	hisi_mcu_enable_sram();
-
-	/* load mcu binary to sram */
-	hisi_mcu_load_image(bl30_image_info->image_base,
-			    bl30_image_info->image_size);
-
-	/* let the mcu run */
-	hisi_mcu_start_run();
-
-	INFO("%s: mcu pc is %x\n",
-		__func__, mmio_read_32(AO_SC_MCU_SUBSYS_STAT2));
-
-	INFO("%s: AO_SC_PERIPH_CLKSTAT4 is %x\n",
-		__func__, mmio_read_32(AO_SC_PERIPH_CLKSTAT4));
-	return 0;
-}
-
-/*******************************************************************************
- * Before calling this function BL31 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL31 and set SPSR and security state.
- * On HiKey we only set the security state and the SPSR of the entrypoint.
- ******************************************************************************/
-void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
-			       entry_point_info_t *bl31_ep_info)
-{
-	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
-	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
-				       DISABLE_ALL_EXCEPTIONS);
-}
-
-/*******************************************************************************
- * Before calling this function BL32 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL32 and set SPSR and security state.
- * On HiKey we only set the security state; the SPD sets the SPSR.
- ******************************************************************************/
-void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
-			       entry_point_info_t *bl32_ep_info)
-{
-	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
-	/*
-	 * The Secure Payload Dispatcher service is responsible for
-	 * setting the SPSR prior to entry into the BL32 image.
-	 */
-	bl32_ep_info->spsr = 0;
-}
-
-/*******************************************************************************
- * Before calling this function BL33 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL33 and set SPSR and security state.
- * On HiKey we only set the security state and the SPSR of the entrypoint.
- ******************************************************************************/
-void bl2_plat_set_bl33_ep_info(image_info_t *image,
-				       entry_point_info_t *bl33_ep_info)
-{
-	unsigned long el_status;
-	unsigned int mode;
-
-	/* Figure out what mode we enter the non-secure world in */
-	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
-	el_status &= ID_AA64PFR0_ELX_MASK;
-
-	if (el_status)
-		mode = MODE_EL2;
-	else
-		mode = MODE_EL1;
-
-	/*
-	 * TODO: Consider the possibility of specifying the SPSR in
-	 * the FIP ToC and allowing the platform to have a say as
-	 * well.
-	 */
-	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
-				       DISABLE_ALL_EXCEPTIONS);
-	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
-}
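The EL2 check above relies on ID_AA64PFR0_EL1 carrying one 4-bit field per exception level; a non-zero EL2 field means EL2 is implemented, so the non-secure image is entered in EL2, otherwise in EL1. The same logic pulled into a small helper for clarity (the helper name is illustrative):

/* Illustrative helper mirroring the check above. */
static unsigned int hikey_bl33_entry_mode(void)
{
	unsigned long pfr0 = read_id_aa64pfr0_el1();

	/* A non-zero EL2 field means EL2 is implemented on this core. */
	return ((pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK) ?
		MODE_EL2 : MODE_EL1;
}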
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-2
- ******************************************************************************/
-void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
-{
-	/*
-	 * Populate the extents of memory available for loading BL3-2.
-	 */
-	bl32_meminfo->total_base = BL32_BASE;
-	bl32_meminfo->free_base = BL32_BASE;
-	bl32_meminfo->total_size =
-		       (TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-	bl32_meminfo->free_size =
-		       (TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-}
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-3
- ******************************************************************************/
-void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
-{
-	bl33_meminfo->total_base = DRAM_NS_BASE;
-	bl33_meminfo->total_size = DRAM_NS_SIZE;
-	bl33_meminfo->free_base = DRAM_NS_BASE;
-	bl33_meminfo->free_size = DRAM_NS_SIZE;
-}
diff --git a/plat/hikey/bl31_plat_setup.c b/plat/hikey/bl31_plat_setup.c
deleted file mode 100644
index 305835c..0000000
--- a/plat/hikey/bl31_plat_setup.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bl31.h>
-#include <bl_common.h>
-#include <cci400.h>
-#include <console.h>
-#include <debug.h>
-#include <hisi_ipc.h>
-#include <hisi_pwrc.h>
-#include <mmio.h>
-#include <platform.h>
-#include <stddef.h>
-#include <hi6220_regs_ao.h>
-#include <hi6220.h>
-
-#include "hikey_def.h"
-#include "hikey_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-
-/*
- * The next 2 constants identify the extents of the code & RO data region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
- */
-#define BL31_RO_BASE (unsigned long)(&__RO_START__)
-#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
-
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-
-/******************************************************************************
- * Placeholder variables for copying the arguments that have been passed to
- * BL3-1 from BL2.
- ******************************************************************************/
-static entry_point_info_t bl32_ep_info;
-static entry_point_info_t bl33_ep_info;
-
-/*******************************************************************************
- * Return a pointer to the 'entry_point_info' structure of the next image for
- * the security state specified. BL3-3 corresponds to the non-secure image type
- * while BL3-2 corresponds to the secure image type. A NULL pointer is returned
- * if the image does not exist.
- ******************************************************************************/
-entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
-{
-	entry_point_info_t *next_image_info;
-
-	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
-
-	/* None of the images on this platform can have 0x0 as the entrypoint */
-	if (next_image_info->pc)
-		return next_image_info;
-	else
-		return NULL;
-}
-
-/*******************************************************************************
- * Perform any BL3-1 specific platform actions. Here is an opportunity to copy
- * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
- * are lost (potentially). This needs to be done before the MMU is initialized
- * so that the memory layout can be used while creating page tables. Also, BL2
- * has flushed this information to memory, so we are guaranteed to pick up good
- * data
- ******************************************************************************/
-void bl31_early_platform_setup(bl31_params_t *from_bl2,
-			       void *plat_params_from_bl2)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/*
-	 * Initialise the CCI-400 driver for BL31 so that it is accessible after
-	 * a warm boot. BL1 should have already enabled CCI coherency for this
-	 * cluster during cold boot.
-	 */
-	cci_init(CCI400_BASE,
-		 CCI400_SL_IFACE3_CLUSTER_IX,
-		 CCI400_SL_IFACE4_CLUSTER_IX);
-
-	/*
-	 * Copy BL3-2 and BL3-3 entry point information.
-	 * They are stored in Secure RAM, in BL2's address space.
-	 */
-	bl32_ep_info = *from_bl2->bl32_ep_info;
-	bl33_ep_info = *from_bl2->bl33_ep_info;
-}
-
-static void init_rtc(void)
-{
-	uint32_t data;
-
-	data = mmio_read_32(AO_SC_PERIPH_CLKEN4);
-	data |= AO_SC_PERIPH_RSTDIS4_RESET_RTC0_N;
-	mmio_write_32(AO_SC_PERIPH_CLKEN4, data);
-}
-
-static void init_edma(void)
-{
-	int i;
-
-	mmio_write_32(EDMAC_SEC_CTRL, 0x3);
-
-	for (i = 0; i <= 15; i++) {
-		VERBOSE("EDMAC_AXI_CONF(%d): data:0x%x\n", i, mmio_read_32(EDMAC_AXI_CONF(i)));
-		mmio_write_32(EDMAC_AXI_CONF(i), (1 << 6) | (1 << 18));
-		VERBOSE("EDMAC_AXI_CONF(%d): data:0x%x\n", i, mmio_read_32(EDMAC_AXI_CONF(i)));
-	}
-}
-
-/*******************************************************************************
- * Initialize the GIC.
- ******************************************************************************/
-void bl31_platform_setup(void)
-{
-	/* Initialize the gic cpu and distributor interfaces */
-	plat_gic_init();
-	arm_gic_setup();
-
-	init_rtc();
-	init_edma();
-	hisi_ipc_init();
-	hisi_pwrc_setup();
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only initializes the MMU in a quick and dirty way.
- ******************************************************************************/
-void bl31_plat_arch_setup(void)
-{
-	configure_mmu_el3(BL31_RO_BASE,
-			  BL31_COHERENT_RAM_LIMIT - BL31_RO_BASE,
-			  BL31_RO_BASE,
-			  BL31_RO_LIMIT,
-			  BL31_COHERENT_RAM_BASE,
-			  BL31_COHERENT_RAM_LIMIT);
-}
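configure_mmu_el3() maps everything from the start of the read-only section to the end of coherent RAM as one region, which is only safe if the linker-provided extents are page-aligned as the comments above require. Purely as a defensive sketch (assuming 4KB pages; not in the original file), the call could be preceded by:

	/* Sketch: sanity-check page alignment before mapping (0xFFF assumes 4KB pages). */
	assert((BL31_RO_BASE & 0xFFF) == 0);
	assert((BL31_COHERENT_RAM_BASE & 0xFFF) == 0);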
diff --git a/plat/hikey/drivers/dw_mmc.c b/plat/hikey/drivers/dw_mmc.c
deleted file mode 100644
index 5eecd0c..0000000
--- a/plat/hikey/drivers/dw_mmc.c
+++ /dev/null
@@ -1,754 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <console.h>
-#include <debug.h>
-#include <errno.h>
-#include <mmio.h>
-#include <string.h>
-#include <sp804_timer.h>
-#include <dw_mmc.h>
-#include <partitions.h>
-#include <platform_def.h>
-#include <hi6220.h>
-#include <hi6553.h>
-
-#define MMC_PLL			100000000
-
-#define IDMAC_DES0_DIC		(1 << 1)
-#define IDMAC_DES0_LD		(1 << 2)
-#define IDMAC_DES0_FS		(1 << 3)
-#define IDMAC_DES0_CH		(1 << 4)
-#define IDMAC_DES0_ER		(1 << 5)
-#define IDMAC_DES0_CES		(1 << 30)
-#define IDMAC_DES0_OWN		(1 << 31)
-
-#define IDMAC_DES1_BS1(x)	((x) & 0x1fff)
-#define IDMAC_DES2_BS2(x)	(((x) & 0x1fff) << 13)
-
-struct idmac_desc {
-	unsigned int		des0;
-	unsigned int		des1;
-	unsigned int		des2;
-	unsigned int		des3;
-};
-
-static inline int mmc_state(unsigned int data)
-{
-	return ((data & MMC_STATUS_CURRENT_STATE_MASK) >>
-		MMC_STATUS_CURRENT_STATE_SHIFT);
-}
-
-static inline int wait_data_ready(void)
-{
-	unsigned int data;
-
-	while (1) {
-		data = mmio_read_32(MMC0_RINTSTS);
-		if (data & (MMC_INT_DCRC | MMC_INT_DRT | MMC_INT_SBE |
-		    MMC_INT_EBE)) {
-			NOTICE("unwanted interrupts:0x%x\n", data);
-			return -EINVAL;
-		}
-		if (data & MMC_INT_DTO)
-			break;
-	}
-	/* clear interrupts */
-	mmio_write_32(MMC0_RINTSTS, ~0);
-	return 0;
-}
-
-static int update_mmc0_clock(void)
-{
-	unsigned int data;
-
-	/* CMD_UPDATE_CLK */
-	data = BIT_CMD_WAIT_PRVDATA_COMPLETE | BIT_CMD_UPDATE_CLOCK_ONLY |
-		BIT_CMD_START;
-	mmio_write_32(MMC0_CMD, data);
-	while (1) {
-		data = mmio_read_32(MMC0_CMD);
-		if (!(data & CMD_START_BIT))
-			break;
-		data = mmio_read_32(MMC0_RINTSTS);
-		if (data & MMC_INT_HLE) {
-			NOTICE("fail to update mmc clock frequency\n");
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-static int set_mmc0_clock(int rate)
-{
-	int ret, divider, found = 0;
-	unsigned int data;
-
-	for (divider = 1; divider < 256; divider++) {
-		if ((MMC_PLL / (2 * divider)) <= rate) {
-			found = 1;
-			break;
-		}
-	}
-	if (!found)
-		return -EINVAL;
-
-	/* wait until mmc is idle */
-	do {
-		data = mmio_read_32(MMC0_STATUS);
-	} while (data & MMC_STS_DATA_BUSY);
-
-	/* Disable mmc clock first */
-	mmio_write_32(MMC0_CLKENA, 0);
-	do {
-		ret = update_mmc0_clock();
-	} while (ret);
-
-	/* enable mmc clock */
-	do {
-		mmio_write_32(MMC0_CLKENA, 1);
-		mmio_write_32(MMC0_CLKSRC, 0);
-		mmio_write_32(MMC0_CLKDIV, divider);
-		ret = update_mmc0_clock();
-	} while (ret);
-	return 0;
-}
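The loop above picks the smallest divider for which MMC_PLL / (2 * divider) does not exceed the requested rate, and that divider value is what ends up in MMC0_CLKDIV. Two worked examples with the 100 MHz MMC_PLL defined above:

/*
 * set_mmc0_clock(400000)   -> divider = 125, card clock = 100 MHz / 250 = 400 kHz
 * set_mmc0_clock(50000000) -> divider = 1,   card clock = 100 MHz / 2   = 50 MHz
 */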
-
-static void set_mmc0_io(void)
-{
-	mmio_write_32(MMC0_CTYPE, MMC_8BIT_MODE);
-	mmio_write_32(MMC0_TMOUT, ~0);	/* maximum timeout value */
-	mmio_write_32(MMC0_DEBNCE, 0x00ffffff);
-	mmio_write_32(MMC0_BLKSIZ, MMC_BLOCK_SIZE);
-	mmio_write_32(MMC0_BYTCNT, 256 * 1024);
-}
-
-static int mmc0_send_cmd(unsigned int cmd, unsigned int arg, unsigned int *buf)
-{
-	unsigned int data, err_mask;
-
-	if (!buf) {
-		NOTICE("buf is invalid\n");
-		return -EFAULT;
-	}
-
-	mmio_write_32(MMC0_CMDARG, arg);
-
-	/* clear interrupts */
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	switch (cmd) {
-	case 0:
-		data = BIT_CMD_SEND_INIT;
-		break;
-	case 1:
-		data = BIT_CMD_RESPONSE_EXPECT;
-		break;
-	case 2:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_LONG_RESPONSE |
-			BIT_CMD_CHECK_RESPONSE_CRC | BIT_CMD_SEND_INIT;
-		break;
-	case 3:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_SEND_INIT;
-		break;
-	case 8:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_DATA_EXPECTED | BIT_CMD_READ |
-			BIT_CMD_WAIT_PRVDATA_COMPLETE;
-		break;
-	case 9:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_LONG_RESPONSE;
-		break;
-	case 12:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_STOP_ABORT_CMD;
-		break;
-	case 17:
-	case 18:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_DATA_EXPECTED | BIT_CMD_READ |
-			BIT_CMD_WAIT_PRVDATA_COMPLETE;
-		break;
-	case 24:
-	case 25:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_DATA_EXPECTED | BIT_CMD_WRITE |
-			BIT_CMD_WAIT_PRVDATA_COMPLETE;
-		break;
-	case 30:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC |
-			BIT_CMD_DATA_EXPECTED;
-		break;
-	case 7:
-		if (arg)
-			data = BIT_CMD_RESPONSE_EXPECT |
-				BIT_CMD_CHECK_RESPONSE_CRC;
-		else
-			data = 0;
-		break;
-	default:
-		data = BIT_CMD_RESPONSE_EXPECT | BIT_CMD_CHECK_RESPONSE_CRC;
-		break;
-	}
-	data |= (cmd & 0x3f) | BIT_CMD_USE_HOLD_REG | BIT_CMD_START;
-	mmio_write_32(MMC0_CMD, data);
-	err_mask = MMC_INT_EBE | MMC_INT_HLE | MMC_INT_RTO | MMC_INT_RCRC |
-		   MMC_INT_RE;
-	do {
-		data = mmio_read_32(MMC0_RINTSTS);
-		if (data & err_mask) {
-			NOTICE("mmc: error status 0x%x\n", data);
-			return -EIO;
-		}
-	} while (!(data & MMC_INT_CMD_DONE));
-
-	buf[0] = mmio_read_32(MMC0_RESP0);
-	if ((cmd == 2) || (cmd == 9)) {
-		buf[1] = mmio_read_32(MMC0_RESP1);
-		buf[2] = mmio_read_32(MMC0_RESP2);
-		buf[3] = mmio_read_32(MMC0_RESP3);
-	}
-	return 0;
-}
-
-/* Poll with CMD13 until the card reaches TRAN state; only log on failure. */
-static void mmc0_check_tran_mode(void)
-{
-	unsigned int buf[4];
-	int ret;
-
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	while (1) {
-		ret = mmc0_send_cmd(13, EMMC_FIX_RCA << 16, buf);
-		if (ret) {
-			NOTICE("failed on command 13\n");
-			return;
-		}
-		if (((buf[0] >> 9) & 0xf) == 4)
-			return;
-	}
-}
-
-static int mmc0_update_ext_csd(int index, int value)
-{
-	unsigned int arg, data, buf[4];
-	int ret;
-
-	arg = 3 << 24;
-	arg |= (index & 0xff) << 16;
-	arg |= (value & 0xff) << 8;
-	arg |= 1;
-	memset(buf, 0, 4 * sizeof(buf[0]));
-
-	ret = mmc0_send_cmd(6, arg, buf);
-	if (ret) {
-		NOTICE("failed to send command 6\n");
-		return ret;
-	}
-
-	/* wait for busy to de-assert */
-	while (1) {
-		data = mmio_read_32(MMC0_STATUS);
-		if (!(data & MMC_STS_DATA_BUSY))
-			break;
-	}
-
-	do {
-		ret = mmc0_send_cmd(13, EMMC_FIX_RCA << 16, buf);
-		if (ret) {
-			NOTICE("failed to send command 13\n");
-			return ret;
-		}
-
-		if (buf[0] & MMC_STATUS_SWITCH_ERROR) {
-			NOTICE("switching mmc mode may have failed\n");
-			return -1;
-		}
-	} while (mmc_state(buf[0]) == MMC_STATE_PRG);
-
-	return 0;
-}
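The argument assembled above follows the eMMC SWITCH (CMD6) encoding: access mode 3 (write byte) in bits [25:24], the EXT_CSD byte index in bits [23:16] and the value in bits [15:8]. For example, the mmc0_update_ext_csd(EXTCSD_BUS_WIDTH, 2 + 4) call made from mmc0_set_clock_and_width() builds:

/*
 * arg = (3 << 24) | (183 << 16) | (6 << 8) | 1 = 0x03b70601
 * i.e. "write 6 into EXT_CSD byte 183 (bus width)" via CMD6.
 */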
-
-#define EXTCSD_BUS_WIDTH		183
-
-static int mmc0_set_clock_and_width(int rate, int width)
-{
-	int ret;
-
-	switch (width) {
-	case 0:
-		mmio_write_32(MMC0_CTYPE, 0);
-		ret = mmc0_update_ext_csd(EXTCSD_BUS_WIDTH, 0);
-		break;
-	case 8:
-		mmio_write_32(MMC0_CTYPE, 1 << 16);
-		ret = mmc0_update_ext_csd(EXTCSD_BUS_WIDTH, 2 + 4);
-		mmio_write_32(MMC0_UHSREG, 1 << 16);
-		break;
-	default:
-		NOTICE("wrong bus width:%d\n", width);
-		return -EINVAL;
-	}
-	if (ret) {
-		NOTICE("return failure on %s, %d\n", __func__, __LINE__);
-		return ret;
-	}
-
-	set_mmc0_clock(rate);
-	return 0;
-}
-
-static int manu_id;
-
-#define EXTCSD_HS_TIMING		185
-
-#ifdef EMMC_READ_EXT_CSD
-static int mmc0_read_ext_csd(unsigned int dst_start);
-#endif
-static int enum_mmc0_card(void)
-{
-	unsigned int buf[4], cid[4];
-	int ret = 0, i, version;
-
-	/* CMD0: reset to IDLE */
-	ret = mmc0_send_cmd(0, 0, buf);
-	if (ret) {
-		NOTICE("failed to send IDLE command\n");
-		return ret;
-	}
-
-	while (1) {
-		udelay(100);
-		/* CMD1: READY */
-		ret = mmc0_send_cmd(1, 0x40ff8000, buf);
-		if (ret) {
-			NOTICE("failed to send READY command\n");
-			return ret;
-		}
-		if (buf[0] & 0x80000000)
-			break;
-	}
-
-	/* CMD2: IDENT */
-	ret = mmc0_send_cmd(2, 0, buf);
-	if (ret) {
-		NOTICE("failed to send IDENT command\n");
-		return ret;
-	}
-	VERBOSE("manuid:");
-	for (i = 0; i < 4; i++) {
-		cid[i] = buf[i];
-		VERBOSE(" 0x%x", cid[i]);
-	}
-	VERBOSE("\n");
-
-	/* CMD3: STBY */
-	ret = mmc0_send_cmd(3, EMMC_FIX_RCA << 16, buf);
-	if (ret) {
-		NOTICE("failed to send STBY command\n");
-		return ret;
-	}
-
-	/* CMD9: get CSD */
-	ret = mmc0_send_cmd(9, EMMC_FIX_RCA << 16, buf);
-	if (ret) {
-		NOTICE("failed to get CSD\n");
-		return ret;
-	}
-	VERBOSE("CSD: %x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
-	version = (buf[3] >> 26) & 0xf;
-	switch (version) {
-	case 0:	/* MMC v1.0-v1.2 */
-	case 1:	/* MMC v1.4 */
-		manu_id = (cid[3] >> 8) & 0xffffff;
-		break;
-	case 2:	/* MMC v2.0-v2.2 */
-	case 3:	/* MMC v3.1-v3.3 */
-	case 4:	/* MMC v4 */
-		manu_id = (cid[3] >> 24) & 0xff;
-		break;
-	default:
-		WARN("wrong mmc version (%d) is specified.\n", version);
-		break;
-	}
-
-	VERBOSE("mmc version:%d\n", version);
-	/* CMD7: TRAN */
-	ret = mmc0_send_cmd(7, EMMC_FIX_RCA << 16, buf);
-	if (ret) {
-		NOTICE("failed to send TRAN command\n");
-		return ret;
-	}
-	mmc0_check_tran_mode();
-
-	mmc0_set_clock_and_width(400000, 0);
-#ifdef EMMC_READ_EXT_CSD
-	mmc0_read_ext_csd(0x50000);
-#endif
-	ret = mmc0_update_ext_csd(EXTCSD_HS_TIMING, 1);
-	if (ret) {
-		NOTICE("failed to switch to HS mode\n");
-	}
-
-	ret = mmc0_set_clock_and_width(50000000, 8);
-	return ret;
-}
-
-static int enable_mmc0(void)
-{
-	unsigned int data;
-
-	/* reset mmc0 */
-	data = MMC_CTRL_RESET | MMC_FIFO_RESET | MMC_DMA_RESET;
-	mmio_write_32(MMC0_CTRL, data);
-	/* wait until reset operation finished */
-	do {
-		data = mmio_read_32(MMC0_CTRL);
-	} while (data);
-
-	data = MMC_INT_EN | MMC_DMA_EN;
-	mmio_write_32(MMC0_CTRL, data);
-
-	mmio_write_32(MMC0_INTMASK, 0x0);
-	mmio_write_32(MMC0_RINTSTS, ~0);
-	mmio_write_32(MMC0_IDINTEN, ~0);
-	mmio_write_32(MMC0_IDSTS, ~0);
-
-	mmio_write_32(MMC0_BLKSIZ, MMC_BLOCK_SIZE);
-	mmio_write_32(MMC0_BMOD, MMC_IDMAC_SWRESET);
-	do {
-		data = mmio_read_32(MMC0_BMOD);
-	} while (data & MMC_IDMAC_SWRESET);
-
-	data |= MMC_IDMAC_ENABLE | MMC_IDMAC_FB;
-	mmio_write_32(MMC0_BMOD, data);
-
-	data = MMC_DMA_BURST_SIZE(2) | MMC_FIFO_TWMARK(8) | MMC_FIFO_RWMARK(7);
-	mmio_write_32(MMC0_FIFOTH, data);
-	data = MMC_CARD_RD_THR(512) | MMC_CARD_RD_THR_EN;
-	mmio_write_32(MMC0_CARDTHRCTL, data);
-
-	udelay(100);
-	set_mmc0_clock(378000);
-	udelay(100);
-
-	set_mmc0_io();
-	return 0;
-}
-
-#define MMC_BLOCK_SIZE			512
-#define MMC_DMA_MAX_BUFFER_SIZE		(512 * 8)
-
-#ifdef EMMC_READ_EXT_CSD
-static int mmc0_read_ext_csd(unsigned int dst_start)
-{
-	unsigned int blk_cnt, bytes, desc_num, buf[4], data;
-	struct idmac_desc *desc = NULL;
-	int i, ret, last_idx;
-	uintptr_t src_addr, dst_addr = dst_start;
-
-	blk_cnt = 1;
-	bytes = blk_cnt * MMC_BLOCK_SIZE;
-	memset((void *)MMC_DATA_BASE, 0, bytes);
-
-	mmio_write_32(MMC0_BYTCNT, bytes);
-
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	desc_num = (bytes + MMC_DMA_MAX_BUFFER_SIZE - 1) /
-		   MMC_DMA_MAX_BUFFER_SIZE;
-
-	desc = (struct idmac_desc *)MMC_DESC_BASE;
-
-	for (i = 0; i < desc_num; i++) {
-		(desc + i)->des0 = IDMAC_DES0_OWN | IDMAC_DES0_CH |
-				   IDMAC_DES0_DIC;
-		(desc + i)->des1 = IDMAC_DES1_BS1(MMC_DMA_MAX_BUFFER_SIZE);
-		/* buffer address */
-		(desc + i)->des2 = MMC_DATA_BASE + MMC_DMA_MAX_BUFFER_SIZE * i;
-		/* next descriptor address */
-		(desc + i)->des3 = MMC_DESC_BASE +
-				   (sizeof(struct idmac_desc) * (i + 1));
-	}
-	/* first descriptor */
-	desc->des0 |= IDMAC_DES0_FS;
-	/* last descriptor */
-	last_idx = desc_num - 1;
-	(desc + last_idx)->des0 |= IDMAC_DES0_LD;
-	(desc + last_idx)->des0 &= ~(IDMAC_DES0_DIC | IDMAC_DES0_CH);
-	(desc + last_idx)->des1 = IDMAC_DES1_BS1(bytes - (last_idx *
-				  MMC_DMA_MAX_BUFFER_SIZE));
-	/* set next descriptor address as 0 */
-	(desc + last_idx)->des3 = 0;
-
-	mmio_write_32(MMC0_DBADDR, MMC_DESC_BASE);
-
-	/* read extended CSD */
-	ret = mmc0_send_cmd(8, EMMC_FIX_RCA << 16, buf);
-	if (ret) {
-		NOTICE("failed to send CMD8\n");
-		mmio_write_32(MMC0_RINTSTS, ~0);
-		return -EFAULT;
-	}
-
-	ret = wait_data_ready();
-	if (ret)
-		return ret;
-
-	if (blk_cnt > 1) {
-		ret = mmc0_send_cmd(12, EMMC_FIX_RCA << 16, buf);
-		if (ret) {
-			NOTICE("failed to send Stop Transmission command\n");
-			return ret;
-		}
-		mmio_write_32(MMC0_RINTSTS, ~0);
-	}
-	src_addr = MMC_DATA_BASE;
-	memcpy((void *)dst_addr, (void *)src_addr, MMC_BLOCK_SIZE);
-
-	return 0;
-}
-#endif
-
-int mmc0_read(unsigned long src_start, size_t src_size,
-		unsigned long dst_start, uint32_t boot_partition)
-{
-	unsigned int src_blk_start = src_start / MMC_BLOCK_SIZE;
-	unsigned int src_blk_cnt, offset, bytes, desc_num, buf[4];
-	struct idmac_desc *desc = NULL;
-	int i, ret, last_idx;
-	uintptr_t src_addr, dst_addr = dst_start;
-
-	if (boot_partition) {
-		/* switch to boot partition 1 */
-		ret = mmc0_update_ext_csd(EXT_CSD_PARTITION_CONFIG,
-					  PART_CFG_BOOT_PARTITION1_ENABLE |
-					  PART_CFG_PARTITION1_ACCESS);
-		if (ret) {
-			NOTICE("fail to switch eMMC boot partition\n");
-			return ret;
-		}
-	}
-	offset = src_start % MMC_BLOCK_SIZE;
-	src_blk_cnt = (src_size + offset + MMC_BLOCK_SIZE - 1) / MMC_BLOCK_SIZE;
-	bytes = src_blk_cnt * MMC_BLOCK_SIZE;
-
-	mmio_write_32(MMC0_BYTCNT, bytes);
-
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	desc_num = (bytes + MMC_DMA_MAX_BUFFER_SIZE - 1) /
-		   MMC_DMA_MAX_BUFFER_SIZE;
-
-	desc = (struct idmac_desc *)MMC_DESC_BASE;
-
-	for (i = 0; i < desc_num; i++) {
-		(desc + i)->des0 = IDMAC_DES0_OWN | IDMAC_DES0_CH |
-				   IDMAC_DES0_DIC;
-		(desc + i)->des1 = IDMAC_DES1_BS1(MMC_DMA_MAX_BUFFER_SIZE);
-		/* buffer address */
-		(desc + i)->des2 = MMC_DATA_BASE + MMC_DMA_MAX_BUFFER_SIZE * i;
-		/* next descriptor address */
-		(desc + i)->des3 = MMC_DESC_BASE +
-				   (sizeof(struct idmac_desc) * (i + 1));
-	}
-	/* first descriptor */
-	desc->des0 |= IDMAC_DES0_FS;
-	/* last descriptor */
-	last_idx = desc_num - 1;
-	(desc + last_idx)->des0 |= IDMAC_DES0_LD;
-	(desc + last_idx)->des0 &= ~(IDMAC_DES0_DIC | IDMAC_DES0_CH);
-	(desc + last_idx)->des1 = IDMAC_DES1_BS1(bytes - (last_idx *
-				  MMC_DMA_MAX_BUFFER_SIZE));
-	/* set next descriptor address as 0 */
-	(desc + last_idx)->des3 = 0;
-
-	mmio_write_32(MMC0_DBADDR, MMC_DESC_BASE);
-
-	ret = mmc0_send_cmd(23, src_blk_cnt & 0xffff, buf);
-	if (ret) {
-		NOTICE("failed to send CMD23\n");
-		mmio_write_32(MMC0_RINTSTS, ~0);
-		return -EFAULT;
-	}
-	/* multiple read */
-	ret = mmc0_send_cmd(18, src_blk_start, buf);
-	if (ret) {
-		NOTICE("failed to send CMD18\n");
-		mmio_write_32(MMC0_RINTSTS, ~0);
-		return -EFAULT;
-	}
-
-	ret = wait_data_ready();
-	if (ret)
-		return ret;
-
-	src_addr = MMC_DATA_BASE + offset;
-	memcpy((void *)dst_addr, (void *)src_addr, src_size);
-
-	if (boot_partition) {
-		/* switch back to normal partition */
-		ret = mmc0_update_ext_csd(EXT_CSD_PARTITION_CONFIG,
-					  PART_CFG_BOOT_PARTITION1_ENABLE);
-		if (ret)
-			NOTICE("fail to switch eMMC normal partition\n");
-	}
-	return ret;
-}
-
-static int write_multi_blocks(unsigned int lba, unsigned int count,
-			      unsigned int buffer, unsigned int boot_partition)
-{
-	unsigned int bytes, resp_buf[4], desc_num;
-	struct idmac_desc *desc = NULL;
-	int ret, last_idx, i;
-
-	if (buffer % 4) {
-		NOTICE("invalid buffer address:0x%x\n", buffer);
-		return -EINVAL;
-	}
-	if (boot_partition) {
-		/* switch to boot partition 1 */
-		ret = mmc0_update_ext_csd(EXT_CSD_PARTITION_CONFIG,
-					  PART_CFG_BOOT_PARTITION1_ENABLE |
-					  PART_CFG_PARTITION1_ACCESS);
-		if (ret) {
-			NOTICE("fail to switch eMMC boot partition\n");
-			return ret;
-		}
-	}
-	bytes = MMC_BLOCK_SIZE * count;
-
-	mmio_write_32(MMC0_BYTCNT, bytes);
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	desc_num = (bytes + MMC_DMA_MAX_BUFFER_SIZE - 1) /
-		   MMC_DMA_MAX_BUFFER_SIZE;
-
-	desc = (struct idmac_desc *)MMC_DESC_BASE;
-
-	for (i = 0; i < desc_num; i++) {
-		(desc + i)->des0 = IDMAC_DES0_OWN | IDMAC_DES0_CH |
-				   IDMAC_DES0_DIC;
-		(desc + i)->des1 = IDMAC_DES1_BS1(MMC_DMA_MAX_BUFFER_SIZE);
-		/* buffer address */
-		(desc + i)->des2 = buffer + MMC_DMA_MAX_BUFFER_SIZE * i;
-		/* next descriptor address */
-		(desc + i)->des3 = MMC_DESC_BASE +
-				   (sizeof(struct idmac_desc) * (i + 1));
-	}
-	/* first descriptor */
-	desc->des0 |= IDMAC_DES0_FS;
-	/* last descriptor */
-	last_idx = desc_num - 1;
-	(desc + last_idx)->des0 |= IDMAC_DES0_LD;
-	(desc + last_idx)->des0 &= ~(IDMAC_DES0_DIC | IDMAC_DES0_CH);
-	(desc + last_idx)->des1 = IDMAC_DES1_BS1(bytes - (last_idx *
-				  MMC_DMA_MAX_BUFFER_SIZE));
-	/* set next descriptor address as 0 */
-	(desc + last_idx)->des3 = 0;
-
-	mmio_write_32(MMC0_DBADDR, MMC_DESC_BASE);
-
-	ret = mmc0_send_cmd(25, lba, resp_buf);
-	if (ret) {
-		NOTICE("failed to send CMD25\n");
-		mmio_write_32(MMC0_RINTSTS, ~0);
-		return -EFAULT;
-	}
-	ret = wait_data_ready();
-	if (ret)
-		return ret;
-
-	ret = mmc0_send_cmd(12, EMMC_FIX_RCA << 16, resp_buf);
-	if (ret) {
-		NOTICE("failed to send CMD12\n");
-		mmio_write_32(MMC0_RINTSTS, ~0);
-		return -EFAULT;
-	}
-
-	do {
-		ret = mmc0_send_cmd(13, EMMC_FIX_RCA << 16, resp_buf);
-		if (ret) {
-			NOTICE("failed to send command 13\n");
-			return ret;
-		}
-	} while (!(resp_buf[0] & MMC_STATUS_READY_FOR_DATA) ||
-		 (mmc_state(resp_buf[0]) != MMC_STATE_TRAN));
-
-	if (boot_partition) {
-		/* switch back to normal partition */
-		ret = mmc0_update_ext_csd(EXT_CSD_PARTITION_CONFIG,
-					  PART_CFG_BOOT_PARTITION1_ENABLE);
-		if (ret)
-			NOTICE("fail to switch eMMC normal partition\n");
-	}
-	return ret;
-}
-
-int mmc0_write(unsigned long mmc_start, size_t size,
-		unsigned long buffer, uint32_t boot_partition)
-{
-	unsigned int mmc_blk_start = mmc_start / MMC_BLOCK_SIZE;
-	unsigned int mmc_blk_cnt, offset;
-
-	offset = mmc_start % MMC_BLOCK_SIZE;
-	mmc_blk_cnt = (size + offset + MMC_BLOCK_SIZE - 1) / MMC_BLOCK_SIZE;
-
-	return write_multi_blocks(mmc_blk_start, mmc_blk_cnt, buffer,
-				  boot_partition);
-}
-
-int init_mmc(void)
-{
-	int ret;
-
-	enable_mmc0();
-
-	ret = enum_mmc0_card();
-	if (ret)
-		return ret;
-
-	/* set boot mode to 8-bit */
-	mmc0_update_ext_csd(177, 2);
-	/* response to RESET signal */
-	mmc0_update_ext_csd(162, 1);
-	/* set access userdata area */
-	mmc0_update_ext_csd(EXT_CSD_PARTITION_CONFIG,
-			    PART_CFG_BOOT_PARTITION1_ENABLE);
-
-	mmio_write_32(MMC0_RINTSTS, ~0);
-
-	return 0;
-}
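After init_mmc() returns successfully, the eMMC is accessed through the block-oriented wrappers above. A minimal, purely illustrative call sequence (the offset, size and destination address below are made up) would be:

	/* Illustrative only: read 64KB from eMMC offset 1MB into memory at 0x40000000. */
	if (init_mmc() == 0)
		mmc0_read(0x100000, 0x10000, 0x40000000, 0);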
diff --git a/plat/hikey/drivers/hi6553.c b/plat/hikey/drivers/hi6553.c
deleted file mode 100644
index 521c59a..0000000
--- a/plat/hikey/drivers/hi6553.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <assert.h>
-#include <mmio.h>
-#include <hi6553.h>
-#include <hi6220.h>
-
-unsigned char hi6553_read_8(unsigned int offset)
-{
-	return mmio_read_8(PMUSSI_BASE + (offset << 2));
-}
-
-void hi6553_write_8(unsigned int offset, unsigned int value)
-{
-	mmio_write_8(PMUSSI_BASE + (offset << 2), value);
-}
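The offset << 2 in both accessors reflects how the PMU SSI window exposes the HI6553 registers: each 8-bit PMIC register occupies a 4-byte slot, so register N is accessed at PMUSSI_BASE + 4 * N. For example, the LDO7 read earlier in this patch expands to:

/* Example: hi6553_read_8(LDO7_REG_ADJ) is equivalent to */
unsigned char v = mmio_read_8(PMUSSI_BASE + (LDO7_REG_ADJ << 2));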
diff --git a/plat/hikey/drivers/hisi_dvfs.c b/plat/hikey/drivers/hisi_dvfs.c
deleted file mode 100644
index 3fb4a8e..0000000
--- a/plat/hikey/drivers/hisi_dvfs.c
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <console.h>
-#include <debug.h>
-#include <partitions.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <string.h>
-#include <mmio.h>
-#include <hi6220.h>
-#include <hi6553.h>
-
-#define ACPU_FREQ_MAX_NUM  		5
-#define	ACPU_OPP_NUM			7
-
-#define ACPU_VALID_VOLTAGE_MAGIC	(0x5A5AC5C5)
-
-#define ACPU_WAIT_TIMEOUT		(200)
-#define ACPU_WAIT_FOR_WFI_TIMOUT	(2000)
-#define ACPU_DFS_STATE_CNT		(0x10000)
-
-struct acpu_dvfs_sram_stru {
-	unsigned int magic;
-	unsigned int support_freq_num;
-	unsigned int support_freq_max;
-	unsigned int start_prof;
-	unsigned int vol[ACPU_OPP_NUM];
-};
-
-struct acpu_volt_cal_para {
-	unsigned int freq;
-	unsigned int ul_vol;
-	unsigned int dl_vol;
-	unsigned int core_ref_hpm;
-};
-
-struct ddr_volt_cal_para {
-	unsigned int freq;
-	unsigned int ul_vol;
-	unsigned int dl_vol;
-	unsigned int ddr_ref_hpm;
-};
-
-struct acpu_dvfs_opp_para {
-	unsigned int freq;
-	unsigned int acpu_clk_profile0;
-	unsigned int acpu_clk_profile1;
-	unsigned int acpu_vol_profile;
-	unsigned int acpu_pll_freq;
-	unsigned int acpu_pll_frac;
-};
-
-unsigned int efuse_acpu_freq[] = {
-	1200000, 1250000, 1300000, 1350000,
-	1400000, 1450000, 1500000, 1550000,
-	1600000, 1650000, 1700000, 1750000,
-	1800000, 1850000, 1900000, 1950000,
-};
-
-struct acpu_dvfs_opp_para hi6220_acpu_profile[] = {
-	{ 208000,  0x61E5, 0x022, 0x3A, 0x5220102B, 0x05555555 },
-	{ 432000,  0x10A6, 0x121, 0x3A, 0x5120102D, 0x10000005 },
-	{ 729000,  0x2283, 0x100, 0x4A, 0x51101026, 0x10000005 },
-	{ 960000,  0x1211, 0x100, 0x5B, 0x51101032, 0x10000005 },
-	{ 1200000, 0x1211, 0x100, 0x6B, 0x5110207D, 0x10000005 },
-	{ 1400000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
-	{ 1500000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
-};
-
-struct acpu_dvfs_opp_para *acpu_dvfs_profile = hi6220_acpu_profile;
-struct acpu_dvfs_sram_stru *acpu_dvfs_sram_buf =
-	(struct acpu_dvfs_sram_stru *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;
-
-static inline void write_reg_mask(uintptr_t addr,
-				  uint32_t val, uint32_t mask)
-{
-	uint32_t reg;
-
-	reg = mmio_read_32(addr);
-	reg = (reg & ~(mask)) | val;
-	mmio_write_32(addr, reg);
-}
-
-static inline uint32_t read_reg_mask(uintptr_t addr,
-				     uint32_t mask, uint32_t offset)
-{
-	uint32_t reg;
-
-	reg = mmio_read_32(addr);
-	reg &= (mask << offset);
-	return (reg >> offset);
-}
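These two wrappers are the plain read-modify-write and field-extract primitives used throughout the DVFS sequences below. For example, the first step of acpu_dvfs_syspll_cfg() sets bits [13:12] without disturbing the rest of the register, and the polling loop extracts the 2-bit status field at bits [21:20]:

/* Usage examples, taken from the sequences below. */
write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x3 << 12, 0x3 << 12);	/* set acpu_subsys_clk_div_sw */
uint32_t div_stat = read_reg_mask(ACPU_SC_CPU_STAT, 0x3, 20);	/* read clk_div_status_vd */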
-
-static int acpu_dvfs_syspll_cfg(unsigned int prof_id)
-{
-	uint32_t reg0 = 0;
-	uint32_t count = 0;
-	uint32_t clk_div_status = 0;
-
-	/*
-	 * step 1:
-	 *  - ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
-	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
-	 */
-	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x3 << 12, 0x3 << 12);
-	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 4,  0x1 << 4);
-
-	/*
-	 * step 2:
-	 *  - ACPUSYSPLLCFG.acpu_syspll_div_cfg:
-	 *     208MHz, set to 0x5;
-	 *     500MHz, set to 0x2;
-	 *     other opps set to 0x1
-	 */
-	if (prof_id == 0)
-		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x5 << 0, 0x7 << 0);
-	else if (prof_id == 1)
-		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x2 << 0, 0x7 << 0);
-	else
-		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 0, 0x7 << 0);
-
-	/*
-	 * step 3:
-	 *  - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x3;
-	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0
-	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0
-	 *  - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1
-	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
-	 */
-	clk_div_status = 0x3;
-	do {
-		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3, 20);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: clk div status timeout!\n", __func__);
-			return -1;
-		}
-	} while (clk_div_status != reg0);
-
-	write_reg_mask(ACPU_SC_VD_CTRL, 0x0, (0x1 << 0) | (0x1 << 11));
-	write_reg_mask(PMCTRL_ACPUCLKDIV, 0x1 << 8, 0x3 << 8);
-	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x1 << 0, 0x1 << 0);
-
-	return 0;
-}
-
-static void acpu_dvfs_clk_div_cfg(unsigned int prof_id,
-				  unsigned int *cpuext_cfg,
-				  unsigned int *acpu_ddr_cfg)
-{
-	if (0 == prof_id) {
-		write_reg_mask(PMCTRL_ACPUCLKDIV,
-			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
-		*cpuext_cfg = 0x1;
-		*acpu_ddr_cfg = 0x1;
-	} else if (1 == prof_id) {
-		write_reg_mask(PMCTRL_ACPUCLKDIV,
-			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
-		*cpuext_cfg = 0x1;
-		*acpu_ddr_cfg = 0x1;
-	} else {
-		/* ddr has not been initialized yet */
-		write_reg_mask(PMCTRL_ACPUCLKDIV,
-			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x0 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
-			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
-		*cpuext_cfg = 0x1;
-		*acpu_ddr_cfg = 0x0;
-	}
-
-	return;
-}
-
-static int acpu_dvfs_freq_ascend(unsigned int cur_prof, unsigned int tar_prof)
-{
-	unsigned int reg0 = 0;
-	unsigned int reg1 = 0;
-	unsigned int reg2 = 0;
-	unsigned int count = 0;
-	unsigned int cpuext_cfg_val = 0;
-	unsigned int acpu_ddr_cfg_val = 0;
-	int ret = 0;
-
-	/*
-	 * step 1:
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
-	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
-	 *
-	 * step 2:
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x5 (208MHz)
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x2 (500MHz)
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x1 (Other OPPs)
-	 *
-	 * step 3:
-	 *  - ACPU_SC_CPU_STAT.clk_div_status_vd = 0x3;
-	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x0;
-	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x0;
-	 *  - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1;
-	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
-	 */
-	ret = acpu_dvfs_syspll_cfg(cur_prof);
-	if (ret)
-		return -1;
-
-	/*
-	 * step 4:
-	 *  - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
-			SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: syspll sw status timeout\n", __func__);
-			return -1;
-		}
-	} while (0x1 != reg0);
-
-	/* Enable VD functionality if > 800MHz */
-	if (acpu_dvfs_profile[tar_prof].freq > 800000) {
-
-		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
-			HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);
-
-		/*
-		 * step 5:
-		 *  - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
-		 *  - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
-		 */
-		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
-			HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
-		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
-			ACPU_SC_VD_MASK_PATTERN_VAL,
-			ACPU_SC_VD_MASK_PATTERN_MASK);
-
-		/*
-		 * step 6:
-		 *  - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
-		 *  - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
-		 *  - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
-		 *  - ACPU_SC_VD_DLY_FIXED_CTRL  = 0x1;
-		 */
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
-		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);
-
-		/*
-		 * step 7:
-		 *  - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
-		 *  - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
-		 *  - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
-		 *  - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
-		 *
-		 * step 8:
-		 *  - ACPU_SC_VD_CTRL.tune = 0x7;
-		 */
-		write_reg_mask(ACPU_SC_VD_CTRL,
-			ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
-			ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
-	}
-
-	/* step 9: ACPUPLLCTRL.acpupll_en_cfg = 0x0 */
-	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
-
-	/* step 10: set PMCTRL_ACPUPLLFREQ and PMCTRL_ACPUPLLFRAC */
-	mmio_write_32(PMCTRL_ACPUPLLFREQ,
-		acpu_dvfs_profile[tar_prof].acpu_pll_freq);
-	mmio_write_32(PMCTRL_ACPUPLLFRAC,
-		acpu_dvfs_profile[tar_prof].acpu_pll_frac);
-
-	/*
-	 * step 11:
-	 *  - wait for 1us;
-	 *  - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
-	 */
-	count = 0;
-	while (count < ACPU_WAIT_TIMEOUT) {
-		count++;
-	}
-	write_reg_mask(PMCTRL_ACPUPLLCTRL,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
-
-	/* step 12: PMCTRL_ACPUVOLPMUADDR = 0x100da */
-	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);
-
-	/*
-	 * step 13:
-	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (208MHz);
-	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (500MHz);
-	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x20 (798MHz);
-	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1300MHz);
-	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1500MHz);
-	 */
-	write_reg_mask(PMCTRL_ACPUDESTVOL,
-		acpu_dvfs_profile[tar_prof].acpu_vol_profile,
-		((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));
-
-	/*
-	 * step 14:
-	 *  - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
-	 *  - Polling ACPUVOLTIMEOUT.acpu_vol_timeout == 0x1
-	 *  - Config PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg
-	 *  - Config ACPUCLKDIV.cpuext_clk_div_cfg;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
-			SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
-		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
-			SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
-		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
-			SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
-			return -1;
-		}
-	} while ((reg0 != reg1) || (0x1 != reg2));
-
-	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);
-
-	/*
-	 * step 15:
-	 *  - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
-	 *  - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
-	 *  - ACPUPLLCTRL.acpupll_timeout = 0x1;
-	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
-			SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
-		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
-			SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
-		reg2 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
-			SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
-			return -1;
-		}
-	} while ((cpuext_cfg_val != reg1) ||
-		(acpu_ddr_cfg_val != reg0) ||
-		(0x1 != reg2));
-
-	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
-		0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);
-
-	/*
-	 * step 16:
-	 *  - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
-	 *  - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
-	 *  - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
-	 *  - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
-	 *  - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
-	 *  - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
-	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
-	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
-	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
-			SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpu pll sw status timeout.\n", __func__);
-			return -1;
-		}
-	} while (0x1 != reg0);
-
-	if (acpu_dvfs_profile[tar_prof].freq > 800000)
-		write_reg_mask(ACPU_SC_VD_CTRL,
-			ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);
-
-	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
-		(0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
-		(0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));
-
-	return 0;
-}
-
-static int acpu_dvfs_freq_descend(unsigned int cur_prof, unsigned int tar_prof)
-{
-	unsigned int reg0 = 0;
-	unsigned int reg1 = 0;
-	unsigned int reg2 = 0;
-	unsigned int count = 0;
-	unsigned int cpuext_cfg_val = 0;
-	unsigned int acpu_ddr_cfg_val = 0;
-	int ret = 0;
-
-	ret = acpu_dvfs_syspll_cfg(tar_prof);
-	if (ret)
-		return -1;
-
-	/*
-	 * step 4:
-	 *  - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1, 2);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: syspll sw status timeout.\n", __func__);
-			return -1;
-		}
-	} while (0x1 != reg0);
-
-	/*
-	 * Step 5:
-	 *  - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x0
-	 */
-	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
-
-	/*
-	 * step 6
-	 *  - Config PMCTRL_ACPUPLLFREQ and ACPUPLLFRAC
-	 */
-	mmio_write_32(PMCTRL_ACPUPLLFREQ, acpu_dvfs_profile[tar_prof].acpu_pll_freq);
-	mmio_write_32(PMCTRL_ACPUPLLFRAC, acpu_dvfs_profile[tar_prof].acpu_pll_frac);
-
-	/*
-	 * step 7:
-	 *  - Wait 1us;
-	 *  - Config PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
-	 */
-	count = 0;
-	while (count < ACPU_WAIT_TIMEOUT) {
-		count++;
-	}
-
-	write_reg_mask(PMCTRL_ACPUPLLCTRL,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
-		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
-
-	/* Enable VD functionality if > 800MHz */
-	if (acpu_dvfs_profile[tar_prof].freq > 800000) {
-
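-		/* step 8: set the HPM OSC divider */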
-		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
-			HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);
-
-		/*
-		 * step 9:
-		 *  - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
-		 *  - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
-		 */
-		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
-			HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
-		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
-			ACPU_SC_VD_MASK_PATTERN_VAL,
-			ACPU_SC_VD_MASK_PATTERN_MASK);
-
-		/*
-		 * step 10:
-		 *  - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
-		 *  - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
-		 *  - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
-		 *  - ACPU_SC_VD_DLY_FIXED_CTRL  = 0x1;
-		 */
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
-		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
-		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);
-
-		/*
-		 * step 11:
-		 *  - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
-		 *  - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
-		 *  - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
-		 *  - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
-		 *
-		 * step 12:
-		 *  - ACPU_SC_VD_CTRL.tune = 0x7;
-		 */
-		write_reg_mask(ACPU_SC_VD_CTRL,
-			ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
-			ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
-	}
-
-	/*
-	 * step 13:
-	 *  - Polling PMCTRL_ACPUPLLCTRL.acpupll_timeout == 0x1;
-	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
-			SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpupll timeout.\n", __func__);
-			return -1;
-		}
-	} while (0x1 != reg0);
-
-	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
-		0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);
-
-	/*
-	 * step 14:
-	 *  - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
-	 *  - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
-	 *  - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
-	 *  - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
-	 *  - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
-	 *  - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
-	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
-	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
-			SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpupll sw status timeout.\n", __func__);
-			return -1;
-		}
-	} while (0x1 != reg0);
-
-	if (acpu_dvfs_profile[tar_prof].freq > 800000)
-		write_reg_mask(ACPU_SC_VD_CTRL,
-			ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);
-
-	/*
-	 * step 15:
-	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
-	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
-	 */
-	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
-		(0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
-		(0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));
-
-	/*
-	 * step 16:
-	 *  - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x0;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3,
-			ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD_SHIFT);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: clk div status timeout.\n", __func__);
-			return -1;
-		}
-	} while (0x0 != reg0);
-
-	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);
-
-	/*
-	 * step 17:
-	 *  - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
-	 *  - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
-	 *  - PMCTRL_ACPUVOLPMUADDR = 0x1006C;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
-			SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
-		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
-			SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
-			return -1;
-		}
-	} while ((cpuext_cfg_val != reg0) || (acpu_ddr_cfg_val != reg1));
-
-	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);
-
-	/*
-	 * step 18:
-	 *  - Config PMCTRL_ACPUDESTVOL.acpu_dest_vol;
-	 */
-	write_reg_mask(PMCTRL_ACPUDESTVOL,
-		acpu_dvfs_profile[tar_prof].acpu_vol_profile,
-		((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));
-
-	/*
-	 * step 19:
-	 *  - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
-	 *  - ACPUVOLTIMEOUT.acpu_vol_timeout = 0x1;
-	 */
-	count = 0;
-	do {
-		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
-			SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
-		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
-			SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
-		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
-			SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
-		if ((count++) > ACPU_DFS_STATE_CNT) {
-			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
-			return -1;
-		}
-	} while ((reg0 != reg1) || (0x1 != reg2));
-
-	return 0;
-}
-
-int acpu_dvfs_target(unsigned int curr_prof, unsigned int target_prof)
-{
-	int ret = 0;
-
-	if (curr_prof == target_prof) {
-		INFO("%s: target_prof is equal to curr_prof (%d)\n",
-			__func__, curr_prof);
-		return 0;
-	}
-
-	if ((curr_prof >= ACPU_FREQ_MAX_NUM) ||
-	    (target_prof >= ACPU_FREQ_MAX_NUM)) {
-		INFO("%s: invalid parameter %d %d\n",
-			__func__, curr_prof, target_prof);
-		return -1;
-	}
-
-	if (target_prof > acpu_dvfs_sram_buf->support_freq_num)
-		target_prof = acpu_dvfs_sram_buf->support_freq_num;
-
-	if (target_prof < curr_prof)
-		ret = acpu_dvfs_freq_descend(curr_prof, target_prof);
-	else if (target_prof > curr_prof)
-		ret = acpu_dvfs_freq_ascend(curr_prof, target_prof);
-
-	if (ret) {
-		ERROR("%s: acpu_dvfs_target failed!\n", __func__);
-		return -1;
-	}
-
-	/* Complete acpu dvfs setting and set magic number */
-	acpu_dvfs_sram_buf->start_prof = target_prof;
-	acpu_dvfs_sram_buf->magic = ACPU_VALID_VOLTAGE_MAGIC;
-
-	mmio_write_32(DDR_DFS_FREQ_ADDR, 800000);
-	return 0;
-}
-
-static int acpu_dvfs_set_freq(void)
-{
-	unsigned int i;
-	unsigned int curr_prof;
-	unsigned int target_prof;
-	unsigned int max_freq = 0;
-
-	max_freq = acpu_dvfs_sram_buf->support_freq_max;
-
-	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++) {
-
-		if (max_freq == hi6220_acpu_profile[i].freq) {
-			target_prof = i;
-			break;
-		}
-	}
-
-	if (i == acpu_dvfs_sram_buf->support_freq_num) {
-		ERROR("%s: cannot find max freq profile\n", __func__);
-		return -1;
-	}
-
-	curr_prof = 0;
-	target_prof = i;
-
-	/* if max freq is 208MHz, do nothing */
-	if (curr_prof == target_prof)
-		return 0;
-
-	if (acpu_dvfs_target(curr_prof, target_prof)) {
-		ERROR("%s: set acpu freq failed!\n", __func__);
-		return -1;
-	}
-
-	INFO("%s: support freq num is %d\n",
-		__func__, acpu_dvfs_sram_buf->support_freq_num);
-	INFO("%s: start prof is 0x%x\n",
-		__func__,  acpu_dvfs_sram_buf->start_prof);
-	INFO("%s: magic is 0x%x\n",
-		__func__, acpu_dvfs_sram_buf->magic);
-	INFO("%s: voltage:\n", __func__);
-	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++)
-		INFO("  - %d: 0x%x\n", i, acpu_dvfs_sram_buf->vol[i]);
-
-	NOTICE("%s: set acpu freq success!\n", __func__);
-	return 0;
-}
-
-struct acpu_dvfs_volt_setting {
-	unsigned int magic;
-	unsigned int support_freq_num;
-	unsigned int support_freq_max;
-	unsigned int start_prof;
-	unsigned int vol[7];
-	unsigned int hmp_dly_threshold[7];
-};
-
-static void acpu_dvfs_volt_init(void)
-{
-	struct acpu_dvfs_volt_setting *volt;
-
-	/*
-	 * - set default voltage;
-	 * - set pmu address;
-	 * - set voltage up and down step;
-	 * - set voltage stable time;
-	 */
-	mmio_write_32(PMCTRL_ACPUDFTVOL, 0x4a);
-	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0xda);
-	mmio_write_32(PMCTRL_ACPUVOLUPSTEP, 0x1);
-	mmio_write_32(PMCTRL_ACPUVOLDNSTEP, 0x1);
-	mmio_write_32(PMCTRL_ACPUPMUVOLUPTIME, 0x60);
-	mmio_write_32(PMCTRL_ACPUPMUVOLDNTIME, 0x60);
-	mmio_write_32(PMCTRL_ACPUCLKOFFCFG, 0x1000);
-
-	volt = (void *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;
-	volt->magic = 0x5a5ac5c5;
-	volt->support_freq_num = 5;
-	volt->support_freq_max = 1200000;
-	volt->start_prof = 4;
-	volt->vol[0] = 0x49;
-	volt->vol[1] = 0x49;
-	volt->vol[2] = 0x50;
-	volt->vol[3] = 0x60;
-	volt->vol[4] = 0x78;
-	volt->vol[5] = 0x78;
-	volt->vol[6] = 0x78;
-
-	volt->hmp_dly_threshold[0] = 0x0;
-	volt->hmp_dly_threshold[1] = 0x0;
-	volt->hmp_dly_threshold[2] = 0x0;
-	volt->hmp_dly_threshold[3] = 0x0e8b0e45;
-	volt->hmp_dly_threshold[4] = 0x10691023;
-	volt->hmp_dly_threshold[5] = 0x10691023;
-	volt->hmp_dly_threshold[6] = 0x10691023;
-
-	INFO("%s: success!\n", __func__);
-}
-
-void init_acpu_dvfs(void)
-{
-	unsigned int i = 0;
-
-	INFO("%s: pmic version %d\n", __func__, hi6553_read_8(VERSION_REG));
-
-	/* init parameters */
-	mmio_write_32(ACPU_CHIP_MAX_FREQ, efuse_acpu_freq[8]);
-	INFO("%s: ACPU_CHIP_MAX_FREQ=0x%x.\n",
-		__func__, mmio_read_32(ACPU_CHIP_MAX_FREQ));
-
-	/* set maximum support frequency to 1.2GHz */
-	for (i = 0; i < ACPU_FREQ_MAX_NUM; i++)
-		acpu_dvfs_sram_buf->vol[i] = hi6220_acpu_profile[i].acpu_vol_profile;
-
-	acpu_dvfs_sram_buf->support_freq_num = ACPU_FREQ_MAX_NUM;
-	acpu_dvfs_sram_buf->support_freq_max = 1200000;
-
-	/* init acpu dvfs */
-	acpu_dvfs_volt_init();
-	acpu_dvfs_set_freq();
-
-	return;
-}
diff --git a/plat/hikey/drivers/hisi_ipc.c b/plat/hikey/drivers/hisi_ipc.c
deleted file mode 100644
index 393982c..0000000
--- a/plat/hikey/drivers/hisi_ipc.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <hisi_ipc.h>
-#include <hisi_sram_map.h>
-#include <mmio.h>
-#include <platform_def.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-
-#include <hi6220_regs_acpu.h>
-
-#define BIT(x)	(0x1 << (x))
-
-static int _ipc_init = 0;
-
-static unsigned int cpu_ipc_num[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT_PER_CLUSTER] = {
-	{
-		HISI_IPC_MCU_INT_SRC_ACPU0_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU1_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU2_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU3_PD,
-	},
-	{
-		HISI_IPC_MCU_INT_SRC_ACPU4_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU5_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU6_PD,
-		HISI_IPC_MCU_INT_SRC_ACPU7_PD,
-	}
-};
-
-int hisi_cpus_pd_in_cluster_besides_curr(unsigned int cpu,
-					 unsigned int cluster)
-{
-	unsigned int val = 0, cpu_val = 0;
-	int i;
-
-	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
-	val = val >> (cluster * 16);
-
-	for (i = 0; i < PLATFORM_CORE_COUNT_PER_CLUSTER; i++) {
-
-		if (cpu == i)
-			continue;
-
-		cpu_val = (val >> (i * 4)) & 0xF;
-		if (cpu_val == 0x8)
-			return 0;
-	}
-
-	return 1;
-}
-
-int hisi_cpus_powered_off_besides_curr(unsigned int cpu)
-{
-	unsigned int val;
-
-	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
-	return (val == (0x8 << (cpu * 4)));
-}
-
-static void hisi_ipc_send(unsigned int ipc_num)
-{
-	if (!_ipc_init) {
-		printf("error: ipc is not initialized!\n");
-		return;
-	}
-
-	mmio_write_32(HISI_IPC_CPU_RAW_INT_ADDR, 1 << ipc_num);
-}
-
-void hisi_ipc_spin_lock(unsigned int signal)
-{
-	unsigned int hs_ctrl;
-
-	if (signal >= HISI_IPC_INT_SRC_NUM)
-		return;
-
-	do {
-		hs_ctrl = mmio_read_32(HISI_IPC_ACPU_CTRL(signal));
-	} while (hs_ctrl);
-}
-
-void hisi_ipc_spin_unlock(unsigned int signal)
-{
-	if (signal >= HISI_IPC_INT_SRC_NUM)
-		return;
-
-	mmio_write_32(HISI_IPC_ACPU_CTRL(signal), 0);
-}
-
-void hisi_ipc_cpu_on_off(unsigned int cpu, unsigned int cluster,
-			 unsigned int mode)
-{
-	unsigned int val = 0;
-	unsigned int offset;
-
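-	/* each core owns a 4-bit flag field: bit 0 requests power-on, bit 1 power-off */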
-	if (mode == HISI_IPC_PM_ON)
-		offset = cluster * 16 + cpu * 4;
-	else
-		offset = cluster * 16 + cpu * 4 + 1;
-
-	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
-	val |= (0x01 << offset);
-	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, val);
-	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
-}
-
-void hisi_ipc_cpu_on(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int data, expected;
-
-	hisi_ipc_cpu_on_off(cpu, cluster, HISI_IPC_PM_ON);
-
-	/* Enable debug module */
-	data = mmio_read_32(ACPU_SC_PDBGUP_MBIST);
-	if (cluster)
-		expected = 1 << (cpu + PDBGUP_CLUSTER1_SHIFT);
-	else
-		expected = 1 << cpu;
-	mmio_write_32(ACPU_SC_PDBGUP_MBIST, data | expected);
-	do {
-		/* RAW barrier */
-		data = mmio_read_32(ACPU_SC_PDBGUP_MBIST);
-	} while (!(data & expected));
-}
-
-void hisi_ipc_cpu_off(unsigned int cpu, unsigned int cluster)
-{
-	hisi_ipc_cpu_on_off(cpu, cluster, HISI_IPC_PM_OFF);
-}
-
-void hisi_ipc_cluster_on_off(unsigned int cpu, unsigned int cluster,
-			     unsigned int mode)
-{
-	unsigned int val = 0;
-	unsigned int offset;
-
-	if (mode == HISI_IPC_PM_ON)
-		offset = cluster * 4;
-	else
-		offset = cluster * 4 + 1;
-
-	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-	val = mmio_read_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR);
-	val |= (0x01 << offset);
-	mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, val);
-	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
-}
-
-void hisi_ipc_cluster_on(unsigned int cpu, unsigned int cluster)
-{
-	hisi_ipc_cluster_on_off(cpu, cluster, HISI_IPC_PM_ON);
-}
-
-void hisi_ipc_cluster_off(unsigned int cpu, unsigned int cluster)
-{
-	hisi_ipc_cluster_on_off(cpu, cluster, HISI_IPC_PM_OFF);
-}
-
-void hisi_ipc_cpu_suspend(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int val = 0;
-	unsigned int offset;
-
-	offset = cluster * 16 + cpu * 4 + 2;
-
-	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
-	val |= (0x01 << offset);
-	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, val);
-	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
-}
-
-void hisi_ipc_cluster_suspend(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int val;
-	unsigned int offset;
-
-	offset = cluster * 4 + 1;
-
-	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-	if (hisi_cpus_pd_in_cluster_besides_curr(cpu, cluster)) {
-		val = mmio_read_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR);
-		val |= (0x01 << offset);
-		mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, val);
-	}
-	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
-}
-
-void hisi_ipc_psci_system_off(void)
-{
-	hisi_ipc_send(HISI_IPC_MCU_INT_SRC_ACPU_PD);
-}
-
-int hisi_ipc_init(void)
-{
-	_ipc_init = 1;
-
-	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, 0x8);
-	mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, 0x8);
-	return 0;
-}
diff --git a/plat/hikey/drivers/hisi_mcu.c b/plat/hikey/drivers/hisi_mcu.c
deleted file mode 100644
index 50d5f54..0000000
--- a/plat/hikey/drivers/hisi_mcu.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <console.h>
-#include <debug.h>
-#include <partitions.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <string.h>
-#include <mmio.h>
-#include <hi6220.h>
-
-#define MCU_SECTION_MAX		30
-
-enum MCU_IMAGE_SEC_TYPE_ENUM {
-	MCU_IMAGE_SEC_TYPE_TEXT = 0,	/* text section */
-	MCU_IMAGE_SEC_TYPE_DATA,	/* data section */
-	MCU_IMAGE_SEC_TYPE_BUTT
-};
-
-enum MCU_IMAGE_SEC_LOAD_ENUM {
-	MCU_IMAGE_SEC_LOAD_STATIC = 0,
-	MCU_IMAGE_SEC_LOAD_DYNAMIC,
-	MCU_IMAGE_SEC_LOAD_BUFFER,
-	MCU_IMAGE_SEC_LOAD_MODEM_ENTRY,
-	MCU_IMAGE_SEC_LOAD_BUTT
-};
-
-struct mcu_image_sec {
-	unsigned short serial;
-	char type;
-	char load_attr;
-	uint32_t src_offset;		/* offset in image */
-	uint32_t dst_offset;		/* offset in memory */
-	uint32_t size;
-};
-
-struct mcu_image_head {
-	char time_stamp[24];
-	uint32_t image_size;
-	uint32_t secs_num;
-	struct mcu_image_sec secs[MCU_SECTION_MAX];
-};
-
-#define SOC_SRAM_M3_BASE_ADDR		(0xF6000000)
-
-#define MCU_SRAM_SIZE			(0x0000C000)
-#define MCU_CACHE_SIZE			(0x00004000)
-#define MCU_CODE_SIZE			(MCU_SRAM_SIZE - MCU_CACHE_SIZE)
-
-#define MCU_SYS_MEM_ADDR		(0x05E00000)
-#define MCU_SYS_MEM_SIZE		(0x00100000)
-
-#if 0
-static uint32_t ap2mcu_addr(uint32_t ap_addr)
-{
-	if (ap_addr >= SOC_SRAM_M3_BASE_ADDR &&
-	    ap_addr < SOC_SRAM_M3_BASE_ADDR + MCU_SRAM_SIZE)
-		return ap_addr - SOC_SRAM_M3_BASE_ADDR;
-	else if (ap_addr >= MCU_SYS_MEM_ADDR &&
-		 ap_addr < MCU_SYS_MEM_ADDR + MCU_SYS_MEM_SIZE )
-		return ap_addr - MCU_SYS_MEM_ADDR + MCU_SRAM_SIZE;
-	else
-		return ap_addr;
-}
-#endif
-
-static uint32_t mcu2ap_addr(uint32_t mcu_addr)
-{
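-	/* translate an MCU-view address into the corresponding AP address */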
-	if (mcu_addr < MCU_CODE_SIZE)
-		return (mcu_addr + SOC_SRAM_M3_BASE_ADDR);
-	else if ((mcu_addr >= MCU_SRAM_SIZE) &&
-		 (mcu_addr < MCU_SRAM_SIZE + MCU_SYS_MEM_SIZE))
-		return mcu_addr - MCU_SRAM_SIZE + MCU_SYS_MEM_ADDR;
-	else
-		return mcu_addr;
-}
-
-static int is_binary_header_invalid(struct mcu_image_head *head,
-				    unsigned length)
-{
-	/* invalid cases */
-	if ((head->image_size == 0) ||
-	    (head->image_size > length) ||
-	    (head->secs_num > MCU_SECTION_MAX) ||
-	    (head->secs_num == 0))
-		return 1;
-
-	return 0;
-}
-
-static int is_binary_section_invalid(struct mcu_image_sec *sec,
-				     struct mcu_image_head *head)
-{
-	unsigned long ap_dst_offset = 0;
-
-	if ((sec->serial >= head->secs_num) ||
-	    (sec->src_offset + sec->size > head->image_size))
-		return 1;
-
-	if ((sec->type >= MCU_IMAGE_SEC_TYPE_BUTT) ||
-	    (sec->load_attr >= MCU_IMAGE_SEC_LOAD_BUTT))
-		return 1;
-
-	ap_dst_offset = mcu2ap_addr(sec->dst_offset);
-	if ((ap_dst_offset >= SOC_SRAM_M3_BASE_ADDR) &&
-	    (ap_dst_offset < SOC_SRAM_M3_BASE_ADDR + 0x20000 - sec->size))
-		return 0;
-	else if ((ap_dst_offset >= MCU_SYS_MEM_ADDR) &&
-		 (ap_dst_offset < MCU_SYS_MEM_ADDR + MCU_SYS_MEM_SIZE - sec->size))
-		return 0;
-	else if ((ap_dst_offset >= 0xfff8e000) &&
-		 (ap_dst_offset < 0xfff91c00 - sec->size))
-		return 0;
-
-	ERROR("%s: mcu destination address invalid.\n", __func__);
-	ERROR("%s: number=%d, dst offset=%d size=%d\n",
-		__func__, sec->serial, sec->dst_offset, sec->size);
-	return 1;
-}
-
-void hisi_mcu_enable_sram(void)
-{
-	mmio_write_32(AO_SC_PERIPH_CLKEN4,
-		      AO_SC_PERIPH_CLKEN4_HCLK_IPC_S |
-		      AO_SC_PERIPH_CLKEN4_HCLK_IPC_NS);
-
-	/* set register to enable dvfs which is used by mcu */
-	mmio_write_32(PERI_SC_RESERVED8_ADDR, 0x0A001022);
-
-	/* mcu mem is powered on, need de-assert reset */
-	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
-		      AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N);
-
-	/* enable mcu hclk */
-	mmio_write_32(AO_SC_PERIPH_CLKEN4,
-		      AO_SC_PERIPH_CLKEN4_HCLK_MCU |
-		      AO_SC_PERIPH_CLKEN4_CLK_MCU_DAP);
-}
-
-void hisi_mcu_start_run(void)
-{
-	unsigned int val;
-
-#if 0
-        /* set mcu's self loop instruction 0xE7FE at entry point */
-        val = mmio_read_32((SOC_SRAM_M3_BASE_ADDR + 0x200));
-        val &= 0xFFFF0000;
-        val |= 0xE7FE;
-        mmio_write_32((SOC_SRAM_M3_BASE_ADDR + 0x200), val);
-#endif
-
-	/* set mcu ddr remap configuration */
-	mmio_write_32(AO_SC_MCU_SUBSYS_CTRL2, MCU_SYS_MEM_ADDR);
-
-	/* de-assert reset for mcu and to run */
-	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
-		AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N |
-		AO_SC_PERIPH_RSTDIS4_RESET_MCU_SYS_N |
-		AO_SC_PERIPH_RSTDIS4_RESET_MCU_POR_N |
-		AO_SC_PERIPH_RSTDIS4_RESET_MCU_DAP_N);
-
-	val = mmio_read_32(AO_SC_SYS_CTRL2);
-	mmio_write_32(AO_SC_SYS_CTRL2,
-		val | AO_SC_SYS_CTRL2_GLB_SRST_STAT_CLEAR);
-
-	INFO("%s: AO_SC_SYS_CTRL2=%x\n", __func__,
-		mmio_read_32(AO_SC_SYS_CTRL2));
-}
-
-int hisi_mcu_load_image(uintptr_t image_base, uint32_t image_size)
-{
-	unsigned int i;
-	struct mcu_image_head *head;
-	char *buf;
-
-	head = (struct mcu_image_head *)image_base;
-	if (is_binary_header_invalid(head, image_size)) {
-		ERROR("Invalid %s image header.\n", head->time_stamp);
-		return -1;
-	}
-
-	buf = (char *)head;
-	for (i = 0; i < head->secs_num; i++) {
-
-		int *src, *dst;
-
-		/* check the sections */
-		if (is_binary_section_invalid(&head->secs[i], head)) {
-			ERROR("Invalid mcu section.\n");
-			return -1;
-		}
-
-		/* check if the section is static-loaded */
-		if (head->secs[i].load_attr != MCU_IMAGE_SEC_LOAD_STATIC)
-			continue;
-
-		/* copy the sections */
-		src = (int *)(intptr_t)(buf + head->secs[i].src_offset);
-		dst = (int *)(intptr_t)mcu2ap_addr(head->secs[i].dst_offset);
-
-		memcpy((void *)dst, (void *)src, head->secs[i].size);
-
-		INFO("%s: mcu sections %d:\n", __func__, i);
-		INFO("%s:  src  = 0x%x\n", __func__, src);
-		INFO("%s:  dst  = 0x%x\n", __func__, dst);
-		INFO("%s:  size = %d\n", __func__, head->secs[i].size);
-
-		INFO("%s:  [SRC 0x%x] 0x%x 0x%x 0x%x 0x%x\n", __func__,
-			src, src[0], src[1], src[2], src[3]);
-		INFO("%s:  [DST 0x%x] 0x%x 0x%x 0x%x 0x%x\n", __func__,
-			dst, dst[0], dst[1], dst[2], dst[3]);
-	}
-
-	return 0;
-}
diff --git a/plat/hikey/drivers/hisi_pwrc.c b/plat/hikey/drivers/hisi_pwrc.c
deleted file mode 100644
index c1d9e47..0000000
--- a/plat/hikey/drivers/hisi_pwrc.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <bakery_lock.h>
-#include <mmio.h>
-#include <hisi_ipc.h>
-#include <hisi_pwrc.h>
-#include <hisi_sram_map.h>
-#include <hi6220_regs_acpu.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <platform_def.h>
-
-#define CLUSTER_CORE_COUNT		(4)
-#define CLUSTER_CORE_MASK		((1 << CLUSTER_CORE_COUNT) - 1)
-
-#define BIT(x) (0x1 << (x))
-
-void hisi_pwrc_set_core_bx_addr(unsigned int core, unsigned int cluster,
-				uintptr_t entry_point)
-{
-	uintptr_t *core_entry = (uintptr_t *)PWRCTRL_ACPU_ASM_D_ARM_PARA_AD;
-	unsigned int i;
-
-	if (!core_entry) {
-		printf("%s: core entry point is null!\n", __func__);
-		return;
-	}
-
-	i = cluster * CLUSTER_CORE_COUNT + core;
-	mmio_write_64((uintptr_t)(core_entry + i), entry_point);
-}
-
-void hisi_pwrc_set_cluster_wfi(unsigned int cluster)
-{
-	unsigned int reg = 0;
-
-	if (cluster == 0) {
-		reg = mmio_read_32(ACPU_CTRL_BASE + 0x0E4);
-		reg |= BIT(0);
-		mmio_write_32(ACPU_CTRL_BASE + 0x0E4, reg);
-	} else if (cluster == 1) {
-		reg = mmio_read_32(ACPU_CTRL_BASE + 0x0E4);
-		reg |= BIT(16);
-		mmio_write_32(ACPU_CTRL_BASE + 0x0E4, reg);
-	}
-}
-
-int hisi_pwrc_setup(void)
-{
-	unsigned int reg;
-	extern char pm_asm_code[], pm_asm_code_end[];
-	extern char v7_asm[], v7_asm_end[];
-
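-	/* point each core's reset vector at the power-management code copied into SRAM below */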
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(0), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(1), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(2), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(3), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(4), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(5), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(6), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(7), PWRCTRL_ACPU_ASM_CODE_BASE >> 2);
-
-	memset((void *)PWRCTRL_ACPU_ASM_SPACE_ADDR, 0, 0x400);
-	memcpy((void *)PWRCTRL_ACPU_ASM_SPACE_ADDR, (void *)v7_asm,
-	       v7_asm_end - v7_asm);
-
-	memcpy((void *)PWRCTRL_ACPU_ASM_CODE_BASE, (void *)pm_asm_code,
-	       pm_asm_code_end - pm_asm_code);
-
-	reg = mmio_read_32(0xF7800000 + 0x004);
-
-	/* Remap SRAM address */
-	reg |= BIT(0x1) | BIT(17);
-
-	/* Enable reset signal for watchdog */
-	reg |= BIT(0x0) | BIT(16);
-
-	mmio_write_32(0xF7800000 + 0x004, reg);
-
-	return 0;
-}
diff --git a/plat/hikey/drivers/hisi_pwrc_sram.S b/plat/hikey/drivers/hisi_pwrc_sram.S
deleted file mode 100644
index ae8eec4..0000000
--- a/plat/hikey/drivers/hisi_pwrc_sram.S
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <hisi_sram_map.h>
-
-	.global pm_asm_code
-	.global pm_asm_code_end
-	.global v7_asm
-	.global v7_asm_end
-
-	.align	3
-func pm_asm_code
-	mov	x0, 0
-	msr	oslar_el1, x0
-
-	mrs	x0, s3_1_c15_c2_0
-	bic	x0, x0, #0x1E000000
-	orr	x0, x0, #0x180000
-	orr	x0, x0, #0xe000
-	msr	s3_1_c15_c2_0, x0
-
-	mrs	x3, actlr_el3
-	orr	x3, x3, #(0x1<<5)
-	msr	actlr_el3, x3
-
-	mrs	x3, actlr_el2
-	orr	x3, x3, #(0x1<<5)
-	msr	actlr_el2, x3
-
-	ldr	x3, =PWRCTRL_ACPU_ASM_D_ARM_PARA_AD
-	mrs	x0, mpidr_el1
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
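-	/* spin in the holding pen until the per-core entry address is published */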
-pen:	ldr	x4, [x3, x0, LSL #3]
-	cbz	x4, pen
-
-	mov	x0, #0x0
-	mov	x1, #0x0
-	mov	x2, #0x0
-	mov	x3, #0x0
-	br	x4
-
-	.ltorg
-
-pm_asm_code_end:
-
-	.align	3
-	.section .rodata.v7_asm, "aS"
-v7_asm:
-	.word	0xE1A00000	// nop
-	.word	0xE3A02003	// mov r2, #3
-	.word	0xEE0C2F50	// mcr 15, 0, r2, cr12, cr0, {2}
-	.word	0xE320F003	// wfi
-
-	.ltorg
-v7_asm_end:
diff --git a/plat/hikey/drivers/sp804_timer.c b/plat/hikey/drivers/sp804_timer.c
deleted file mode 100644
index 269bf1c..0000000
--- a/plat/hikey/drivers/sp804_timer.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <console.h>
-#include <debug.h>
-#include <errno.h>
-#include <hi6220.h>
-#include <mmio.h>
-#include <sp804_timer.h>
-
-/* Init dual timer0 (TIMER00 & TIMER01) */
-void hi6220_timer_init(void)
-{
-	unsigned int data;
-
-	/* select 32KHz as the clock of dual timer0 */
-	/* FIXME: But I find that it's 19.2MHz, not 32KHz. */
-	data = mmio_read_32(AO_SC_TIMER_EN0);
-	while (data & 3) {
-		data &= ~3;
-		data |= 3 << 16;
-		mmio_write_32(AO_SC_TIMER_EN0, data);
-		data = mmio_read_32(AO_SC_TIMER_EN0);
-	}
-	/* enable the pclk of dual timer0 */
-	data = mmio_read_32(AO_SC_PERIPH_CLKSTAT4);
-	while (!(data & PCLK_TIMER1) || !(data & PCLK_TIMER0)) {
-		mmio_write_32(AO_SC_PERIPH_CLKEN4, PCLK_TIMER1 | PCLK_TIMER0);
-		data = mmio_read_32(AO_SC_PERIPH_CLKSTAT4);
-	}
-	/* reset dual timer0 */
-	data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
-	mmio_write_32(AO_SC_PERIPH_RSTEN4, PCLK_TIMER1 | PCLK_TIMER0);
-	do {
-		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
-	} while (!(data & PCLK_TIMER1) || !(data & PCLK_TIMER0));
-	/* unreset dual timer0 */
-	mmio_write_32(AO_SC_PERIPH_RSTDIS4, PCLK_TIMER1 | PCLK_TIMER0);
-	do {
-		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
-	} while ((data & PCLK_TIMER1) || (data & PCLK_TIMER0));
-
-	/* disable timer00 */
-	mmio_write_32(TIMER00_CONTROL, 0);
-	mmio_write_32(TIMER00_LOAD, 0xffffffff);
-	/* free running */
-	mmio_write_32(TIMER00_CONTROL, 0x82);
-}
-
-static unsigned int get_timer_value(void)
-{
-	return mmio_read_32(TIMER00_VALUE);
-}
-
-void udelay(int us)
-{
-	unsigned int start, cnt, delta, delta_us;
-
-	if (us <= 0)
-		us = 1;
-	/* counter is decreasing */
-	start = get_timer_value();
-	do {
-		cnt = get_timer_value();
-		if (cnt > start) {
-			delta = 0xffffffff - cnt;
-			delta += start;
-		} else
-			delta = start - cnt;
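-		/* timer runs at 19.2MHz: 19.2 ticks per microsecond */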
-		delta_us = (delta * 10) / 192;
-	} while (delta_us < us);
-}
-
-void mdelay(int ms)
-{
-	unsigned int start, cnt, delta, delta_ms;
-
-	if (ms <= 0)
-		ms = 1;
-
-	/* counter is decreasing */
-	start = get_timer_value();
-	do {
-		cnt = get_timer_value();
-		if (cnt > start) {
-			delta = 0xffffffff - cnt;
-			delta += start;
-		} else
-			delta = start - cnt;
-		delta_ms = delta / 19200;
-	} while (delta_ms < ms);
-}
diff --git a/plat/hikey/hikey_def.h b/plat/hikey/hikey_def.h
deleted file mode 100644
index d81b6d7..0000000
--- a/plat/hikey/hikey_def.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIKEY_DEF_H__
-#define __HIKEY_DEF_H__
-
-#define DEVICE_BASE			0xf4000000
-#define DEVICE_SIZE			0x05800000
-
-/* The size of DDR RAM is 1GB. */
-#define DRAM_BASE			0x00000000
-#define DRAM_SIZE			0x40000000
-
-#define XG2RAM0_BASE			0xF9800000
-#define XG2RAM0_SIZE			0x00400000
-
-#define PLAT_TRUSTED_SRAM_ID	0
-#define PLAT_TRUSTED_DRAM_ID	1
-
-/*
- * DRAM (1 GB) at 0x0000_0000 is divided in several regions:
- *   - Secure DRAM (default is the top 16MB) used by OP-TEE
- *   - Non-secure DRAM used by OP-TEE (shared memory and padding) (4MB)
- *   - Secure DRAM (4MB aligned on 4MB) for OP-TEE's "Secure Data Path" feature
- *   - Non-Secure DRAM (remaining DRAM starting at DRAM_BASE)
- */
-#define DRAM_SEC_SIZE			0x01000000
-#define DRAM_SEC_BASE			(DRAM_BASE + DRAM_SIZE - DRAM_SEC_SIZE)
-
-#define DRAM_SDP_SIZE			0x00400000
-#define DRAM_SDP_BASE			(DRAM_SEC_BASE - 0x400000 /* align */ - \
-					 DRAM_SDP_SIZE)
-
-#define DRAM_NS_BASE			DRAM_BASE
-#define DRAM_NS_SIZE			(DRAM_SIZE - DRAM_SEC_SIZE)
-
-#define SRAM_BASE			0xFFF80000
-#define SRAM_SIZE			0x00012000
-
-/*******************************************************************************
- * GIC-400 & interrupt handling related constants
- ******************************************************************************/
-#define GICD_BASE			0xF6801000
-#define GICC_BASE			0xF6802000
-
-#define IRQ_SEC_PHY_TIMER		29
-#define IRQ_SEC_SGI_0			8
-#define IRQ_SEC_SGI_1			9
-#define IRQ_SEC_SGI_2			10
-#define IRQ_SEC_SGI_3			11
-#define IRQ_SEC_SGI_4			12
-#define IRQ_SEC_SGI_5			13
-#define IRQ_SEC_SGI_6			14
-#define IRQ_SEC_SGI_7			15
-#define IRQ_SEC_SGI_8			16
-
-/*******************************************************************************
- * PL011 related constants
- ******************************************************************************/
-#define PL011_UART0_BASE		0xF8015000
-#define PL011_UART3_BASE		0xF7113000
-
-#define PL011_BAUDRATE			115200
-
-#define PL011_UART_CLK_IN_HZ		19200000
-
-/*******************************************************************************
- * CCI-400 related constants
- ******************************************************************************/
-#define CCI400_BASE			0xF6E90000
-#define CCI400_SL_IFACE3_CLUSTER_IX	0
-#define CCI400_SL_IFACE4_CLUSTER_IX	1
-
-#endif /* __HIKEY_DEF_H__ */
diff --git a/plat/hikey/hikey_private.h b/plat/hikey/hikey_private.h
deleted file mode 100644
index 0627687..0000000
--- a/plat/hikey/hikey_private.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIKEY_PRIVATE_H__
-#define __HIKEY_PRIVATE_H__
-
-#include <bl_common.h>
-
-/*******************************************************************************
- * This structure represents the superset of information that is passed to
- * BL3-1 e.g. while passing control to it from BL2 which is bl31_params
- * and other platform specific params
- ******************************************************************************/
-typedef struct bl2_to_bl31_params_mem {
-	struct bl31_params bl31_params;
-	struct image_info bl31_image_info;
-	struct image_info bl32_image_info;
-	struct image_info bl33_image_info;
-	struct entry_point_info bl33_ep_info;
-	struct entry_point_info bl32_ep_info;
-	struct entry_point_info bl31_ep_info;
-} bl2_to_bl31_params_mem_t;
-
-#define RANDOM_MAX		0x7fffffffffffffff
-#define RANDOM_MAGIC		0x9a4dbeaf
-
-struct random_serial_num {
-	uint64_t	magic;
-	uint64_t	data;
-	char		serialno[32];
-};
-
-/*******************************************************************************
- * Function and variable prototypes
- ******************************************************************************/
-void configure_mmu_el1(unsigned long total_base,
-		       unsigned long total_size,
-		       unsigned long ro_start,
-		       unsigned long ro_limit,
-		       unsigned long coh_start,
-		       unsigned long coh_limit);
-void configure_mmu_el3(unsigned long total_base,
-		       unsigned long total_size,
-		       unsigned long ro_start,
-		       unsigned long ro_limit,
-		       unsigned long coh_start,
-		       unsigned long coh_limit);
-extern int flush_loader_image(void);
-extern int flush_user_images(char *cmdbuf, unsigned long addr,
-			     unsigned long length);
-extern int flush_random_serialno(unsigned long addr, unsigned long length);
-extern void generate_serialno(struct random_serial_num *random);
-extern int assign_serialno(char *cmdbuf, struct random_serial_num *random);
-extern char *load_serialno(void);
-extern void hi6220_pll_init(void);
-extern void io_setup(void);
-extern int plat_get_image_source(const char *image_name,
-				 uintptr_t *dev_handle,
-				 uintptr_t *image_spec);
-extern void plat_gic_init(void);
-extern void usb_download(void);
-
-void plat_security_setup(void);
-
-#endif /* __HIKEY_PRIVATE_H__ */
diff --git a/plat/hikey/include/dw_mmc.h b/plat/hikey/include/dw_mmc.h
deleted file mode 100644
index fc12018..0000000
--- a/plat/hikey/include/dw_mmc.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2014, Hisilicon Ltd.
- * Copyright (c) 2014, Linaro Ltd.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DW_MMC_H__
-#define __DW_MMC_H__
-
-#include <stdint.h>
-
-#define MMC0_CTRL				(MMC0_BASE + 0x000)
-#define MMC0_CLKDIV				(MMC0_BASE + 0x008)
-#define MMC0_CLKSRC				(MMC0_BASE + 0x00c)
-#define MMC0_CLKENA				(MMC0_BASE + 0x010)
-#define MMC0_TMOUT				(MMC0_BASE + 0x014)
-#define MMC0_CTYPE				(MMC0_BASE + 0x018)
-#define MMC0_BLKSIZ				(MMC0_BASE + 0x01c)
-#define MMC0_BYTCNT				(MMC0_BASE + 0x020)
-#define MMC0_INTMASK				(MMC0_BASE + 0x024)
-#define MMC0_CMDARG				(MMC0_BASE + 0x028)
-#define MMC0_CMD				(MMC0_BASE + 0x02c)
-#define MMC0_RESP0				(MMC0_BASE + 0x030)
-#define MMC0_RESP1				(MMC0_BASE + 0x034)
-#define MMC0_RESP2				(MMC0_BASE + 0x038)
-#define MMC0_RESP3				(MMC0_BASE + 0x03c)
-#define MMC0_RINTSTS				(MMC0_BASE + 0x044)
-#define MMC0_STATUS				(MMC0_BASE + 0x048)
-#define MMC0_FIFOTH				(MMC0_BASE + 0x04c)
-#define MMC0_DEBNCE				(MMC0_BASE + 0x064)
-#define MMC0_UHSREG				(MMC0_BASE + 0x074)
-#define MMC0_BMOD				(MMC0_BASE + 0x080)
-#define MMC0_DBADDR				(MMC0_BASE + 0x088)
-#define MMC0_IDSTS				(MMC0_BASE + 0x08c)
-#define MMC0_IDINTEN				(MMC0_BASE + 0x090)
-#define MMC0_DSCADDR				(MMC0_BASE + 0x094)
-#define MMC0_BUFADDR				(MMC0_BASE + 0x098)
-#define MMC0_CARDTHRCTL				(MMC0_BASE + 0x100)
-
-#define CMD_UPDATE_CLK				0x80202000
-#define CMD_START_BIT				(1 << 31)
-
-#define MMC_8BIT_MODE				(1 << 16)
-
-#define MMC_BLOCK_SIZE				512
-
-#define BIT_CMD_RESPONSE_EXPECT			(1 << 6)
-#define BIT_CMD_LONG_RESPONSE			(1 << 7)
-#define BIT_CMD_CHECK_RESPONSE_CRC		(1 << 8)
-#define BIT_CMD_DATA_EXPECTED			(1 << 9)
-#define BIT_CMD_READ				(0 << 10)
-#define BIT_CMD_WRITE				(1 << 10)
-#define BIT_CMD_BLOCK_TRANSFER			(0 << 11)
-#define BIT_CMD_STREAM_TRANSFER			(1 << 11)
-#define BIT_CMD_SEND_AUTO_STOP			(1 << 12)
-#define BIT_CMD_WAIT_PRVDATA_COMPLETE		(1 << 13)
-#define BIT_CMD_STOP_ABORT_CMD			(1 << 14)
-#define BIT_CMD_SEND_INIT			(1 << 15)
-#define BIT_CMD_UPDATE_CLOCK_ONLY		(1 << 21)
-#define BIT_CMD_READ_CEATA_DEVICE		(1 << 22)
-#define BIT_CMD_CCS_EXPECTED			(1 << 23)
-#define BIT_CMD_ENABLE_BOOT			(1 << 24)
-#define BIT_CMD_EXPECT_BOOT_ACK			(1 << 25)
-#define BIT_CMD_DISABLE_BOOT			(1 << 26)
-#define BIT_CMD_MANDATORY_BOOT			(0 << 27)
-#define BIT_CMD_ALTERNATE_BOOT			(1 << 27)
-#define BIT_CMD_VOLT_SWITCH			(1 << 28)
-#define BIT_CMD_USE_HOLD_REG			(1 << 29)
-#define BIT_CMD_START				(1 << 31)
-
-#define MMC_INT_EBE			(1 << 15)	/* End-bit Err */
-#define MMC_INT_SBE			(1 << 13)	/* Start-bit  Err */
-#define MMC_INT_HLE			(1 << 12)	/* Hardware-lock Err */
-#define MMC_INT_FRUN			(1 << 11)	/* FIFO UN/OV RUN */
-#define MMC_INT_DRT			(1 << 9)	/* Data timeout */
-#define MMC_INT_RTO			(1 << 8)	/* Response timeout */
-#define MMC_INT_DCRC			(1 << 7)	/* Data CRC err */
-#define MMC_INT_RCRC			(1 << 6)	/* Response CRC err */
-#define MMC_INT_RXDR			(1 << 5)
-#define MMC_INT_TXDR			(1 << 4)
-#define MMC_INT_DTO			(1 << 3)	/* Data trans over */
-#define MMC_INT_CMD_DONE		(1 << 2)
-#define MMC_INT_RE			(1 << 1)
-
-#define EMMC_FIX_RCA				6
-
-/* bits in MMC0_CTRL */
-#define MMC_CTRL_RESET				(1 << 0)
-#define MMC_FIFO_RESET				(1 << 1)
-#define MMC_DMA_RESET				(1 << 2)
-#define MMC_INT_EN				(1 << 4)
-#define MMC_DMA_EN				(1 << 25)
-
-#define MMC_STS_DATA_BUSY			(1 << 9)
-
-#define MMC_STATUS_CURRENT_STATE_MASK	(0xf << 9)
-#define MMC_STATUS_CURRENT_STATE_SHIFT	9
-#define MMC_STATUS_READY_FOR_DATA	(1 << 8)
-#define MMC_STATUS_SWITCH_ERROR		(1 << 7)
-
-#define MMC_STATE_IDLE			0
-#define MMC_STATE_READY			1
-#define MMC_STATE_IDENT			2
-#define MMC_STATE_STBY			3
-#define MMC_STATE_TRAN			4
-#define MMC_STATE_DATA			5
-#define MMC_STATE_RCV			6
-#define MMC_STATE_PRG			7
-#define MMC_STATE_DIS			8
-#define MMC_STATE_BTST			9
-#define MMC_STATE_SLP			10
-
-#define EXT_CSD_CACHE_CTRL		33
-#define EXT_CSD_PARTITION_CONFIG	179
-
-#define PART_CFG_BOOT_PARTITION1_ENABLE	(1 << 3)
-#define PART_CFG_PARTITION1_ACCESS	(1 << 0)
-
-#define MMC_IDMAC_ENABLE			(1 << 7)
-#define MMC_IDMAC_FB				(1 << 1)
-#define MMC_IDMAC_SWRESET			(1 << 0)
-
-#define MMC_FIFO_TWMARK(x)			(x & 0xfff)
-#define MMC_FIFO_RWMARK(x)			((x & 0x1ff) << 16)
-#define MMC_DMA_BURST_SIZE(x)			((x & 0x7) << 28)
-
-#define MMC_CARD_RD_THR(x)			((x & 0xfff) << 16)
-#define MMC_CARD_RD_THR_EN			(1 << 0)
-
-extern int init_mmc(void);
-extern int mmc0_read(unsigned long, size_t, unsigned long, uint32_t);
-extern int mmc0_write(unsigned long, size_t, unsigned long, uint32_t);
-
-#endif /* __DW_MMC_H__ */
diff --git a/plat/hikey/include/hi6220.h b/plat/hikey/include/hi6220.h
deleted file mode 100644
index 7ec414d..0000000
--- a/plat/hikey/include/hi6220.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6220_H__
-#define __HI6220_H__
-
-#include <hi6220_regs_acpu.h>
-#include <hi6220_regs_ao.h>
-#include <hi6220_regs_peri.h>
-#include <hi6220_regs_pmctrl.h>
-
-#include <hisi_mcu.h>
-#include <hisi_sram_map.h>
-
-#define MEDIA_CTRL_BASE				0xf4410000
-#define MEDIA_SUBSYS_CTRL2			(MEDIA_CTRL_BASE + 0x508)
-#define MEDIA_SUBSYS_NOC_DFS			(MEDIA_CTRL_BASE + 0x510)
-#define MEDIA_SUBSYS_CTRL5			(MEDIA_CTRL_BASE + 0x51c)
-
-#define MMC0_BASE				0xf723d000
-#define MMC1_BASE				0xf723e000
-
-#define EDMAC_BASE				0xf7370000
-#define EDMAC_SEC_CTRL				(EDMAC_BASE + 0x694)
-#define EDMAC_AXI_CONF(x)			(EDMAC_BASE + 0x820 + (x << 6))
-
-#define PMUSSI_BASE				0xf8000000
-
-#define TIMER0_BASE				0xf8008000
-#define TIMER00_LOAD				(TIMER0_BASE + 0x000)
-#define TIMER00_VALUE				(TIMER0_BASE + 0x004)
-#define TIMER00_CONTROL				(TIMER0_BASE + 0x008)
-#define TIMER00_BGLOAD				(TIMER0_BASE + 0x018)
-
-#define GPIO0_BASE				0xf8011000
-#define GPIO1_BASE				0xf8012000
-#define GPIO2_BASE				0xf8013000
-#define GPIO3_BASE				0xf8014000
-#define GPIO4_BASE				0xf7020000
-#define GPIO5_BASE				0xf7021000
-#define GPIO6_BASE				0xf7022000
-#define GPIO7_BASE				0xf7023000
-#define GPIO8_BASE				0xf7024000
-#define GPIO9_BASE				0xf7025000
-#define GPIO10_BASE				0xf7026000
-#define GPIO11_BASE				0xf7027000
-#define GPIO12_BASE				0xf7028000
-#define GPIO13_BASE				0xf7029000
-#define GPIO14_BASE				0xf702a000
-#define GPIO15_BASE				0xf702b000
-#define GPIO16_BASE				0xf702c000
-#define GPIO17_BASE				0xf702d000
-#define GPIO18_BASE				0xf702e000
-#define GPIO19_BASE				0xf702f000
-
-extern void init_acpu_dvfs(void);
-
-#endif	/* __HI6220_H__ */
diff --git a/plat/hikey/include/hi6220_regs_acpu.h b/plat/hikey/include/hi6220_regs_acpu.h
deleted file mode 100644
index 19dc15d..0000000
--- a/plat/hikey/include/hi6220_regs_acpu.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6220_REGS_ACPU_H__
-#define __HI6220_REGS_ACPU_H__
-
-#define ACPU_CTRL_BASE				0xF6504000
-
-#define ACPU_SC_CPU_CTRL			(ACPU_CTRL_BASE + 0x000)
-#define ACPU_SC_CPU_STAT			(ACPU_CTRL_BASE + 0x008)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFIL2		(1 << 0)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFIL2_SHIFT		(0)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI0			(1 << 1)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI0_SHIFT		(1)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI1			(1 << 2)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI1_SHIFT		(2)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI2			(1 << 3)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI2_SHIFT		(3)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI3			(1 << 4)
-#define ACPU_SC_CPU_STAT_SC_STANDBYWFI3_SHIFT		(4)
-#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFIL2		(1 << 8)
-#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFIL2_SHIFT	(8)
-#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFI		(1 << 9)
-#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFI_SHIFT		(9)
-#define ACPU_SC_CPU_STAT_L2FLSHUDONE0			(1 << 16)
-#define ACPU_SC_CPU_STAT_L2FLSHUDONE0_SHIFT		(16)
-#define ACPU_SC_CPU_STAT_L2FLSHUDONE1			(1 << 17)
-#define ACPU_SC_CPU_STAT_L2FLSHUDONE1_SHIFT		(17)
-#define ACPU_SC_CPU_STAT_CCI400_ACTIVE			(1 << 18)
-#define ACPU_SC_CPU_STAT_CCI400_ACTIVE_SHIFT		(18)
-#define ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD		(1 << 20)
-#define ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD_SHIFT	(20)
-
-#define ACPU_SC_CLKEN				(ACPU_CTRL_BASE + 0x00c)
-#define HPM_L2_1_CLKEN				(1 << 9)
-#define G_CPU_1_CLKEN				(1 << 8)
-#define HPM_L2_CLKEN				(1 << 1)
-#define G_CPU_CLKEN				(1 << 0)
-
-#define ACPU_SC_CLKDIS				(ACPU_CTRL_BASE + 0x010)
-#define ACPU_SC_CLK_STAT			(ACPU_CTRL_BASE + 0x014)
-#define ACPU_SC_RSTEN				(ACPU_CTRL_BASE + 0x018)
-#define SRST_PRESET1_RSTEN			(1 << 11)
-#define SRST_PRESET0_RSTEN			(1 << 10)
-#define SRST_CLUSTER1_RSTEN			(1 << 9)
-#define SRST_CLUSTER0_RSTEN			(1 << 8)
-#define SRST_L2_HPM_1_RSTEN			(1 << 5)
-#define SRST_AARM_L2_1_RSTEN			(1 << 4)
-#define SRST_L2_HPM_0_RSTEN			(1 << 3)
-#define SRST_AARM_L2_0_RSTEN			(1 << 1)
-#define SRST_CLUSTER1				(SRST_PRESET1_RSTEN | \
-						 SRST_CLUSTER1_RSTEN | \
-						 SRST_L2_HPM_1_RSTEN | \
-						 SRST_AARM_L2_1_RSTEN)
-#define SRST_CLUSTER0				(SRST_PRESET0_RSTEN | \
-						 SRST_CLUSTER0_RSTEN | \
-						 SRST_L2_HPM_0_RSTEN | \
-						 SRST_AARM_L2_0_RSTEN)
-
-#define ACPU_SC_RSTDIS				(ACPU_CTRL_BASE + 0x01c)
-#define ACPU_SC_RST_STAT			(ACPU_CTRL_BASE + 0x020)
-#define ACPU_SC_PDBGUP_MBIST			(ACPU_CTRL_BASE + 0x02c)
-#define PDBGUP_CLUSTER1_SHIFT			8
-
-#define ACPU_SC_VD_CTRL				(ACPU_CTRL_BASE + 0x054)
-#define ACPU_SC_VD_MASK_PATTERN_CTRL		(ACPU_CTRL_BASE + 0x058)
-#define ACPU_SC_VD_MASK_PATTERN_VAL		(0xCCB << 12)
-#define ACPU_SC_VD_MASK_PATTERN_MASK		((0x1 << 13) - 1)
-
-#define ACPU_SC_VD_DLY_FIXED_CTRL		(ACPU_CTRL_BASE + 0x05c)
-#define ACPU_SC_VD_DLY_TABLE0_CTRL		(ACPU_CTRL_BASE + 0x060)
-#define ACPU_SC_VD_DLY_TABLE1_CTRL		(ACPU_CTRL_BASE + 0x064)
-#define ACPU_SC_VD_DLY_TABLE2_CTRL		(ACPU_CTRL_BASE + 0x068)
-#define ACPU_SC_VD_HPM_CTRL			(ACPU_CTRL_BASE + 0x06c)
-#define ACPU_SC_A53_CLUSTER_MTCMOS_EN		(ACPU_CTRL_BASE + 0x088)
-#define PW_MTCMOS_EN_A53_1_EN			(1 << 1)
-#define PW_MTCMOS_EN_A53_0_EN			(1 << 0)
-
-#define ACPU_SC_A53_CLUSTER_MTCMOS_STA		(ACPU_CTRL_BASE + 0x090)
-#define ACPU_SC_A53_CLUSTER_ISO_EN		(ACPU_CTRL_BASE + 0x098)
-#define PW_ISO_A53_1_EN				(1 << 1)
-#define PW_ISO_A53_0_EN				(1 << 0)
-
-#define ACPU_SC_A53_CLUSTER_ISO_DIS		(ACPU_CTRL_BASE + 0x09c)
-#define ACPU_SC_A53_CLUSTER_ISO_STA		(ACPU_CTRL_BASE + 0x0a0)
-#define ACPU_SC_A53_1_MTCMOS_TIMER		(ACPU_CTRL_BASE + 0x0b4)
-#define ACPU_SC_A53_0_MTCMOS_TIMER		(ACPU_CTRL_BASE + 0x0bc)
-#define ACPU_SC_A53_x_MTCMOS_TIMER(x)		((x) ? ACPU_SC_A53_1_MTCMOS_TIMER : ACPU_SC_A53_0_MTCMOS_TIMER)
-
-#define ACPU_SC_CPU0_CTRL			(ACPU_CTRL_BASE + 0x100)
-#define CPU_CTRL_AARCH64_MODE			(1 << 7)
-
-#define ACPU_SC_CPU0_STAT			(ACPU_CTRL_BASE + 0x104)
-#define ACPU_SC_CPU0_CLKEN			(ACPU_CTRL_BASE + 0x108)
-#define CPU_CLKEN_HPM				(1 << 1)
-
-#define ACPU_SC_CPU0_CLK_STAT			(ACPU_CTRL_BASE + 0x110)
-
-#define ACPU_SC_CPU0_RSTEN			(ACPU_CTRL_BASE + 0x114)
-#define ACPU_SC_CPU0_RSTDIS			(ACPU_CTRL_BASE + 0x118)
-#define ACPU_SC_CPU0_MTCMOS_EN			(ACPU_CTRL_BASE + 0x120)
-#define CPU_MTCMOS_PW				(1 << 0)
-
-#define ACPU_SC_CPU0_PW_ISOEN			(ACPU_CTRL_BASE + 0x130)
-#define CPU_PW_ISO				(1 << 0)
-
-#define ACPU_SC_CPU0_PW_ISODIS			(ACPU_CTRL_BASE + 0x134)
-#define ACPU_SC_CPU0_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x138)
-#define ACPU_SC_CPU0_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x154)
-#define CPU_MTCMOS_TIMER_STA			(1 << 0)
-
-#define ACPU_SC_CPU0_RVBARADDR			(ACPU_CTRL_BASE + 0x158)
-#define ACPU_SC_CPU1_CTRL			(ACPU_CTRL_BASE + 0x200)
-#define ACPU_SC_CPU1_STAT			(ACPU_CTRL_BASE + 0x204)
-#define ACPU_SC_CPU1_CLKEN			(ACPU_CTRL_BASE + 0x208)
-#define ACPU_SC_CPU1_CLK_STAT			(ACPU_CTRL_BASE + 0x210)
-#define ACPU_SC_CPU1_RSTEN			(ACPU_CTRL_BASE + 0x214)
-#define ACPU_SC_CPU1_RSTDIS			(ACPU_CTRL_BASE + 0x218)
-#define ACPU_SC_CPU1_MTCMOS_EN			(ACPU_CTRL_BASE + 0x220)
-#define ACPU_SC_CPU1_PW_ISODIS			(ACPU_CTRL_BASE + 0x234)
-#define ACPU_SC_CPU1_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x238)
-#define ACPU_SC_CPU1_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x254)
-#define ACPU_SC_CPU1_RVBARADDR			(ACPU_CTRL_BASE + 0x258)
-#define ACPU_SC_CPU2_CTRL			(ACPU_CTRL_BASE + 0x300)
-#define ACPU_SC_CPU2_STAT			(ACPU_CTRL_BASE + 0x304)
-#define ACPU_SC_CPU2_CLKEN			(ACPU_CTRL_BASE + 0x308)
-#define ACPU_SC_CPU2_CLK_STAT			(ACPU_CTRL_BASE + 0x310)
-#define ACPU_SC_CPU2_RSTEN			(ACPU_CTRL_BASE + 0x314)
-#define ACPU_SC_CPU2_RSTDIS			(ACPU_CTRL_BASE + 0x318)
-#define ACPU_SC_CPU2_MTCMOS_EN			(ACPU_CTRL_BASE + 0x320)
-#define ACPU_SC_CPU2_PW_ISODIS			(ACPU_CTRL_BASE + 0x334)
-#define ACPU_SC_CPU2_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x338)
-#define ACPU_SC_CPU2_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x354)
-#define ACPU_SC_CPU2_RVBARADDR			(ACPU_CTRL_BASE + 0x358)
-#define ACPU_SC_CPU3_CTRL			(ACPU_CTRL_BASE + 0x400)
-#define ACPU_SC_CPU3_STAT			(ACPU_CTRL_BASE + 0x404)
-#define ACPU_SC_CPU3_CLKEN			(ACPU_CTRL_BASE + 0x408)
-#define ACPU_SC_CPU3_CLK_STAT			(ACPU_CTRL_BASE + 0x410)
-#define ACPU_SC_CPU3_RSTEN			(ACPU_CTRL_BASE + 0x414)
-#define ACPU_SC_CPU3_RSTDIS			(ACPU_CTRL_BASE + 0x418)
-#define ACPU_SC_CPU3_MTCMOS_EN			(ACPU_CTRL_BASE + 0x420)
-#define ACPU_SC_CPU3_PW_ISODIS			(ACPU_CTRL_BASE + 0x434)
-#define ACPU_SC_CPU3_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x438)
-#define ACPU_SC_CPU3_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x454)
-#define ACPU_SC_CPU3_RVBARADDR			(ACPU_CTRL_BASE + 0x458)
-#define ACPU_SC_CPU4_CTRL			(ACPU_CTRL_BASE + 0x500)
-#define ACPU_SC_CPU4_STAT			(ACPU_CTRL_BASE + 0x504)
-#define ACPU_SC_CPU4_CLKEN			(ACPU_CTRL_BASE + 0x508)
-#define ACPU_SC_CPU4_CLK_STAT			(ACPU_CTRL_BASE + 0x510)
-#define ACPU_SC_CPU4_RSTEN			(ACPU_CTRL_BASE + 0x514)
-#define ACPU_SC_CPU4_RSTDIS			(ACPU_CTRL_BASE + 0x518)
-#define ACPU_SC_CPU4_MTCMOS_EN			(ACPU_CTRL_BASE + 0x520)
-#define ACPU_SC_CPU4_PW_ISODIS			(ACPU_CTRL_BASE + 0x534)
-#define ACPU_SC_CPU4_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x538)
-#define ACPU_SC_CPU4_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x554)
-#define ACPU_SC_CPU4_RVBARADDR			(ACPU_CTRL_BASE + 0x558)
-#define ACPU_SC_CPU5_CTRL			(ACPU_CTRL_BASE + 0x600)
-#define ACPU_SC_CPU5_STAT			(ACPU_CTRL_BASE + 0x604)
-#define ACPU_SC_CPU5_CLKEN			(ACPU_CTRL_BASE + 0x608)
-#define ACPU_SC_CPU5_CLK_STAT			(ACPU_CTRL_BASE + 0x610)
-#define ACPU_SC_CPU5_RSTEN			(ACPU_CTRL_BASE + 0x614)
-#define ACPU_SC_CPU5_RSTDIS			(ACPU_CTRL_BASE + 0x618)
-#define ACPU_SC_CPU5_MTCMOS_EN			(ACPU_CTRL_BASE + 0x620)
-#define ACPU_SC_CPU5_PW_ISODIS			(ACPU_CTRL_BASE + 0x634)
-#define ACPU_SC_CPU5_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x638)
-#define ACPU_SC_CPU5_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x654)
-#define ACPU_SC_CPU5_RVBARADDR			(ACPU_CTRL_BASE + 0x658)
-#define ACPU_SC_CPU6_CTRL			(ACPU_CTRL_BASE + 0x700)
-#define ACPU_SC_CPU6_STAT			(ACPU_CTRL_BASE + 0x704)
-#define ACPU_SC_CPU6_CLKEN			(ACPU_CTRL_BASE + 0x708)
-#define ACPU_SC_CPU6_CLK_STAT			(ACPU_CTRL_BASE + 0x710)
-#define ACPU_SC_CPU6_RSTEN			(ACPU_CTRL_BASE + 0x714)
-#define ACPU_SC_CPU6_RSTDIS			(ACPU_CTRL_BASE + 0x718)
-#define ACPU_SC_CPU6_MTCMOS_EN			(ACPU_CTRL_BASE + 0x720)
-#define ACPU_SC_CPU6_PW_ISODIS			(ACPU_CTRL_BASE + 0x734)
-#define ACPU_SC_CPU6_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x738)
-#define ACPU_SC_CPU6_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x754)
-#define ACPU_SC_CPU6_RVBARADDR			(ACPU_CTRL_BASE + 0x758)
-#define ACPU_SC_CPU7_CTRL			(ACPU_CTRL_BASE + 0x800)
-#define ACPU_SC_CPU7_STAT			(ACPU_CTRL_BASE + 0x804)
-#define ACPU_SC_CPU7_CLKEN			(ACPU_CTRL_BASE + 0x808)
-#define ACPU_SC_CPU7_CLK_STAT			(ACPU_CTRL_BASE + 0x810)
-#define ACPU_SC_CPU7_RSTEN			(ACPU_CTRL_BASE + 0x814)
-#define ACPU_SC_CPU7_RSTDIS			(ACPU_CTRL_BASE + 0x818)
-#define ACPU_SC_CPU7_MTCMOS_EN			(ACPU_CTRL_BASE + 0x820)
-#define ACPU_SC_CPU7_PW_ISODIS			(ACPU_CTRL_BASE + 0x834)
-#define ACPU_SC_CPU7_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x838)
-#define ACPU_SC_CPU7_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x854)
-#define ACPU_SC_CPU7_RVBARADDR			(ACPU_CTRL_BASE + 0x858)
-#define ACPU_SC_CPUx_CTRL(x)			((x < 8) ? (ACPU_SC_CPU0_CTRL + 0x100 * x) : ACPU_SC_CPU0_CTRL)
-#define ACPU_SC_CPUx_STAT(x)			((x < 8) ? (ACPU_SC_CPU0_STAT + 0x100 * x) : ACPU_SC_CPU0_STAT)
-#define ACPU_SC_CPUx_CLKEN(x)			((x < 8) ? (ACPU_SC_CPU0_CLKEN + 0x100 * x) : ACPU_SC_CPU0_CLKEN)
-#define ACPU_SC_CPUx_CLK_STAT(x)		((x < 8) ? (ACPU_SC_CPU0_CLK_STAT + 0x100 * x) : ACPU_SC_CPU0_CLK_STAT)
-#define ACPU_SC_CPUx_RSTEN(x)			((x < 8) ? (ACPU_SC_CPU0_RSTEN + 0x100 * x) : ACPU_SC_CPU0_RSTEN)
-#define ACPU_SC_CPUx_RSTDIS(x)			((x < 8) ? (ACPU_SC_CPU0_RSTDIS + 0x100 * x) : ACPU_SC_CPU0_RSTDIS)
-#define ACPU_SC_CPUx_MTCMOS_EN(x)		((x < 8) ? (ACPU_SC_CPU0_MTCMOS_EN + 0x100 * x) : ACPU_SC_CPU0_MTCMOS_EN)
-#define ACPU_SC_CPUx_PW_ISODIS(x)		((x < 8) ? (ACPU_SC_CPU0_PW_ISODIS + 0x100 * x) : ACPU_SC_CPU0_PW_ISODIS)
-#define ACPU_SC_CPUx_PW_ISO_STAT(x)		((x < 8) ? (ACPU_SC_CPU0_PW_ISO_STAT + 0x100 * x) : ACPU_SC_CPU0_PW_ISO_STAT)
-#define ACPU_SC_CPUx_MTCMOS_TIMER_STAT(x)	((x < 8) ? (ACPU_SC_CPU0_MTCMOS_TIMER_STAT + 0x100 * x) : ACPU_SC_CPU0_MTCMOS_TIMER_STAT)
-#define ACPU_SC_CPUx_RVBARADDR(x)		((x < 8) ? (ACPU_SC_CPU0_RVBARADDR + 0x100 * x) : ACPU_SC_CPU0_RVBARADDR)
-
-#define ACPU_SC_CPU_STAT_CLKDIV_VD_MASK		(3 << 20)
-
-#define ACPU_SC_VD_CTRL_TUNE_EN_DIF		(1 << 0)
-#define ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT	(0)
-#define ACPU_SC_VD_CTRL_TUNE			(1 << 1)
-#define ACPU_SC_VD_CTRL_TUNE_SHIFT		(1)
-#define ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF	(1 << 7)
-#define ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT	(7)
-#define ACPU_SC_VD_CTRL_CALIBRATE_EN_INI	(1 << 8)
-#define ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT	(8)
-#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_CLR		(1 << 9)
-#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_CLR_SHIFT	(9)
-#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN		(1 << 10)
-#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT	(10)
-#define ACPU_SC_VD_CTRL_TUNE_EN_INT		(1 << 11)
-#define ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT	(11)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE0		(1 << 12)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE0_MASK	(0xf << 12)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT	(12)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE1		(1 << 16)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE1_MASK	(0xf << 16)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT	(16)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE2		(1 << 20)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE2_MASK	(0xf << 20)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT	(20)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE3		(1 << 24)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE3_MASK	(0xf << 24)
-#define ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT	(24)
-#define ACPU_SC_VD_CTRL_FORCE_CLK_EN		(1 << 28)
-#define ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT	(28)
-#define ACPU_SC_VD_CTRL_DIV_EN_DIF		(1 << 29)
-#define ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT	(29)
-
-#define ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL			\
-	((0x1 << ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT) |	\
-	 (0x3 << ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT) |	\
-	 (0x5 << ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT) |	\
-	 (0x6 << ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT) |	\
-	 (0x7 << ACPU_SC_VD_CTRL_TUNE_SHIFT))
-
-#define ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK		\
-	((0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT) |	\
-	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT) |	\
-	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT) |	\
-	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT) |	\
-	 (0x3F << ACPU_SC_VD_CTRL_TUNE_SHIFT))
-
-#define ACPU_SC_VD_HPM_CTRL_OSC_DIV		(1 << 0)
-#define ACPU_SC_VD_HPM_CTRL_OSC_DIV_SHIFT	(0)
-#define ACPU_SC_VD_HPM_CTRL_OSC_DIV_MASK	(0x000000FF)
-#define ACPU_SC_VD_HPM_CTRL_DLY_EXP		(1 << 8)
-#define ACPU_SC_VD_HPM_CTRL_DLY_EXP_SHIFT	(8)
-#define ACPU_SC_VD_HPM_CTRL_DLY_EXP_MASK	(0x001FFF00)
-
-#define HPM_OSC_DIV_VAL \
-	(0x56 << ACPU_SC_VD_HPM_CTRL_OSC_DIV_SHIFT)
-#define HPM_OSC_DIV_MASK \
-	(ACPU_SC_VD_HPM_CTRL_OSC_DIV_MASK)
-
-#define HPM_DLY_EXP_VAL \
-	(0xC7A << ACPU_SC_VD_HPM_CTRL_DLY_EXP_SHIFT)
-#define HPM_DLY_EXP_MASK \
-	(ACPU_SC_VD_HPM_CTRL_DLY_EXP_MASK)
-
-#define ACPU_SC_VD_EN_ASIC_VAL					\
-	((0x0 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
-
-#define ACPU_SC_VD_EN_SFT_VAL					\
-	((0x0 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
-	 (0x0 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
-	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
-
-#define ACPU_SC_VD_EN_MASK					\
-	((0x1 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
-	 (0x1 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
-	 (0x1 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
-	 (0x1 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
-	 (0x1 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
-	 (0x1 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
-	 (0x1 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
-
-#endif /* __HI6220_REGS_ACPU_H__ */
diff --git a/plat/hikey/include/hi6220_regs_ao.h b/plat/hikey/include/hi6220_regs_ao.h
deleted file mode 100644
index 448d18e..0000000
--- a/plat/hikey/include/hi6220_regs_ao.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6220_AO_H__
-#define __HI6220_AO_H__
-
-#define AO_CTRL_BASE				0xF7800000
-
-#define AO_SC_SYS_CTRL0				(AO_CTRL_BASE + 0x000)
-#define AO_SC_SYS_CTRL1				(AO_CTRL_BASE + 0x004)
-#define AO_SC_SYS_CTRL2				(AO_CTRL_BASE + 0x008)
-#define AO_SC_SYS_STAT0				(AO_CTRL_BASE + 0x010)
-#define AO_SC_SYS_STAT1				(AO_CTRL_BASE + 0x014)
-#define AO_SC_MCU_IMCTRL			(AO_CTRL_BASE + 0x018)
-#define AO_SC_MCU_IMSTAT			(AO_CTRL_BASE + 0x01C)
-#define AO_SC_SECONDRY_INT_EN0			(AO_CTRL_BASE + 0x044)
-#define AO_SC_SECONDRY_INT_STATR0		(AO_CTRL_BASE + 0x048)
-#define AO_SC_SECONDRY_INT_STATM0		(AO_CTRL_BASE + 0x04C)
-#define AO_SC_MCU_WKUP_INT_EN6			(AO_CTRL_BASE + 0x054)
-#define AO_SC_MCU_WKUP_INT_STATR6		(AO_CTRL_BASE + 0x058)
-#define AO_SC_MCU_WKUP_INT_STATM6		(AO_CTRL_BASE + 0x05C)
-#define AO_SC_MCU_WKUP_INT_EN5			(AO_CTRL_BASE + 0x064)
-#define AO_SC_MCU_WKUP_INT_STATR5		(AO_CTRL_BASE + 0x068)
-#define AO_SC_MCU_WKUP_INT_STATM5		(AO_CTRL_BASE + 0x06C)
-#define AO_SC_MCU_WKUP_INT_EN4			(AO_CTRL_BASE + 0x094)
-#define AO_SC_MCU_WKUP_INT_STATR4		(AO_CTRL_BASE + 0x098)
-#define AO_SC_MCU_WKUP_INT_STATM4		(AO_CTRL_BASE + 0x09C)
-#define AO_SC_MCU_WKUP_INT_EN0			(AO_CTRL_BASE + 0x0A8)
-#define AO_SC_MCU_WKUP_INT_STATR0		(AO_CTRL_BASE + 0x0AC)
-#define AO_SC_MCU_WKUP_INT_STATM0		(AO_CTRL_BASE + 0x0B0)
-#define AO_SC_MCU_WKUP_INT_EN1			(AO_CTRL_BASE + 0x0B4)
-#define AO_SC_MCU_WKUP_INT_STATR1		(AO_CTRL_BASE + 0x0B8)
-#define AO_SC_MCU_WKUP_INT_STATM1		(AO_CTRL_BASE + 0x0BC)
-#define AO_SC_INT_STATR				(AO_CTRL_BASE + 0x0C4)
-#define AO_SC_INT_STATM				(AO_CTRL_BASE + 0x0C8)
-#define AO_SC_INT_CLEAR				(AO_CTRL_BASE + 0x0CC)
-#define AO_SC_INT_EN_SET			(AO_CTRL_BASE + 0x0D0)
-#define AO_SC_INT_EN_DIS			(AO_CTRL_BASE + 0x0D4)
-#define AO_SC_INT_EN_STAT			(AO_CTRL_BASE + 0x0D8)
-#define AO_SC_INT_STATR1			(AO_CTRL_BASE + 0x0E4)
-#define AO_SC_INT_STATM1			(AO_CTRL_BASE + 0x0E8)
-#define AO_SC_INT_CLEAR1			(AO_CTRL_BASE + 0x0EC)
-#define AO_SC_INT_EN_SET1			(AO_CTRL_BASE + 0x0F0)
-#define AO_SC_INT_EN_DIS1			(AO_CTRL_BASE + 0x0F4)
-#define AO_SC_INT_EN_STAT1			(AO_CTRL_BASE + 0x0F8)
-#define AO_SC_TIMER_EN0				(AO_CTRL_BASE + 0x1D0)
-#define AO_SC_TIMER_EN1				(AO_CTRL_BASE + 0x1D4)
-#define AO_SC_TIMER_EN4				(AO_CTRL_BASE + 0x1F0)
-#define AO_SC_TIMER_EN5				(AO_CTRL_BASE + 0x1F4)
-#define AO_SC_MCU_SUBSYS_CTRL0			(AO_CTRL_BASE + 0x400)
-#define AO_SC_MCU_SUBSYS_CTRL1			(AO_CTRL_BASE + 0x404)
-#define AO_SC_MCU_SUBSYS_CTRL2			(AO_CTRL_BASE + 0x408)
-#define AO_SC_MCU_SUBSYS_CTRL3			(AO_CTRL_BASE + 0x40C)
-#define AO_SC_MCU_SUBSYS_CTRL4			(AO_CTRL_BASE + 0x410)
-#define AO_SC_MCU_SUBSYS_CTRL5			(AO_CTRL_BASE + 0x414)
-#define AO_SC_MCU_SUBSYS_CTRL6			(AO_CTRL_BASE + 0x418)
-#define AO_SC_MCU_SUBSYS_CTRL7			(AO_CTRL_BASE + 0x41C)
-#define AO_SC_MCU_SUBSYS_STAT0			(AO_CTRL_BASE + 0x440)
-#define AO_SC_MCU_SUBSYS_STAT1			(AO_CTRL_BASE + 0x444)
-#define AO_SC_MCU_SUBSYS_STAT2			(AO_CTRL_BASE + 0x448)
-#define AO_SC_MCU_SUBSYS_STAT3			(AO_CTRL_BASE + 0x44C)
-#define AO_SC_MCU_SUBSYS_STAT4			(AO_CTRL_BASE + 0x450)
-#define AO_SC_MCU_SUBSYS_STAT5			(AO_CTRL_BASE + 0x454)
-#define AO_SC_MCU_SUBSYS_STAT6			(AO_CTRL_BASE + 0x458)
-#define AO_SC_MCU_SUBSYS_STAT7			(AO_CTRL_BASE + 0x45C)
-#define AO_SC_PERIPH_CLKEN4			(AO_CTRL_BASE + 0x630)
-#define AO_SC_PERIPH_CLKDIS4			(AO_CTRL_BASE + 0x634)
-#define AO_SC_PERIPH_CLKSTAT4			(AO_CTRL_BASE + 0x638)
-#define AO_SC_PERIPH_CLKEN5			(AO_CTRL_BASE + 0x63C)
-#define AO_SC_PERIPH_CLKDIS5			(AO_CTRL_BASE + 0x640)
-#define AO_SC_PERIPH_CLKSTAT5			(AO_CTRL_BASE + 0x644)
-#define AO_SC_PERIPH_RSTEN4			(AO_CTRL_BASE + 0x6F0)
-#define AO_SC_PERIPH_RSTDIS4			(AO_CTRL_BASE + 0x6F4)
-#define AO_SC_PERIPH_RSTSTAT4			(AO_CTRL_BASE + 0x6F8)
-#define AO_SC_PERIPH_RSTEN5			(AO_CTRL_BASE + 0x6FC)
-#define AO_SC_PERIPH_RSTDIS5			(AO_CTRL_BASE + 0x700)
-#define AO_SC_PERIPH_RSTSTAT5			(AO_CTRL_BASE + 0x704)
-#define AO_SC_PW_CLKEN0				(AO_CTRL_BASE + 0x800)
-#define AO_SC_PW_CLKDIS0			(AO_CTRL_BASE + 0x804)
-#define AO_SC_PW_CLK_STAT0			(AO_CTRL_BASE + 0x808)
-#define AO_SC_PW_RSTEN0				(AO_CTRL_BASE + 0x810)
-#define AO_SC_PW_RSTDIS0			(AO_CTRL_BASE + 0x814)
-#define AO_SC_PW_RST_STAT0			(AO_CTRL_BASE + 0x818)
-#define AO_SC_PW_ISOEN0				(AO_CTRL_BASE + 0x820)
-#define AO_SC_PW_ISODIS0			(AO_CTRL_BASE + 0x824)
-#define AO_SC_PW_ISO_STAT0			(AO_CTRL_BASE + 0x828)
-#define AO_SC_PW_MTCMOS_EN0			(AO_CTRL_BASE + 0x830)
-#define AO_SC_PW_MTCMOS_DIS0			(AO_CTRL_BASE + 0x834)
-#define AO_SC_PW_MTCMOS_STAT0			(AO_CTRL_BASE + 0x838)
-#define AO_SC_PW_MTCMOS_ACK_STAT0		(AO_CTRL_BASE + 0x83C)
-#define AO_SC_PW_MTCMOS_TIMEOUT_STAT0		(AO_CTRL_BASE + 0x840)
-#define AO_SC_PW_STAT0				(AO_CTRL_BASE + 0x850)
-#define AO_SC_PW_STAT1				(AO_CTRL_BASE + 0x854)
-#define AO_SC_SYSTEST_STAT			(AO_CTRL_BASE + 0x880)
-#define AO_SC_SYSTEST_SLICER_CNT0		(AO_CTRL_BASE + 0x890)
-#define AO_SC_SYSTEST_SLICER_CNT1		(AO_CTRL_BASE + 0x894)
-#define AO_SC_PW_CTRL1				(AO_CTRL_BASE + 0x8C8)
-#define AO_SC_PW_CTRL				(AO_CTRL_BASE + 0x8CC)
-#define AO_SC_MCPU_VOTEEN			(AO_CTRL_BASE + 0x8D0)
-#define AO_SC_MCPU_VOTEDIS			(AO_CTRL_BASE + 0x8D4)
-#define AO_SC_MCPU_VOTESTAT			(AO_CTRL_BASE + 0x8D8)
-#define AO_SC_MCPU_VOTE_MSK0			(AO_CTRL_BASE + 0x8E0)
-#define AO_SC_MCPU_VOTE_MSK1			(AO_CTRL_BASE + 0x8E4)
-#define AO_SC_MCPU_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x8E8)
-#define AO_SC_MCPU_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x8EC)
-#define AO_SC_PERI_VOTEEN			(AO_CTRL_BASE + 0x8F0)
-#define AO_SC_PERI_VOTEDIS			(AO_CTRL_BASE + 0x8F4)
-#define AO_SC_PERI_VOTESTAT			(AO_CTRL_BASE + 0x8F8)
-#define AO_SC_PERI_VOTE_MSK0			(AO_CTRL_BASE + 0x900)
-#define AO_SC_PERI_VOTE_MSK1			(AO_CTRL_BASE + 0x904)
-#define AO_SC_PERI_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x908)
-#define AO_SC_PERI_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x90C)
-#define AO_SC_ACPU_VOTEEN			(AO_CTRL_BASE + 0x910)
-#define AO_SC_ACPU_VOTEDIS			(AO_CTRL_BASE + 0x914)
-#define AO_SC_ACPU_VOTESTAT			(AO_CTRL_BASE + 0x918)
-#define AO_SC_ACPU_VOTE_MSK0			(AO_CTRL_BASE + 0x920)
-#define AO_SC_ACPU_VOTE_MSK1			(AO_CTRL_BASE + 0x924)
-#define AO_SC_ACPU_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x928)
-#define AO_SC_ACPU_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x92C)
-#define AO_SC_MCU_VOTEEN			(AO_CTRL_BASE + 0x930)
-#define AO_SC_MCU_VOTEDIS			(AO_CTRL_BASE + 0x934)
-#define AO_SC_MCU_VOTESTAT			(AO_CTRL_BASE + 0x938)
-#define AO_SC_MCU_VOTE_MSK0			(AO_CTRL_BASE + 0x940)
-#define AO_SC_MCU_VOTE_MSK1			(AO_CTRL_BASE + 0x944)
-#define AO_SC_MCU_VOTESTAT0_MSK			(AO_CTRL_BASE + 0x948)
-#define AO_SC_MCU_VOTESTAT1_MSK			(AO_CTRL_BASE + 0x94C)
-#define AO_SC_MCU_VOTE1EN			(AO_CTRL_BASE + 0x960)
-#define AO_SC_MCU_VOTE1DIS			(AO_CTRL_BASE + 0x964)
-#define AO_SC_MCU_VOTE1STAT			(AO_CTRL_BASE + 0x968)
-#define AO_SC_MCU_VOTE1_MSK0			(AO_CTRL_BASE + 0x970)
-#define AO_SC_MCU_VOTE1_MSK1			(AO_CTRL_BASE + 0x974)
-#define AO_SC_MCU_VOTE1STAT0_MSK		(AO_CTRL_BASE + 0x978)
-#define AO_SC_MCU_VOTE1STAT1_MSK		(AO_CTRL_BASE + 0x97C)
-#define AO_SC_MCU_VOTE2EN			(AO_CTRL_BASE + 0x980)
-#define AO_SC_MCU_VOTE2DIS			(AO_CTRL_BASE + 0x984)
-#define AO_SC_MCU_VOTE2STAT			(AO_CTRL_BASE + 0x988)
-#define AO_SC_MCU_VOTE2_MSK0			(AO_CTRL_BASE + 0x990)
-#define AO_SC_MCU_VOTE2_MSK1			(AO_CTRL_BASE + 0x994)
-#define AO_SC_MCU_VOTE2STAT0_MSK		(AO_CTRL_BASE + 0x998)
-#define AO_SC_MCU_VOTE2STAT1_MSK		(AO_CTRL_BASE + 0x99C)
-#define AO_SC_VOTE_CTRL				(AO_CTRL_BASE + 0x9A0)
-#define AO_SC_VOTE_STAT				(AO_CTRL_BASE + 0x9A4)
-#define AO_SC_ECONUM				(AO_CTRL_BASE + 0xF00)
-#define AO_SCCHIPID				(AO_CTRL_BASE + 0xF10)
-#define AO_SCSOCID				(AO_CTRL_BASE + 0xF1C)
-#define AO_SC_SOC_FPGA_RTL_DEF			(AO_CTRL_BASE + 0xFE0)
-#define AO_SC_SOC_FPGA_PR_DEF			(AO_CTRL_BASE + 0xFE4)
-#define AO_SC_SOC_FPGA_RES_DEF0			(AO_CTRL_BASE + 0xFE8)
-#define AO_SC_SOC_FPGA_RES_DEF1			(AO_CTRL_BASE + 0xFEC)
-#define AO_SC_XTAL_CTRL0			(AO_CTRL_BASE + 0x102)
-#define AO_SC_XTAL_CTRL1			(AO_CTRL_BASE + 0x102)
-#define AO_SC_XTAL_CTRL3			(AO_CTRL_BASE + 0x103)
-#define AO_SC_XTAL_CTRL5			(AO_CTRL_BASE + 0x103)
-#define AO_SC_XTAL_STAT0			(AO_CTRL_BASE + 0x106)
-#define AO_SC_XTAL_STAT1			(AO_CTRL_BASE + 0x107)
-#define AO_SC_EFUSE_CHIPID0			(AO_CTRL_BASE + 0x108)
-#define AO_SC_EFUSE_CHIPID1			(AO_CTRL_BASE + 0x108)
-#define AO_SC_EFUSE_SYS_CTRL			(AO_CTRL_BASE + 0x108)
-#define AO_SC_DEBUG_CTRL1			(AO_CTRL_BASE + 0x128)
-#define AO_SC_DBG_STAT				(AO_CTRL_BASE + 0x12B)
-#define AO_SC_ARM_DBG_KEY0			(AO_CTRL_BASE + 0x12B)
-#define AO_SC_RESERVED31			(AO_CTRL_BASE + 0x13A)
-#define AO_SC_RESERVED32			(AO_CTRL_BASE + 0x13A)
-#define AO_SC_RESERVED33			(AO_CTRL_BASE + 0x13A)
-#define AO_SC_RESERVED34			(AO_CTRL_BASE + 0x13A)
-#define AO_SC_RESERVED35			(AO_CTRL_BASE + 0x13B)
-#define AO_SC_RESERVED36			(AO_CTRL_BASE + 0x13B)
-#define AO_SC_RESERVED37			(AO_CTRL_BASE + 0x13B)
-#define AO_SC_RESERVED38			(AO_CTRL_BASE + 0x13B)
-#define AO_SC_ALWAYSON_SYS_CTRL0		(AO_CTRL_BASE + 0x148)
-#define AO_SC_ALWAYSON_SYS_CTRL1		(AO_CTRL_BASE + 0x148)
-#define AO_SC_ALWAYSON_SYS_CTRL2		(AO_CTRL_BASE + 0x148)
-#define AO_SC_ALWAYSON_SYS_CTRL3		(AO_CTRL_BASE + 0x148)
-#define AO_SC_ALWAYSON_SYS_CTRL10		(AO_CTRL_BASE + 0x14A)
-#define AO_SC_ALWAYSON_SYS_CTRL11		(AO_CTRL_BASE + 0x14A)
-#define AO_SC_ALWAYSON_SYS_STAT0		(AO_CTRL_BASE + 0x14C)
-#define AO_SC_ALWAYSON_SYS_STAT1		(AO_CTRL_BASE + 0x14C)
-#define AO_SC_ALWAYSON_SYS_STAT2		(AO_CTRL_BASE + 0x14C)
-#define AO_SC_ALWAYSON_SYS_STAT3		(AO_CTRL_BASE + 0x14C)
-#define AO_SC_PWUP_TIME0			(AO_CTRL_BASE + 0x188)
-#define AO_SC_PWUP_TIME1			(AO_CTRL_BASE + 0x188)
-#define AO_SC_PWUP_TIME2			(AO_CTRL_BASE + 0x188)
-#define AO_SC_PWUP_TIME3			(AO_CTRL_BASE + 0x188)
-#define AO_SC_PWUP_TIME4			(AO_CTRL_BASE + 0x189)
-#define AO_SC_PWUP_TIME5			(AO_CTRL_BASE + 0x189)
-#define AO_SC_PWUP_TIME6			(AO_CTRL_BASE + 0x189)
-#define AO_SC_PWUP_TIME7			(AO_CTRL_BASE + 0x189)
-#define AO_SC_SECURITY_CTRL1			(AO_CTRL_BASE + 0x1C0)
-#define AO_SC_SYSTEST_SLICER_CNT0		(AO_CTRL_BASE + 0x890)
-#define AO_SC_SYSTEST_SLICER_CNT1		(AO_CTRL_BASE + 0x894)
-
-#define AO_SC_SYS_CTRL0_MODE_NORMAL				0x004
-#define AO_SC_SYS_CTRL0_MODE_MASK				0x007
-
-#define AO_SC_SYS_CTRL1_AARM_WD_RST_CFG				(1 << 0)
-#define AO_SC_SYS_CTRL1_REMAP_SRAM_AARM				(1 << 1)
-#define AO_SC_SYS_CTRL1_EFUSEC_REMAP				(1 << 2)
-#define AO_SC_SYS_CTRL1_EXT_PLL_SEL				(1 << 3)
-#define AO_SC_SYS_CTRL1_MCU_WDG0_RSTMCU_CFG			(1 << 4)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_DE_BOUNCE_CFG			(1 << 6)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_CFG			(1 << 7)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_DE_BOUNCE_CFG			(1 << 8)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_CFG			(1 << 9)
-#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG			(1 << 10)
-#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG1			(1 << 11)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_SFT			(1 << 12)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_SFT			(1 << 13)
-#define AO_SC_SYS_CTRL1_MCU_CLKEN_HARDCFG			(1 << 15)
-#define AO_SC_SYS_CTRL1_AARM_WD_RST_CFG_MSK			(1 << 16)
-#define AO_SC_SYS_CTRL1_REMAP_SRAM_AARM_MSK			(1 << 17)
-#define AO_SC_SYS_CTRL1_EFUSEC_REMAP_MSK			(1 << 18)
-#define AO_SC_SYS_CTRL1_EXT_PLL_SEL_MSK				(1 << 19)
-#define AO_SC_SYS_CTRL1_MCU_WDG0_RSTMCU_CFG_MSK			(1 << 20)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_DE_BOUNCE_CFG_MSK		(1 << 22)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_CFG_MSK			(1 << 23)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_DE_BOUNCE_CFG_MSK		(1 << 24)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_CFG_MSK			(1 << 25)
-#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG_MSK			(1 << 26)
-#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG1_MSK		(1 << 27)
-#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_SFT_MSK			(1 << 28)
-#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_SFT_MSK			(1 << 29)
-#define AO_SC_SYS_CTRL1_MCU_CLKEN_HARDCFG_MSK			(1 << 31)
-
-#define AO_SC_SYS_CTRL2_MCU_SFT_RST_STAT_CLEAR			(1 << 26)
-#define AO_SC_SYS_CTRL2_MCU_WDG0_RST_STAT_CLEAR			(1 << 27)
-#define AO_SC_SYS_CTRL2_TSENSOR_RST_STAT_CLEAR			(1 << 28)
-#define AO_SC_SYS_CTRL2_ACPU_WDG_RST_STAT_CLEAR			(1 << 29)
-#define AO_SC_SYS_CTRL2_MCU_WDG1_RST_STAT_CLEAR			(1 << 30)
-#define AO_SC_SYS_CTRL2_GLB_SRST_STAT_CLEAR			(1 << 31)
-
-#define AO_SC_SYS_STAT0_MCU_RST_STAT				(1 << 25)
-#define AO_SC_SYS_STAT0_MCU_SOFTRST_STAT			(1 << 26)
-#define AO_SC_SYS_STAT0_MCU_WDGRST_STAT				(1 << 27)
-#define AO_SC_SYS_STAT0_TSENSOR_HARDRST_STAT			(1 << 28)
-#define AO_SC_SYS_STAT0_ACPU_WD_GLB_RST_STAT			(1 << 29)
-#define AO_SC_SYS_STAT0_CM3_WDG1_RST_STAT			(1 << 30)
-#define AO_SC_SYS_STAT0_GLB_SRST_STAT				(1 << 31)
-
-#define AO_SC_SYS_STAT1_MODE_STATUS				(1 << 0)
-#define AO_SC_SYS_STAT1_BOOT_SEL_LOCK				(1 << 16)
-#define AO_SC_SYS_STAT1_FUNC_MODE_LOCK				(1 << 17)
-#define AO_SC_SYS_STAT1_BOOT_MODE_LOCK				(1 << 19)
-#define AO_SC_SYS_STAT1_FUN_JTAG_MODE_OUT			(1 << 20)
-#define AO_SC_SYS_STAT1_SECURITY_BOOT_FLG			(1 << 27)
-#define AO_SC_SYS_STAT1_EFUSE_NANDBOOT_MSK			(1 << 28)
-#define AO_SC_SYS_STAT1_EFUSE_NAND_BITWIDE			(1 << 29)
-
-#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N			(1 << 0)
-#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_SYS_N			(1 << 1)
-#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_POR_N			(1 << 2)
-#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_DAP_N			(1 << 3)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_TIMER0_N		(1 << 4)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_TIMER1_N		(1 << 5)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_WDT0_N			(1 << 6)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_WDT1_N			(1 << 7)
-#define AO_SC_PERIPH_RSTDIS4_HRESET_IPC_S_N			(1 << 8)
-#define AO_SC_PERIPH_RSTDIS4_HRESET_IPC_NS_N			(1 << 9)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_EFUSEC_N			(1 << 10)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT0_N			(1 << 12)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT1_N			(1 << 13)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT2_N			(1 << 14)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER0_N			(1 << 15)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER1_N			(1 << 16)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER2_N			(1 << 17)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER3_N			(1 << 18)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER4_N			(1 << 19)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER5_N			(1 << 20)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER6_N			(1 << 21)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER7_N			(1 << 22)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER8_N			(1 << 23)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_UART0_N			(1 << 24)
-#define AO_SC_PERIPH_RSTDIS4_RESET_RTC0_N			(1 << 25)
-#define AO_SC_PERIPH_RSTDIS4_RESET_RTC1_N			(1 << 26)
-#define AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N			(1 << 27)
-#define AO_SC_PERIPH_RSTDIS4_RESET_JTAG_AUTH_N			(1 << 28)
-#define AO_SC_PERIPH_RSTDIS4_RESET_CS_DAPB_ON_N			(1 << 29)
-#define AO_SC_PERIPH_RSTDIS4_MDM_SUBSYS_GLB			(1 << 30)
-
-#define AO_SC_PERIPH_CLKEN4_HCLK_MCU				(1 << 0)
-#define AO_SC_PERIPH_CLKEN4_CLK_MCU_DAP				(1 << 3)
-#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_TIMER0			(1 << 4)
-#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_TIMER1			(1 << 5)
-#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_WDT0			(1 << 6)
-#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_WDT1			(1 << 7)
-#define AO_SC_PERIPH_CLKEN4_HCLK_IPC_S				(1 << 8)
-#define AO_SC_PERIPH_CLKEN4_HCLK_IPC_NS				(1 << 9)
-#define AO_SC_PERIPH_CLKEN4_PCLK_EFUSEC				(1 << 10)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TZPC				(1 << 11)
-#define AO_SC_PERIPH_CLKEN4_PCLK_WDT0				(1 << 12)
-#define AO_SC_PERIPH_CLKEN4_PCLK_WDT1				(1 << 13)
-#define AO_SC_PERIPH_CLKEN4_PCLK_WDT2				(1 << 14)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER0				(1 << 15)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER1				(1 << 16)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER2				(1 << 17)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER3				(1 << 18)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER4				(1 << 19)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER5				(1 << 20)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER6				(1 << 21)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER7				(1 << 22)
-#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER8				(1 << 23)
-#define AO_SC_PERIPH_CLKEN4_CLK_UART0				(1 << 24)
-#define AO_SC_PERIPH_CLKEN4_CLK_RTC0				(1 << 25)
-#define AO_SC_PERIPH_CLKEN4_CLK_RTC1				(1 << 26)
-#define AO_SC_PERIPH_CLKEN4_PCLK_PMUSSI				(1 << 27)
-#define AO_SC_PERIPH_CLKEN4_CLK_JTAG_AUTH			(1 << 28)
-#define AO_SC_PERIPH_CLKEN4_CLK_CS_DAPB_ON			(1 << 29)
-#define AO_SC_PERIPH_CLKEN4_CLK_PDM				(1 << 30)
-#define AO_SC_PERIPH_CLKEN4_CLK_SSI_PAD				(1 << 31)
-
-#define AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_CCPU			(1 << 0)
-#define AO_SC_PERIPH_CLKEN5_PCLK_EFUSEC_CCPU			(1 << 1)
-#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_CCPU			(1 << 2)
-#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_NS_CCPU			(1 << 3)
-#define AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_MCU			(1 << 16)
-#define AO_SC_PERIPH_CLKEN5_PCLK_EFUSEC_MCU			(1 << 17)
-#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_MCU			(1 << 18)
-#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_NS_MCU			(1 << 19)
-
-#define AO_SC_MCU_SUBSYS_CTRL3_RCLK_3				0x003
-#define AO_SC_MCU_SUBSYS_CTRL3_RCLK_MASK			0x007
-#define AO_SC_MCU_SUBSYS_CTRL3_CSSYS_CTRL_PROT			(1 << 3)
-#define AO_SC_MCU_SUBSYS_CTRL3_TCXO_AFC_OEN_CRG			(1 << 4)
-#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_USIM1		(1 << 8)
-#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_USIM0		(1 << 9)
-#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_SD			(1 << 10)
-#define AO_SC_MCU_SUBSYS_CTRL3_MCU_SUBSYS_CTRL3_RESERVED	(1 << 11)
-
-#define PCLK_TIMER1						(1 << 16)
-#define PCLK_TIMER0						(1 << 15)
-
-#endif /* __HI6220_AO_H__ */
diff --git a/plat/hikey/include/hi6220_regs_peri.h b/plat/hikey/include/hi6220_regs_peri.h
deleted file mode 100644
index 9b79d5a..0000000
--- a/plat/hikey/include/hi6220_regs_peri.h
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6220_PERI_H__
-#define __HI6220_PERI_H__
-
-#define PERI_BASE				0xF7030000
-
-#define PERI_SC_PERIPH_CTRL1			(PERI_BASE + 0x000)
-#define PERI_SC_PERIPH_CTRL2			(PERI_BASE + 0x004)
-#define PERI_SC_PERIPH_CTRL3			(PERI_BASE + 0x008)
-#define PERI_SC_PERIPH_CTRL4			(PERI_BASE + 0x00c)
-#define PERI_SC_PERIPH_CTRL5			(PERI_BASE + 0x010)
-#define PERI_SC_PERIPH_CTRL6			(PERI_BASE + 0x014)
-#define PERI_SC_PERIPH_CTRL8			(PERI_BASE + 0x018)
-#define PERI_SC_PERIPH_CTRL9			(PERI_BASE + 0x01c)
-#define PERI_SC_PERIPH_CTRL10			(PERI_BASE + 0x020)
-#define PERI_SC_PERIPH_CTRL12			(PERI_BASE + 0x024)
-#define PERI_SC_PERIPH_CTRL13			(PERI_BASE + 0x028)
-#define PERI_SC_PERIPH_CTRL14			(PERI_BASE + 0x02c)
-
-#define PERI_SC_DDR_CTRL0			(PERI_BASE + 0x050)
-#define PERI_SC_PERIPH_STAT1			(PERI_BASE + 0x094)
-
-#define PERI_SC_PERIPH_CLKEN0			(PERI_BASE + 0x200)
-#define PERI_SC_PERIPH_CLKDIS0			(PERI_BASE + 0x204)
-#define PERI_SC_PERIPH_CLKSTAT0			(PERI_BASE + 0x208)
-#define PERI_SC_PERIPH_CLKEN1			(PERI_BASE + 0x210)
-#define PERI_SC_PERIPH_CLKDIS1			(PERI_BASE + 0x214)
-#define PERI_SC_PERIPH_CLKSTAT1			(PERI_BASE + 0x218)
-#define PERI_SC_PERIPH_CLKEN2			(PERI_BASE + 0x220)
-#define PERI_SC_PERIPH_CLKDIS2			(PERI_BASE + 0x224)
-#define PERI_SC_PERIPH_CLKSTAT2			(PERI_BASE + 0x228)
-#define PERI_SC_PERIPH_CLKEN3			(PERI_BASE + 0x230)
-#define PERI_SC_PERIPH_CLKDIS3			(PERI_BASE + 0x234)
-#define PERI_SC_PERIPH_CLKSTAT3			(PERI_BASE + 0x238)
-#define PERI_SC_PERIPH_CLKEN8			(PERI_BASE + 0x240)
-#define PERI_SC_PERIPH_CLKDIS8			(PERI_BASE + 0x244)
-#define PERI_SC_PERIPH_CLKSTAT8			(PERI_BASE + 0x248)
-#define PERI_SC_PERIPH_CLKEN9			(PERI_BASE + 0x250)
-#define PERI_SC_PERIPH_CLKDIS9			(PERI_BASE + 0x254)
-#define PERI_SC_PERIPH_CLKSTAT9			(PERI_BASE + 0x258)
-#define PERI_SC_PERIPH_CLKEN10			(PERI_BASE + 0x260)
-#define PERI_SC_PERIPH_CLKDIS10			(PERI_BASE + 0x264)
-#define PERI_SC_PERIPH_CLKSTAT10		(PERI_BASE + 0x268)
-#define PERI_SC_PERIPH_CLKEN12			(PERI_BASE + 0x270)
-#define PERI_SC_PERIPH_CLKDIS12			(PERI_BASE + 0x274)
-#define PERI_SC_PERIPH_CLKSTAT12		(PERI_BASE + 0x278)
-
-#define PERI_SC_PERIPH_RSTEN0			(PERI_BASE + 0x300)
-#define PERI_SC_PERIPH_RSTDIS0			(PERI_BASE + 0x304)
-#define PERI_SC_PERIPH_RSTSTAT0			(PERI_BASE + 0x308)
-#define PERI_SC_PERIPH_RSTEN1			(PERI_BASE + 0x310)
-#define PERI_SC_PERIPH_RSTDIS1			(PERI_BASE + 0x314)
-#define PERI_SC_PERIPH_RSTSTAT1			(PERI_BASE + 0x318)
-#define PERI_SC_PERIPH_RSTEN2			(PERI_BASE + 0x320)
-#define PERI_SC_PERIPH_RSTDIS2			(PERI_BASE + 0x324)
-#define PERI_SC_PERIPH_RSTSTAT2			(PERI_BASE + 0x328)
-#define PERI_SC_PERIPH_RSTEN3			(PERI_BASE + 0x330)
-#define PERI_SC_PERIPH_RSTDIS3			(PERI_BASE + 0x334)
-#define PERI_SC_PERIPH_RSTSTAT3			(PERI_BASE + 0x338)
-#define PERI_SC_PERIPH_RSTEN8			(PERI_BASE + 0x340)
-#define PERI_SC_PERIPH_RSTDIS8			(PERI_BASE + 0x344)
-#define PERI_SC_PERIPH_RSTSTAT8			(PERI_BASE + 0x338)
-
-#define PERI_SC_CLK_SEL0			(PERI_BASE + 0x400)
-#define PERI_SC_CLKCFG8BIT1			(PERI_BASE + 0x494)
-#define PERI_SC_CLKCFG8BIT2			(PERI_BASE + 0x498)
-#define PERI_SC_RESERVED8_ADDR			(PERI_BASE + 0xd04)
-
-/* PERI_SC_PERIPH_CTRL1 */
-#define PERI_CTRL1_ETR_AXI_CSYSREQ_N		(1 << 0)
-#define PERI_CTRL1_HIFI_INT_MASK		(1 << 1)
-#define PERI_CTRL1_HIFI_ALL_INT_MASK		(1 << 2)
-#define PERI_CTRL1_ETR_AXI_CSYSREQ_N_MSK	(1 << 16)
-#define PERI_CTRL1_HIFI_INT_MASK_MSK		(1 << 17)
-#define PERI_CTRL1_HIFI_ALL_INT_MASK_MSK	(1 << 18)
-
-/* PERI_SC_PERIPH_CTRL2	*/
-#define PERI_CTRL2_MMC_CLK_PHASE_BYPASS_EN_MMC0	(1 << 0)
-#define PERI_CTRL2_MMC_CLK_PHASE_BYPASS_EN_MMC1	(1 << 2)
-#define PERI_CTRL2_NAND_SYS_MEM_SEL		(1 << 6)
-#define PERI_CTRL2_G3D_DDRT_AXI_SEL		(1 << 7)
-#define PERI_CTRL2_GU_MDM_BBP_TESTPIN_SEL	(1 << 8)
-#define PERI_CTRL2_CODEC_SSI_MASTER_CHECK	(1 << 9)
-#define PERI_CTRL2_FUNC_TEST_SOFT		(1 << 12)
-#define PERI_CTRL2_CSSYS_TS_ENABLE		(1 << 15)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_EMA		(1 << 16)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_EMAW		(1 << 20)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_EMAS		(1 << 22)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_RET1N		(1 << 26)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_RET2N		(1 << 27)
-#define PERI_CTRL2_HIFI_RAMCTRL_S_PGEN		(1 << 28)
-
-/* PERI_SC_PERIPH_CTRL3 */
-#define PERI_CTRL3_HIFI_DDR_HARQMEM_ADDR	(1 << 0)
-#define PERI_CTRL3_HIFI_HARQMEMRMP_EN		(1 << 12)
-#define PERI_CTRL3_HARQMEM_SYS_MED_SEL		(1 << 13)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP1		(1 << 14)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP2		(1 << 16)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP3		(1 << 18)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP4		(1 << 20)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP5		(1 << 22)
-#define PERI_CTRL3_SOC_AP_OCCUPY_GRP6		(1 << 24)
-
-/* PERI_SC_PERIPH_CTRL4 */
-#define PERI_CTRL4_PICO_FSELV			(1 << 0)
-#define PERI_CTRL4_FPGA_EXT_PHY_SEL		(1 << 3)
-#define PERI_CTRL4_PICO_REFCLKSEL		(1 << 4)
-#define PERI_CTRL4_PICO_SIDDQ			(1 << 6)
-#define PERI_CTRL4_PICO_SUSPENDM_SLEEPM		(1 << 7)
-#define PERI_CTRL4_PICO_OGDISABLE		(1 << 8)
-#define PERI_CTRL4_PICO_COMMONONN		(1 << 9)
-#define PERI_CTRL4_PICO_VBUSVLDEXT		(1 << 10)
-#define PERI_CTRL4_PICO_VBUSVLDEXTSEL		(1 << 11)
-#define PERI_CTRL4_PICO_VATESTENB		(1 << 12)
-#define PERI_CTRL4_PICO_SUSPENDM		(1 << 14)
-#define PERI_CTRL4_PICO_SLEEPM			(1 << 15)
-#define PERI_CTRL4_BC11_C			(1 << 16)
-#define PERI_CTRL4_BC11_B			(1 << 17)
-#define PERI_CTRL4_BC11_A			(1 << 18)
-#define PERI_CTRL4_BC11_GND			(1 << 19)
-#define PERI_CTRL4_BC11_FLOAT			(1 << 20)
-#define PERI_CTRL4_OTG_PHY_SEL			(1 << 21)
-#define PERI_CTRL4_USB_OTG_SS_SCALEDOWN_MODE	(1 << 22)
-#define PERI_CTRL4_OTG_DM_PULLDOWN		(1 << 24)
-#define PERI_CTRL4_OTG_DP_PULLDOWN		(1 << 25)
-#define PERI_CTRL4_OTG_IDPULLUP			(1 << 26)
-#define PERI_CTRL4_OTG_DRVBUS			(1 << 27)
-#define PERI_CTRL4_OTG_SESSEND			(1 << 28)
-#define PERI_CTRL4_OTG_BVALID			(1 << 29)
-#define PERI_CTRL4_OTG_AVALID			(1 << 30)
-#define PERI_CTRL4_OTG_VBUSVALID		(1 << 31)
-
-/* PERI_SC_PERIPH_CTRL5 */
-#define PERI_CTRL5_USBOTG_RES_SEL		(1 << 3)
-#define PERI_CTRL5_PICOPHY_ACAENB		(1 << 4)
-#define PERI_CTRL5_PICOPHY_BC_MODE		(1 << 5)
-#define PERI_CTRL5_PICOPHY_CHRGSEL		(1 << 6)
-#define PERI_CTRL5_PICOPHY_VDATSRCEND		(1 << 7)
-#define PERI_CTRL5_PICOPHY_VDATDETENB		(1 << 8)
-#define PERI_CTRL5_PICOPHY_DCDENB		(1 << 9)
-#define PERI_CTRL5_PICOPHY_IDDIG		(1 << 10)
-#define PERI_CTRL5_DBG_MUX			(1 << 11)
-
-/* PERI_SC_PERIPH_CTRL6 */
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMA	(1 << 0)
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMAW	(1 << 4)
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMAS	(1 << 6)
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_RET1N	(1 << 10)
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_RET2N	(1 << 11)
-#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_PGEN	(1 << 12)
-
-/* PERI_SC_PERIPH_CTRL8 */
-#define PERI_CTRL8_PICOPHY_TXRISETUNE0		(1 << 0)
-#define PERI_CTRL8_PICOPHY_TXPREEMPAMPTUNE0	(1 << 2)
-#define PERI_CTRL8_PICOPHY_TXRESTUNE0		(1 << 4)
-#define PERI_CTRL8_PICOPHY_TXHSSVTUNE0		(1 << 6)
-#define PERI_CTRL8_PICOPHY_COMPDISTUNE0		(1 << 8)
-#define PERI_CTRL8_PICOPHY_TXPREEMPPULSETUNE0	(1 << 11)
-#define PERI_CTRL8_PICOPHY_OTGTUNE0		(1 << 12)
-#define PERI_CTRL8_PICOPHY_SQRXTUNE0		(1 << 16)
-#define PERI_CTRL8_PICOPHY_TXVREFTUNE0		(1 << 20)
-#define PERI_CTRL8_PICOPHY_TXFSLSTUNE0		(1 << 28)
-
-/* PERI_SC_PERIPH_CTRL9	*/
-#define PERI_CTRL9_PICOPLY_TESTCLKEN		(1 << 0)
-#define PERI_CTRL9_PICOPLY_TESTDATAOUTSEL	(1 << 1)
-#define PERI_CTRL9_PICOPLY_TESTADDR		(1 << 4)
-#define PERI_CTRL9_PICOPLY_TESTDATAIN		(1 << 8)
-
-/*
- * PERI_SC_PERIPH_CLKEN0
- * PERI_SC_PERIPH_CLKDIS0
- * PERI_SC_PERIPH_CLKSTAT0
- */
-#define PERI_CLK0_MMC0				(1 << 0)
-#define PERI_CLK0_MMC1				(1 << 1)
-#define PERI_CLK0_MMC2				(1 << 2)
-#define PERI_CLK0_NANDC				(1 << 3)
-#define PERI_CLK0_USBOTG			(1 << 4)
-#define PERI_CLK0_PICOPHY			(1 << 5)
-#define PERI_CLK0_PLL				(1 << 6)
-
-/*
- * PERI_SC_PERIPH_CLKEN1
- * PERI_SC_PERIPH_CLKDIS1
- * PERI_SC_PERIPH_CLKSTAT1
- */
-#define PERI_CLK1_HIFI				(1 << 0)
-#define PERI_CLK1_DIGACODEC			(1 << 5)
-
-/*
- * PERI_SC_PERIPH_CLKEN2
- * PERI_SC_PERIPH_CLKDIS2
- * PERI_SC_PERIPH_CLKSTAT2
- */
-#define PERI_CLK2_IPF				(1 << 0)
-#define PERI_CLK2_SOCP				(1 << 1)
-#define PERI_CLK2_DMAC				(1 << 2)
-#define PERI_CLK2_SECENG			(1 << 3)
-#define PERI_CLK2_HPM0				(1 << 5)
-#define PERI_CLK2_HPM1				(1 << 6)
-#define PERI_CLK2_HPM2				(1 << 7)
-#define PERI_CLK2_HPM3				(1 << 8)
-
-/*
- * PERI_SC_PERIPH_CLKEN3
- * PERI_SC_PERIPH_CLKDIS3
- * PERI_SC_PERIPH_CLKSTAT3
- */
-#define PERI_CLK3_CSSYS				(1 << 0)
-#define PERI_CLK3_I2C0				(1 << 1)
-#define PERI_CLK3_I2C1				(1 << 2)
-#define PERI_CLK3_I2C2				(1 << 3)
-#define PERI_CLK3_I2C3				(1 << 4)
-#define PERI_CLK3_UART1				(1 << 5)
-#define PERI_CLK3_UART2				(1 << 6)
-#define PERI_CLK3_UART3				(1 << 7)
-#define PERI_CLK3_UART4				(1 << 8)
-#define PERI_CLK3_SSP				(1 << 9)
-#define PERI_CLK3_PWM				(1 << 10)
-#define PERI_CLK3_BLPWM				(1 << 11)
-#define PERI_CLK3_TSENSOR			(1 << 12)
-#define PERI_CLK3_GPS				(1 << 15)
-#define PERI_CLK3_TCXO_PAD0			(1 << 16)
-#define PERI_CLK3_TCXO_PAD1			(1 << 17)
-#define PERI_CLK3_DAPB				(1 << 18)
-#define PERI_CLK3_HKADC				(1 << 19)
-#define PERI_CLK3_CODEC_SSI			(1 << 20)
-#define PERI_CLK3_TZPC_DEP			(1 << 21)
-
-/*
- * PERI_SC_PERIPH_CLKEN8
- * PERI_SC_PERIPH_CLKDIS8
- * PERI_SC_PERIPH_CLKSTAT8
- */
-#define PERI_CLK8_RS0				(1 << 0)
-#define PERI_CLK8_RS2				(1 << 1)
-#define PERI_CLK8_RS3				(1 << 2)
-#define PERI_CLK8_MS0				(1 << 3)
-#define PERI_CLK8_MS2				(1 << 5)
-#define PERI_CLK8_XG2RAM0			(1 << 6)
-#define PERI_CLK8_X2SRAM			(1 << 7)
-#define PERI_CLK8_SRAM				(1 << 8)
-#define PERI_CLK8_ROM				(1 << 9)
-#define PERI_CLK8_HARQ				(1 << 10)
-#define PERI_CLK8_MMU				(1 << 11)
-#define PERI_CLK8_DDRC				(1 << 12)
-#define PERI_CLK8_DDRPHY			(1 << 13)
-#define PERI_CLK8_DDRPHY_REF			(1 << 14)
-#define PERI_CLK8_X2X_SYSNOC			(1 << 15)
-#define PERI_CLK8_X2X_CCPU			(1 << 16)
-#define PERI_CLK8_DDRT				(1 << 17)
-#define PERI_CLK8_DDRPACK_RS			(1 << 18)
-
-/*
- * PERI_SC_PERIPH_CLKEN9
- * PERI_SC_PERIPH_CLKDIS9
- * PERI_SC_PERIPH_CLKSTAT9
- */
-#define PERI_CLK9_CARM_DAP			(1 << 0)
-#define PERI_CLK9_CARM_ATB			(1 << 1)
-#define PERI_CLK9_CARM_LBUS			(1 << 2)
-#define PERI_CLK9_CARM_KERNEL			(1 << 3)
-
-/*
- * PERI_SC_PERIPH_CLKEN10
- * PERI_SC_PERIPH_CLKDIS10
- * PERI_SC_PERIPH_CLKSTAT10
- */
-#define PERI_CLK10_IPF_CCPU			(1 << 0)
-#define PERI_CLK10_SOCP_CCPU			(1 << 1)
-#define PERI_CLK10_SECENG_CCPU			(1 << 2)
-#define PERI_CLK10_HARQ_CCPU			(1 << 3)
-#define PERI_CLK10_IPF_MCU			(1 << 16)
-#define PERI_CLK10_SOCP_MCU			(1 << 17)
-#define PERI_CLK10_SECENG_MCU			(1 << 18)
-#define PERI_CLK10_HARQ_MCU			(1 << 19)
-
-/*
- * PERI_SC_PERIPH_CLKEN12
- * PERI_SC_PERIPH_CLKDIS12
- * PERI_SC_PERIPH_CLKSTAT12
- */
-#define PERI_CLK12_HIFI_SRC			(1 << 0)
-#define PERI_CLK12_MMC0_SRC			(1 << 1)
-#define PERI_CLK12_MMC1_SRC			(1 << 2)
-#define PERI_CLK12_MMC2_SRC			(1 << 3)
-#define PERI_CLK12_SYSPLL_DIV			(1 << 4)
-#define PERI_CLK12_TPIU_SRC			(1 << 5)
-#define PERI_CLK12_MMC0_HF			(1 << 6)
-#define PERI_CLK12_MMC1_HF			(1 << 7)
-#define PERI_CLK12_PLL_TEST_SRC			(1 << 8)
-#define PERI_CLK12_CODEC_SOC			(1 << 9)
-#define PERI_CLK12_MEDIA			(1 << 10)
-
-/*
- * PERI_SC_PERIPH_RSTEN0
- * PERI_SC_PERIPH_RSTDIS0
- * PERI_SC_PERIPH_RSTSTAT0
- */
-#define PERI_RST0_MMC0				(1 << 0)
-#define PERI_RST0_MMC1				(1 << 1)
-#define PERI_RST0_MMC2				(1 << 2)
-#define PERI_RST0_NANDC				(1 << 3)
-#define PERI_RST0_USBOTG_BUS			(1 << 4)
-#define PERI_RST0_POR_PICOPHY			(1 << 5)
-#define PERI_RST0_USBOTG			(1 << 6)
-#define PERI_RST0_USBOTG_32K			(1 << 7)
-
-/*
- * PERI_SC_PERIPH_RSTEN1
- * PERI_SC_PERIPH_RSTDIS1
- * PERI_SC_PERIPH_RSTSTAT1
- */
-#define PERI_RST1_HIFI				(1 << 0)
-#define PERI_RST1_DIGACODEC			(1 << 5)
-
-/*
- * PERI_SC_PERIPH_RSTEN2
- * PERI_SC_PERIPH_RSTDIS2
- * PERI_SC_PERIPH_RSTSTAT2
- */
-#define PERI_RST2_IPF				(1 << 0)
-#define PERI_RST2_SOCP				(1 << 1)
-#define PERI_RST2_DMAC				(1 << 2)
-#define PERI_RST2_SECENG			(1 << 3)
-#define PERI_RST2_ABB				(1 << 4)
-#define PERI_RST2_HPM0				(1 << 5)
-#define PERI_RST2_HPM1				(1 << 6)
-#define PERI_RST2_HPM2				(1 << 7)
-#define PERI_RST2_HPM3				(1 << 8)
-
-/*
- * PERI_SC_PERIPH_RSTEN3
- * PERI_SC_PERIPH_RSTDIS3
- * PERI_SC_PERIPH_RSTSTAT3
- */
-#define PERI_RST3_CSSYS				(1 << 0)
-#define PERI_RST3_I2C0				(1 << 1)
-#define PERI_RST3_I2C1				(1 << 2)
-#define PERI_RST3_I2C2				(1 << 3)
-#define PERI_RST3_I2C3				(1 << 4)
-#define PERI_RST3_UART1				(1 << 5)
-#define PERI_RST3_UART2				(1 << 6)
-#define PERI_RST3_UART3				(1 << 7)
-#define PERI_RST3_UART4				(1 << 8)
-#define PERI_RST3_SSP				(1 << 9)
-#define PERI_RST3_PWM				(1 << 10)
-#define PERI_RST3_BLPWM				(1 << 11)
-#define PERI_RST3_TSENSOR			(1 << 12)
-#define PERI_RST3_DAPB				(1 << 18)
-#define PERI_RST3_HKADC				(1 << 19)
-#define PERI_RST3_CODEC				(1 << 20)
-
-/*
- * PERI_SC_PERIPH_RSTEN8
- * PERI_SC_PERIPH_RSTDIS8
- * PERI_SC_PERIPH_RSTSTAT8
- */
-#define PERI_RST8_RS0				(1 << 0)
-#define PERI_RST8_RS2				(1 << 1)
-#define PERI_RST8_RS3				(1 << 2)
-#define PERI_RST8_MS0				(1 << 3)
-#define PERI_RST8_MS2				(1 << 5)
-#define PERI_RST8_XG2RAM0			(1 << 6)
-#define PERI_RST8_X2SRAM_TZMA			(1 << 7)
-#define PERI_RST8_SRAM				(1 << 8)
-#define PERI_RST8_HARQ				(1 << 10)
-#define PERI_RST8_DDRC				(1 << 12)
-#define PERI_RST8_DDRC_APB			(1 << 13)
-#define PERI_RST8_DDRPACK_APB			(1 << 14)
-#define PERI_RST8_DDRT				(1 << 17)
-
-#endif /* __HI6220_PERI_H__ */
diff --git a/plat/hikey/include/hi6220_regs_pmctrl.h b/plat/hikey/include/hi6220_regs_pmctrl.h
deleted file mode 100644
index 4a2e905..0000000
--- a/plat/hikey/include/hi6220_regs_pmctrl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6220_REGS_PMCTRL_H__
-#define __HI6220_REGS_PMCTRL_H__
-
-#define PMCTRL_BASE				0xF7032000
-
-#define PMCTRL_ACPUPLLCTRL			(PMCTRL_BASE + 0x000)
-#define PMCTRL_ACPUPLLFREQ			(PMCTRL_BASE + 0x004)
-#define PMCTRL_DDRPLL1CTRL			(PMCTRL_BASE + 0x010)
-#define PMCTRL_DDRPLL0CTRL			(PMCTRL_BASE + 0x030)
-#define PMCTRL_MEDPLLCTRL			(PMCTRL_BASE + 0x038)
-#define PMCTRL_ACPUPLLSEL			(PMCTRL_BASE + 0x100)
-#define PMCTRL_ACPUCLKDIV			(PMCTRL_BASE + 0x104)
-#define PMCTRL_ACPUSYSPLLCFG			(PMCTRL_BASE + 0x110)
-#define PMCTRL_ACPUCLKOFFCFG			(PMCTRL_BASE + 0x114)
-#define PMCTRL_ACPUPLLFRAC			(PMCTRL_BASE + 0x134)
-#define PMCTRL_ACPUPMUVOLUPTIME			(PMCTRL_BASE + 0x360)
-#define PMCTRL_ACPUPMUVOLDNTIME			(PMCTRL_BASE + 0x364)
-#define PMCTRL_ACPUVOLPMUADDR			(PMCTRL_BASE + 0x368)
-#define PMCTRL_ACPUVOLUPSTEP			(PMCTRL_BASE + 0x36c)
-#define PMCTRL_ACPUVOLDNSTEP			(PMCTRL_BASE + 0x370)
-#define PMCTRL_ACPUDFTVOL			(PMCTRL_BASE + 0x374)
-#define PMCTRL_ACPUDESTVOL			(PMCTRL_BASE + 0x378)
-#define PMCTRL_ACPUVOLTTIMEOUT			(PMCTRL_BASE + 0x37c)
-
-#define PMCTRL_ACPUPLLCTRL_EN_CFG		(1 << 0)
-
-#define PMCTRL_ACPUCLKDIV_CPUEXT_CFG_MASK	(3 << 0)
-#define PMCTRL_ACPUCLKDIV_DDR_CFG_MASK		(3 << 8)
-#define PMCTRL_ACPUCLKDIV_CPUEXT_STAT_MASK	(3 << 16)
-#define PMCTRL_ACPUCLKDIV_DDR_STAT_MASK		(3 << 24)
-
-#define PMCTRL_ACPUPLLSEL_ACPUPLL_CFG		(1 << 0)
-#define PMCTRL_ACPUPLLSEL_ACPUPLL_STAT		(1 << 1)
-#define PMCTRL_ACPUPLLSEL_SYSPLL_STAT		(1 << 2)
-
-#define PMCTRL_ACPUSYSPLL_CLKDIV_CFG_MASK	0x7
-#define PMCTRL_ACPUSYSPLL_CLKEN_CFG		(1 << 4)
-#define PMCTRL_ACPUSYSPLL_CLKDIV_SW		(3 << 12)
-
-#define PMCTRL_ACPUSYSPLLCFG_SYSPLL_CLKEN	(1 << 4)
-#define PMCTRL_ACPUSYSPLLCFG_CLKDIV_MASK	(3 << 12)
-
-#define PMCTRL_ACPUDESTVOL_DEST_VOL_MASK	0x7f
-#define PMCTRL_ACPUDESTVOL_CURR_VOL_MASK	(0x7f << 8)
-
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START   (0)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_END     (0)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_rst_START      (2)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_rst_END        (2)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_time_START     (4)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_time_END       (27)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START  (28)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_END    (28)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_lock_START     (29)
-#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_lock_END       (29)
-
-#define SOC_PMCTRL_ACPUPLLFRAC_ADDR(base)   ((base) + (0x134))
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START   (12)
-
-#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START   (0)
-#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_END     (0)
-#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START  (1)
-#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_END    (1)
-#define SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_START   (2)
-#define SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_END     (2)
-
-#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START     (0)
-#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_END       (1)
-#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START   (8)
-#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_END     (9)
-#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START    (16)
-#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_END      (17)
-#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START  (24)
-#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_END    (25)
-
-#define SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START   (0)
-#define SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END     (6)
-#define SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START  (8)
-#define SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_END    (14)
-
-#define SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START  (0)
-#define SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_END    (0)
-
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_cfg_START      (0)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_cfg_END        (2)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START    (4)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_END      (4)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_cfg_START  (8)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_cfg_END    (9)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_stat_START     (16)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_stat_END       (19)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_stat_START   (20)
-#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_stat_END     (20)
-
-#endif /* __HI6220_REGS_PMCTRL_H__ */
diff --git a/plat/hikey/include/hi6553.h b/plat/hikey/include/hi6553.h
deleted file mode 100644
index 7e642c0..0000000
--- a/plat/hikey/include/hi6553.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HI6553_H__
-#define __HI6553_H__
-
-#define DISABLE6_XO_CLK				0x036
-
-#define DISABLE6_XO_CLK_BB			(1 << 0)
-#define DISABLE6_XO_CLK_CONN			(1 << 1)
-#define DISABLE6_XO_CLK_NFC			(1 << 2)
-#define DISABLE6_XO_CLK_RF1			(1 << 3)
-#define DISABLE6_XO_CLK_RF2			(1 << 4)
-
-#define VERSION_REG				0x000
-#define IRQ2_MASK				0x008
-#define ENABLE2_LDO1_8				0x029
-#define DISABLE2_LDO1_8				0x02a
-#define ONOFF_STATUS2_LDO1_8			0x02b
-#define ENABLE3_LDO9_16				0x02c
-#define DISABLE3_LDO9_16			0x02d
-#define ONOFF_STATUS3_LDO9_16			0x02e
-#define ENABLE4_LDO17_22			0x02f
-#define DISABLE4_LDO17_22			0x030
-#define ONOFF_STATUS4_LDO17_22			0x031
-#define PERI_EN_MARK				0x040
-#define BUCK2_REG1				0x04a
-#define BUCK2_REG5				0x04e
-#define BUCK2_REG6				0x04f
-#define BUCK3_REG3				0x054
-#define BUCK3_REG5				0x056
-#define BUCK3_REG6				0x057
-#define BUCK4_REG2				0x05b
-#define BUCK4_REG5				0x05e
-#define BUCK4_REG6				0x05f
-#define CLK_TOP0				0x063
-#define CLK_TOP3				0x066
-#define CLK_TOP4				0x067
-#define VSET_BUCK2_ADJ				0x06d
-#define VSET_BUCK3_ADJ				0x06e
-#define LDO7_REG_ADJ				0x078
-#define LDO10_REG_ADJ				0x07b
-#define LDO15_REG_ADJ				0x080
-#define LDO19_REG_ADJ				0x084
-#define LDO20_REG_ADJ				0x085
-#define LDO21_REG_ADJ				0x086
-#define LDO22_REG_ADJ				0x087
-#define DR_LED_CTRL				0x098
-#define DR_OUT_CTRL				0x099
-#define DR3_ISET				0x09a
-#define DR3_START_DEL				0x09b
-#define DR4_ISET				0x09c
-#define DR4_START_DEL				0x09d
-#define DR345_TIM_CONF0				0x0a0
-#define NP_REG_ADJ1				0x0be
-#define NP_REG_CHG				0x0c0
-#define BUCK01_CTRL2				0x0d9
-#define BUCK0_CTRL1				0x0dd
-#define BUCK0_CTRL5				0x0e1
-#define BUCK0_CTRL7				0x0e3
-#define BUCK1_CTRL1				0x0e8
-#define BUCK1_CTRL5				0x0ec
-#define BUCK1_CTRL7				0x0ef
-#define CLK19M2_600_586_EN			0x0fe
-
-#define LED_START_DELAY_TIME			0x00
-#define LED_ELEC_VALUE				0x07
-#define LED_LIGHT_TIME				0xf0
-#define LED_GREEN_ENABLE			(1 << 1)
-#define LED_OUT_CTRL				0x00
-
-#define PMU_HI6552_V300				0x30
-#define PMU_HI6552_V310				0x31
-
-extern unsigned char hi6553_read_8(unsigned int offset);
-extern void hi6553_write_8(unsigned int offset, unsigned int value);
-
-#endif	/* __HI6553_H__ */
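The header above only publishes register offsets plus the byte accessors hi6553_read_8()/hi6553_write_8(). A minimal sketch of how a caller might use them, with the PMU register file simulated by an array so the sketch runs on a host; the LDO bit position and the version handling are illustrative assumptions, not taken from the header:

#include <stdio.h>

#define VERSION_REG       0x000
#define ENABLE2_LDO1_8    0x029
#define PMU_HI6552_V300   0x30

/* Simulated register file standing in for the real PMU accessors. */
static unsigned char pmu_regs[0x100] = { [VERSION_REG] = PMU_HI6552_V300 };

static unsigned char hi6553_read_8(unsigned int offset)
{
	return pmu_regs[offset];
}

static void hi6553_write_8(unsigned int offset, unsigned int value)
{
	pmu_regs[offset] = (unsigned char)value;
}

int main(void)
{
	if (hi6553_read_8(VERSION_REG) >= PMU_HI6552_V300)
		hi6553_write_8(ENABLE2_LDO1_8, 1 << 2);	/* assumed: bit 2 gates LDO3 */

	printf("ENABLE2_LDO1_8 = 0x%02x\n", hi6553_read_8(ENABLE2_LDO1_8));
	return 0;
}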
diff --git a/plat/hikey/include/hisi_ipc.h b/plat/hikey/include/hisi_ipc.h
deleted file mode 100644
index 0afe011..0000000
--- a/plat/hikey/include/hisi_ipc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __HISI_IPC_H__
-#define __HISI_IPC_H__
-
-#define HISI_IPC_CORE_ACPU		0x0
-
-#define HISI_IPC_MCU_INT_SRC_ACPU0_PD	10
-#define HISI_IPC_MCU_INT_SRC_ACPU1_PD	11
-#define HISI_IPC_MCU_INT_SRC_ACPU2_PD	12
-#define HISI_IPC_MCU_INT_SRC_ACPU3_PD	13
-#define HISI_IPC_MCU_INT_SRC_ACPU_PD 	16
-#define HISI_IPC_MCU_INT_SRC_ACPU4_PD	26
-#define HISI_IPC_MCU_INT_SRC_ACPU5_PD	27
-#define HISI_IPC_MCU_INT_SRC_ACPU6_PD	28
-#define HISI_IPC_MCU_INT_SRC_ACPU7_PD	29
-
-#define HISI_IPC_SEM_CPUIDLE		27
-#define HISI_IPC_INT_SRC_NUM		32
-
-#define HISI_IPC_PM_ON			0
-#define HISI_IPC_PM_OFF			1
-
-#define HISI_IPC_OK			(0)
-#define HISI_IPC_ERROR			(-1)
-
-#define HISI_IPC_BASE_ADDR		(0xF7510000)
-#define HISI_IPC_CPU_RAW_INT_ADDR	(0xF7510420)
-#define HISI_IPC_ACPU_CTRL(i)		(0xF7510800 + (i << 3))
-
-void hisi_ipc_spin_lock(unsigned int signal);
-void hisi_ipc_spin_unlock(unsigned int signal);
-void hisi_ipc_cpu_on(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_cpu_off(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_cpu_suspend(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_cluster_on(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_cluster_off(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_cluster_suspend(unsigned int cpu, unsigned int cluster);
-void hisi_ipc_psci_system_off(void);
-int hisi_ipc_init(void);
-
-#endif
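HISI_IPC_ACPU_CTRL(i) places the per-core control registers at an 8-byte stride above the IPC base. A small sketch that expands the macro for all eight cores, printing the addresses only (no hardware access):

#include <stdio.h>

#define HISI_IPC_ACPU_CTRL(i)	(0xF7510800 + ((i) << 3))	/* 8-byte stride per core */

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("core %d ctrl reg: 0x%08X\n", i, HISI_IPC_ACPU_CTRL(i));
	return 0;
}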
diff --git a/plat/hikey/include/hisi_mcu.h b/plat/hikey/include/hisi_mcu.h
deleted file mode 100644
index 74dbf17..0000000
--- a/plat/hikey/include/hisi_mcu.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef	__MCU_H__
-#define	__MCU_H__
-
-#include <stdint.h>
-
-extern void hisi_mcu_enable_sram(void);
-extern void hisi_mcu_start_run(void);
-extern int hisi_mcu_load_image(uintptr_t image_base, uint32_t image_size);
-
-#endif	/* __MCU_H__ */
diff --git a/plat/hikey/include/hisi_pwrc.h b/plat/hikey/include/hisi_pwrc.h
deleted file mode 100644
index d7907fb..0000000
--- a/plat/hikey/include/hisi_pwrc.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HISI_PWRC_H__
-#define __HISI_PWRC_H__
-
-#ifndef __ASSEMBLY__
-
-void hisi_pwrc_set_cluster_wfi(unsigned int id);
-void hisi_pwrc_set_core_bx_addr(unsigned int core,
-				unsigned int cluster,
-				uintptr_t entry_point);
-int hisi_pwrc_setup(void);
-
-#endif /*__ASSEMBLY__*/
-
-#endif /* __HISI_PWRC_H__ */
diff --git a/plat/hikey/include/hisi_sram_map.h b/plat/hikey/include/hisi_sram_map.h
deleted file mode 100644
index dc4425d..0000000
--- a/plat/hikey/include/hisi_sram_map.h
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SRAM_MAP_H__
-#define __SRAM_MAP_H__
-
-/*
- * SRAM Memory Region Layout
- *
- *  +-----------------------+
- *  |  Low Power Mode       | 7KB
- *  +-----------------------+
- *  |  Secure OS            | 64KB
- *  +-----------------------+
- *  |  Software Flag        | 1KB
- *  +-----------------------+
- *
- */
-
-#define SOC_SRAM_OFF_BASE_ADDR		(0xFFF80000)
-
-/* PM Section: 7KB */
-#define SRAM_PM_ADDR			(SOC_SRAM_OFF_BASE_ADDR)
-#define SRAM_PM_SIZE			(0x00001C00)
-
-/* TEE OS Section: 64KB */
-#define SRAM_TEEOS_ADDR			(SRAM_PM_ADDR + SRAM_PM_SIZE)
-#define SRAM_TEEOS_SIZE			(0x00010000)
-
-/* General Use Section: 1KB */
-#define SRAM_GENERAL_ADDR		(SRAM_TEEOS_ADDR + SRAM_TEEOS_SIZE)
-#define SRAM_GENERAL_SIZE		(0x00000400)
-
-/*
- * General Usage Section Layout:
- *
- *  +-----------------------+
- *  |  AP boot flag         | 64B
- *  +-----------------------+
- *  |  DICC flag            | 32B
- *  +-----------------------+
- *  |  Soft flag            | 256B
- *  +-----------------------+
- *  |  Thermal flag         | 128B
- *  +-----------------------+
- *  |  CSHELL               | 4B
- *  +-----------------------+
- *  |  Uart Switching       | 4B
- *  +-----------------------+
- *  |  ICC                  | 1024B
- *  +-----------------------+
- *  |  Memory Management    | 1024B
- *  +-----------------------+
- *  |  IFC                  | 32B
- *  +-----------------------+
- *  |  HIFI                 | 32B
- *  +-----------------------+
- *  |  DDR capacity         | 4B
- *  +-----------------------+
- *  |  Reserved             |
- *  +-----------------------+
- *
- */
-
-/* App Core Boot Flags */
-#define MEMORY_AXI_ACPU_START_ADDR		(SRAM_GENERAL_ADDR)
-#define MEMORY_AXI_ACPU_START_SIZE		(64)
-
-#define MEMORY_AXI_SRESET_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0000)
-#define MEMORY_AXI_SECOND_CPU_BOOT_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0004)
-#define MEMORY_AXI_READY_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0008)
-#define MEMORY_AXI_FASTBOOT_ENTRY_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x000C)
-#define MEMORY_AXI_PD_CHARGE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0010)
-#define MEMORY_AXI_DBG_ALARM_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0014)
-#define MEMORY_AXI_CHIP_ADDR			(MEMORY_AXI_ACPU_START_ADDR + 0x0018)
-#define MEMORY_AXI_BOARD_TYPE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x001C)
-#define MEMORY_AXI_BOARD_ID_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0020)
-#define MEMORY_AXI_CHARGETYPE_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0024)
-#define MEMORY_AXI_COLD_START_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0028)
-#define MEMORY_AXI_ANDROID_REBOOT_FLAG_ADDR	(MEMORY_AXI_ACPU_START_ADDR + 0x002C)
-#define MEMORY_AXI_ACPU_WDTRST_REBOOT_FLAG_ADDR	(MEMORY_AXI_ACPU_START_ADDR + 0x0030)
-#define MEMORY_AXI_ABNRST_BITMAP_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0034)
-#define MEMORY_AXI_32K_CLK_TYPE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0038)
-#define AXI_MODEM_PANIC_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x003C)
-#define AXI_MODEM_PANIC_FLAG			(0x68697369)
-#define MEMORY_AXI_ACPU_END_ADDR		(AXI_MODEM_PANIC_FLAG_ADDR + 4)
-
-/* DICC Flags */
-#define MEMORY_AXI_DICC_ADDR			(MEMORY_AXI_ACPU_START_ADDR + MEMORY_AXI_ACPU_START_SIZE)
-#define MEMORY_AXI_DICC_SIZE			(32)
-
-#define MEMORY_AXI_SOFT_FLAG_ADDR		(MEMORY_AXI_DICC_ADDR + MEMORY_AXI_DICC_SIZE)
-#define MEMORY_AXI_SOFT_FLAG_SIZE		(256)
-
-/* Thermal Flags */
-#define MEMORY_AXI_TEMP_PROTECT_ADDR		(MEMORY_AXI_SOFT_FLAG_ADDR + MEMORY_AXI_SOFT_FLAG_SIZE)
-#define MEMORY_AXI_TEMP_PROTECT_SIZE		(128)
-
-/* CSHELL */
-#define MEMORY_AXI_USB_CSHELL_ADDR		(MEMORY_AXI_TEMP_PROTECT_ADDR + MEMORY_AXI_TEMP_PROTECT_SIZE)
-#define MEMORY_AXI_USB_CSHELL_SIZE		(4)
-
-/* Uart and A/C Shell Switch Flags */
-#define MEMORY_AXI_UART_INOUT_ADDR		(MEMORY_AXI_USB_CSHELL_ADDR + MEMORY_AXI_USB_CSHELL_SIZE)
-#define MEMORY_AXI_UART_INOUT_SIZE		(4)
-
-/* IFC Flags */
-#define MEMORY_AXI_IFC_ADDR			(MEMORY_AXI_UART_INOUT_ADDR + MEMORY_AXI_UART_INOUT_SIZE)
-#define MEMORY_AXI_IFC_SIZE			(32)
-
-/* HIFI Data */
-#define MEMORY_AXI_HIFI_ADDR			(MEMORY_AXI_IFC_ADDR + MEMORY_AXI_IFC_SIZE)
-#define MEMORY_AXI_HIFI_SIZE			(32)
-
-/* CONFIG Flags */
-#define MEMORY_AXI_CONFIG_ADDR			(MEMORY_AXI_HIFI_ADDR + MEMORY_AXI_HIFI_SIZE)
-#define MEMORY_AXI_CONFIG_SIZE			(32)
-
-/* DDR Capacity Flags */
-#define MEMORY_AXI_DDR_CAPACITY_ADDR		(MEMORY_AXI_CONFIG_ADDR + MEMORY_AXI_CONFIG_SIZE)
-#define MEMORY_AXI_DDR_CAPACITY_SIZE		(4)
-
-/* USB Shell Flags */
-#define MEMORY_AXI_USB_SHELL_FLAG_ADDR		(MEMORY_AXI_DDR_CAPACITY_ADDR + MEMORY_AXI_DDR_CAPACITY_SIZE )
-#define MEMORY_AXI_USB_SHELL_FLAG_SIZE		(4)
-
-/* MCU WDT Switch Flag */
-#define MEMORY_AXI_MCU_WDT_FLAG_ADDR		(MEMORY_AXI_USB_SHELL_FLAG_ADDR + MEMORY_AXI_USB_SHELL_FLAG_SIZE)
-#define MEMORY_AXI_MCU_WDT_FLAG_SIZE		(4)
-
-/* TLDSP Mailbox MNTN */
-#define SRAM_DSP_MNTN_INFO_ADDR			(MEMORY_AXI_MCU_WDT_FLAG_ADDR + MEMORY_AXI_MCU_WDT_FLAG_SIZE)
-#define SRAM_DSP_MNTN_SIZE			(32)
-
-/* TLDSP ARM Mailbox Protect Flag */
-#define SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_ADDR	(SRAM_DSP_MNTN_INFO_ADDR + SRAM_DSP_MNTN_SIZE)
-#define SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_SIZE	(4)
-
-/* RTT Sleep Flag */
-#define SRAM_RTT_SLEEP_FLAG_ADDR                (SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_ADDR + SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_SIZE)
-#define SRAM_RTT_SLEEP_FLAG_SIZE                (32)
-
-/* LDSP Awake Flag */
-#define MEMORY_AXI_LDSP_AWAKE_ADDR              (SRAM_RTT_SLEEP_FLAG_ADDR + SRAM_RTT_SLEEP_FLAG_SIZE)
-#define MEMORY_AXI_LDSP_AWAKE_SIZE              (4)
-
-#define NVUPDATE_SUCCESS			0x5555AAAA
-#define NVUPDATE_FAILURE			0xAAAA5555
-
-/*
- * Low Power Mode Region
- */
-#define PWRCTRL_ACPU_ASM_SPACE_ADDR		(SRAM_PM_ADDR)
-#define PWRCTRL_ACPU_ASM_SPACE_SIZE		(SRAM_PM_SIZE)
-
-#define PWRCTRL_ACPU_ASM_MEM_BASE		(PWRCTRL_ACPU_ASM_SPACE_ADDR)
-#define PWRCTRL_ACPU_ASM_MEM_SIZE		(PWRCTRL_ACPU_ASM_SPACE_SIZE)
-#define PWRCTRL_ACPU_ASM_CODE_BASE		(PWRCTRL_ACPU_ASM_MEM_BASE + 0x200)
-#define PWRCTRL_ACPU_ASM_DATA_BASE		(PWRCTRL_ACPU_ASM_MEM_BASE + 0xE00)
-#define PWRCTRL_ACPU_ASM_DATA_SIZE		(0xE00)
-
-#define PWRCTRL_ACPU_ASM_D_C0_ADDR		(PWRCTRL_ACPU_ASM_DATA_BASE)
-#define PWRCTRL_ACPU_ASM_D_C0_MMU_PARA_AD	(PWRCTRL_ACPU_ASM_DATA_BASE + 0)
-#define PWRCTRL_ACPU_ASM_D_ARM_PARA_AD		(PWRCTRL_ACPU_ASM_DATA_BASE + 0x20)
-
-#define PWRCTRL_ACPU_ASM_D_COMM_ADDR		(PWRCTRL_ACPU_ASM_DATA_BASE + 0x700)
-
-#define PWRCTRL_ACPU_REBOOT			(PWRCTRL_ACPU_ASM_D_COMM_ADDR)
-#define PWRCTRL_ACPU_REBOOT_SIZE		(0x200)
-#define PWRCTRL_ACPU_ASM_SLICE_BAK_ADDR		(PWRCTRL_ACPU_REBOOT + PWRCTRL_ACPU_REBOOT_SIZE)
-#define PWRCTRL_ACPU_ASM_SLICE_BAK_SIZE		(4)
-#define PWRCTRL_ACPU_ASM_DEBUG_FLAG_ADDR	(PWRCTRL_ACPU_ASM_SLICE_BAK_ADDR + PWRCTRL_ACPU_ASM_SLICE_BAK_SIZE)
-#define PWRCTRL_ACPU_ASM_DEBUG_FLAG_SIZE	(4)
-#define EXCH_A_CORE_POWRCTRL_CONV_ADDR		(PWRCTRL_ACPU_ASM_DEBUG_FLAG_ADDR + PWRCTRL_ACPU_ASM_DEBUG_FLAG_SIZE)
-#define EXCH_A_CORE_POWRCTRL_CONV_SIZE		(4)
-
-/*
- * The region below is mapped as (sizes in bytes):
- * 4 + 12 + 16 + 28 + 28 + 16 + 28 + 12 + 24 + 20 + 64 +
- * 4 + 4 + 4 + 4 + 12 + 4 + 4 + 4 + 4 + 16 + 4 + 0x2BC +
- * 24 + 20 + 12 + 16
- */
-
-#define MEMORY_AXI_CPU_IDLE_ADDR		(EXCH_A_CORE_POWRCTRL_CONV_ADDR + EXCH_A_CORE_POWRCTRL_CONV_SIZE)
-#define MEMORY_AXI_CPU_IDLE_SIZE		(4)
-
-#define MEMORY_AXI_CUR_FREQ_ADDR		(MEMORY_AXI_CPU_IDLE_ADDR + MEMORY_AXI_CPU_IDLE_SIZE)
-#define MEMORY_AXI_CUR_FREQ_SIZE		(12)
-
-#define MEMORY_AXI_ACPU_FREQ_VOL_ADDR		(MEMORY_AXI_CUR_FREQ_ADDR + MEMORY_AXI_CUR_FREQ_SIZE)
-#define MEMORY_AXI_ACPU_FREQ_VOL_SIZE		(16 + 28 + 28)
-
-#define MEMORY_AXI_DDR_FREQ_VOL_ADDR		(MEMORY_AXI_ACPU_FREQ_VOL_ADDR + MEMORY_AXI_ACPU_FREQ_VOL_SIZE)
-#define MEMORY_AXI_DDR_FREQ_VOL_SIZE		(16 + 28)
-
-#define MEMORY_AXI_ACPU_FIQ_TEST_ADDR		(MEMORY_AXI_DDR_FREQ_VOL_ADDR + MEMORY_AXI_DDR_FREQ_VOL_SIZE)
-#define MEMORY_AXI_ACPU_FIQ_TEST_SIZE		(12)
-
-#define MEMORY_AXI_ACPU_FIQ_CPU_INFO_ADDR	(MEMORY_AXI_ACPU_FIQ_TEST_ADDR + MEMORY_AXI_ACPU_FIQ_TEST_SIZE)
-#define MEMORY_AXI_ACPU_FIQ_CPU_INFO_SIZE	(24)
-
-#define MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_ADDR	(MEMORY_AXI_ACPU_FIQ_CPU_INFO_ADDR + MEMORY_AXI_ACPU_FIQ_CPU_INFO_SIZE)
-#define MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_SIZE	(20)
-
-#define MEMORY_FREQDUMP_ADDR			(MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_ADDR + MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_SIZE)
-#define MEMORY_FREQDUMP_SIZE			(64)
-
-#define MEMORY_AXI_CCPU_LOG_ADDR		(MEMORY_FREQDUMP_ADDR + MEMORY_FREQDUMP_SIZE)
-#define MEMORY_AXI_CCPU_LOG_SIZE		(4)
-
-#define MEMORY_AXI_MCU_LOG_ADDR			(MEMORY_AXI_CCPU_LOG_ADDR + MEMORY_AXI_CCPU_LOG_SIZE)
-#define MEMORY_AXI_MCU_LOG_SIZE			(4)
-
-#define MEMORY_AXI_SEC_CORE_BOOT_ADDR		(MEMORY_AXI_MCU_LOG_ADDR + MEMORY_AXI_MCU_LOG_SIZE)
-#define MEMORY_AXI_SEC_CORE_BOOT_SIZE		(4)
-
-#define MEMORY_AXI_BBP_PS_VOTE_FLAG_ADDR	(MEMORY_AXI_SEC_CORE_BOOT_ADDR + MEMORY_AXI_SEC_CORE_BOOT_SIZE)
-#define MEMORY_AXI_BBP_PS_VOTE_FLAG_SIZE	(0x4)
-
-#define POLICY_AREA_RESERVED			(MEMORY_AXI_BBP_PS_VOTE_FLAG_ADDR + MEMORY_AXI_BBP_PS_VOTE_FLAG_SIZE)
-#define POLICY_AREA_RESERVED_SIZE		(12)
-
-#define DDR_POLICY_VALID_MAGIC			(POLICY_AREA_RESERVED + POLICY_AREA_RESERVED_SIZE)
-#define DDR_POLICY_VALID_MAGIC_SIZE		(4)
-
-#define DDR_POLICY_MAX_NUM			(DDR_POLICY_VALID_MAGIC + DDR_POLICY_VALID_MAGIC_SIZE)
-#define DDR_POLICY_MAX_NUM_SIZE			(4)
-
-#define DDR_POLICY_SUPPORT_NUM			(DDR_POLICY_MAX_NUM + DDR_POLICY_MAX_NUM_SIZE)
-#define DDR_POLICY_SUPPORT_NUM_SIZE		(4)
-
-#define DDR_POLICY_CUR_POLICY			(DDR_POLICY_SUPPORT_NUM + DDR_POLICY_SUPPORT_NUM_SIZE)
-#define DDR_POLICY_CUR_POLICY_SIZE		(4)
-
-#define ACPU_POLICY_VALID_MAGIC			(DDR_POLICY_CUR_POLICY + DDR_POLICY_CUR_POLICY_SIZE)
-#define ACPU_POLICY_VALID_MAGIC_SIZE		(4)
-
-#define ACPU_POLICY_MAX_NUM			(ACPU_POLICY_VALID_MAGIC + ACPU_POLICY_VALID_MAGIC_SIZE)
-#define ACPU_POLICY_MAX_NUM_SIZE		(4)
-
-#define ACPU_POLICY_SUPPORT_NUM			(ACPU_POLICY_MAX_NUM + ACPU_POLICY_MAX_NUM_SIZE)
-#define ACPU_POLICY_SUPPORT_NUM_SIZE		(4)
-
-#define ACPU_POLICY_CUR_POLICY			(ACPU_POLICY_SUPPORT_NUM + ACPU_POLICY_SUPPORT_NUM_SIZE)
-#define ACPU_POLICY_CUR_POLICY_SIZE		(4)
-
-#define LPDDR_OPTION_ADDR			(ACPU_POLICY_CUR_POLICY + ACPU_POLICY_CUR_POLICY_SIZE)
-#define LPDDR_OPTION_SIZE			(4)
-
-#define MEMORY_AXI_DDR_DDL_ADDR			(LPDDR_OPTION_ADDR + LPDDR_OPTION_SIZE)
-#define MEMORY_AXI_DDR_DDL_SIZE			(0x2BC)
-
-#define DDR_TEST_DFS_ADDR			(MEMORY_AXI_DDR_DDL_ADDR + MEMORY_AXI_DDR_DDL_SIZE)
-#define DDR_TEST_DFS_ADDR_SIZE			(4)
-
-#define DDR_TEST_DFS_TIMES_ADDR			(DDR_TEST_DFS_ADDR + DDR_TEST_DFS_ADDR_SIZE)
-#define DDR_TEST_DFS_TIMES_ADDR_SIZE		(4)
-
-#define DDR_TEST_QOS_ADDR			(DDR_TEST_DFS_TIMES_ADDR + DDR_TEST_DFS_TIMES_ADDR_SIZE)
-#define DDR_TEST_QOS_ADDR_SIZE			(4)
-
-#define DDR_TEST_FUN_ADDR			(DDR_TEST_QOS_ADDR + DDR_TEST_QOS_ADDR_SIZE)
-#define DDR_TEST_FUN_ADDR_SIZE			(4)
-
-#define BOARD_TYPE_ADDR				(DDR_TEST_FUN_ADDR + DDR_TEST_FUN_ADDR_SIZE)
-#define BOARD_ADDR_SIZE				(4)
-#define DDR_DFS_FREQ_ADDR			(BOARD_TYPE_ADDR + BOARD_ADDR_SIZE)
-#define DDR_DFS_FREQ_SIZE			(4)
-
-#define DDR_PASR_ADDR				(DDR_DFS_FREQ_ADDR + DDR_DFS_FREQ_SIZE)
-#define DDR_PASR_SIZE				(20)
-
-#define ACPU_DFS_FREQ_ADDR			(DDR_PASR_ADDR + DDR_PASR_SIZE)
-#define ACPU_DFS_FREQ_ADDR_SIZE			(12)
-
-#define ACPU_CHIP_MAX_FREQ			(ACPU_DFS_FREQ_ADDR + ACPU_DFS_FREQ_ADDR_SIZE)
-#define ACPU_CHIP_MAX_FREQ_SIZE			(4)
-
-#define MEMORY_MEDPLL_STATE_ADDR		(ACPU_CHIP_MAX_FREQ + ACPU_CHIP_MAX_FREQ_SIZE)
-#define MEMORY_MEDPLL_STATE_SIZE		(8)
-
-#define MEMORY_CCPU_LOAD_FLAG_ADDR		(MEMORY_MEDPLL_STATE_ADDR + MEMORY_MEDPLL_STATE_SIZE)
-#define MEMORY_CCPU_LOAD_FLAG_SIZE		(4)
-
-
-#define ACPU_CORE_BITS_ADDR			(MEMORY_CCPU_LOAD_FLAG_ADDR + MEMORY_CCPU_LOAD_FLAG_SIZE)
-#define ACPU_CORE_BITS_SIZE			(4)
-
-#define ACPU_CLUSTER_IDLE_ADDR			(ACPU_CORE_BITS_ADDR + ACPU_CORE_BITS_SIZE)
-#define ACPU_CLUSTER_IDLE_SIZE			(4)
-
-#define ACPU_A53_FLAGS_ADDR			(ACPU_CLUSTER_IDLE_ADDR + ACPU_CLUSTER_IDLE_SIZE)
-#define ACPU_A53_FLAGS_SIZE			(4)
-
-#define ACPU_POWER_STATE_QOS_ADDR		(ACPU_A53_FLAGS_ADDR+ACPU_A53_FLAGS_SIZE)
-#define ACPU_POWER_STATE_QOS_SIZE		(4)
-
-#define ACPU_UNLOCK_CORE_FLAGS_ADDR		(ACPU_POWER_STATE_QOS_ADDR+ACPU_POWER_STATE_QOS_SIZE)
-#define ACPU_UNLOCK_CORE_FLAGS_SIZE		(8)
-
-#define ACPU_SUBSYS_POWERDOWN_FLAGS_ADDR	(ACPU_UNLOCK_CORE_FLAGS_ADDR + ACPU_UNLOCK_CORE_FLAGS_SIZE)
-#define ACPU_SUBSYS_POWERDOWN_FLAGS_SIZE	(4)
-
-#define ACPU_CORE_POWERDOWN_FLAGS_ADDR		(ACPU_SUBSYS_POWERDOWN_FLAGS_ADDR + ACPU_SUBSYS_POWERDOWN_FLAGS_SIZE)
-#define ACPU_CORE_POWERDOWN_FLAGS_SIZE		(4)
-
-#define ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR	(ACPU_CORE_POWERDOWN_FLAGS_ADDR + ACPU_CORE_POWERDOWN_FLAGS_SIZE)
-#define ACPU_CLUSTER_POWERDOWN_FLAGS_SIZE	(4)
-
-#define ACPU_ARM64_FLAGA			(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR + ACPU_CLUSTER_POWERDOWN_FLAGS_SIZE)
-#define ACPU_ARM64_FLAGA_SIZE			(4)
-
-#define ACPU_ARM64_FLAGB			(ACPU_ARM64_FLAGA + ACPU_ARM64_FLAGA_SIZE)
-#define ACPU_ARM64_FLAGB_SIZE			(4)
-
-#define MCU_EXCEPTION_FLAGS_ADDR		(ACPU_ARM64_FLAGB + ACPU_ARM64_FLAGB_SIZE)
-#define MCU_EXCEPTION_FLAGS_SIZE		(4)
-
-#define ACPU_MASTER_CORE_STATE_ADDR		(MCU_EXCEPTION_FLAGS_ADDR + MCU_EXCEPTION_FLAGS_SIZE)
-#define ACPU_MASTER_CORE_STATE_SIZE		(4)
-
-#define PWRCTRL_AXI_RESERVED_ADDR		(ACPU_MASTER_CORE_STATE_ADDR + ACPU_MASTER_CORE_STATE_SIZE)
-
-#endif /* __SRAM_MAP_H__ */
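Every region in the map above starts at the previous region's address plus its size. A minimal sketch that recomputes the first links of that chain and checks the resulting addresses:

#include <assert.h>
#include <stdio.h>

#define SOC_SRAM_OFF_BASE_ADDR	0xFFF80000u
#define SRAM_PM_ADDR		(SOC_SRAM_OFF_BASE_ADDR)
#define SRAM_PM_SIZE		0x00001C00u
#define SRAM_TEEOS_ADDR		(SRAM_PM_ADDR + SRAM_PM_SIZE)
#define SRAM_TEEOS_SIZE		0x00010000u
#define SRAM_GENERAL_ADDR	(SRAM_TEEOS_ADDR + SRAM_TEEOS_SIZE)

int main(void)
{
	/* 0xFFF80000 + 0x1C00 = 0xFFF81C00 */
	assert(SRAM_TEEOS_ADDR == 0xFFF81C00u);
	/* 0xFFF81C00 + 0x10000 = 0xFFF91C00 */
	assert(SRAM_GENERAL_ADDR == 0xFFF91C00u);
	printf("general-use section starts at 0x%08X\n", SRAM_GENERAL_ADDR);
	return 0;
}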
diff --git a/plat/hikey/include/partitions.h b/plat/hikey/include/partitions.h
deleted file mode 100644
index b386186..0000000
--- a/plat/hikey/include/partitions.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PARTITIONS_H__
-#define __PARTITIONS_H__
-
-#define MAX_PARTITION_NUM		128
-#define EFI_NAMELEN			36
-
-struct ptentry {
-	uint64_t	start;
-	uint64_t	length;
-	unsigned int	flags;
-	unsigned int	loadaddr;
-	unsigned int	loadsize;
-	int		id;
-	char		name[EFI_NAMELEN];
-};
-
-extern int get_partition(void);
-extern struct ptentry *find_ptn(const char *str);
-extern int update_fip_spec(void);
-
-#endif /* __PARTITIONS_H__ */
-
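find_ptn() and get_partition() are implemented elsewhere in the port; the sketch below only mirrors what the declared interface suggests, a name lookup over a ptentry table, and the table entries used are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EFI_NAMELEN 36

struct ptentry {
	uint64_t	start;
	uint64_t	length;
	unsigned int	flags;
	unsigned int	loadaddr;
	unsigned int	loadsize;
	int		id;
	char		name[EFI_NAMELEN];
};

/* Hypothetical entries, for illustration only. */
static struct ptentry ptable[] = {
	{ .start = 0x0000, .length = 0x1000, .id = 0, .name = "fastboot" },
	{ .start = 0x1000, .length = 0x4000, .id = 1, .name = "fip"      },
};

static struct ptentry *find_ptn(const char *str)
{
	for (unsigned int i = 0; i < sizeof(ptable) / sizeof(ptable[0]); i++)
		if (strncmp(ptable[i].name, str, EFI_NAMELEN) == 0)
			return &ptable[i];
	return NULL;
}

int main(void)
{
	struct ptentry *p = find_ptn("fip");

	if (p)
		printf("fip partition starts at 0x%llx\n",
		       (unsigned long long)p->start);
	return 0;
}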
diff --git a/plat/hikey/include/plat_macros.S b/plat/hikey/include/plat_macros.S
deleted file mode 100644
index 624c49f..0000000
--- a/plat/hikey/include/plat_macros.S
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <cci400.h>
-#include <gic_v2.h>
-#include "platform_def.h"
-#include "../hikey_def.h"
-
-.section .rodata.gic_reg_name, "aS"
-gicc_regs:
-	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
-gicd_pend_reg:
-	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
-newline:
-	.asciz "\n"
-spacer:
-	.asciz ":\t\t0x"
-
-
-	/* ---------------------------------------------
-	 * The below macro prints out relevant GIC
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x10, x16, sp
-	 * ---------------------------------------------
-	 */
-	.macro plat_print_gic_regs
-	mov_imm	x16, GICD_BASE
-	mov_imm	x17, GICC_BASE
-	/* Load the gicc reg list to x6 */
-	adr	x6, gicc_regs
-	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
-	ldr	w8, [x17, #GICC_HPPIR]
-	ldr	w9, [x17, #GICC_AHPPIR]
-	ldr	w10, [x17, #GICC_CTLR]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-
-	/* Print the GICD_ISPENDR regs */
-	add	x7, x16, #GICD_ISPENDR
-	adr	x4, gicd_pend_reg
-	bl	asm_print_str
-gicd_ispendr_loop:
-	sub	x4, x7, x16
-	cmp	x4, #0x280
-	b.eq	exit_print_gic_regs
-	bl	asm_print_hex
-
-	adr	x4, spacer
-	bl	asm_print_str
-
-	ldr	x4, [x7], #8
-	bl	asm_print_hex
-
-	adr	x4, newline
-	bl	asm_print_str
-	b	gicd_ispendr_loop
-exit_print_gic_regs:
-	.endm
-
-.section .rodata.cci_reg_name, "aS"
-cci_iface_regs:
-	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
-
-	/* ------------------------------------------------
-	 * The below macro prints out relevant interconnect
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x9, sp
-	 * ------------------------------------------------
-	 */
-	.macro plat_print_interconnect_regs
-	adr	x6, cci_iface_regs
-	/* Store in x7 the base address of the first interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE3_OFFSET)
-	ldr	w8, [x7, #SNOOP_CTRL_REG]
-	/* Store in x7 the base address of the second interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE4_OFFSET)
-	ldr	w9, [x7, #SNOOP_CTRL_REG]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-	.endm
diff --git a/plat/hikey/include/platform_def.h b/plat/hikey/include/platform_def.h
deleted file mode 100644
index d8694da..0000000
--- a/plat/hikey/include/platform_def.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PLATFORM_DEF_H__
-#define __PLATFORM_DEF_H__
-
-#include <arch.h>
-#include "../hikey_def.h"
-
-/*******************************************************************************
- * Platform binary types for linking
- ******************************************************************************/
-#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
-#define PLATFORM_LINKER_ARCH            aarch64
-
-/*******************************************************************************
- * Generic platform constants
- ******************************************************************************/
-
-/* Size of cacheable stacks */
-#define PLATFORM_STACK_SIZE		0x800
-
-#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
-
-#define LOADER_MEM_NAME			"loader_mem"
-
-#define BOOT_EMMC_NAME			"l-loader.bin"
-
-#define NORMAL_EMMC_NAME		"normal emmc"
-
-/* Trusted Boot Firmware BL2 */
-#define BL2_IMAGE_NAME			"bl2.bin"
-
-/* EL3 Runtime Firmware BL3-1 */
-#define BL31_IMAGE_NAME			"bl31.bin"
-
-/* SCP Firmware BL3-0 */
-#define BL30_IMAGE_NAME			"bl30.bin"
-
-/* Secure Payload BL3-2 (Trusted OS) */
-#define BL32_IMAGE_NAME			"bl32.bin"
-
-/* Non-Trusted Firmware BL3-3 */
-#define BL33_IMAGE_NAME			"bl33.bin" /* e.g. UEFI */
-
-/* Firmware Image Package */
-#define FIP_IMAGE_NAME			"fip.bin"
-
-#define PLATFORM_CACHE_LINE_SIZE	64
-#define PLATFORM_CLUSTER_COUNT		2
-#define PLATFORM_CORE_COUNT_PER_CLUSTER	4
-#define PLATFORM_CORE_COUNT             8
-#define PLATFORM_NUM_AFFS		(PLATFORM_CLUSTER_COUNT + \
-					 PLATFORM_CORE_COUNT)
-#define PLATFORM_MAX_AFFLVL             MPIDR_AFFLVL1
-
-#define MAX_IO_DEVICES			3
-#define MAX_IO_HANDLES			4
-
-/*******************************************************************************
- * Platform memory map related constants
- ******************************************************************************/
-
-/*******************************************************************************
- * BL1 is stored in XG2RAM0_HIRQ, which is 784KB large. Could we use an 8MB
- * size? The first part is BL1_RAM and the second part is TZRAM. The naming
- * isn't good enough; we need to update it later.
- ******************************************************************************/
-#define MMC_BASE			0x00000000
-#define MMC_SIZE			0x80000000
-#define MMC_LOADER_BASE			MMC_BASE		/* boot */
-#define MMC_BL1_SIZE			0x00200000
-
-#define ONCHIPROM_PARAM_BASE		(XG2RAM0_BASE + 0x700)
-#define LOADER_RAM_BASE			(XG2RAM0_BASE + 0x800)
-#define BL1_XG2RAM0_OFFSET		0x1000
-
-#define DDR_BASE			0x00000000
-
-#define MMC_DESC_BASE			(DDR_BASE + 0x0080000)
-#define MMC_DESC_SIZE			0x00080000
-#define MMC_DATA_BASE			(MMC_DESC_BASE + MMC_DESC_SIZE)
-#define MMC_DATA_SIZE			0x00800000
-
-/*******************************************************************************
- * BL1 specific defines.
- * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 base
- * addresses.
- ******************************************************************************/
-#define BL1_RO_BASE			(XG2RAM0_BASE + BL1_XG2RAM0_OFFSET)
-#define BL1_RO_LIMIT			(XG2RAM0_BASE + 0x10000)
-#define BL1_RW_BASE			(BL1_RO_LIMIT)	/* 0xf981_0000 */
-#define BL1_RW_SIZE			(BL31_LIMIT - BL1_RW_BASE)
-#define BL1_RW_LIMIT			(BL31_LIMIT)
-
-/*******************************************************************************
- * BL2 specific defines.
- ******************************************************************************/
-/* Set it in DDR first. If necessary, we can set them into SRAM again. */
-#define BL2_BASE			(BL1_RW_BASE + 0x8000)	/* 0xf981_8000 */
-#define BL2_LIMIT			(BL2_BASE + 0x40000)
-
-/*******************************************************************************
- * BL3-1 specific defines.
- ******************************************************************************/
-#define BL31_BASE			(BL2_LIMIT)	/* 0xf985_8000 */
-#define BL31_LIMIT			(BL31_BASE + 0x40000)
-
-/*******************************************************************************
- * BL3-2 specific defines.
- ******************************************************************************/
-
-/*
- * The TSP can execute either from Trusted SRAM or Trusted DRAM.
- */
-#define BL32_SRAM_BASE                  BL31_LIMIT
-#define BL32_SRAM_LIMIT                 (BL31_LIMIT+0x00080000) /* 512K */
-
-#define BL32_DRAM_BASE                  DRAM_SEC_BASE
-#define BL32_DRAM_LIMIT                 (DRAM_SEC_BASE+DRAM_SEC_SIZE)
-
-#if (PLAT_TSP_LOCATION_ID == PLAT_TRUSTED_SRAM_ID)
-#define TSP_SEC_MEM_BASE		BL32_SRAM_BASE
-#define TSP_SEC_MEM_SIZE		(BL32_SRAM_LIMIT - BL32_SRAM_BASE)
-#define BL32_BASE			BL32_SRAM_BASE
-#define BL32_LIMIT			BL32_SRAM_LIMIT
-#elif (PLAT_TSP_LOCATION_ID == PLAT_TRUSTED_DRAM_ID)
-#define TSP_SEC_MEM_BASE		BL32_DRAM_BASE
-#define TSP_SEC_MEM_SIZE		(BL32_DRAM_LIMIT - BL32_DRAM_BASE)
-#define BL32_BASE			BL32_DRAM_BASE
-#define BL32_LIMIT			BL32_DRAM_LIMIT
-#else
-#error "Unsupported PLAT_TSP_LOCATION_ID value"
-#endif
-
-/*******************************************************************************
- * BL3-0 specific defines:
- *
- * BL3-0 carries the MCU firmware. It is first loaded into a temporary buffer
- * at 0x0100_0000; BL2 then parses its sections and loads them into separate
- * buffers as needed.
- *
- ******************************************************************************/
-#define BL30_BASE			(DRAM_NS_BASE + 0x01000000)
-#define BL30_LIMIT			(DRAM_NS_BASE + 0x01100000)
-#define BL30_SIZE			(BL30_LIMIT - BL30_BASE)
-
-/*******************************************************************************
- * Load address of BL3-3 in the HiKey port
- ******************************************************************************/
-#define NS_IMAGE_OFFSET			(DRAM_BASE + 0x35000000)  /* 848MB */
-
-/*******************************************************************************
- * Platform specific page table and MMU setup constants
- ******************************************************************************/
-#define ADDR_SPACE_SIZE			(1ull << 32)
-
-#if IMAGE_BL1 || IMAGE_BL32
-# define MAX_XLAT_TABLES		3
-#endif
-
-#if IMAGE_BL2
-# define MAX_XLAT_TABLES		4
-#endif
-
-#if IMAGE_BL31
-# define MAX_XLAT_TABLES		4
-#endif
-
-#define MAX_MMAP_REGIONS		16
-
-/*******************************************************************************
- * Declarations and constants to access the mailboxes safely. Each mailbox is
- * aligned on the biggest cache line size in the platform. This is known only
- * to the platform as it might have a combination of integrated and external
- * caches. Such alignment ensures that two mailboxes do not sit on the same cache
- * line at any cache level. They could belong to different cpus/clusters &
- * get written while being protected by different locks causing corruption of
- * a valid mailbox address.
- ******************************************************************************/
-#define CACHE_WRITEBACK_SHIFT   6
-#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
-
-#endif /* __PLATFORM_DEF_H__ */
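CACHE_WRITEBACK_GRANULE is what keeps two mailboxes off the same cache line, as described in the comment above. A minimal sketch of rounding a mailbox slot up to that granule, with the expected results asserted:

#include <assert.h>
#include <stddef.h>

#define CACHE_WRITEBACK_SHIFT   6
#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)

/* Round sz up to the next multiple of the writeback granule (64 bytes). */
static size_t mailbox_slot_size(size_t sz)
{
	return (sz + CACHE_WRITEBACK_GRANULE - 1) &
	       ~(size_t)(CACHE_WRITEBACK_GRANULE - 1);
}

int main(void)
{
	assert(mailbox_slot_size(8)  == 64);	/* a single entrypoint pointer */
	assert(mailbox_slot_size(64) == 64);	/* already granule-aligned     */
	assert(mailbox_slot_size(65) == 128);	/* spills into a second line   */
	return 0;
}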
diff --git a/plat/hikey/include/sp804_timer.h b/plat/hikey/include/sp804_timer.h
deleted file mode 100644
index 6d1b664..0000000
--- a/plat/hikey/include/sp804_timer.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef	__SP804_TIMER_H__
-#define	__SP804_TIMER_H__
-
-extern void hi6220_timer_init(void);
-extern void udelay(int);
-extern void mdelay(int);
-
-#endif	/* __SP804_TIMER_H__ */
diff --git a/plat/hikey/include/usb.h b/plat/hikey/include/usb.h
deleted file mode 100644
index 8fdcc6c..0000000
--- a/plat/hikey/include/usb.h
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * Copyright (c) 2014, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DWC_USB_H__
-#define __DWC_USB_H__
-
-#define USB_DMA
-
-#define DWC_OTG_BASE			0xF72C0000
-
-#define USB_NUM_ENDPOINTS		2
-#define MAX_EPS_CHANNELS		16
-
-#define BULK_OUT_EP			1
-#define BULK_IN_EP			1
-
-#define RX_REQ_LEN			512
-#define MAX_PACKET_LEN			512
-
-#define DATA_FIFO_CONFIG		(0x780 << GDFIFOCFG_EPINFOBASE_SHIFT |\
-					 0x800 << GDFIFOCFG_GDFIFOCFG_SHIFT)
-/* RX FIFO: 2048 bytes */
-#define RX_SIZE				0x00000200
-/* Non-periodic TX FIFO: 128 bytes. start address: 0x200 * 4. */
-#define ENDPOINT_TX_SIZE		0x00200200
-
-/* EP1  TX FIFO: 1024 bytes. start address: 0x220 * 4. */
-/* EP2  TX FIFO: 1024 bytes. start address: 0x320 * 4. */
-/* EP3  TX FIFO: 1024 bytes. start address: 0x420 * 4. */
-/* EP4  TX FIFO: 1024 bytes. start address: 0x520 * 4. */
-/* EP5  TX FIFO: 128  bytes. start address: 0x620 * 4. */
-/* EP6  TX FIFO: 128  bytes. start address: 0x640 * 4. */
-/* EP7  TX FIFO: 128  bytes. start address: 0x660 * 4. */
-/* EP8  TX FIFO: 128  bytes. start address: 0x680 * 4. */
-/* EP9  TX FIFO: 128  bytes. start address: 0x6a0 * 4. */
-/* EP10 TX FIFO: 128  bytes. start address: 0x6c0 * 4. */
-/* EP11 TX FIFO: 128  bytes. start address: 0x6e0 * 4. */
-/* EP12 TX FIFO: 128  bytes. start address: 0x700 * 4. */
-/* EP13 TX FIFO: 128  bytes. start address: 0x720 * 4. */
-/* EP14 TX FIFO: 128  bytes. start address: 0x740 * 4. */
-/* EP15 TX FIFO: 128  bytes. start address: 0x760 * 4. */
-
-#define DATA_IN_ENDPOINT_TX_FIFO1	0x01000220
-#define DATA_IN_ENDPOINT_TX_FIFO2	0x01000320
-#define DATA_IN_ENDPOINT_TX_FIFO3	0x01000420
-#define DATA_IN_ENDPOINT_TX_FIFO4	0x01000520
-#define DATA_IN_ENDPOINT_TX_FIFO5	0x00200620
-#define DATA_IN_ENDPOINT_TX_FIFO6	0x00200640
-#define DATA_IN_ENDPOINT_TX_FIFO7	0x00200660
-#define DATA_IN_ENDPOINT_TX_FIFO8	0x00200680
-#define DATA_IN_ENDPOINT_TX_FIFO9	0x002006a0
-#define DATA_IN_ENDPOINT_TX_FIFO10	0x002006c0
-#define DATA_IN_ENDPOINT_TX_FIFO11	0x002006e0
-#define DATA_IN_ENDPOINT_TX_FIFO12	0x00200700
-#define DATA_IN_ENDPOINT_TX_FIFO13	0x00200720
-#define DATA_IN_ENDPOINT_TX_FIFO14	0x00200740
-#define DATA_IN_ENDPOINT_TX_FIFO15	0x00200760
-
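The TX FIFO words above appear to pack the depth (in 32-bit words) into bits [31:16] and the word start address into bits [15:0], which matches the byte counts and start addresses quoted in the comments. A small sketch that decodes two of them under that assumption:

#include <assert.h>
#include <stdint.h>

#define ENDPOINT_TX_SIZE           0x00200200
#define DATA_IN_ENDPOINT_TX_FIFO1  0x01000220

/* Assumed layout: depth in words in [31:16], word start address in [15:0]. */
static uint32_t fifo_depth_bytes(uint32_t cfg) { return (cfg >> 16) * 4; }
static uint32_t fifo_start_words(uint32_t cfg) { return cfg & 0xffff; }

int main(void)
{
	assert(fifo_depth_bytes(ENDPOINT_TX_SIZE) == 128);		/* 0x20 words  */
	assert(fifo_start_words(ENDPOINT_TX_SIZE) == 0x200);
	assert(fifo_depth_bytes(DATA_IN_ENDPOINT_TX_FIFO1) == 1024);	/* 0x100 words */
	assert(fifo_start_words(DATA_IN_ENDPOINT_TX_FIFO1) == 0x220);
	return 0;
}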
-typedef struct {
-	unsigned char		type;
-	unsigned char		request;
-	unsigned short		value;
-	unsigned short		index;
-	unsigned short		length;
-} setup_packet;
-
-struct ept_queue_item {
-	unsigned int		next;
-	unsigned int		info;
-};
-
-struct usb_request {
-	struct ept_queue_item	*item;
-	void			*buf;
-	unsigned int		length;
-	void (*complete)(unsigned int actual, int status);
-	void			*context;
-};
-
-/* DWC_OTG register descriptor */
-/* Global CSR MAP */
-#define GLOBAL_CSR_BASE		(DWC_OTG_BASE)
-/* Device mode CSR MAP */
-#define DEVICE_CSR_BASE		(DWC_OTG_BASE+0x800)
-/* Device mode IN endpoint CSR MAP */
-#define DEVICE_INEP_BASE	(DWC_OTG_BASE+0x900)
-/* Device mode OUT endpoint CSR MAP */
-#define DEVICE_OUTEP_BASE	(DWC_OTG_BASE+0xB00)
-
-/*** OTG LINK CORE REGISTERS ***/
-/* Core Global Registers */
-#define GOTGCTL			(DWC_OTG_BASE + 0x000)
-#define GOTGINT			(DWC_OTG_BASE + 0x004)
-#define GOTGINT_DBNCE_DONE		(1 << 19)
-#define GOTGINT_A_DEV_TOUT_CHG		(1 << 18)
-#define GOTGINT_HST_NEG_DET		(1 << 17)
-#define GOTGINT_HST_NEG_SUC_STS_CHNG	(1 << 9)
-#define GOTGINT_SES_REQ_SUC_STS_CHNG	(1 << 8)
-#define GOTGINT_SES_END_DET		(1 << 2)
-
-#define GAHBCFG			(DWC_OTG_BASE + 0x008)
-#define GAHBCFG_P_TXF_EMP_LVL           (1 << 8)
-#define GAHBCFG_NP_TXF_EMP_LVL          (1 << 7)
-#define GAHBCFG_DMA_EN                  (1 << 5)
-#define GAHBCFG_GLBL_INTR_EN            (1 << 0)
-#define GAHBCFG_CTRL_MASK               (GAHBCFG_P_TXF_EMP_LVL | \
-					 GAHBCFG_NP_TXF_EMP_LVL | \
-					 GAHBCFG_DMA_EN | \
-					 GAHBCFG_GLBL_INTR_EN)
-
-#define GUSBCFG			(DWC_OTG_BASE + 0x00C)
-#define GRSTCTL			(DWC_OTG_BASE + 0x010)
-#define GRSTCTL_AHBIDLE		(1 << 31)
-#define GRSTCTL_CSFTRST		(1 << 0)
-
-#define GINTSTS			(DWC_OTG_BASE + 0x014)
-#define GINTMSK			(DWC_OTG_BASE + 0x018)
-#define GINTSTS_WKUPINT			(1 << 31)
-#define GINTSTS_SESSREQINT		(1 << 30)
-#define GINTSTS_DISCONNINT		(1 << 29)
-#define GINTSTS_CONIDSTSCHNG		(1 << 28)
-#define GINTSTS_LPMTRANRCVD		(1 << 27)
-#define GINTSTS_PTXFEMP			(1 << 26)
-#define GINTSTS_HCHINT			(1 << 25)
-#define GINTSTS_PRTINT			(1 << 24)
-#define GINTSTS_RESETDET		(1 << 23)
-#define GINTSTS_FET_SUSP		(1 << 22)
-#define GINTSTS_INCOMPL_IP		(1 << 21)
-#define GINTSTS_INCOMPL_SOIN		(1 << 20)
-#define GINTSTS_OEPINT			(1 << 19)
-#define GINTSTS_IEPINT			(1 << 18)
-#define GINTSTS_EPMIS			(1 << 17)
-#define GINTSTS_RESTOREDONE		(1 << 16)
-#define GINTSTS_EOPF			(1 << 15)
-#define GINTSTS_ISOUTDROP		(1 << 14)
-#define GINTSTS_ENUMDONE		(1 << 13)
-#define GINTSTS_USBRST			(1 << 12)
-#define GINTSTS_USBSUSP			(1 << 11)
-#define GINTSTS_ERLYSUSP		(1 << 10)
-#define GINTSTS_I2CINT			(1 << 9)
-#define GINTSTS_ULPI_CK_INT		(1 << 8)
-#define GINTSTS_GOUTNAKEFF		(1 << 7)
-#define GINTSTS_GINNAKEFF		(1 << 6)
-#define GINTSTS_NPTXFEMP		(1 << 5)
-#define GINTSTS_RXFLVL			(1 << 4)
-#define GINTSTS_SOF			(1 << 3)
-#define GINTSTS_OTGINT			(1 << 2)
-#define GINTSTS_MODEMIS			(1 << 1)
-#define GINTSTS_CURMODE_HOST		(1 << 0)
-
-#define GRXSTSR			(DWC_OTG_BASE + 0x01C)
-#define GRXSTSP			(DWC_OTG_BASE + 0x020)
-#define GRXFSIZ			(DWC_OTG_BASE + 0x024)
-#define GNPTXFSIZ		(DWC_OTG_BASE + 0x028)
-#define GNPTXSTS		(DWC_OTG_BASE + 0x02C)
-
-#define GHWCFG1			(DWC_OTG_BASE + 0x044)
-#define GHWCFG2			(DWC_OTG_BASE + 0x048)
-#define GHWCFG3			(DWC_OTG_BASE + 0x04c)
-#define GHWCFG4			(DWC_OTG_BASE + 0x050)
-#define GLPMCFG			(DWC_OTG_BASE + 0x054)
-
-#define GDFIFOCFG		(DWC_OTG_BASE + 0x05c)
-#define GDFIFOCFG_EPINFOBASE_MASK	(0xffff << 16)
-#define GDFIFOCFG_EPINFOBASE_SHIFT	16
-#define GDFIFOCFG_GDFIFOCFG_MASK	(0xffff << 0)
-#define GDFIFOCFG_GDFIFOCFG_SHIFT	0
-
-
-#define HPTXFSIZ		(DWC_OTG_BASE + 0x100)
-#define DIEPTXF(x)		(DWC_OTG_BASE + 0x100 + 4 * (x))
-#define DIEPTXF1		(DWC_OTG_BASE + 0x104)
-#define DIEPTXF2		(DWC_OTG_BASE + 0x108)
-#define DIEPTXF3		(DWC_OTG_BASE + 0x10C)
-#define DIEPTXF4		(DWC_OTG_BASE + 0x110)
-#define DIEPTXF5		(DWC_OTG_BASE + 0x114)
-#define DIEPTXF6		(DWC_OTG_BASE + 0x118)
-#define DIEPTXF7		(DWC_OTG_BASE + 0x11C)
-#define DIEPTXF8		(DWC_OTG_BASE + 0x120)
-#define DIEPTXF9		(DWC_OTG_BASE + 0x124)
-#define DIEPTXF10		(DWC_OTG_BASE + 0x128)
-#define DIEPTXF11		(DWC_OTG_BASE + 0x12C)
-#define DIEPTXF12		(DWC_OTG_BASE + 0x130)
-#define DIEPTXF13		(DWC_OTG_BASE + 0x134)
-#define DIEPTXF14		(DWC_OTG_BASE + 0x138)
-#define DIEPTXF15		(DWC_OTG_BASE + 0x13C)
-
-/*** HOST MODE REGISTERS ***/
-/* Host Global Registers */
-#define HCFG			(DWC_OTG_BASE + 0x400)
-#define HFIR			(DWC_OTG_BASE + 0x404)
-#define HFNUM			(DWC_OTG_BASE + 0x408)
-#define HPTXSTS			(DWC_OTG_BASE + 0x410)
-#define HAINT			(DWC_OTG_BASE + 0x414)
-#define HAINTMSK		(DWC_OTG_BASE + 0x418)
-
-/* Host Port Control and Status Registers */
-#define HPRT			(DWC_OTG_BASE + 0x440)
-
-/* Host Channel-Specific Registers */
-#define HCCHAR(x)		(DWC_OTG_BASE + 0x500 + 0x20 * (x))
-#define HCSPLT(x)		(DWC_OTG_BASE + 0x504 + 0x20 * (x))
-#define HCINT(x)		(DWC_OTG_BASE + 0x508 + 0x20 * (x))
-#define HCINTMSK(x)		(DWC_OTG_BASE + 0x50C + 0x20 * (x))
-#define HCTSIZ(x)		(DWC_OTG_BASE + 0x510 + 0x20 * (x))
-#define HCDMA(x)		(DWC_OTG_BASE + 0x514 + 0x20 * (x))
-#define HCCHAR0			(DWC_OTG_BASE + 0x500)
-#define HCSPLT0			(DWC_OTG_BASE + 0x504)
-#define HCINT0			(DWC_OTG_BASE + 0x508)
-#define HCINTMSK0		(DWC_OTG_BASE + 0x50C)
-#define HCTSIZ0			(DWC_OTG_BASE + 0x510)
-#define HCDMA0			(DWC_OTG_BASE + 0x514)
-#define HCCHAR1			(DWC_OTG_BASE + 0x520)
-#define HCSPLT1			(DWC_OTG_BASE + 0x524)
-#define HCINT1			(DWC_OTG_BASE + 0x528)
-#define HCINTMSK1		(DWC_OTG_BASE + 0x52C)
-#define HCTSIZ1			(DWC_OTG_BASE + 0x530)
-#define HCDMA1			(DWC_OTG_BASE + 0x534)
-#define HCCHAR2			(DWC_OTG_BASE + 0x540)
-#define HCSPLT2			(DWC_OTG_BASE + 0x544)
-#define HCINT2			(DWC_OTG_BASE + 0x548)
-#define HCINTMSK2		(DWC_OTG_BASE + 0x54C)
-#define HCTSIZ2			(DWC_OTG_BASE + 0x550)
-#define HCDMA2			(DWC_OTG_BASE + 0x554)
-#define HCCHAR3			(DWC_OTG_BASE + 0x560)
-#define HCSPLT3			(DWC_OTG_BASE + 0x564)
-#define HCINT3			(DWC_OTG_BASE + 0x568)
-#define HCINTMSK3   		(DWC_OTG_BASE + 0x56C)
-#define HCTSIZ3     		(DWC_OTG_BASE + 0x570)
-#define HCDMA3      		(DWC_OTG_BASE + 0x574)
-#define HCCHAR4     		(DWC_OTG_BASE + 0x580)
-#define HCSPLT4     		(DWC_OTG_BASE + 0x584)
-#define HCINT4      		(DWC_OTG_BASE + 0x588)
-#define HCINTMSK4   		(DWC_OTG_BASE + 0x58C)
-#define HCTSIZ4     		(DWC_OTG_BASE + 0x590)
-#define HCDMA4      		(DWC_OTG_BASE + 0x594)
-#define HCCHAR5     		(DWC_OTG_BASE + 0x5A0)
-#define HCSPLT5     		(DWC_OTG_BASE + 0x5A4)
-#define HCINT5      		(DWC_OTG_BASE + 0x5A8)
-#define HCINTMSK5   		(DWC_OTG_BASE + 0x5AC)
-#define HCTSIZ5     		(DWC_OTG_BASE + 0x5B0)
-#define HCDMA5      		(DWC_OTG_BASE + 0x5B4)
-#define HCCHAR6     		(DWC_OTG_BASE + 0x5C0)
-#define HCSPLT6     		(DWC_OTG_BASE + 0x5C4)
-#define HCINT6      		(DWC_OTG_BASE + 0x5C8)
-#define HCINTMSK6   		(DWC_OTG_BASE + 0x5CC)
-#define HCTSIZ6     		(DWC_OTG_BASE + 0x5D0)
-#define HCDMA6      		(DWC_OTG_BASE + 0x5D4)
-#define HCCHAR7     		(DWC_OTG_BASE + 0x5E0)
-#define HCSPLT7     		(DWC_OTG_BASE + 0x5E4)
-#define HCINT7      		(DWC_OTG_BASE + 0x5E8)
-#define HCINTMSK7   		(DWC_OTG_BASE + 0x5EC)
-#define HCTSIZ7     		(DWC_OTG_BASE + 0x5F0)
-#define HCDMA7      		(DWC_OTG_BASE + 0x5F4)
-#define HCCHAR8     		(DWC_OTG_BASE + 0x600)
-#define HCSPLT8     		(DWC_OTG_BASE + 0x604)
-#define HCINT8      		(DWC_OTG_BASE + 0x608)
-#define HCINTMSK8   		(DWC_OTG_BASE + 0x60C)
-#define HCTSIZ8     		(DWC_OTG_BASE + 0x610)
-#define HCDMA8      		(DWC_OTG_BASE + 0x614)
-#define HCCHAR9     		(DWC_OTG_BASE + 0x620)
-#define HCSPLT9     		(DWC_OTG_BASE + 0x624)
-#define HCINT9      		(DWC_OTG_BASE + 0x628)
-#define HCINTMSK9   		(DWC_OTG_BASE + 0x62C)
-#define HCTSIZ9     		(DWC_OTG_BASE + 0x630)
-#define HCDMA9      		(DWC_OTG_BASE + 0x634)
-#define HCCHAR10    		(DWC_OTG_BASE + 0x640)
-#define HCSPLT10    		(DWC_OTG_BASE + 0x644)
-#define HCINT10     		(DWC_OTG_BASE + 0x648)
-#define HCINTMSK10  		(DWC_OTG_BASE + 0x64C)
-#define HCTSIZ10    		(DWC_OTG_BASE + 0x650)
-#define HCDMA10     		(DWC_OTG_BASE + 0x654)
-#define HCCHAR11    		(DWC_OTG_BASE + 0x660)
-#define HCSPLT11    		(DWC_OTG_BASE + 0x664)
-#define HCINT11     		(DWC_OTG_BASE + 0x668)
-#define HCINTMSK11  		(DWC_OTG_BASE + 0x66C)
-#define HCTSIZ11    		(DWC_OTG_BASE + 0x670)
-#define HCDMA11     		(DWC_OTG_BASE + 0x674)
-#define HCCHAR12    		(DWC_OTG_BASE + 0x680)
-#define HCSPLT12    		(DWC_OTG_BASE + 0x684)
-#define HCINT12     		(DWC_OTG_BASE + 0x688)
-#define HCINTMSK12  		(DWC_OTG_BASE + 0x68C)
-#define HCTSIZ12    		(DWC_OTG_BASE + 0x690)
-#define HCDMA12     		(DWC_OTG_BASE + 0x694)
-#define HCCHAR13    		(DWC_OTG_BASE + 0x6A0)
-#define HCSPLT13    		(DWC_OTG_BASE + 0x6A4)
-#define HCINT13     		(DWC_OTG_BASE + 0x6A8)
-#define HCINTMSK13  		(DWC_OTG_BASE + 0x6AC)
-#define HCTSIZ13    		(DWC_OTG_BASE + 0x6B0)
-#define HCDMA13     		(DWC_OTG_BASE + 0x6B4)
-#define HCCHAR14    		(DWC_OTG_BASE + 0x6C0)
-#define HCSPLT14    		(DWC_OTG_BASE + 0x6C4)
-#define HCINT14     		(DWC_OTG_BASE + 0x6C8)
-#define HCINTMSK14  		(DWC_OTG_BASE + 0x6CC)
-#define HCTSIZ14    		(DWC_OTG_BASE + 0x6D0)
-#define HCDMA14     		(DWC_OTG_BASE + 0x6D4)
-#define HCCHAR15    		(DWC_OTG_BASE + 0x6E0)
-#define HCSPLT15    		(DWC_OTG_BASE + 0x6E4)
-#define HCINT15     		(DWC_OTG_BASE + 0x6E8)
-#define HCINTMSK15  		(DWC_OTG_BASE + 0x6EC)
-#define HCTSIZ15    		(DWC_OTG_BASE + 0x6F0)
-#define HCDMA15     		(DWC_OTG_BASE + 0x6F4)
-
-/*** DEVICE MODE REGISTERS ***/
-/* Device Global Registers */
-#define DCFG        		(DWC_OTG_BASE + 0x800)
-#define DCFG_EPMISCNT_MASK		(0x1f << 18)
-#define DCFG_EPMISCNT_SHIFT		18
-#define DCFG_NZ_STS_OUT_HSHK		(1 << 2)
-
-#define DCTL        		(DWC_OTG_BASE + 0x804)
-#define DSTS        		(DWC_OTG_BASE + 0x808)
-#define DIEPMSK     		(DWC_OTG_BASE + 0x810)
-#define DOEPMSK     		(DWC_OTG_BASE + 0x814)
-#define DAINT       		(DWC_OTG_BASE + 0x818)
-#define DAINTMSK    		(DWC_OTG_BASE + 0x81C)
-#define DAINT_OUTEP_SHIFT		16
-#define DAINT_OUTEP(_x)			(1 << ((_x) + 16))
-#define DAINT_INEP(_x)			(1 << (_x))
-
-#define DTKNQR1     		(DWC_OTG_BASE + 0x820)
-#define DTKNQR2     		(DWC_OTG_BASE + 0x824)
-#define DVBUSDIS    		(DWC_OTG_BASE + 0x828)
-#define DVBUSPULSE  		(DWC_OTG_BASE + 0x82C)
-#define DTHRCTL     		(DWC_OTG_BASE + 0x830)
-
-/* Device Logical IN Endpoint-Specific Registers */
-#define DIEPCTL(x)  		(DWC_OTG_BASE + 0x900 + 0x20 * (x))
-#define DIEPINT(x)  		(DWC_OTG_BASE + 0x908 + 0x20 * (x))
-#define DIEPTSIZ(x) 		(DWC_OTG_BASE + 0x910 + 0x20 * (x))
-#define DIEPDMA(x)  		(DWC_OTG_BASE + 0x914 + 0x20 * (x))
-#define DTXFSTS(x)  		(DWC_OTG_BASE + 0x918 + 0x20 * (x))
-
-#define DIEPCTL0    		(DWC_OTG_BASE + 0x900)
-#define DIEPINT0    		(DWC_OTG_BASE + 0x908)
-#define DIEPTSIZ0   		(DWC_OTG_BASE + 0x910)
-#define DIEPDMA0    		(DWC_OTG_BASE + 0x914)
-#define DIEPCTL1    		(DWC_OTG_BASE + 0x920)
-#define DIEPINT1    		(DWC_OTG_BASE + 0x928)
-#define DIEPTSIZ1   		(DWC_OTG_BASE + 0x930)
-#define DIEPDMA1    		(DWC_OTG_BASE + 0x934)
-#define DIEPCTL2    		(DWC_OTG_BASE + 0x940)
-#define DIEPINT2    		(DWC_OTG_BASE + 0x948)
-#define DIEPTSIZ2  		(DWC_OTG_BASE + 0x950)
-#define DIEPDMA2    		(DWC_OTG_BASE + 0x954)
-#define DIEPCTL3    		(DWC_OTG_BASE + 0x960)
-#define DIEPINT3    		(DWC_OTG_BASE + 0x968)
-#define DIEPTSIZ3   		(DWC_OTG_BASE + 0x970)
-#define DIEPDMA3    		(DWC_OTG_BASE + 0x974)
-#define DIEPCTL4    		(DWC_OTG_BASE + 0x980)
-#define DIEPINT4    		(DWC_OTG_BASE + 0x988)
-#define DIEPTSIZ4   		(DWC_OTG_BASE + 0x990)
-#define DIEPDMA4    		(DWC_OTG_BASE + 0x994)
-#define DIEPCTL5    		(DWC_OTG_BASE + 0x9A0)
-#define DIEPINT5    		(DWC_OTG_BASE + 0x9A8)
-#define DIEPTSIZ5   		(DWC_OTG_BASE + 0x9B0)
-#define DIEPDMA5    		(DWC_OTG_BASE + 0x9B4)
-#define DIEPCTL6    		(DWC_OTG_BASE + 0x9C0)
-#define DIEPINT6    		(DWC_OTG_BASE + 0x9C8)
-#define DIEPTSIZ6   		(DWC_OTG_BASE + 0x9D0)
-#define DIEPDMA6    		(DWC_OTG_BASE + 0x9D4)
-#define DIEPCTL7    		(DWC_OTG_BASE + 0x9E0)
-#define DIEPINT7    		(DWC_OTG_BASE + 0x9E8)
-#define DIEPTSIZ7   		(DWC_OTG_BASE + 0x9F0)
-#define DIEPDMA7    		(DWC_OTG_BASE + 0x9F4)
-#define DIEPCTL8    		(DWC_OTG_BASE + 0xA00)
-#define DIEPINT8    		(DWC_OTG_BASE + 0xA08)
-#define DIEPTSIZ8   		(DWC_OTG_BASE + 0xA10)
-#define DIEPDMA8    		(DWC_OTG_BASE + 0xA14)
-#define DIEPCTL9    		(DWC_OTG_BASE + 0xA20)
-#define DIEPINT9    		(DWC_OTG_BASE + 0xA28)
-#define DIEPTSIZ9   		(DWC_OTG_BASE + 0xA30)
-#define DIEPDMA9    		(DWC_OTG_BASE + 0xA34)
-#define DIEPCTL10   		(DWC_OTG_BASE + 0xA40)
-#define DIEPINT10   		(DWC_OTG_BASE + 0xA48)
-#define DIEPTSIZ10  		(DWC_OTG_BASE + 0xA50)
-#define DIEPDMA10   		(DWC_OTG_BASE + 0xA54)
-#define DIEPCTL11   		(DWC_OTG_BASE + 0xA60)
-#define DIEPINT11   		(DWC_OTG_BASE + 0xA68)
-#define DIEPTSIZ11  		(DWC_OTG_BASE + 0xA70)
-#define DIEPDMA11   		(DWC_OTG_BASE + 0xA74)
-#define DIEPCTL12   		(DWC_OTG_BASE + 0xA80)
-#define DIEPINT12   		(DWC_OTG_BASE + 0xA88)
-#define DIEPTSIZ12  		(DWC_OTG_BASE + 0xA90)
-#define DIEPDMA12   		(DWC_OTG_BASE + 0xA94)
-#define DIEPCTL13   		(DWC_OTG_BASE + 0xAA0)
-#define DIEPINT13   		(DWC_OTG_BASE + 0xAA8)
-#define DIEPTSIZ13  		(DWC_OTG_BASE + 0xAB0)
-#define DIEPDMA13   		(DWC_OTG_BASE + 0xAB4)
-#define DIEPCTL14   		(DWC_OTG_BASE + 0xAC0)
-#define DIEPINT14   		(DWC_OTG_BASE + 0xAC8)
-#define DIEPTSIZ14  		(DWC_OTG_BASE + 0xAD0)
-#define DIEPDMA14   		(DWC_OTG_BASE + 0xAD4)
-#define DIEPCTL15   		(DWC_OTG_BASE + 0xAE0)
-#define DIEPINT15   		(DWC_OTG_BASE + 0xAE8)
-#define DIEPTSIZ15  		(DWC_OTG_BASE + 0xAF0)
-#define DIEPDMA15   		(DWC_OTG_BASE + 0xAF4)
-
-/* Device Logical OUT Endpoint-Specific Registers */
-#define DOEPCTL(x)  		(DWC_OTG_BASE + 0xB00 + 0x20 * (x))
-#define DXEPCTL_EPENA			(1 << 31)
-#define DXEPCTL_EPDIS			(1 << 30)
-#define DXEPCTL_SETD1PID		(1 << 29)
-#define DXEPCTL_SETODDFR		(1 << 29)
-#define DXEPCTL_SETD0PID		(1 << 28)
-#define DXEPCTL_SETEVENFR		(1 << 28)
-#define DXEPCTL_SNAK			(1 << 27)
-#define DXEPCTL_CNAK			(1 << 26)
-#define DXEPCTL_NAKSTS			(1 << 17)
-#define DXEPCTL_DPID			(1 << 16)
-#define DXEPCTL_EOFRNUM			(1 << 16)
-#define DXEPCTL_USBACTEP		(1 << 15)
-#define DXEPCTL_NEXTEP_MASK		(0xf << 11)
-#define DXEPCTL_NEXTEP_SHIFT		11
-#define DXEPCTL_NEXTEP_LIMIT		0xf
-#define DXEPCTL_NEXTEP(_x)		((_x) << 11)
-
-
-#define DOEPINT(x)  		(DWC_OTG_BASE + 0xB08 + 0x20 * (x))
-#define DXEPINT_INEPNAKEFF              (1 << 6)
-#define DXEPINT_BACK2BACKSETUP          (1 << 6)
-#define DXEPINT_INTKNEPMIS              (1 << 5)
-#define DXEPINT_INTKNTXFEMP             (1 << 4)
-#define DXEPINT_OUTTKNEPDIS             (1 << 4)
-#define DXEPINT_TIMEOUT                 (1 << 3)
-#define DXEPINT_SETUP                   (1 << 3)
-#define DXEPINT_AHBERR                  (1 << 2)
-#define DXEPINT_EPDISBLD                (1 << 1)
-#define DXEPINT_XFERCOMPL               (1 << 0)
-
-#define DOEPTSIZ(x) 		(DWC_OTG_BASE + 0xB10 + 0x20 * (x))
-#define DXEPTSIZ_MC_MASK		(0x3 << 29)
-#define DXEPTSIZ_MC_SHIFT		29
-#define DXEPTSIZ_MC_LIMIT		0x3
-#define DXEPTSIZ_MC(_x)			((_x) << 29)
-#define DXEPTSIZ_PKTCNT_MASK		(0x3ff << 19)
-#define DXEPTSIZ_PKTCNT_SHIFT		19
-#define DXEPTSIZ_PKTCNT_LIMIT		0x3ff
-#define DXEPTSIZ_PKTCNT_GET(_v)		(((_v) >> 19) & 0x3ff)
-#define DXEPTSIZ_PKTCNT(_x)		((_x) << 19)
-#define DXEPTSIZ_XFERSIZE_MASK		(0x7ffff << 0)
-#define DXEPTSIZ_XFERSIZE_SHIFT		0
-#define DXEPTSIZ_XFERSIZE_LIMIT		0x7ffff
-#define DXEPTSIZ_XFERSIZE_GET(_v)	(((_v) >> 0) & 0x7ffff)
-#define DXEPTSIZ_XFERSIZE(_x)		((_x) << 0)
-
-#define DOEPDMA(x)  		(DWC_OTG_BASE + 0xB14 + 0x20 * (x))
-#define DOEPCTL0    		(DWC_OTG_BASE + 0xB00)
-#define DOEPINT0    		(DWC_OTG_BASE + 0xB08)
-#define DOEPTSIZ0   		(DWC_OTG_BASE + 0xB10)
-#define DOEPTSIZ0_SUPCNT_MASK		(0x3 << 29)
-#define DOEPTSIZ0_SUPCNT_SHIFT		29
-#define DOEPTSIZ0_SUPCNT_LIMIT		0x3
-#define DOEPTSIZ0_SUPCNT(_x)		((_x) << 29)
-#define DOEPTSIZ0_PKTCNT		(1 << 19)
-#define DOEPTSIZ0_XFERSIZE_MASK		(0x7f << 0)
-#define DOEPTSIZ0_XFERSIZE_SHIFT	0
-
-#define DOEPDMA0    		(DWC_OTG_BASE + 0xB14)
-#define DOEPCTL1    		(DWC_OTG_BASE + 0xB20)
-#define DOEPINT1    		(DWC_OTG_BASE + 0xB28)
-#define DOEPTSIZ1   		(DWC_OTG_BASE + 0xB30)
-#define DOEPDMA1    		(DWC_OTG_BASE + 0xB34)
-#define DOEPCTL2    		(DWC_OTG_BASE + 0xB40)
-#define DOEPINT2    		(DWC_OTG_BASE + 0xB48)
-#define DOEPTSIZ2   		(DWC_OTG_BASE + 0xB50)
-#define DOEPDMA2    		(DWC_OTG_BASE + 0xB54)
-#define DOEPCTL3    		(DWC_OTG_BASE + 0xB60)
-#define DOEPINT3    		(DWC_OTG_BASE + 0xB68)
-#define DOEPTSIZ3   		(DWC_OTG_BASE + 0xB70)
-#define DOEPDMA3    		(DWC_OTG_BASE + 0xB74)
-#define DOEPCTL4    		(DWC_OTG_BASE + 0xB80)
-#define DOEPINT4    		(DWC_OTG_BASE + 0xB88)
-#define DOEPTSIZ4   		(DWC_OTG_BASE + 0xB90)
-#define DOEPDMA4    		(DWC_OTG_BASE + 0xB94)
-#define DOEPCTL5    		(DWC_OTG_BASE + 0xBA0)
-#define DOEPINT5    		(DWC_OTG_BASE + 0xBA8)
-#define DOEPTSIZ5   		(DWC_OTG_BASE + 0xBB0)
-#define DOEPDMA5    		(DWC_OTG_BASE + 0xBB4)
-#define DOEPCTL6    		(DWC_OTG_BASE + 0xBC0)
-#define DOEPINT6    		(DWC_OTG_BASE + 0xBC8)
-#define DOEPTSIZ6   		(DWC_OTG_BASE + 0xBD0)
-#define DOEPDMA6    		(DWC_OTG_BASE + 0xBD4)
-#define DOEPCTL7    		(DWC_OTG_BASE + 0xBE0)
-#define DOEPINT7    		(DWC_OTG_BASE + 0xBE8)
-#define DOEPTSIZ7   		(DWC_OTG_BASE + 0xBF0)
-#define DOEPDMA7    		(DWC_OTG_BASE + 0xBF4)
-#define DOEPCTL8    		(DWC_OTG_BASE + 0xC00)
-#define DOEPINT8    		(DWC_OTG_BASE + 0xC08)
-#define DOEPTSIZ8   		(DWC_OTG_BASE + 0xC10)
-#define DOEPDMA8    		(DWC_OTG_BASE + 0xC14)
-#define DOEPCTL9    		(DWC_OTG_BASE + 0xC20)
-#define DOEPINT9    		(DWC_OTG_BASE + 0xC28)
-#define DOEPTSIZ9   		(DWC_OTG_BASE + 0xC30)
-#define DOEPDMA9    		(DWC_OTG_BASE + 0xC34)
-#define DOEPCTL10   		(DWC_OTG_BASE + 0xC40)
-#define DOEPINT10   		(DWC_OTG_BASE + 0xC48)
-#define DOEPTSIZ10  		(DWC_OTG_BASE + 0xC50)
-#define DOEPDMA10   		(DWC_OTG_BASE + 0xC54)
-#define DOEPCTL11   		(DWC_OTG_BASE + 0xC60)
-#define DOEPINT11   		(DWC_OTG_BASE + 0xC68)
-#define DOEPTSIZ11  		(DWC_OTG_BASE + 0xC70)
-#define DOEPDMA11   		(DWC_OTG_BASE + 0xC74)
-#define DOEPCTL12   		(DWC_OTG_BASE + 0xC80)
-#define DOEPINT12   		(DWC_OTG_BASE + 0xC88)
-#define DOEPTSIZ12  		(DWC_OTG_BASE + 0xC90)
-#define DOEPDMA12   		(DWC_OTG_BASE + 0xC94)
-#define DOEPCTL13   		(DWC_OTG_BASE + 0xCA0)
-#define DOEPINT13   		(DWC_OTG_BASE + 0xCA8)
-#define DOEPTSIZ13  		(DWC_OTG_BASE + 0xCB0)
-#define DOEPDMA13   		(DWC_OTG_BASE + 0xCB4)
-#define DOEPCTL14   		(DWC_OTG_BASE + 0xCC0)
-#define DOEPINT14   		(DWC_OTG_BASE + 0xCC8)
-#define DOEPTSIZ14  		(DWC_OTG_BASE + 0xCD0)
-#define DOEPDMA14   		(DWC_OTG_BASE + 0xCD4)
-#define DOEPCTL15   		(DWC_OTG_BASE + 0xCE0)
-#define DOEPINT15   		(DWC_OTG_BASE + 0xCE8)
-#define DOEPTSIZ15  		(DWC_OTG_BASE + 0xCF0)
-#define DOEPDMA15   		(DWC_OTG_BASE + 0xCF4)
-
-/* Power and Clock Gating Register */
-#define PCGCCTL			(DWC_OTG_BASE + 0xE00)
-
-#define EP0FIFO			(DWC_OTG_BASE + 0x1000)
-
-#define PERI_CTRL16_PICOPHY_SIDDQ_BIT		(1<<0)
-#define PERI_CTRL16_PICOPHY_TXPREEMPHASISTUNE	(1<<31)
-#define PERI_CTRL15_HSICPHY_SIDDQ_BIT		(1<<16)
-#define PERI_CTRL14_NANOPHY_SIDDQ_BIT		(1<<0)
-#define PERI_CTRL0_USB2DVC_NANOPHY_BIT		(1<<7)
-#define NANOPHY_DMPULLDOWN    (1 << 6)    /* bit[6]: nanophy_dmpulldown; set to 1'b0 */
-#define NANOPHY_DPPULLDOWN    (1 << 5)    /* bit[5]: nanophy_dppulldown; set to 1'b0 */
-
-#define EN_LDO4_INT (1 << 4)
-#define EN_LDO8_INT (1 << 4)
-
-/* SCPEREN1/DIS1 */
-#define GT_CLK_USBHSICPHY480            (1<<26)
-#define GT_CLK_USBHSICPHY               (1<<25)
-#define GT_CLK_USBPICOPHY               (1<<24)
-#define GT_CLK_USBNANOPHY               (1<<23)
-/* SCPEREN3/DIS3 */
-#define GT_CLK_USB2HST                  (1<<18)
-#define GT_CLK_USB2DVC                  (1<<17)
-/* SCPERRSTEN3 */
-#define IP_RST_PICOPHY_POR              (1<<31)
-#define IP_RST_HSICPHY_POR              (1<<30)
-#define IP_RST_NANOPHY_POR              (1<<29)
-#define IP_RST_USB2DVC_PHY              (1<<28)
-#define IP_RST_USB2H_UTMI1              (1<<21)
-#define IP_RST_USB2H_UTMI0              (1<<20)
-#define IP_RST_USB2H_PHY                (1<<19)
-#define IP_RST_USB2HST                  (1<<18)
-#define IP_RST_USB2DVC                  (1<<17)
-/* SCPERRSTEN1 */
-#define IP_RST_HSICPHY                  (1<<25)
-#define IP_RST_PICOPHY                  (1<<24)
-#define IP_RST_NANOPHY                  (1<<23)
-
-/*
- * USB directions
- *
- * This bit flag is used in endpoint descriptors' bEndpointAddress field.
- * It's also one of three fields in control requests bRequestType.
- */
-#define USB_DIR_OUT                     0               /* to device */
-#define USB_DIR_IN                      0x80            /* to host */
-
-/*
- * Descriptor types ... USB 2.0 spec table 9.5
- */
-#define USB_DT_DEVICE                   0x01
-#define USB_DT_CONFIG                   0x02
-#define USB_DT_STRING                   0x03
-#define USB_DT_INTERFACE                0x04
-#define USB_DT_ENDPOINT                 0x05
-#define USB_DT_DEVICE_QUALIFIER         0x06
-#define USB_DT_OTHER_SPEED_CONFIG       0x07
-#define USB_DT_INTERFACE_POWER          0x08
-/* these are from a minor usb 2.0 revision (ECN) */
-#define USB_DT_OTG                      0x09
-#define USB_DT_DEBUG                    0x0a
-#define USB_DT_INTERFACE_ASSOCIATION    0x0b
-/* these are from the Wireless USB spec */
-#define USB_DT_SECURITY                 0x0c
-#define USB_DT_KEY                      0x0d
-#define USB_DT_ENCRYPTION_TYPE          0x0e
-#define USB_DT_BOS                      0x0f
-#define USB_DT_DEVICE_CAPABILITY        0x10
-#define USB_DT_WIRELESS_ENDPOINT_COMP   0x11
-#define USB_DT_WIRE_ADAPTER             0x21
-#define USB_DT_RPIPE                    0x22
-#define USB_DT_CS_RADIO_CONTROL         0x23
-
-/*
- * USB recipients, the third of three bRequestType fields
- */
-#define USB_RECIP_MASK                  0x1f
-#define USB_RECIP_DEVICE                0x00
-#define USB_RECIP_INTERFACE             0x01
-#define USB_RECIP_ENDPOINT              0x02
-#define USB_RECIP_OTHER                 0x03
-
-/* IN/OUT will STALL */
-#define USB_ENDPOINT_HALT      			0
-
-/*
- * Endpoints
- */
-#define USB_ENDPOINT_NUMBER_MASK        0x0f    /* in bEndpointAddress */
-#define USB_ENDPOINT_DIR_MASK           0x80
-
-#define USB_ENDPOINT_XFERTYPE_MASK      0x03    /* in bmAttributes */
-#define USB_ENDPOINT_XFER_CONTROL       0
-#define USB_ENDPOINT_XFER_ISOC          1
-#define USB_ENDPOINT_XFER_BULK          2
-#define USB_ENDPOINT_XFER_INT           3
-#define USB_ENDPOINT_MAX_ADJUSTABLE     0x80
-
-/*
- * Standard requests, for the bRequest field of a SETUP packet.
- *
- * These are qualified by the bRequestType field, so that for example
- * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
- * by a GET_STATUS request.
- */
-#define USB_REQ_GET_STATUS              0x00
-#define USB_REQ_CLEAR_FEATURE           0x01
-#define USB_REQ_SET_FEATURE             0x03
-#define USB_REQ_SET_ADDRESS             0x05
-#define USB_REQ_GET_DESCRIPTOR          0x06
-#define USB_REQ_SET_DESCRIPTOR          0x07
-#define USB_REQ_GET_CONFIGURATION       0x08
-#define USB_REQ_SET_CONFIGURATION       0x09
-#define USB_REQ_GET_INTERFACE           0x0A
-#define USB_REQ_SET_INTERFACE           0x0B
-#define USB_REQ_SYNCH_FRAME             0x0C
-
-/* USB_DT_DEVICE: Device descriptor */
-struct usb_device_descriptor {
-        unsigned char  bLength;
-        unsigned char  bDescriptorType;
-
-        unsigned short bcdUSB;
-        unsigned char  bDeviceClass;
-        unsigned char  bDeviceSubClass;
-        unsigned char  bDeviceProtocol;
-        unsigned char  bMaxPacketSize0;
-        unsigned short idVendor;
-        unsigned short idProduct;
-        unsigned short bcdDevice;
-        unsigned char  iManufacturer;
-        unsigned char  iProduct;
-        unsigned char  iSerialNumber;
-        unsigned char  bNumConfigurations;
-} __attribute__ ((packed));
-
-#define USB_DT_DEVICE_SIZE              18
-
-/*
- * Device and/or Interface Class codes
- * as found in bDeviceClass or bInterfaceClass
- * and defined by www.usb.org documents
- */
-#define USB_CLASS_PER_INTERFACE         0       /* for DeviceClass */
-#define USB_CLASS_AUDIO                 1
-#define USB_CLASS_COMM                  2
-#define USB_CLASS_HID                   3
-#define USB_CLASS_PHYSICAL              5
-#define USB_CLASS_STILL_IMAGE           6
-#define USB_CLASS_PRINTER               7
-#define USB_CLASS_MASS_STORAGE          8
-#define USB_CLASS_HUB                   9
-#define USB_CLASS_CDC_DATA              0x0a
-#define USB_CLASS_CSCID                 0x0b    /* chip+ smart card */
-#define USB_CLASS_CONTENT_SEC           0x0d    /* content security */
-#define USB_CLASS_VIDEO                 0x0e
-#define USB_CLASS_WIRELESS_CONTROLLER   0xe0
-#define USB_CLASS_MISC                  0xef
-#define USB_CLASS_APP_SPEC              0xfe
-#define USB_CLASS_VENDOR_SPEC           0xff
-
-/*-------------------------------------------------------------------------*/
-
-/* USB_DT_CONFIG: Configuration descriptor information.
- *
- * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
- * descriptor type is different.  Highspeed-capable devices can look
- * different depending on what speed they're currently running.  Only
- * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
- * descriptors.
- */
-struct usb_config_descriptor {
-       unsigned char  bLength;
-       unsigned char  bDescriptorType;
-
-       unsigned short wTotalLength;
-       unsigned char  bNumInterfaces;
-       unsigned char  bConfigurationValue;
-       unsigned char  iConfiguration;
-       unsigned char  bmAttributes;
-       unsigned char  bMaxPower;
-} __attribute__((packed));
-
-#define USB_DT_CONFIG_SIZE              9
-
-/* from config descriptor bmAttributes */
-#define USB_CONFIG_ATT_ONE              (1 << 7)        /* must be set */
-#define USB_CONFIG_ATT_SELFPOWER        (1 << 6)        /* self powered */
-#define USB_CONFIG_ATT_WAKEUP           (1 << 5)        /* can wakeup */
-#define USB_CONFIG_ATT_BATTERY          (1 << 4)        /* battery powered */
-
-/*-------------------------------------------------------------------------*/
-
-/* USB_DT_STRING: String descriptor */
-struct usb_string_descriptor {
-        unsigned char  bLength;
-        unsigned char  bDescriptorType;
-
-        unsigned short wString[16];             /* UTF-16LE encoded */
-} __attribute__((packed));
-
-/*-------------------------------------------------------------------------*/
-/* USB_DT_INTERFACE: Interface descriptor */
-struct usb_interface_descriptor {
-        unsigned char  bLength;
-        unsigned char  bDescriptorType;
-
-        unsigned char  bInterfaceNumber;
-        unsigned char  bAlternateSetting;
-        unsigned char  bNumEndpoints;
-        unsigned char  bInterfaceClass;
-        unsigned char  bInterfaceSubClass;
-        unsigned char  bInterfaceProtocol;
-        unsigned char  iInterface;
-};
-
-#define USB_DT_INTERFACE_SIZE           9
-
-/*-------------------------------------------------------------------------*/
-
-/* USB_DT_ENDPOINT: Endpoint descriptor */
-struct usb_endpoint_descriptor {
-        unsigned char  bLength;
-        unsigned char  bDescriptorType;
-
-        unsigned char  bEndpointAddress;
-        unsigned char  bmAttributes;
-        unsigned short wMaxPacketSize;
-        unsigned char  bInterval;
-} __attribute__ ((packed));
-
-#define USB_DT_ENDPOINT_SIZE            7
-#define USB_DT_ENDPOINT_AUDIO_SIZE      9       /* Audio extension */
-
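As an illustration only (not code from this tree), the descriptor structure and constants above combine as follows for a high-speed bulk IN endpoint; the endpoint number 1 and the 512-byte packet size are assumptions for the example:

static const struct usb_endpoint_descriptor bulk_in_ep_desc = {
	.bLength          = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN | 1,		/* EP1, IN direction */
	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   = 512,			/* high-speed bulk maximum */
	.bInterval        = 0,				/* unused for bulk endpoints */
};
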
-extern int usb_need_reset;
-
-/**
- * This union represents the bit fields in the DMA Descriptor
- * status quadlet. Read the quadlet into the <i>d32</i> member then
- * set/clear the bits using the <i>b</i>, <i>b_iso_out</i> and
- * <i>b_iso_in</i> elements.
- */
-typedef union dev_dma_desc_sts {
-		/** raw register data */
-	unsigned int d32;
-		/** quadlet bits */
-	struct {
-		/** Received number of bytes */
-		unsigned bytes:16;
-		/** NAK bit - only for OUT EPs */
-		unsigned nak:1;
-		unsigned reserved17_22:6;
-		/** Multiple Transfer - only for OUT EPs */
-		unsigned mtrf:1;
-		/** Setup Packet received - only for OUT EPs */
-		unsigned sr:1;
-		/** Interrupt On Complete */
-		unsigned ioc:1;
-		/** Short Packet */
-		unsigned sp:1;
-		/** Last */
-		unsigned l:1;
-		/** Receive Status */
-		unsigned sts:2;
-		/** Buffer Status */
-		unsigned bs:2;
-	} b;
-
-//#ifdef DWC_EN_ISOC
-		/** iso out quadlet bits */
-	struct {
-		/** Received number of bytes */
-		unsigned rxbytes:11;
-
-		unsigned reserved11:1;
-		/** Frame Number */
-		unsigned framenum:11;
-		/** Received ISO Data PID */
-		unsigned pid:2;
-		/** Interrupt On Complete */
-		unsigned ioc:1;
-		/** Short Packet */
-		unsigned sp:1;
-		/** Last */
-		unsigned l:1;
-		/** Receive Status */
-		unsigned rxsts:2;
-		/** Buffer Status */
-		unsigned bs:2;
-	} b_iso_out;
-
-		/** iso in quadlet bits */
-	struct {
-		/** Transmitted number of bytes */
-		unsigned txbytes:12;
-		/** Frame Number */
-		unsigned framenum:11;
-		/** Transmitted ISO Data PID */
-		unsigned pid:2;
-		/** Interrupt On Complete */
-		unsigned ioc:1;
-		/** Short Packet */
-		unsigned sp:1;
-		/** Last */
-		unsigned l:1;
-		/** Transmit Status */
-		unsigned txsts:2;
-		/** Buffer Status */
-		unsigned bs:2;
-	} b_iso_in;
-//#endif                                /* DWC_EN_ISOC */
-} dev_dma_desc_sts_t;
-
-/**
- * DMA Descriptor structure
- *
- * DMA Descriptor structure contains two quadlets:
- * Status quadlet and Data buffer pointer.
- */
-typedef struct dwc_otg_dev_dma_desc {
-	/** DMA Descriptor status quadlet */
-	dev_dma_desc_sts_t status;
-	/** DMA Descriptor data buffer pointer */
-	unsigned int buf;
-} dwc_otg_dev_dma_desc_t;
-
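To make the access pattern described in the comment above concrete, here is a minimal sketch (not part of the original driver; handle_setup() is a hypothetical callback) of how the status quadlet is meant to be read and written through the union:

extern void handle_setup(unsigned int buf);	/* hypothetical handler */

static void refill_out_descriptor(dwc_otg_dev_dma_desc_t *desc)
{
	dev_dma_desc_sts_t sts;

	sts.d32 = desc->status.d32;		/* read the raw quadlet once */
	if (sts.b.sr)				/* setup packet received (OUT EP) */
		handle_setup(desc->buf);
	sts.b.bytes = 512;			/* receive buffer size in bytes */
	sts.b.ioc = 1;				/* interrupt on completion */
	sts.b.l = 1;				/* last descriptor in the chain */
	desc->status.d32 = sts.d32;		/* write the quadlet back in one store */
}
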
-extern void usb_reinit(void);
-
-#endif	/* __DWC_USB_H__*/
diff --git a/plat/hikey/partitions.c b/plat/hikey/partitions.c
deleted file mode 100644
index e4d303c..0000000
--- a/plat/hikey/partitions.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <debug.h>
-#include <dw_mmc.h>
-#include <errno.h>
-#include <io_storage.h>
-#include <mmio.h>
-#include <partitions.h>
-#include <platform_def.h>
-#include <string.h>
-#include "hikey_private.h"
-
-#define EFI_ENTRIES		128
-#define EFI_ENTRY_SIZE		(sizeof(struct efi_entry))
-#define EFI_MBR_SIZE		512
-#define EFI_HEADER_SIZE		512
-#define EFI_TOTAL_SIZE		(EFI_MBR_SIZE + EFI_HEADER_SIZE +	\
-				EFI_ENTRY_SIZE * EFI_ENTRIES)
-
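For reference: assuming EFI_NAMELEN is 36 (the standard GPT name length, defined in partitions.h and not shown in this diff), sizeof(struct efi_entry) below is 16 + 16 + 8 + 8 + 8 + 72 = 128 bytes, so EFI_TOTAL_SIZE works out to 512 + 512 + 128 * 128 = 17408 bytes, i.e. the usual 34-sector GPT area at the start of the disk.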
-struct efi_header {
-	char		signature[8];
-	uint32_t	revision;
-	uint32_t	size;
-	uint32_t	header_crc;
-	uint32_t	reserved;
-	uint64_t	current_lba;
-	uint64_t	backup_lba;
-	uint64_t	first_lba;
-	uint64_t	last_lba;
-	uint8_t		disk_uuid[16];
-	/* starting LBA of array of partition entries */
-	uint64_t	part_lba;
-	/* number of partition entries in array */
-	uint32_t	part_num;
-	/* size of a single partition entry (usually 128) */
-	uint32_t	part_size;
-	uint32_t	part_crc;
-};
-
-struct efi_entry {
-	uint8_t		type_uuid[16];
-	uint8_t		uniq_uuid[16];
-	uint64_t	first_lba;
-	uint64_t	last_lba;
-	uint64_t	attr;
-	uint16_t	name[EFI_NAMELEN];
-};
-
-/* the first entry is dummy for ptable (covers both primary & secondary) */
-static struct ptentry ptable[EFI_ENTRIES + 1];
-static int entries;	/* number of valid partition entries */
-
-static void dump_entries(void)
-{
-	int i;
-
-	VERBOSE("Partition table with %d entries:\n", entries);
-	for (i = 0; i < entries; i++) {
-		VERBOSE("%s %llx-%llx\n", ptable[i].name,
-			ptable[i].start,
-			ptable[i].start + ptable[i].length - 4);
-	}
-}
-
-static int convert_ascii_string(uint16_t *str_in, uint8_t *str_out)
-{
-	uint8_t *name = (uint8_t *)str_in;
-	int i;
-
-	if (!str_in || !str_out || name[0] == '\0')
-		return -EINVAL;
-	for (i = 1; i < (EFI_NAMELEN << 1); i += 2) {
-		if (name[i] != '\0')
-			return -EINVAL;
-	}
-	for (i = 0; i < (EFI_NAMELEN << 1); i += 2) {
-		str_out[i >> 1] = name[i];
-		if (name[i] == '\0')
-			break;
-	}
-	return 0;
-}
-
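In other words, the GPT partition name is stored as UTF-16LE; convert_ascii_string() above accepts it only when every code unit is plain ASCII (high byte zero) and copies the low bytes into str_out as a NUL-terminated string.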
-static int parse_entry(uintptr_t buf)
-{
-	struct efi_entry *entry = (struct efi_entry *)buf;
-	int ret;
-
-	/* an empty (all-zero) entry marks the end of the partition list */
-	if ((entry->first_lba == 0) && (entry->last_lba == 0))
-		return 1;
-	ret = convert_ascii_string(entry->name, (uint8_t *)ptable[entries].name);
-	if (ret < 0)
-		return ret;
-	ptable[entries].start = (uint64_t)entry->first_lba * 512;
-	ptable[entries].length = (uint64_t)(entry->last_lba - entry->first_lba + 1) * 512;
-	entries++;
-	return 0;
-}
-
-/* create dummy entry for ptable */
-static void create_dummy_entry(void)
-{
-	int bytes;
-	ptable[entries].start = 0;
-	ptable[entries].length = 0;
-	bytes = sprintf(ptable[entries].name, "ptable");
-	ptable[entries].name[bytes] = '\0';
-	entries++;
-}
-
-struct ptentry *find_ptn(const char *str)
-{
-	struct ptentry *ptn = NULL;
-	int i;
-
-	for (i = 0; i < entries; i++) {
-		if (!strcmp(ptable[i].name, str)) {
-			ptn = &ptable[i];
-			break;
-		}
-	}
-	return ptn;
-}
-
-int get_partition(void)
-{
-	int result = IO_FAIL;
-	int i, ret, num_entries;
-	size_t bytes_read;
-	uintptr_t emmc_dev_handle, spec, img_handle;
-	unsigned int buf[MMC_BLOCK_SIZE >> 2];
-	struct efi_header *hd = NULL;
-
-	create_dummy_entry();
-	result = plat_get_image_source(NORMAL_EMMC_NAME, &emmc_dev_handle,
-				       &spec);
-	if (result) {
-		WARN("failed to open eMMC normal partition\n");
-		return result;
-	}
-	result = io_open(emmc_dev_handle, spec, &img_handle);
-	if (result != IO_SUCCESS) {
-		WARN("Failed to open eMMC device\n");
-		return result;
-	}
-	result = io_seek(img_handle, IO_SEEK_SET, 0);
-	if (result)
-		goto exit;
-	result = io_read(img_handle, (uintptr_t)buf, EFI_MBR_SIZE,
-			 &bytes_read);
-	if ((result != IO_SUCCESS) || (bytes_read < EFI_MBR_SIZE)) {
-		WARN("Failed to read eMMC (%i)\n", result);
-		goto exit;
-	}
-	/* check the magic number in last word */
-	if (buf[(MMC_BLOCK_SIZE >> 2) - 1] != 0xaa550000) {
-		WARN("Can't find MBR protection information\n");
-		goto exit;
-	}
-
-	result = io_read(img_handle, (uintptr_t)buf, EFI_HEADER_SIZE,
-			 &bytes_read);
-	if ((result != IO_SUCCESS) || (bytes_read < EFI_HEADER_SIZE)) {
-		WARN("Failed to read eMMC (%i)\n", result);
-		goto exit;
-	}
-	hd = (struct efi_header *)((uintptr_t)buf);
-	if (strncmp(hd->signature, "EFI PART", 8)) {
-		WARN("Failed to find partition table\n");
-		goto exit;
-	}
-	num_entries = hd->part_num;
-	for (i = 0; i < num_entries; i++) {
-		result = io_read(img_handle, (uintptr_t)buf, EFI_HEADER_SIZE,
-				 &bytes_read);
-		if ((result != IO_SUCCESS) || (bytes_read < EFI_HEADER_SIZE)) {
-			WARN("Failed to read eMMC (%i)\n", result);
-			goto exit;
-		}
-		/* each 512-byte block holds four 128-byte partition entries */
-		ret = parse_entry((uintptr_t)buf);
-		if (ret)
-			break;
-		ret = parse_entry((uintptr_t)buf + EFI_ENTRY_SIZE);
-		if (ret)
-			break;
-		ret = parse_entry((uintptr_t)buf + EFI_ENTRY_SIZE * 2);
-		if (ret)
-			break;
-		ret = parse_entry((uintptr_t)buf + EFI_ENTRY_SIZE * 3);
-		if (ret)
-			break;
-	}
-exit:
-	io_close(img_handle);
-	update_fip_spec();
-	dump_entries();
-	return result;
-}
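A minimal usage sketch of the helpers above (illustrative only; "boot" is a hypothetical partition name):

void example_partition_lookup(void)
{
	struct ptentry *ptn;

	if (get_partition() != IO_SUCCESS)	/* parse the GPT from eMMC */
		return;
	ptn = find_ptn("boot");			/* hypothetical partition name */
	if (ptn != NULL)
		INFO("boot: start 0x%llx, length 0x%llx\n",
		     ptn->start, ptn->length);
}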
diff --git a/plat/hikey/plat_io_storage.c b/plat/hikey/plat_io_storage.c
deleted file mode 100644
index 6158710..0000000
--- a/plat/hikey/plat_io_storage.c
+++ /dev/null
@@ -1,694 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <dw_mmc.h>
-#include <fastboot.h>
-#include <io_block.h>
-#include <io_driver.h>
-#include <io_fip.h>
-#include <io_memmap.h>
-#include <io_storage.h>
-#include <mmio.h>
-#include <partitions.h>
-#include <platform_def.h>
-#include <semihosting.h>	/* For FOPEN_MODE_... */
-#include <string.h>
-#include "hikey_private.h"
-
-#define LOADER_MAX_ENTRIES		2
-#define PTABLE_MAX_ENTRIES		3
-#define USER_MAX_ENTRIES		2
-
-#define FLUSH_BASE			(DDR_BASE + 0x100000)
-
-struct entry_head {
-	unsigned char	magic[8];
-	unsigned char	name[8];
-	unsigned int	start;	/* lba */
-	unsigned int	count;	/* lba */
-	unsigned int	flag;
-};
-
-static const io_dev_connector_t *bl1_mem_dev_con;
-static uintptr_t bl1_mem_dev_spec;
-static uintptr_t loader_mem_dev_handle;
-static uintptr_t bl1_mem_init_params;
-static const io_dev_connector_t *fip_dev_con;
-static uintptr_t fip_dev_spec;
-static uintptr_t fip_dev_handle;
-static const io_dev_connector_t *dw_mmc_dev_con;
-static struct block_ops dw_mmc_ops;
-static uintptr_t emmc_dev_handle;
-
-#define SPARSE_FILL_BUFFER_ADDRESS	0x18000000
-#define SPARSE_FILL_BUFFER_SIZE		0x08000000
-
-/* Page 1024, since only a few pages before 2048 are used as partition table */
-#define SERIALNO_OFFSET			(1024 * 512)
-
-static const io_block_spec_t loader_mem_spec = {
-	/* l-loader.bin that contains bl1.bin */
-	.offset = LOADER_RAM_BASE,
-	.length = BL1_RO_LIMIT - LOADER_RAM_BASE,
-};
-
-static const io_block_spec_t boot_emmc_spec = {
-	.offset = MMC_LOADER_BASE,
-	.length = BL1_RO_LIMIT - LOADER_RAM_BASE,
-};
-
-static const io_block_spec_t normal_emmc_spec = {
-	.offset = MMC_BASE,
-	.length = MMC_SIZE,
-};
-
-static io_block_spec_t fip_block_spec = {
-	.offset = 0,
-	.length = 0,
-};
-
-static const io_file_spec_t bl2_file_spec = {
-	.path = BL2_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_file_spec = {
-	.path = BL30_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_file_spec = {
-	.path = BL31_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_file_spec = {
-	.path = BL32_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_file_spec = {
-	.path = BL33_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static int open_loader_mem(const uintptr_t spec);
-static int open_fip(const uintptr_t spec);
-static int open_dw_mmc(const uintptr_t spec);
-static int open_dw_mmc_boot(const uintptr_t spec);
-
-struct plat_io_policy {
-	const char	*image_name;
-	uintptr_t	*dev_handle;
-	uintptr_t	image_spec;
-	int		(*check)(const uintptr_t spec);
-};
-
-static const struct plat_io_policy policies[] = {
-	{
-		LOADER_MEM_NAME,
-		&loader_mem_dev_handle,
-		(uintptr_t)&loader_mem_spec,
-		open_loader_mem
-	}, {
-		BOOT_EMMC_NAME,
-		&emmc_dev_handle,
-		(uintptr_t)&boot_emmc_spec,
-		open_dw_mmc_boot
-	}, {
-		NORMAL_EMMC_NAME,
-		&emmc_dev_handle,
-		(uintptr_t)&normal_emmc_spec,
-		open_dw_mmc
-	}, {
-		FIP_IMAGE_NAME,
-		&emmc_dev_handle,
-		(uintptr_t)&fip_block_spec,
-		open_dw_mmc
-	}, {
-		BL2_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl2_file_spec,
-		open_fip
-	}, {
-		BL30_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_file_spec,
-		open_fip
-	}, {
-		BL31_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_file_spec,
-		open_fip
-	}, {
-		BL32_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_file_spec,
-		open_fip
-	}, {
-		BL33_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_file_spec,
-		open_fip
-	}, {
-		0, 0, 0, 0
-	}
-};
-
-static int open_loader_mem(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t image_handle;
-
-	result = io_dev_init(loader_mem_dev_handle, bl1_mem_init_params);
-	if (result == IO_SUCCESS) {
-		result = io_open(loader_mem_dev_handle, spec, &image_handle);
-		if (result == IO_SUCCESS) {
-			io_close(image_handle);
-		}
-	}
-	return result;
-}
-
-static int open_fip(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-
-	/* See if a Firmware Image Package is available */
-	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_NAME);
-	if (result == IO_SUCCESS) {
-		INFO("Using FIP\n");
-		/*TODO: Check image defined in spec is present in FIP. */
-	}
-	return result;
-}
-
-
-static int open_dw_mmc(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t image_handle;
-
-	/* indicate to select normal partition in eMMC */
-	result = io_dev_init(emmc_dev_handle, 0);
-	if (result == IO_SUCCESS) {
-		result = io_open(emmc_dev_handle, spec, &image_handle);
-		if (result == IO_SUCCESS) {
-			/* INFO("Using DW MMC IO\n"); */
-			io_close(image_handle);
-		}
-	}
-	return result;
-}
-
-static int open_dw_mmc_boot(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t image_handle;
-
-	/* indicate to select boot partition in eMMC */
-	result = io_dev_init(emmc_dev_handle, 1);
-	if (result == IO_SUCCESS) {
-		result = io_open(emmc_dev_handle, spec, &image_handle);
-		if (result == IO_SUCCESS) {
-			/* INFO("Using DW MMC IO\n"); */
-			io_close(image_handle);
-		}
-	}
-	return result;
-}
-
-void io_setup(void)
-{
-	int io_result = IO_FAIL;
-
-	/* Register the IO devices on this platform */
-	io_result = register_io_dev_fip(&fip_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = register_io_dev_block(&dw_mmc_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = register_io_dev_memmap(&bl1_mem_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	/* Open connections to devices and cache the handles */
-	io_result = io_dev_open(fip_dev_con, fip_dev_spec, &fip_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	dw_mmc_ops.init = init_mmc;
-	dw_mmc_ops.read = mmc0_read;
-	dw_mmc_ops.write = mmc0_write;
-	io_result = io_dev_open(dw_mmc_dev_con, (uintptr_t)&dw_mmc_ops,
-				&emmc_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = io_dev_open(bl1_mem_dev_con, bl1_mem_dev_spec,
-				&loader_mem_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	/* Ignore improbable errors in release builds */
-	(void)io_result;
-}
-
-/* Return an IO device handle and specification which can be used to access
- * an image. Use this to enforce platform load policy */
-int plat_get_image_source(const char *image_name, uintptr_t *dev_handle,
-			  uintptr_t *image_spec)
-{
-	int result = IO_FAIL;
-	const struct plat_io_policy *policy;
-
-	if ((image_name != NULL) && (dev_handle != NULL) &&
-	    (image_spec != NULL)) {
-		policy = policies;
-		while (policy->image_name != NULL) {
-			if (strcmp(policy->image_name, image_name) == 0) {
-				result = policy->check(policy->image_spec);
-				if (result == IO_SUCCESS) {
-					*image_spec = policy->image_spec;
-					*dev_handle = *(policy->dev_handle);
-					break;
-				}
-			}
-			policy++;
-		}
-	} else {
-		result = IO_FAIL;
-	}
-	return result;
-}
-
-int update_fip_spec(void)
-{
-	struct ptentry *ptn;
-
-	ptn = find_ptn("fastboot");
-	if (!ptn) {
-		WARN("failed to find partition fastboot\n");
-		ptn = find_ptn("bios");
-		if (!ptn) {
-			WARN("failed to find partition bios\n");
-			return IO_FAIL;
-		}
-	}
-	VERBOSE("%s: name:%s, start:%llx, length:%llx\n",
-		__func__, ptn->name, ptn->start, ptn->length);
-	fip_block_spec.offset = ptn->start;
-	fip_block_spec.length = ptn->length;
-	return IO_SUCCESS;
-}
-
-static int fetch_entry_head(void *buf, int num, struct entry_head *hd)
-{
-	unsigned char magic[8] = "ENTRYHDR";
-	if (hd == NULL)
-		return IO_FAIL;
-	memcpy((void *)hd, buf, sizeof(struct entry_head) * num);
-	if (!strncmp((void *)hd->magic, (void *)magic, 8))
-		return IO_SUCCESS;
-	return IO_NOT_SUPPORTED;
-}
-
-static int flush_loader(void)
-{
-	struct entry_head entries[5];
-	uintptr_t img_handle, spec;
-	int result = IO_FAIL;
-	size_t bytes_read, length;
-	ssize_t offset;
-	int i, fp;
-
-	result = fetch_entry_head((void *)(FLUSH_BASE + 28),
-				  LOADER_MAX_ENTRIES, entries);
-	if (result) {
-		WARN("failed to parse entries in loader image\n");
-		return result;
-	}
-
-	spec = 0;
-	for (i = 0, fp = 0; i < LOADER_MAX_ENTRIES; i++) {
-		if (entries[i].flag != 1) {
-			WARN("Invalid flag in entry:0x%x\n", entries[i].flag);
-			return IO_NOT_SUPPORTED;
-		}
-		result = plat_get_image_source(BOOT_EMMC_NAME, &emmc_dev_handle,
-					       &spec);
-		if (result) {
-			WARN("failed to open emmc boot area\n");
-			return result;
-		}
-		/* offset in Boot Area1 */
-		offset = MMC_LOADER_BASE + entries[i].start * 512;
-
-		result = io_open(emmc_dev_handle, spec, &img_handle);
-		if (result != IO_SUCCESS) {
-			WARN("Failed to open memmap device\n");
-			return result;
-		}
-		length = entries[i].count * 512;
-
-		result = io_seek(img_handle, IO_SEEK_SET, offset);
-		if (result)
-			goto exit;
-
-		if (i == 1)
-			fp = (entries[1].start - entries[0].start) * 512;
-		result = io_write(img_handle, FLUSH_BASE + fp, length,
-				  &bytes_read);
-		if ((result != IO_SUCCESS) || (bytes_read < length)) {
-			WARN("Failed to write '%s' file (%i)\n",
-			     LOADER_MEM_NAME, result);
-			goto exit;
-		}
-		io_close(img_handle);
-	}
-	return result;
-exit:
-	io_close(img_handle);
-	return result;
-}
-
-/*
- * Flush l-loader.bin (loader & bl1.bin) into Boot Area1 of eMMC.
- */
-int flush_loader_image(void)
-{
-	uintptr_t bl1_image_spec;
-	int result = IO_FAIL;
-	size_t bytes_read, length;
-	uintptr_t img_handle;
-
-	result = plat_get_image_source(LOADER_MEM_NAME, &loader_mem_dev_handle,
-				       &bl1_image_spec);
-
-	result = io_open(loader_mem_dev_handle, bl1_image_spec, &img_handle);
-	if (result != IO_SUCCESS) {
-		WARN("Failed to open memmap device\n");
-		goto exit;
-	}
-	length = loader_mem_spec.length;
-	result = io_read(img_handle, FLUSH_BASE, length, &bytes_read);
-	if ((result != IO_SUCCESS) || (bytes_read < length)) {
-		WARN("Failed to load '%s' file (%i)\n", LOADER_MEM_NAME, result);
-		goto exit;
-	}
-	io_close(img_handle);
-
-	result = flush_loader();
-	if (result != IO_SUCCESS) {
-		io_dev_close(loader_mem_dev_handle);
-		return result;
-	}
-exit:
-	io_close(img_handle);
-	io_dev_close(loader_mem_dev_handle);
-	return result;
-}
-
-static int flush_single_image(const char *mmc_name, unsigned long img_addr,
-				ssize_t offset, size_t length)
-{
-	uintptr_t img_handle, spec = 0;
-	size_t bytes_read;
-	int result = IO_FAIL;
-
-	result = plat_get_image_source(mmc_name, &emmc_dev_handle,
-				       &spec);
-	if (result) {
-		NOTICE("failed to open emmc user data area\n");
-		return result;
-	}
-
-	result = io_open(emmc_dev_handle, spec, &img_handle);
-	if (result != IO_SUCCESS) {
-		NOTICE("Failed to open memmap device\n");
-		return result;
-	}
-
-	result = io_seek(img_handle, IO_SEEK_SET, offset);
-	if (result) {
-		NOTICE("Failed to seek at offset:0x%x\n", offset);
-		goto exit;
-	}
-
-	result = io_write(img_handle, img_addr, length,
-			  &bytes_read);
-	if ((result != IO_SUCCESS) || (bytes_read < length)) {
-		NOTICE("Failed to write file (%i)\n", result);
-		goto exit;
-	}
-exit:
-	io_close(img_handle);
-	return result;
-}
-
-static int is_sparse_image(unsigned long img_addr)
-{
-	if (*(uint32_t *)img_addr == SPARSE_HEADER_MAGIC)
-		return 1;
-	return 0;
-}
-
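The sparse-image handling here (is_sparse_image() above and do_unsparse() below) relies on the Android sparse format header layout, which comes from a header outside this diff. A sketch of the assumed structures and constants, matching the AOSP libsparse format (values taken from that format, not from this tree):

#include <stdint.h>

#define SPARSE_HEADER_MAGIC	0xed26ff3a

#define CHUNK_TYPE_RAW		0xCAC1
#define CHUNK_TYPE_FILL		0xCAC2
#define CHUNK_TYPE_DONT_CARE	0xCAC3

typedef struct sparse_header {
	uint32_t	magic;		/* SPARSE_HEADER_MAGIC */
	uint16_t	major_version;
	uint16_t	minor_version;
	uint16_t	file_hdr_sz;	/* size of this header in bytes */
	uint16_t	chunk_hdr_sz;	/* size of each chunk header in bytes */
	uint32_t	blk_sz;		/* block size in bytes */
	uint32_t	total_blks;	/* blocks in the expanded image */
	uint32_t	total_chunks;	/* chunks in the sparse image */
	uint32_t	image_checksum;	/* CRC32 of the expanded data */
} sparse_header_t;

typedef struct chunk_header {
	uint16_t	chunk_type;	/* RAW, FILL or DONT_CARE */
	uint16_t	reserved1;
	uint32_t	chunk_sz;	/* chunk size in blocks */
	uint32_t	total_sz;	/* header plus payload, in bytes */
} chunk_header_t;
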
-static int do_unsparse(char *cmdbuf, unsigned long img_addr, unsigned long img_length)
-{
-	sparse_header_t *header = (sparse_header_t *)img_addr;
-	chunk_header_t *chunk = NULL;
-	struct ptentry *ptn;
-	void *data = (void *)img_addr;
-	uint64_t out_blks = 0, out_length = 0;
-	uint64_t length;
-	uint32_t fill_value;
-	uint64_t left, count;
-	int i, result;
-
-	ptn = find_ptn(cmdbuf);
-	if (!ptn) {
-		NOTICE("failed to find partition %s\n", cmdbuf);
-		return IO_FAIL;
-	}
-	length = (uint64_t)(header->total_blks) * (uint64_t)(header->blk_sz);
-	if (length > ptn->length) {
-		NOTICE("Unsparsed image length is %lld, partition entry length is %lld.\n",
-			length, ptn->length);
-		return IO_FAIL;
-	}
-
-	data = (void *)((unsigned long)data + header->file_hdr_sz);
-	for (i = 0; i < header->total_chunks; i++) {
-		chunk = (chunk_header_t *)data;
-		data = (void *)((unsigned long)data + sizeof(chunk_header_t));
-		length = (uint64_t)chunk->chunk_sz * (uint64_t)header->blk_sz;
-
-		switch (chunk->chunk_type) {
-		case CHUNK_TYPE_RAW:
-			result = flush_single_image(NORMAL_EMMC_NAME,
-						    (unsigned long)data,
-						    ptn->start + out_length, length);
-			if (result < 0) {
-				NOTICE("sparse: failed to flush raw chunk\n");
-				return result;
-			}
-			out_blks += length / 512;
-			out_length += length;
-			/* next chunk is just after the raw data */
-			data = (void *)((unsigned long)data + length);
-			break;
-		case CHUNK_TYPE_FILL:
-			if (chunk->total_sz != (sizeof(unsigned int) + sizeof(chunk_header_t))) {
-				NOTICE("sparse: bad chunk size\n");
-				return IO_FAIL;
-			}
-			fill_value = *(unsigned int *)data;
-			if (fill_value != 0) {
-				NOTICE("sparse: non-zero fill value not supported, writing zeros.\n");
-			}
-			memset((void *)SPARSE_FILL_BUFFER_ADDRESS,
-				0, SPARSE_FILL_BUFFER_SIZE);
-			left = length;
-			while (left > 0) {
-				if (left < SPARSE_FILL_BUFFER_SIZE)
-					count = left;
-				else
-					count = SPARSE_FILL_BUFFER_SIZE;
-				result = flush_single_image(NORMAL_EMMC_NAME,
-							    SPARSE_FILL_BUFFER_ADDRESS,
-							    ptn->start + out_length, count);
-				if (result < 0) {
-					WARN("sparse: failed to flush fill chunk\n");
-					return result;
-				}
-				out_blks += count / 512;
-				out_length += count;
-				left = left - count;
-			}
-			/* next chunk is just after the filled data */
-			data = (void *)((unsigned long)data + sizeof(unsigned int));
-			break;
-		case CHUNK_TYPE_DONT_CARE:
-			if (chunk->total_sz != sizeof(chunk_header_t)) {
-				NOTICE("sparse: unmatched chunk size\n");
-				return IO_FAIL;
-			}
-			out_blks += length / 512;
-			out_length += length;
-			break;
-		default:
-			NOTICE("sparse: unrecognized type 0x%x\n", chunk->chunk_type);
-			break;
-		}
-	}
-	return 0;
-}
-
-/* Page 1024 is used to store serial number */
-int flush_random_serialno(unsigned long addr, unsigned long length)
-{
-	int result;
-
-	memset((void *)SPARSE_FILL_BUFFER_ADDRESS, 0, 512);
-	memcpy((void *)SPARSE_FILL_BUFFER_ADDRESS, (void *)addr, length);
-	result = flush_single_image(NORMAL_EMMC_NAME, SPARSE_FILL_BUFFER_ADDRESS,
-				    SERIALNO_OFFSET, 512);
-	return result;
-}
-
-char *load_serialno(void)
-{
-	uintptr_t img_handle, spec = 0;
-	size_t bytes_read;
-	struct random_serial_num *random = NULL;
-	int result;
-
-	result = plat_get_image_source(NORMAL_EMMC_NAME, &emmc_dev_handle,
-				       &spec);
-	if (result) {
-		NOTICE("failed to open emmc user data area\n");
-		return NULL;
-	}
-
-	result = io_open(emmc_dev_handle, spec, &img_handle);
-	if (result != IO_SUCCESS) {
-		NOTICE("Failed to open memmap device\n");
-		return NULL;
-	}
-
-	result = io_seek(img_handle, IO_SEEK_SET, SERIALNO_OFFSET);
-	if (result) {
-		NOTICE("Failed to seek to serial number offset\n");
-		goto exit;
-	}
-	result = io_read(img_handle, SPARSE_FILL_BUFFER_ADDRESS, 512, &bytes_read);
-	if ((result != IO_SUCCESS) || (bytes_read < 512)) {
-		NOTICE("Failed to load '%s' file (%i)\n", LOADER_MEM_NAME, result);
-		goto exit;
-	}
-	io_close(img_handle);
-
-	random = (struct random_serial_num *)SPARSE_FILL_BUFFER_ADDRESS;
-	if (random->magic != RANDOM_MAGIC)
-		return NULL;
-
-	return random->serialno;
-exit:
-	io_close(img_handle);
-	return NULL;
-}
-
-/*
- * Flush bios.bin into User Data Area in eMMC
- */
-int flush_user_images(char *cmdbuf, unsigned long img_addr,
-		      unsigned long img_length)
-{
-	struct entry_head entries[5];
-	struct ptentry *ptn;
-	size_t length;
-	ssize_t offset;
-	int result = IO_FAIL;
-	int i, fp;
-
-	result = fetch_entry_head((void *)img_addr, USER_MAX_ENTRIES, entries);
-	switch (result) {
-	case IO_NOT_SUPPORTED:
-		if (!strncmp(cmdbuf, "fastboot", 8) ||
-		    !strncmp(cmdbuf, "bios", 4)) {
-			update_fip_spec();
-		}
-		if (is_sparse_image(img_addr)) {
-			result = do_unsparse(cmdbuf, img_addr, img_length);
-		} else {
-			ptn = find_ptn(cmdbuf);
-			if (!ptn) {
-				WARN("failed to find partition %s\n", cmdbuf);
-				return IO_FAIL;
-			}
-			img_length = (img_length + 512 - 1) / 512 * 512;
-			result = flush_single_image(NORMAL_EMMC_NAME, img_addr,
-						    ptn->start, img_length);
-		}
-		break;
-	case IO_SUCCESS:
-		if (strncmp(cmdbuf, "ptable", 6)) {
-			WARN("image is not a ptable image\n");
-			return IO_FAIL;
-		}
-		/* this image updates the partition table */
-		/* the first 512-byte block holds the entry headers */
-		fp = 512;
-
-		for (i = 0; i < USER_MAX_ENTRIES; i++) {
-			if (entries[i].flag != 0) {
-				WARN("Invalid flag in entry:0x%x\n",
-					entries[i].flag);
-				return IO_NOT_SUPPORTED;
-			}
-			if (entries[i].count == 0)
-				continue;
-			length = entries[i].count * 512;
-			offset = MMC_BASE + entries[i].start * 512;
-			VERBOSE("i:%d, start:%x, count:%x\n",
-				i, entries[i].start, entries[i].count);
-			result = flush_single_image(NORMAL_EMMC_NAME,
-						img_addr + fp, offset, length);
-			fp += entries[i].count * 512;
-		}
-		get_partition();
-		break;
-	case IO_FAIL:
-		WARN("failed to parse entries in user image.\n");
-		return result;
-	}
-	return result;
-}
diff --git a/plat/hikey/plat_pm.c b/plat/hikey/plat_pm.c
deleted file mode 100644
index 0347cec..0000000
--- a/plat/hikey/plat_pm.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <debug.h>
-#include <cci400.h>
-#include <errno.h>
-#include <gic_v2.h>
-#include <gpio.h>
-#include <hi6220.h>
-#include <hisi_ipc.h>
-#include <hisi_pwrc.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <psci.h>
-#include <sp804_timer.h>
-
-#include "hikey_def.h"
-#include "hikey_private.h"
-
-#define PLAT_SOC_SUSPEND_STATE	0x4
-
-static int32_t hikey_do_plat_actions(uint32_t afflvl, uint32_t state)
-{
-	assert(afflvl <= MPIDR_AFFLVL1);
-
-	if (state != PSCI_STATE_OFF)
-		return -EAGAIN;
-
-	return 0;
-}
-
-int32_t hikey_affinst_on(uint64_t mpidr,
-			 uint64_t sec_entrypoint,
-			 uint32_t afflvl,
-			 uint32_t state)
-{
-	int cpu, cluster;
-
-	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
-	cpu = mpidr & MPIDR_CPU_MASK;
-
-	VERBOSE("#%s, mpidr:%llx, afflvl:%x, state:%x\n", __func__, mpidr, afflvl, state);
-
-	/* directly return for power on */
-	if (state == PSCI_STATE_ON)
-		return PSCI_E_SUCCESS;
-
-	switch (afflvl) {
-	case MPIDR_AFFLVL0:
-		hisi_pwrc_set_core_bx_addr(cpu, cluster, sec_entrypoint);
-		hisi_ipc_cpu_on(cpu, cluster);
-		break;
-
-	case MPIDR_AFFLVL1:
-		hisi_ipc_cluster_on(cpu, cluster);
-		break;
-	}
-
-	return PSCI_E_SUCCESS;
-}
-
-
-static void hikey_affinst_off(uint32_t afflvl, uint32_t state)
-{
-	unsigned int mpidr = read_mpidr_el1();
-	int cpu, cluster;
-
-	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
-	cpu = mpidr & MPIDR_CPU_MASK;
-
-	if (hikey_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	switch (afflvl) {
-	case MPIDR_AFFLVL1:
-		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-		cci_disable_cluster_coherency(mpidr);
-		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-		hisi_ipc_cluster_off(cpu, cluster);
-		break;
-
-	case MPIDR_AFFLVL0:
-		arm_gic_cpuif_deactivate();
-		hisi_ipc_cpu_off(cpu, cluster);
-		break;
-	}
-
-	return;
-}
-
-static void hikey_affinst_suspend(uint64_t sec_entrypoint,
-				  uint32_t afflvl,
-				  uint32_t state)
-{
-	unsigned int mpidr = read_mpidr_el1();
-	int cpu, cluster;
-
-	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
-	cpu = mpidr & MPIDR_CPU_MASK;
-
-	if (hikey_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	switch (afflvl) {
-	case MPIDR_AFFLVL1:
-
-		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
-		cci_disable_cluster_coherency(mpidr);
-		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
-
-		if (psci_get_suspend_stateid() == PLAT_SOC_SUSPEND_STATE) {
-			hisi_pwrc_set_cluster_wfi(1);
-			hisi_pwrc_set_cluster_wfi(0);
-			hisi_ipc_psci_system_off();
-		} else
-			hisi_ipc_cluster_suspend(cpu, cluster);
-
-		break;
-
-	case MPIDR_AFFLVL0:
-
-		/* Program the jump address for the target cpu */
-		hisi_pwrc_set_core_bx_addr(cpu, cluster, sec_entrypoint);
-
-		arm_gic_cpuif_deactivate();
-
-		if (psci_get_suspend_stateid() != PLAT_SOC_SUSPEND_STATE)
-			hisi_ipc_cpu_suspend(cpu, cluster);
-		break;
-	}
-
-	return;
-}
-
-void hikey_affinst_on_finish(uint32_t afflvl, uint32_t state)
-{
-	unsigned long mpidr;
-	int cpu, cluster;
-
-	if (hikey_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/* Get the mpidr for this cpu */
-	mpidr = read_mpidr_el1();
-	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
-	cpu = mpidr & MPIDR_CPU_MASK;
-
-	/* Perform the common cluster specific operations */
-	if (afflvl != MPIDR_AFFLVL0)
-		cci_enable_cluster_coherency(mpidr);
-
-	/* Zero the jump address in the mailbox for this cpu */
-	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);
-
-	if (psci_get_suspend_stateid() == PLAT_SOC_SUSPEND_STATE) {
-		arm_gic_setup();
-	} else {
-		/* Enable the gic cpu interface */
-		arm_gic_cpuif_setup();
-
-		/* TODO: This setup is needed only after a cold boot */
-		arm_gic_pcpu_distif_setup();
-	}
-
-	return;
-}
-
-static void hikey_affinst_suspend_finish(uint32_t afflvl,
-					 uint32_t state)
-{
-	hikey_affinst_on_finish(afflvl, state);
-	return;
-}
-
-static void __dead2 hikey_system_off(void)
-{
-	unsigned int start, cnt, delta, delta_ms;
-	unsigned int show = 1;
-
-	NOTICE("%s: off system\n", __func__);
-
-	/* pulling GPIO_0_0 low to trigger PMIC shutdown */
-	/* setting pinmux */
-	mmio_write_32(0xF8001810, 0x2);
-	/* setting pin direction */
-	mmio_write_8(0xF8011400, 1);
-	/* setting pin output value */
-	mmio_write_8(0xF8011004, 0);
-
-	/* PMIC shutdown depends on two conditions: GPIO_0_0 (PWR_HOLD) low,
-	 * and VBUS_DET < 3.6V. For HiKey, VBUS_DET is connected to VDD_4V2
-	 * through Jumper 1-2. So, to complete shutdown, user needs to manually
-	 * remove Jumper 1-2.
-	 */
-	/* init timer00 */
-	mmio_write_32(TIMER00_CONTROL, 0);
-	mmio_write_32(TIMER00_LOAD, 0xffffffff);
-	/* free running */
-	mmio_write_32(TIMER00_CONTROL, 0x82);
-
-	/* adding delays */
-	start = mmio_read_32(TIMER00_VALUE);
-	do {
-		cnt = mmio_read_32(TIMER00_VALUE);
-		if (cnt > start) {
-			delta = 0xffffffff - cnt;
-			delta += start;
-		} else
-			delta = start - cnt;
-		delta_ms = delta / 19200;
-		if (delta_ms > 1000 && show) { /* after 1 second */
-			/* if we are still alive, that means Jumper
-			 * 1-2 is mounted. Need to warn and reboot
-			 */
-			NOTICE("..........................................\n");
-			NOTICE(" IMPORTANT: Remove Jumper 1-2 to shutdown\n");
-			NOTICE(" DANGER:    SoC is still burning. DANGER!\n");
-			NOTICE(" Board will be rebooted to avoid overheating\n");
-			NOTICE("..........................................\n");
-			show = 0;
-		}
-	} while (delta_ms < 5000); /* wait 5000 ms before forcing the reset */
-
-	/* Send the system reset request */
-	mmio_write_32(AO_SC_SYS_STAT0, 0x48698284);
-
-	wfi();
-	panic();
-}
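For reference, the division by 19200 above converts free-running timer ticks into milliseconds, which corresponds to the timer being clocked at 19.2 MHz; if the jumper is still mounted, the loop therefore forces the reset after roughly five seconds.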
-
-static void __dead2 hikey_system_reset(void)
-{
-	VERBOSE("%s: reset system\n", __func__);
-
-	/* Send the system reset request */
-	mmio_write_32(AO_SC_SYS_STAT0, 0x48698284);
-
-	wfi();
-	panic();
-}
-
-unsigned int hikey_get_sys_suspend_power_state(void)
-{
-	unsigned int power_state;
-
-	power_state = psci_make_powerstate(PLAT_SOC_SUSPEND_STATE,
-			PSTATE_TYPE_POWERDOWN, MPIDR_AFFLVL1);
-
-	return power_state;
-}
-
-static const plat_pm_ops_t hikey_plat_pm_ops = {
-	.affinst_on		     = hikey_affinst_on,
-	.affinst_on_finish	     = hikey_affinst_on_finish,
-	.affinst_off		     = hikey_affinst_off,
-	.affinst_standby	     = NULL,
-	.affinst_suspend	     = hikey_affinst_suspend,
-	.affinst_suspend_finish	     = hikey_affinst_suspend_finish,
-	.system_off		     = hikey_system_off,
-	.system_reset		     = hikey_system_reset,
-	.get_sys_suspend_power_state = hikey_get_sys_suspend_power_state,
-};
-
-int platform_setup_pm(const plat_pm_ops_t **plat_ops)
-{
-	*plat_ops = &hikey_plat_pm_ops;
-	return 0;
-}
diff --git a/plat/hikey/plat_security.c b/plat/hikey/plat_security.c
deleted file mode 100644
index dc439c7..0000000
--- a/plat/hikey/plat_security.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <debug.h>
-#include <stdint.h>
-#include <strings.h>
-#include <platform_def.h>
-
-#define PORTNUM_MAX		5
-
-#define MDDRC_SECURITY_BASE	0xF7121000
-
-struct int_en_reg {
-	unsigned in_en:1;
-	unsigned reserved:31;
-};
-
-struct rgn_map_reg {
-	unsigned rgn_base_addr:24;
-	unsigned rgn_size:6;
-	unsigned reserved:1;
-	unsigned rgn_en:1;
-};
-
-struct rgn_attr_reg {
-	unsigned sp:4;
-	unsigned security_inv:1;
-	unsigned reserved_0:3;
-	unsigned mid_en:1;
-	unsigned mid_inv:1;
-	unsigned reserved_1:6;
-	unsigned rgn_en:1;
-	unsigned subrgn_disable:16;
-};
-
-static volatile struct int_en_reg *get_int_en_reg(uint32_t base)
-{
-	uint64_t addr = base + 0x20;
-	return (struct int_en_reg *)addr;
-}
-
-static volatile struct rgn_map_reg *get_rgn_map_reg(uint32_t base, int region, int port)
-{
-	uint64_t addr = base + 0x100 + 0x10 * region + 0x400 * port;
-	return (struct rgn_map_reg *)addr;
-}
-
-static volatile struct rgn_attr_reg *get_rgn_attr_reg(uint32_t base, int region,
-					     int port)
-{
-	uint64_t addr = base + 0x104 + 0x10 * region + 0x400 * port;
-	return (struct rgn_attr_reg *)addr;
-}
-
-static int is_power_of_two(uint32_t x)
-{
-	return ((x != 0) && !(x & (x - 1)));
-}
-
-/*
- * Configure secure memory region
- * region_size must be a power of 2 and at least 64KB
- * region_base must be region_size aligned
- */
-static void sec_protect(uint32_t region_base, uint32_t region_size,
-			int region)
-{
-	volatile struct int_en_reg *int_en_reg;
-	volatile struct rgn_map_reg *rgn_map_reg;
-	volatile struct rgn_attr_reg *rgn_attr_reg;
-	uint32_t i = 0;
-
-	if (region < 1 || region > 15) {
-		ERROR("Secure region number is invalid\n");
-		return;
-	}
-	if (!is_power_of_two(region_size) || region_size < 0x10000) {
-		ERROR("Secure region size is not a power of 2 >= 64KB\n");
-		return;
-	}
-	if (region_base & (region_size - 1)) {
-		ERROR("Secure region address is not aligned to region size\n");
-		return;
-	}
-
-	INFO("BL2: TrustZone: protecting %u bytes of memory at 0x%x\n", region_size,
-	     region_base);
-
-	int_en_reg = get_int_en_reg(MDDRC_SECURITY_BASE);
-	int_en_reg->in_en = 0x1;
-
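-	/* Program the region base, size and attributes on each of the PORTNUM_MAX DDRC ports. */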
-	for (i = 0; i < PORTNUM_MAX; i++) {
-		rgn_map_reg = get_rgn_map_reg(MDDRC_SECURITY_BASE, region, i);
-		rgn_attr_reg = get_rgn_attr_reg(MDDRC_SECURITY_BASE, region, i);
-		rgn_map_reg->rgn_base_addr = region_base >> 16;
-		rgn_attr_reg->subrgn_disable = 0x0;
-		rgn_attr_reg->sp = (i == 3) ? 0xC : 0x0;
-		rgn_map_reg->rgn_size = __builtin_ffs(region_size) - 2;
-		rgn_map_reg->rgn_en = 0x1;
-	}
-}
-
-/*******************************************************************************
- * Initialize the secure environment.
- ******************************************************************************/
-void plat_security_setup(void)
-{
-	sec_protect(DRAM_SEC_BASE, DRAM_SEC_SIZE, 1);
-	sec_protect(DRAM_SDP_BASE, DRAM_SDP_SIZE, 2);
-}
diff --git a/plat/hikey/plat_topology.c b/plat/hikey/plat_topology.c
deleted file mode 100644
index 1e9fa90..0000000
--- a/plat/hikey/plat_topology.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <platform_def.h>
-#include <psci.h>
-
-unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
-{
-	/* Report 1 (absent) instance at levels higher than the cluster level */
-	if (aff_lvl > MPIDR_AFFLVL1)
-		return 1;
-
-	if (aff_lvl == MPIDR_AFFLVL1)
-		return 2; /* We have two clusters */
-
-	return 4; /* 4 CPUs in each cluster (cluster 0 and cluster 1) */
-}
-
-unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
-{
-	return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
-}
-
-int plat_get_max_afflvl(void)
-{
-	return MPIDR_AFFLVL1;
-}
-
-int plat_setup_topology(void)
-{
-	/* Juno todo: Make topology configurable via SCC */
-	return 0;
-}
diff --git a/plat/hikey/platform.mk b/plat/hikey/platform.mk
deleted file mode 100644
index fd32307..0000000
--- a/plat/hikey/platform.mk
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-# Copyright (c) 2014-2015, Linaro Ltd. All rights reserved.
-# Copyright (c) 2014-2015, Hisilicon Ltd.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# On Hikey, the TSP can execute either from Trusted SRAM or Trusted DRAM.
-# Trusted DRAM is the default.
-#
-PLAT_TSP_LOCATION	:=	tdram
-ifeq (${PLAT_TSP_LOCATION}, tsram)
-  PLAT_TSP_LOCATION_ID := PLAT_TRUSTED_SRAM_ID
-else ifeq (${PLAT_TSP_LOCATION}, tdram)
-  PLAT_TSP_LOCATION_ID := PLAT_TRUSTED_DRAM_ID
-else
-  $(error "Unsupported PLAT_TSP_LOCATION value")
-endif
-
-CONSOLE_BASE		:=	PL011_UART3_BASE
-CRASH_CONSOLE_BASE	:=	PL011_UART3_BASE
-
-# Process flags
-$(eval $(call add_define,PLAT_TSP_LOCATION_ID))
-$(eval $(call add_define,CONSOLE_BASE))
-$(eval $(call add_define,CRASH_CONSOLE_BASE))
-
-
-PLAT_INCLUDES		:=	-Iplat/hikey/include/
-
-PLAT_BL_COMMON_SOURCES	:=	drivers/arm/pl011/pl011_console.S	\
-				drivers/io/io_block.c			\
-				drivers/io/io_fip.c			\
-				drivers/io/io_memmap.c			\
-				drivers/io/io_storage.c			\
-				lib/aarch64/xlat_tables.c		\
-				plat/common/aarch64/plat_common.c	\
-				plat/common/plat_gic.c			\
-				plat/hikey/aarch64/hikey_common.c	\
-				plat/hikey/aarch64/plat_helpers.S	\
-				plat/hikey/plat_io_storage.c
-
-BL1_SOURCES		+=	drivers/arm/cci400/cci400.c		\
-				drivers/arm/gpio/gpio.c			\
-				lib/cpus/aarch64/cortex_a53.S		\
-				plat/common/aarch64/platform_up_stack.S	\
-				plat/hikey/aarch64/bl1_plat_helpers.S	\
-				plat/hikey/bl1_plat_setup.c		\
-				plat/hikey/drivers/dw_mmc.c		\
-				plat/hikey/drivers/hi6553.c		\
-				plat/hikey/drivers/sp804_timer.c	\
-				plat/hikey/partitions.c			\
-				plat/hikey/pll.c			\
-				plat/hikey/usb.c
-
-BL2_SOURCES		+=	plat/common/aarch64/platform_up_stack.S	\
-				plat/hikey/bl2_plat_setup.c		\
-				plat/hikey/plat_security.c		\
-				plat/hikey/drivers/dw_mmc.c		\
-				plat/hikey/drivers/hi6553.c		\
-				plat/hikey/drivers/hisi_dvfs.c		\
-				plat/hikey/drivers/hisi_mcu.c           \
-				plat/hikey/drivers/sp804_timer.c	\
-				plat/hikey/partitions.c
-
-BL31_SOURCES		+=	drivers/arm/cci400/cci400.c		\
-				drivers/arm/gic/arm_gic.c		\
-				drivers/arm/gic/gic_v2.c		\
-				drivers/arm/gic/gic_v3.c		\
-				drivers/arm/gpio/gpio.c			\
-				lib/cpus/aarch64/cortex_a53.S		\
-				plat/common/aarch64/platform_mp_stack.S	\
-				plat/hikey/bl31_plat_setup.c		\
-				plat/hikey/drivers/hisi_pwrc.c		\
-				plat/hikey/drivers/hisi_pwrc_sram.S	\
-				plat/hikey/drivers/hisi_ipc.c		\
-				plat/hikey/drivers/sp804_timer.c	\
-				plat/hikey/plat_pm.c			\
-				plat/hikey/plat_topology.c
-
-NEED_BL30		:=	yes
diff --git a/plat/hikey/pll.c b/plat/hikey/pll.c
deleted file mode 100644
index 0a5dd28..0000000
--- a/plat/hikey/pll.c
+++ /dev/null
@@ -1,1166 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <console.h>
-#include <debug.h>
-#include <errno.h>
-#include <hi6220.h>
-#include <hi6553.h>
-#include <mmio.h>
-#include <platform_def.h>
-#include <sp804_timer.h>
-
-static void init_pll(void)
-{
-	unsigned int data;
-
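-	/* Enable the PLL control register at 0xf7032000 (bit 0) and poll bit 28, presumably its lock/ready indication. */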
-	data = mmio_read_32((0xf7032000 + 0x000));
-	data |= 0x1;
-	mmio_write_32((0xf7032000 + 0x000), data);
-	dsb();
-	do {
-		data = mmio_read_32((0xf7032000 + 0x000));
-	} while (!(data & (1 << 28)));
-
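-	/* Switch the selector at 0xf7800000 to source 0x4 and poll the status register at +0x014 until the switch takes effect. */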
-	data = mmio_read_32((0xf7800000 + 0x000));
-	data &= ~0x007;
-	data |= 0x004;
-	mmio_write_32((0xf7800000 + 0x000), data);
-	dsb();
-	do {
-		data = mmio_read_32((0xf7800000 + 0x014));
-		data &= 0x007;
-	} while (data != 0x004);
-
-	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
-	data = mmio_read_32(PERI_SC_PERIPH_STAT1);
-	mmio_write_32(0xf7032000 + 0x02c, 0x5110103e);
-	data = mmio_read_32(0xf7032000 + 0x050);
-	data |= 1 << 28;
-	mmio_write_32(0xf7032000 + 0x050, data);
-	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
-	mdelay(1);
-	data = mmio_read_32(PERI_SC_PERIPH_STAT1);
-	NOTICE("syspll frequency:%dHz\n", data);
-}
-
-static void init_freq(void)
-{
-	unsigned int data, tmp;
-	unsigned int cpuext_cfg, ddr_cfg;
-
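-	/*
-	 * The writes below step through the clock control blocks at 0xf7032000
-	 * and 0xf6504000, polling the matching status bits after each change,
-	 * to move the CPU and DDR clocks onto their target sources/dividers.
-	 */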
-	mmio_write_32((0xf7032000 + 0x374), 0x4a);
-	mmio_write_32((0xf7032000 + 0x368), 0xda);
-	mmio_write_32((0xf7032000 + 0x36c), 0x01);
-	mmio_write_32((0xf7032000 + 0x370), 0x01);
-	mmio_write_32((0xf7032000 + 0x360), 0x60);
-	mmio_write_32((0xf7032000 + 0x364), 0x60);
-
-	mmio_write_32((0xf7032000 + 0x114), 0x1000);
-
-	data = mmio_read_32((0xf7032000 + 0x110));
-	data |= (3 << 12);
-	mmio_write_32((0xf7032000 + 0x110), data);
-
-	data = mmio_read_32((0xf7032000 + 0x110));
-	data |= (1 << 4);
-	mmio_write_32((0xf7032000 + 0x110), data);
-
-
-	data = mmio_read_32((0xf7032000 + 0x110));
-	data &= ~0x7;
-	data |= 0x5;
-	mmio_write_32((0xf7032000 + 0x110), data);
-	dsb();
-	mdelay(10);
-
-
-	do {
-		data = mmio_read_32((0xf6504000 + 0x008));
-		data &= (3 << 20);
-	} while (data != (3 << 20));
-	dsb();
-	mdelay(10);
-
-
-	data = mmio_read_32((0xf6504000 + 0x054));
-	data &= ~((1 << 0) | (1 << 11));
-	mmio_write_32((0xf6504000 + 0x054), data);
-	mdelay(10);
-
-	data = mmio_read_32((0xf7032000 + 0x104));
-	data &= ~(3 << 8);
-	data |= (1 << 8);
-	mmio_write_32((0xf7032000 + 0x104), data);
-
-	data = mmio_read_32((0xf7032000 + 0x100));
-	data |= (1 << 0);
-	mmio_write_32((0xf7032000 + 0x100), data);
-	dsb();
-
-	do {
-		data = mmio_read_32((0xf7032000 + 0x100));
-		data &= (1 << 2);
-	} while (data != (1 << 2));
-
-	data = mmio_read_32((0xf6504000 + 0x06c));
-	data &= ~0xffff;
-	data |= 0x56;
-	mmio_write_32((0xf6504000 + 0x06c), data);
-
-	data = mmio_read_32((0xf6504000 + 0x06c));
-	data &= ~(0xffffff << 8);
-	data |= 0xc7a << 8;
-	mmio_write_32((0xf6504000 + 0x06c), data);
-
-	data = mmio_read_32((0xf6504000 + 0x058));
-	data &= ((1 << 13) - 1);
-	data |= 0xccb;
-	mmio_write_32((0xf6504000 + 0x058), data);
-
-	mmio_write_32((0xf6504000 + 0x060), 0x1fff);
-	mmio_write_32((0xf6504000 + 0x064), 0x1ffffff);
-	mmio_write_32((0xf6504000 + 0x068), 0x7fffffff);
-	mmio_write_32((0xf6504000 + 0x05c), 0x1);
-
-	data = mmio_read_32((0xf6504000 + 0x054));
-	data &= ~(0xf << 12);
-	data |= 1 << 12;
-	mmio_write_32((0xf6504000 + 0x054), data);
-	dsb();
-
-
-	data = mmio_read_32((0xf7032000 + 0x000));
-	data &= ~(1 << 0);
-	mmio_write_32((0xf7032000 + 0x000), data);
-
-	mmio_write_32((0xf7032000 + 0x004), 0x5110207d);
-	mmio_write_32((0xf7032000 + 0x134), 0x10000005);
-	data = mmio_read_32((0xf7032000 + 0x134));
-
-
-	data = mmio_read_32((0xf7032000 + 0x000));
-	data |= (1 << 0);
-	mmio_write_32((0xf7032000 + 0x000), data);
-
-	mmio_write_32((0xf7032000 + 0x368), 0x100da);
-	data = mmio_read_32((0xf7032000 + 0x378));
-	data &= ~((1 << 7) - 1);
-	data |= 0x6b;
-	mmio_write_32((0xf7032000 + 0x378), data);
-	dsb();
-	do {
-		data = mmio_read_32((0xf7032000 + 0x378));
-		tmp = data & 0x7f;
-		data = (data & (0x7f << 8)) >> 8;
-		if (data != tmp)
-			continue;
-		data = mmio_read_32((0xf7032000 + 0x37c));
-	} while (!(data & 1));
-
-	data = mmio_read_32((0xf7032000 + 0x104));
-	data &= ~((3 << 0) |
-			(3 << 8));
-	cpuext_cfg = 1;
-	ddr_cfg = 1;
-	data |= cpuext_cfg | (ddr_cfg << 8);
-	mmio_write_32((0xf7032000 + 0x104), data);
-	dsb();
-
-	do {
-		data = mmio_read_32((0xf7032000 + 0x104));
-		tmp = (data & (3 << 16)) >> 16;
-		if (cpuext_cfg != tmp)
-			continue;
-		tmp = (data & (3 << 24)) >> 24;
-		if (ddr_cfg != tmp)
-			continue;
-		data = mmio_read_32((0xf7032000 + 0x000));
-		data &= 1 << 28;
-	} while (!data);
-
-	data = mmio_read_32((0xf7032000 + 0x100));
-	data &= ~(1 << 0);
-	mmio_write_32((0xf7032000 + 0x100), data);
-	dsb();
-	do {
-		data = mmio_read_32((0xf7032000 + 0x100));
-		data &= (1 << 1);
-	} while (data != (1 << 1));
-	mdelay(1000);
-
-	data = mmio_read_32((0xf6504000 + 0x054));
-	data &= ~(1 << 28);
-	mmio_write_32((0xf6504000 + 0x054), data);
-	dsb();
-
-	data = mmio_read_32((0xf7032000 + 0x110));
-	data &= ~((1 << 4) |
-			(3 << 12));
-	mmio_write_32((0xf7032000 + 0x110), data);
-}
-
-int cat_533mhz_800mhz(void)
-{
-	unsigned int data, i;
-	unsigned int bdl[5];
-
-
-	data = mmio_read_32((0xf712c000 + 0x1c8));
-	data &= 0xfffff0f0;
-	data |= 0x100f0f;
-	mmio_write_32((0xf712c000 + 0x1c8), data);
-
-	for (i = 0; i < 0x20; i++) {
-		mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
-		data = (i << 0x10) + i;
-		mmio_write_32((0xf712c000 + 0x140), data);
-		mmio_write_32((0xf712c000 + 0x144), data);
-		mmio_write_32((0xf712c000 + 0x148), data);
-		mmio_write_32((0xf712c000 + 0x14c), data);
-		mmio_write_32((0xf712c000 + 0x150), data);
-
-
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data |= 0x80000;
-		mmio_write_32((0xf712c000 + 0x070), data);
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data &= 0xfff7ffff;
-		mmio_write_32((0xf712c000 + 0x070), data);
-
-
-		mmio_write_32((0xf712c000 + 0x004), 0x8000);
-		mmio_write_32((0xf712c000 + 0x004), 0x0);
-		mmio_write_32((0xf712c000 + 0x004), 0x801);
-		do {
-			data = mmio_read_32((0xf712c000 + 0x004));
-		} while (data & 1);
-
-		data = mmio_read_32((0xf712c000 + 0x008));
-		if (!(data & 0x400)) {
-			mdelay(10);
-			return 0;
-		}
-		tf_printf("WARN:  " "lpddr3 cat fail\n");
-		data = mmio_read_32((0xf712c000 + 0x1d4));
-		if ((data & 0x1f00) && ((data & 0x1f) == 0)) {
-			bdl[0] = mmio_read_32((0xf712c000 + 0x140));
-			bdl[1] = mmio_read_32((0xf712c000 + 0x144));
-			bdl[2] = mmio_read_32((0xf712c000 + 0x148));
-			bdl[3] = mmio_read_32((0xf712c000 + 0x14c));
-			bdl[4] = mmio_read_32((0xf712c000 + 0x150));
-			if ((!(bdl[0] & 0x1f001f)) || (!(bdl[1] & 0x1f001f)) ||
-					(!(bdl[2] & 0x1f001f)) || (!(bdl[3] & 0x1f001f)) ||
-					(!(bdl[4] & 0x1f001f))) {
-				tf_printf("WARN:  " "lpddr3 cat deskew error\n");
-				if (i == 0x1f) {
-					tf_printf("WARN:  " "addrnbdl is max\n");
-					return -22;
-				}
-				mmio_write_32((0xf712c000 + 0x008), 0x400);
-			} else {
-				tf_printf("WARN:  " "lpddr3 cat other error1\n");
-				return -22;
-			}
-		} else {
-			tf_printf("WARN:  " "lpddr3 cat other error2\n");
-			return -22;
-		}
-	}
-	return -22;
-}
-
-static void ddrx_rdet(void)
-{
-	unsigned int data, rdet, bdl[4];
-
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= 0xf800ffff;
-	data |= 0x8f0000;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-
-	data = mmio_read_32((0xf712c000 + 0x0dc));
-	data &= 0xfffffff0;
-	data |= 0xf;
-	mmio_write_32((0xf712c000 + 0x0dc), data);
-
-
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data |= 0x80000;
-	mmio_write_32((0xf712c000 + 0x070), data);
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data &= 0xfff7ffff;
-	mmio_write_32((0xf712c000 + 0x070), data);
-
-	mmio_write_32((0xf712c000 + 0x004), 0x8000);
-	mmio_write_32((0xf712c000 + 0x004), 0);
-
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= ~0xf0000000;
-	data |= 0x80000000;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-
-	mmio_write_32((0xf712c000 + 0x004), 0x101);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (!(data & 1));
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x100)
-		tf_printf("WARN:    " "rdet lbs fail\n");
-
-	bdl[0] = mmio_read_32((0xf712c000 + 0x22c)) & 0x7f;
-	bdl[1] = mmio_read_32((0xf712c000 + 0x2ac)) & 0x7f;
-	bdl[2] = mmio_read_32((0xf712c000 + 0x32c)) & 0x7f;
-	bdl[3] = mmio_read_32((0xf712c000 + 0x3ac)) & 0x7f;
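-	/* Re-run the read training until the failure flag (bit 0x100 of +0x008) stays clear, bumping every lane's BDL value by one on each failed pass. */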
-	do {
-		data = mmio_read_32((0xf712c000 + 0x22c));
-		data &= ~0x7f;
-		data |= bdl[0];
-		mmio_write_32((0xf712c000 + 0x22c), data);
-		data = mmio_read_32((0xf712c000 + 0x2ac));
-		data &= ~0x7f;
-		data |= bdl[1];
-		mmio_write_32((0xf712c000 + 0x2ac), data);
-		data = mmio_read_32((0xf712c000 + 0x32c));
-		data &= ~0x7f;
-		data |= bdl[2];
-		mmio_write_32((0xf712c000 + 0x32c), data);
-		data = mmio_read_32((0xf712c000 + 0x3ac));
-		data &= ~0x7f;
-		data |= bdl[3];
-		mmio_write_32((0xf712c000 + 0x3ac), data);
-
-
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data |= 0x80000;
-		mmio_write_32((0xf712c000 + 0x070), data);
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data &= 0xfff7ffff;
-		mmio_write_32((0xf712c000 + 0x070), data);
-
-		mmio_write_32((0xf712c000 + 0x004), 0x8000);
-		mmio_write_32((0xf712c000 + 0x004), 0);
-
-		data = mmio_read_32((0xf712c000 + 0x0d0));
-		data &= ~0xf0000000;
-		data |= 0x40000000;
-		mmio_write_32((0xf712c000 + 0x0d0), data);
-		mmio_write_32((0xf712c000 + 0x004), 0x101);
-		do {
-			data = mmio_read_32((0xf712c000 + 0x004));
-		} while (data & 1);
-
-		data = mmio_read_32((0xf712c000 + 0x008));
-		rdet = data & 0x100;
-		if (rdet) {
-			tf_printf("INFO:    " "rdet ds fail\n");
-			mmio_write_32((0xf712c000 + 0x008), 0x100);
-		}
-		bdl[0]++;
-		bdl[1]++;
-		bdl[2]++;
-		bdl[3]++;
-	} while (rdet);
-
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= ~0xf0000000;
-	data |= 0x30000000;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-
-	mmio_write_32((0xf712c000 + 0x004), 0x101);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x100)
-		tf_printf("INFO:    " "rdet rbs av fail\n");
-}
-
-static void ddrx_wdet(void)
-{
-	unsigned int data, wdet, zero_bdl = 0, dq[4];
-	int i;
-
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= ~0xf;
-	data |= 0xf;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data |= 0x80000;
-	mmio_write_32((0xf712c000 + 0x070), data);
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data &= ~0x80000;
-	mmio_write_32((0xf712c000 + 0x070), data);
-
-	mmio_write_32((0xf712c000 + 0x004), 0x8000);
-	mmio_write_32((0xf712c000 + 0x004), 0);
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= ~0xf000;
-	data |= 0x8000;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-	mmio_write_32((0xf712c000 + 0x004), 0x201);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x200)
-		tf_printf("INFO:    " "wdet lbs fail\n");
-
-	dq[0] = mmio_read_32((0xf712c000 + 0x234)) & 0x1f00;
-	dq[1] = mmio_read_32((0xf712c000 + 0x2b4)) & 0x1f00;
-	dq[2] = mmio_read_32((0xf712c000 + 0x334)) & 0x1f00;
-	dq[3] = mmio_read_32((0xf712c000 + 0x3b4)) & 0x1f00;
-
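-	/* Repeat the write training, backing a lane's dq value off by 0x100 whenever that lane shows a zero BDL, until the failure flag (bit 0x200 of +0x008) is clear. */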
-	do {
-		mmio_write_32((0xf712c000 + 0x234), dq[0]);
-		mmio_write_32((0xf712c000 + 0x2b4), dq[1]);
-		mmio_write_32((0xf712c000 + 0x334), dq[2]);
-		mmio_write_32((0xf712c000 + 0x3b4), dq[3]);
-
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data |= 0x80000;
-		mmio_write_32((0xf712c000 + 0x070), data);
-		data = mmio_read_32((0xf712c000 + 0x070));
-		data &= ~0x80000;
-		mmio_write_32((0xf712c000 + 0x070), data);
-		mmio_write_32((0xf712c000 + 0x004), 0x8000);
-		mmio_write_32((0xf712c000 + 0x004), 0);
-
-		data = mmio_read_32((0xf712c000 + 0x0d0));
-		data &= ~0xf000;
-		data |= 0x4000;
-		mmio_write_32((0xf712c000 + 0x0d0), data);
-		mmio_write_32((0xf712c000 + 0x004), 0x201);
-		do {
-			data = mmio_read_32((0xf712c000 + 0x004));
-		} while (data & 1);
-
-		data = mmio_read_32((0xf712c000 + 0x008));
-		wdet = data & 0x200;
-		if (wdet) {
-			tf_printf("INFO:    " "wdet ds fail\n");
-			mmio_write_32((0xf712c000 + 0x008), 0x200);
-		}
-		mdelay(10);
-
-		for (i = 0; i < 4; i++) {
-			data = mmio_read_32((0xf712c000 + 0x210 + i * 0x80));
-			if ((!(data & 0x1f)) || (!(data & 0x1f00)) ||
-					(!(data & 0x1f0000)) || (!(data & 0x1f000000)))
-				zero_bdl = 1;
-			data = mmio_read_32((0xf712c000 + 0x214 + i * 0x80));
-			if ((!(data & 0x1f)) || (!(data & 0x1f00)) ||
-					(!(data & 0x1f0000)) || (!(data & 0x1f000000)))
-				zero_bdl = 1;
-			data = mmio_read_32((0xf712c000 + 0x218 + i * 0x80));
-			if (!(data & 0x1f))
-				zero_bdl = 1;
-			if (zero_bdl) {
-				if (i == 0)
-					dq[0] = dq[0] - 0x100;
-				if (i == 1)
-					dq[1] = dq[1] - 0x100;
-				if (i == 2)
-					dq[2] = dq[2] - 0x100;
-				if (i == 3)
-					dq[3] = dq[3] - 0x100;
-			}
-		}
-	} while (wdet);
-
-	data = mmio_read_32((0xf712c000 + 0x0d0));
-	data &= ~0xf000;
-	data |= 0x3000;
-	mmio_write_32((0xf712c000 + 0x0d0), data);
-	mmio_write_32((0xf712c000 + 0x004), 0x201);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x200)
-		tf_printf("INFO:    " "wdet rbs av fail\n");
-}
-
-static void set_ddrc_533mhz(void)
-{
-	unsigned int data;
-
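-	/* Program the DDR controller/PHY timing registers for 533MHz, then trigger PHY initialisation (0x140f to +0x004) and check the status register for errors. */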
-	mmio_write_32((0xf7032000 + 0x580), 0x3);
-	mmio_write_32((0xf7032000 + 0x5a8), 0x11111);
-	data = mmio_read_32((0xf7032000 + 0x104));
-	data |= 0x100;
-	mmio_write_32((0xf7032000 + 0x104), data);
-
-	mmio_write_32((0xf7030000 + 0x050), 0x30);
-	mmio_write_32((0xf7030000 + 0x240), 0x5ffff);
-	mmio_write_32((0xf7030000 + 0x344), 0xf5ff);
-	mmio_write_32((0xf712c000 + 0x00c), 0x400);
-	mmio_write_32((0xf712c000 + 0x018), 0x7);
-	mmio_write_32((0xf712c000 + 0x090), 0x6400000);
-	mmio_write_32((0xf712c000 + 0x258), 0x640);
-	mmio_write_32((0xf712c000 + 0x2d8), 0x640);
-	mmio_write_32((0xf712c000 + 0x358), 0x640);
-	mmio_write_32((0xf712c000 + 0x3d8), 0x640);
-	mmio_write_32((0xf712c000 + 0x018), 0x0);
-	mmio_write_32((0xf712c000 + 0x0b0), 0xf00000f);
-	mmio_write_32((0xf712c000 + 0x0b4), 0xf);
-	mmio_write_32((0xf712c000 + 0x088), 0x3fff801);
-	mmio_write_32((0xf712c000 + 0x070), 0x8940000);
-
-	data = mmio_read_32((0xf712c000 + 0x078));
-	data |= 4;
-	mmio_write_32((0xf712c000 + 0x078), data);
-	mmio_write_32((0xf712c000 + 0x01c), 0x8000080);
-	data = mmio_read_32((0xf712c000 + 0x020));
-	data &= 0xfffffffe;
-	mmio_write_32((0xf712c000 + 0x020), data);
-	mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
-	mmio_write_32((0xf712c000 + 0x010), 0x500000f);
-	mmio_write_32((0xf712c000 + 0x014), 0x10);
-	data = mmio_read_32((0xf712c000 + 0x1e4));
-	data &= 0xffffff00;
-	mmio_write_32((0xf712c000 + 0x1e4), data);
-	mmio_write_32((0xf712c000 + 0x030), 0x9dd87855);
-	mmio_write_32((0xf712c000 + 0x034), 0xa7138bb);
-	mmio_write_32((0xf712c000 + 0x038), 0x20091477);
-	mmio_write_32((0xf712c000 + 0x03c), 0x84534e16);
-	mmio_write_32((0xf712c000 + 0x040), 0x3008817);
-	mmio_write_32((0xf712c000 + 0x064), 0x106c3);
-	mmio_write_32((0xf712c000 + 0x068), 0xff0a0000);
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data &= 0xffff0000;
-	data |= 0x305;
-	mmio_write_32((0xf712c000 + 0x070), data);
-	data = mmio_read_32((0xf712c000 + 0x048));
-	data |= 0x40000000;
-	mmio_write_32((0xf712c000 + 0x048), data);
-	data = mmio_read_32((0xf712c000 + 0x020));
-	data &= ~0x10;
-	mmio_write_32((0xf712c000 + 0x020), data);
-	data = mmio_read_32((0xf712c000 + 0x080));
-	data &= ~0x2000;
-	mmio_write_32((0xf712c000 + 0x080), data);
-	mmio_write_32((0xf712c000 + 0x270), 0x3);
-	mmio_write_32((0xf712c000 + 0x2f0), 0x3);
-	mmio_write_32((0xf712c000 + 0x370), 0x3);
-	mmio_write_32((0xf712c000 + 0x3f0), 0x3);
-	mmio_write_32((0xf712c000 + 0x048), 0xd0420900);
-
-	mmio_write_32((0xf7128000 + 0x040), 0x0);
-	mmio_write_32((0xf712c000 + 0x004), 0x140f);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x7fe) {
-		tf_printf("NOTICE:  " "failed to init lpddr3 rank0 dram phy\n");
-		return;
-	}
-	tf_printf("NOTICE:  " "lpddr3 rank0 dram phy init succeeded\n");
-}
-
-static void set_ddrc_800mhz(void)
-{
-	unsigned int data;
-
-	mmio_write_32((0xf7032000 + 0x580), 0x2);
-	mmio_write_32((0xf7032000 + 0x5a8), 0x1003);
-	data = mmio_read_32((0xf7032000 + 0x104));
-	data &= 0xfffffcff;
-	mmio_write_32((0xf7032000 + 0x104), data);
-
-	mmio_write_32((0xf7030000 + 0x050), 0x30);
-	mmio_write_32((0xf7030000 + 0x240), 0x5ffff);
-	mmio_write_32((0xf7030000 + 0x344), 0xf5ff);
-	mmio_write_32((0xf712c000 + 0x00c), 0x400);
-	mmio_write_32((0xf712c000 + 0x018), 0x7);
-	mmio_write_32((0xf712c000 + 0x090), 0x5400000);
-	mmio_write_32((0xf712c000 + 0x258), 0x540);
-	mmio_write_32((0xf712c000 + 0x2d8), 0x540);
-	mmio_write_32((0xf712c000 + 0x358), 0x540);
-	mmio_write_32((0xf712c000 + 0x3d8), 0x540);
-	mmio_write_32((0xf712c000 + 0x018), 0x0);
-	mmio_write_32((0xf712c000 + 0x0b0), 0xf00000f);
-	mmio_write_32((0xf712c000 + 0x0b4), 0xf);
-	mmio_write_32((0xf712c000 + 0x088), 0x3fff801);
-	mmio_write_32((0xf712c000 + 0x070), 0x8940000);
-
-	data = mmio_read_32((0xf712c000 + 0x078));
-	data |= 4;
-	mmio_write_32((0xf712c000 + 0x078), data);
-	mmio_write_32((0xf712c000 + 0x01c), 0x8000080);
-	data = mmio_read_32((0xf712c000 + 0x020));
-	data &= 0xfffffffe;
-	mmio_write_32((0xf712c000 + 0x020), data);
-	mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
-	mmio_write_32((0xf712c000 + 0x010), 0x500000f);
-	mmio_write_32((0xf712c000 + 0x014), 0x10);
-	data = mmio_read_32((0xf712c000 + 0x1e4));
-	data &= 0xffffff00;
-	mmio_write_32((0xf712c000 + 0x1e4), data);
-	mmio_write_32((0xf712c000 + 0x030), 0xe663ab77);
-	mmio_write_32((0xf712c000 + 0x034), 0xea952db);
-	mmio_write_32((0xf712c000 + 0x038), 0x200d1cb1);
-	mmio_write_32((0xf712c000 + 0x03c), 0xc67d0721);
-	mmio_write_32((0xf712c000 + 0x040), 0x3008aa1);
-	mmio_write_32((0xf712c000 + 0x064), 0x11a43);
-	mmio_write_32((0xf712c000 + 0x068), 0xff0a0000);
-	data = mmio_read_32((0xf712c000 + 0x070));
-	data &= 0xffff0000;
-	data |= 0x507;
-	mmio_write_32((0xf712c000 + 0x070), data);
-	data = mmio_read_32((0xf712c000 + 0x048));
-	data |= 0x40000000;
-	mmio_write_32((0xf712c000 + 0x048), data);
-	data = mmio_read_32((0xf712c000 + 0x020));
-	data &= 0xffffffef;
-	mmio_write_32((0xf712c000 + 0x020), data);
-	data = mmio_read_32((0xf712c000 + 0x080));
-	data &= 0xffffdfff;
-	mmio_write_32((0xf712c000 + 0x080), data);
-	mmio_write_32((0xf712c000 + 0x270), 0x3);
-	mmio_write_32((0xf712c000 + 0x2f0), 0x3);
-	mmio_write_32((0xf712c000 + 0x370), 0x3);
-	mmio_write_32((0xf712c000 + 0x3f0), 0x3);
-	mmio_write_32((0xf712c000 + 0x048), 0xd0420900);
-
-	mmio_write_32((0xf7128000 + 0x040), 0x2001);
-	mmio_write_32((0xf712c000 + 0x004), 0x140f);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x7fe) {
-		WARN("failed to init lpddr3 rank0 dram phy\n");
-		return;
-	}
-}
-
-static void ddrc_common_init(int ddr800)
-{
-	unsigned int data;
-
-	mmio_write_32((0xf7120000 + 0x020), 0x1);
-	mmio_write_32((0xf7120000 + 0x100), 0x1700);
-	mmio_write_32((0xf7120000 + 0x104), 0x71040004);
-	mmio_write_32((0xf7121400 + 0x104), 0xf);
-	mmio_write_32((0xf7121800 + 0x104), 0xf);
-	mmio_write_32((0xf7121800 + 0x104), 0xf);
-	mmio_write_32((0xf7121c00 + 0x104), 0xf);
-	mmio_write_32((0xf7122000 + 0x104), 0xf);
-	mmio_write_32((0xf7128000 + 0x02c), 0x6);
-	mmio_write_32((0xf7128000 + 0x020), 0x1);
-	mmio_write_32((0xf7128000 + 0x028), 0x310201);
-	mmio_write_32((0xf712c000 + 0x1e4), 0xfe007600);
-	mmio_write_32((0xf7128000 + 0x01c), 0xaf001);
-
-
-	data = mmio_read_32((0xf7128000 + 0x280));
-	data |= 1 << 7;
-	mmio_write_32((0xf7128000 + 0x280), data);
-	mmio_write_32((0xf7128000 + 0x244), 0x3);
-
-	if (ddr800)
-		mmio_write_32((0xf7128000 + 0x240), 167 * 400000 / 1024);
-	else
-		mmio_write_32((0xf7128000 + 0x240), 167 * 533000 / 1024);
-
-	data = mmio_read_32((0xf712c000 + 0x080));
-	data &= 0xffff;
-	data |= 0x4002000;
-	mmio_write_32((0xf712c000 + 0x080), data);
-	mmio_write_32((0xf7128000 + 0x000), 0x0);
-	do {
-		data = mmio_read_32((0xf7128000 + 0x294));
-	} while (data & 1);
-	mmio_write_32((0xf7128000 + 0x000), 0x2);
-}
-
-
-static int dienum_det_and_rowcol_cfg(void)
-{
-	unsigned int data;
-
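-	/* Issue what appears to be a mode-register read through the DDRC command registers (the same sequence detect_ddr_chip_info() uses for MR5-MR7) and use the result to pick the row/column configuration below. */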
-	mmio_write_32((0xf7128000 + 0x210), 0x87);
-	mmio_write_32((0xf7128000 + 0x218), 0x10000);
-	mmio_write_32((0xf7128000 + 0x00c), 0x1);
-	do {
-		data = mmio_read_32((0xf7128000 + 0x00c));
-	} while (data & 1);
-	data = mmio_read_32((0xf7128000 + 0x4a8)) & 0xfc;
-	switch (data) {
-	case 0x18:
-		mmio_write_32((0xf7128000 + 0x060), 0x132);
-		mmio_write_32((0xf7128000 + 0x064), 0x132);
-		mmio_write_32((0xf7120000 + 0x100), 0x1600);
-		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
-		break;
-	case 0x1c:
-		mmio_write_32((0xf7128000 + 0x060), 0x142);
-		mmio_write_32((0xf7128000 + 0x064), 0x142);
-		mmio_write_32((0xf7120000 + 0x100), 0x1700);
-		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
-		break;
-	case 0x58:
-		mmio_write_32((0xf7128000 + 0x060), 0x133);
-		mmio_write_32((0xf7128000 + 0x064), 0x133);
-		mmio_write_32((0xf7120000 + 0x100), 0x1700);
-		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
-		break;
-	default:
-		break;
-	}
-	if (!data)
-		return -22;
-	return 0;
-}
-
-static int detect_ddr_chip_info(void)
-{
-	unsigned int data, mr5, mr6, mr7;
-
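-	/* Read MR5 (manufacturer ID) via the DDRC mode-register read sequence; the codes below match the JEDEC LPDDR manufacturer IDs. */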
-	mmio_write_32((0xf7128000 + 0x210), 0x57);
-	mmio_write_32((0xf7128000 + 0x218), 0x10000);
-	mmio_write_32((0xf7128000 + 0x00c), 0x1);
-
-	do {
-		data = mmio_read_32((0xf7128000 + 0x00c));
-	} while (data & 1);
-
-	data = mmio_read_32((0xf7128000 + 0x4a8));
-	mr5 = data & 0xff;
-	switch (mr5) {
-	case 1:
-		tf_printf("INFO:    " "Samsung DDR\n");
-		break;
-	case 6:
-		tf_printf("INFO:    " "Hynix DDR\n");
-		break;
-	case 3:
-		tf_printf("INFO:    " "Elpida DDR\n");
-		break;
-	default:
-		tf_printf("INFO:    " "DDR from other vendors\n");
-		break;
-	}
-
-	mmio_write_32((0xf7128000 + 0x210), 0x67);
-	mmio_write_32((0xf7128000 + 0x218), 0x10000);
-	mmio_write_32((0xf7128000 + 0x00c), 0x1);
-	do {
-		data = mmio_read_32((0xf7128000 + 0x00c));
-	} while (data & 1);
-	data = mmio_read_32((0xf7128000 + 0x4a8));
-	mr6 = data & 0xff;
-	mmio_write_32((0xf7128000 + 0x210), 0x77);
-	mmio_write_32((0xf7128000 + 0x218), 0x10000);
-	mmio_write_32((0xf7128000 + 0x00c), 0x1);
-	do {
-		data = mmio_read_32((0xf7128000 + 0x00c));
-	} while (data & 1);
-	data = mmio_read_32((0xf7128000 + 0x4a8));
-	mr7 = data & 0xff;
-	data = mr5 + (mr6 << 8) + (mr7 << 16);
-	return data;
-}
-
-int lpddr3_freq_init(int ddr800)
-{
-	unsigned int data;
-
-	if (ddr800) {
-		set_ddrc_800mhz();
-		tf_printf("INFO:    " "%s, set ddrc 800mhz\n", __func__);
-	} else {
-		set_ddrc_533mhz();
-		tf_printf("INFO:    " "%s, set ddrc 533mhz\n", __func__);
-	}
-
-	data = cat_533mhz_800mhz();
-	if (data)
-		tf_printf("NOTICE:  " "fail to set eye diagram\n");
-
-	mmio_write_32((0xf712c000 + 0x004), 0xf1);
-	if (ddr800)
-		mmio_write_32((0xf7128000 + 0x050), 0x100023);
-	else
-		mmio_write_32((0xf7128000 + 0x050), 0x100123);
-	mmio_write_32((0xf7128000 + 0x060), 0x133);
-	mmio_write_32((0xf7128000 + 0x064), 0x133);
-	mmio_write_32((0xf7128000 + 0x200), 0xa1000);
-	if (ddr800) {
-		mmio_write_32((0xf7128000 + 0x100), 0x755a9d12);
-		mmio_write_32((0xf7128000 + 0x104), 0x1753b055);
-		mmio_write_32((0xf7128000 + 0x108), 0x7401505f);
-		mmio_write_32((0xf7128000 + 0x10c), 0x578ca244);
-		mmio_write_32((0xf7128000 + 0x110), 0x10700000);
-		mmio_write_32((0xf7128000 + 0x114), 0x13141306);
-	} else {
-		mmio_write_32((0xf7128000 + 0x100), 0xb77b6718);
-		mmio_write_32((0xf7128000 + 0x104), 0x1e82a071);
-		mmio_write_32((0xf7128000 + 0x108), 0x9501c07e);
-		mmio_write_32((0xf7128000 + 0x10c), 0xaf50c255);
-		mmio_write_32((0xf7128000 + 0x110), 0x10b00000);
-		mmio_write_32((0xf7128000 + 0x114), 0x13181908);
-	}
-	mmio_write_32((0xf7128000 + 0x118), 0x44);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x7fe) {
-		tf_printf("NOTICE:  " "failed to init lpddr3 rank0\n");
-		return -14;
-	}
-	tf_printf("INFO:    " "lpddr3 rank0 init passed\n");
-	ddrx_rdet();
-	ddrx_wdet();
-
-	data = mmio_read_32((0xf712c000 + 0x048));
-	data |= 1;
-	mmio_write_32((0xf712c000 + 0x048), data);
-	mmio_write_32((0xf712c000 + 0x004), 0x21);
-	do {
-		data = mmio_read_32((0xf712c000 + 0x004));
-	} while (data & 1);
-
-	data = mmio_read_32((0xf712c000 + 0x008));
-	if (data & 0x7fe)
-		tf_printf("NOTICE:  " "lpddr3 rank1 init failed\n");
-	else
-		tf_printf("INFO:    " "lpddr3 rank1 init passed\n");
-
-	data = mmio_read_32((0xf712c000 + 0x048));
-	data &= ~0xf;
-	mmio_write_32((0xf712c000 + 0x048), data);
-	return 0;
-}
-
-static void init_ddr(int ddr800)
-{
-	unsigned int data;
-	int ret;
-
-
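-	/* Set bit 0 in the control registers at +0x030 and +0x010 (presumably DDR PHY/PLL enables) and wait for bits 28-29 to report ready on both. */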
-	data = mmio_read_32((0xf7032000 + 0x030));
-	data |= 1;
-	mmio_write_32((0xf7032000 + 0x030), data);
-	data = mmio_read_32((0xf7032000 + 0x010));
-	data |= 1;
-	mmio_write_32((0xf7032000 + 0x010), data);
-
-	udelay(100);
-	do {
-		data = mmio_read_32((0xf7032000 + 0x030));
-		data &= 3 << 28;
-	} while (data != (3 << 28));
-	do {
-		data = mmio_read_32((0xf7032000 + 0x010));
-		data &= 3 << 28;
-	} while (data != (3 << 28));
-
-	ret = lpddr3_freq_init(ddr800);
-	if (ret)
-		return;
-}
-
-static void init_ddrc_qos(void)
-{
-	unsigned int port, data;
-
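-	/* Configure the per-port QoS/priority settings of the DDR controller (0xf7120000) and its QoS block (0xf7124000). */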
-	mmio_write_32((0xf7124000 + 0x088), 1);
-
-
-	port = 0;
-	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x1210);
-	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x11111111);
-	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x11111111);
-	mmio_write_32((0xf7120000 + 0x400 + 0 * 0x10), 0x001d0007);
-
-
-	for (port = 3; port <= 4; port++) {
-		mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x1210);
-		mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x77777777);
-		mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x77777777);
-	}
-
-
-	port = 1;
-	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x30000);
-	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x1234567);
-	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x1234567);
-
-
-	mmio_write_32((0xf7124000 + 0x1f0), 0);
-	mmio_write_32((0xf7124000 + 0x0bc), 0x3020100);
-	mmio_write_32((0xf7124000 + 0x0d0), 0x3020100);
-	mmio_write_32((0xf7124000 + 0x1f4), 0x01000100);
-	mmio_write_32((0xf7124000 + 0x08c + 0 * 4), 0xd0670402);
-	mmio_write_32((0xf7124000 + 0x068 + 0 * 4), 0x31);
-	mmio_write_32((0xf7124000 + 0x000), 0x7);
-
-	data = mmio_read_32((0xf7124000 + 0x09c));
-	data &= ~0xff0000;
-	data |= 0x400000;
-	mmio_write_32((0xf7124000 + 0x09c), data);
-	data = mmio_read_32((0xf7124000 + 0x0ac));
-	data &= ~0xff0000;
-	data |= 0x400000;
-	mmio_write_32((0xf7124000 + 0x0ac), data);
-	port = 2;
-	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x30000);
-	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x1234567);
-	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x1234567);
-
-
-	mmio_write_32((0xf7124000 + 0x09c), 0xff7fff);
-	mmio_write_32((0xf7124000 + 0x0a0), 0xff);
-	mmio_write_32((0xf7124000 + 0x0ac), 0xff7fff);
-	mmio_write_32((0xf7124000 + 0x0b0), 0xff);
-	mmio_write_32((0xf7124000 + 0x0bc), 0x3020100);
-	mmio_write_32((0xf7124000 + 0x0d0), 0x3020100);
-}
-
-static void init_mmc0_pll(void)
-{
-	unsigned int data;
-
-	data = hi6553_read_8(0x084);
-	data |= 0x7;
-	hi6553_write_8(0x084, data);
-
-	/* select SYSPLL as the source of MMC0 */
-	/* select SYSPLL as the source of MUX1 (SC_CLK_SEL0) */
-	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 5 | 1 << 21);
-	do {
-		data = mmio_read_32(PERI_SC_CLK_SEL0);
-	} while (!(data & (1 << 5)));
-	/* select MUX1 as the source of MUX2 (SC_CLK_SEL0) */
-	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 29);
-	do {
-		data = mmio_read_32(PERI_SC_CLK_SEL0);
-	} while (data & (1 << 13));
-
-	mmio_write_32(PERI_SC_PERIPH_CLKEN0, (1 << 0));
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (!(data & (1 << 0)));
-
-	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
-	data |= 1 << 1;
-	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
-
-	do {
-		mmio_write_32(PERI_SC_CLKCFG8BIT1, (1 << 7) | 0xb);
-		data = mmio_read_32(PERI_SC_CLKCFG8BIT1);
-	} while ((data & 0xb) != 0xb);
-}
-
-static void reset_mmc0_clk(void)
-{
-	unsigned int data;
-
-	/* disable mmc0 bus clock */
-	mmio_write_32(PERI_SC_PERIPH_CLKDIS0, PERI_CLK0_MMC0);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (data & PERI_CLK0_MMC0);
-	/* enable mmc0 bus clock */
-	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC0);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (!(data & PERI_CLK0_MMC0));
-	/* reset mmc0 clock domain */
-	mmio_write_32(PERI_SC_PERIPH_RSTEN0, PERI_RST0_MMC0);
-
-	/* bypass mmc0 clock phase */
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL2);
-	data |= 3;
-	mmio_write_32(PERI_SC_PERIPH_CTRL2, data);
-
-	/* disable low power */
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL13);
-	data |= 1 << 3;
-	mmio_write_32(PERI_SC_PERIPH_CTRL13, data);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
-	} while (!(data & PERI_RST0_MMC0));
-
-	/* unreset mmc0 clock domain */
-	mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC0);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
-	} while (data & PERI_RST0_MMC0);
-}
-
-static void init_media_clk(void)
-{
-	unsigned int data, value;
-
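-	/* Enable the media PLL and wait for bit 28, presumably its lock indication, then enable the clock bit in PERI_SC_PERIPH_CLKEN12. */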
-	data = mmio_read_32(PMCTRL_MEDPLLCTRL);
-	data |= 1;
-	mmio_write_32(PMCTRL_MEDPLLCTRL, data);
-
-	for (;;) {
-		data = mmio_read_32(PMCTRL_MEDPLLCTRL);
-		value = 1 << 28;
-		if ((data & value) == value)
-			break;
-	}
-
-	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
-	data = 1 << 10;
-	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
-}
-
-static void init_mmc1_pll(void)
-{
-	uint32_t data;
-
-	/* select SYSPLL as the source of MMC1 */
-	/* select SYSPLL as the source of MUX1 (SC_CLK_SEL0) */
-	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 11 | 1 << 27);
-	do {
-		data = mmio_read_32(PERI_SC_CLK_SEL0);
-	} while (!(data & (1 << 11)));
-	/* select MUX1 as the source of MUX2 (SC_CLK_SEL0) */
-	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 30);
-	do {
-		data = mmio_read_32(PERI_SC_CLK_SEL0);
-	} while (data & (1 << 14));
-
-	mmio_write_32(PERI_SC_PERIPH_CLKEN0, (1 << 1));
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (!(data & (1 << 1)));
-
-	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
-	data |= 1 << 2;
-	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
-
-	do {
-		/* 1.2GHz / 50 = 24MHz */
-		mmio_write_32(PERI_SC_CLKCFG8BIT2, 0x31 | (1 << 7));
-		data = mmio_read_32(PERI_SC_CLKCFG8BIT2);
-	} while ((data & 0x31) != 0x31);
-}
-
-static void reset_mmc1_clk(void)
-{
-	unsigned int data;
-
-	/* disable mmc1 bus clock */
-	mmio_write_32(PERI_SC_PERIPH_CLKDIS0, PERI_CLK0_MMC1);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (data & PERI_CLK0_MMC1);
-	/* enable mmc1 bus clock */
-	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC1);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while (!(data & PERI_CLK0_MMC1));
-	/* reset mmc1 clock domain */
-	mmio_write_32(PERI_SC_PERIPH_RSTEN0, PERI_RST0_MMC1);
-
-	/* bypass mmc1 clock phase */
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL2);
-	data |= 3 << 2;
-	mmio_write_32(PERI_SC_PERIPH_CTRL2, data);
-
-	/* disable low power */
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL13);
-	data |= 1 << 4;
-	mmio_write_32(PERI_SC_PERIPH_CTRL13, data);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
-	} while (!(data & PERI_RST0_MMC1));
-
-	/* unreset mmc1 clock domain */
-	mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC1);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
-	} while (data & PERI_RST0_MMC1);
-}
-
-static void ddr_phy_reset(void)
-{
-	mmio_write_32(0xf7030340, 0xa000);
-	mmio_write_32(0xf7030344, 0xa000);
-}
-
-void hi6220_pll_init(void)
-{
-	uint32_t data;
-
-	init_pll();
-	init_freq();
-
-	/*
-	 * Init DDR with 533MHz. Otherwise, DDR initialization
-	 * may fail on 800MHz on some boards.
-	 */
-	ddr_phy_reset();
-	init_ddr(0);
-	/* Init DDR with 800MHz. */
-	ddr_phy_reset();
-	init_ddr(1);
-
-
-	ddrc_common_init(1);
-	dienum_det_and_rowcol_cfg();
-	detect_ddr_chip_info();
-
-	data = mmio_read_32(0xf7032000 + 0x010);
-	data &= ~0x1;
-	mmio_write_32(0xf7032000 + 0x010, data);
-	data = mmio_read_32(0xf7032000 + 0x010);
-
-	/*
-	 * Test memory access. Do not use address 0x0 because the compiler
-	 * may assume it is not a valid address and generate incorrect code
-	 * (GCC 4.9.1 without -fno-delete-null-pointer-checks for instance).
-	 */
-	mmio_write_32(0x4, 0xa5a55a5a);
-	INFO("ddr test value:0x%x\n", mmio_read_32(0x4));
-	init_ddrc_qos();
-
-	init_mmc0_pll();
-	reset_mmc0_clk();
-	init_media_clk();
-
-	dsb();
-
-	init_mmc1_pll();
-	reset_mmc1_clk();
-}
diff --git a/plat/hikey/usb.c b/plat/hikey/usb.c
deleted file mode 100644
index 57adfbc..0000000
--- a/plat/hikey/usb.c
+++ /dev/null
@@ -1,1515 +0,0 @@
-/*
- * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
- * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <ctype.h>
-#include <debug.h>
-#include <gpio.h>
-#include <hi6220.h>
-#include <mmio.h>
-#include <partitions.h>
-#include <platform_def.h>
-#include <sp804_timer.h>
-#include <string.h>
-#include <usb.h>
-#include "hikey_private.h"
-#include <bl_common.h>
-
-#define NUM_ENDPOINTS			16
-
-#define USB_BLOCK_HIGH_SPEED_SIZE	512
-
-#define VERSION_BOOTLOADER	"0.4"
-
-struct ep_type {
-	unsigned char		active;
-	unsigned char		busy;
-	unsigned char		done;
-	unsigned int		rc;
-	unsigned int		size;
-};
-
-struct usb_endpoint {
-	struct usb_endpoint	*next;
-	unsigned int		maxpkt;
-	struct usb_request	*req;
-	unsigned char		num;
-	unsigned char		in;
-};
-
-struct usb_config_bundle {
-	struct usb_config_descriptor config;
-	struct usb_interface_descriptor interface;
-	struct usb_endpoint_descriptor ep1;
-	struct usb_endpoint_descriptor ep2;
-} __attribute__ ((packed));
-
-static setup_packet ctrl_req[NUM_ENDPOINTS]
-__attribute__ ((section("tzfw_coherent_mem")));
-static unsigned char ctrl_resp[2]
-__attribute__ ((section("tzfw_coherent_mem")));
-
-static struct ep_type endpoints[NUM_ENDPOINTS]
-__attribute__ ((section("tzfw_coherent_mem")));
-
-dwc_otg_dev_dma_desc_t dma_desc
-__attribute__ ((section("tzfw_coherent_mem")));
-dwc_otg_dev_dma_desc_t dma_desc_ep0
-__attribute__ ((section("tzfw_coherent_mem")));
-dwc_otg_dev_dma_desc_t dma_desc_in
-__attribute__ ((section("tzfw_coherent_mem")));
-dwc_otg_dev_dma_desc_t dma_desc_addr
-__attribute__ ((section("tzfw_coherent_mem")));
-
-static struct usb_config_bundle config_bundle
-__attribute__ ((section("tzfw_coherent_mem")));
-static struct usb_device_descriptor device_descriptor
-__attribute__ ((section("tzfw_coherent_mem")));
-
-static struct usb_request rx_req
-__attribute__ ((section("tzfw_coherent_mem")));
-static struct usb_request tx_req
-__attribute__ ((section("tzfw_coherent_mem")));
-
-static struct usb_string_descriptor serial_string
-__attribute__ ((section("tzfw_coherent_mem")));
-
-static const struct usb_string_descriptor string_devicename = {
-	24,
-	USB_DT_STRING,
-	{'A', 'n', 'd', 'r', 'o', 'i', 'd', ' ', '2', '.', '0'}
-};
-
-static const struct usb_string_descriptor serial_string_descriptor = {
-	34,
-	USB_DT_STRING,
-	{'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}
-};
-
-static const struct usb_string_descriptor lang_descriptor = {
-	4,
-	USB_DT_STRING,
-	{0x0409}	/* en-US */
-};
-
-static void usb_rx_cmd_complete(unsigned actual, int stat);
-static void usb_rx_data_complete(unsigned actual, int status);
-
-static unsigned int rx_desc_bytes = 0;
-static unsigned long rx_addr;
-static unsigned long rx_length;
-static unsigned int last_one = 0;
-static char *cmdbuf;
-static struct usb_endpoint ep1in, ep1out;
-static int g_usb_enum_flag = 0;
-
-int usb_need_reset = 0;
-
-static int usb_drv_port_speed(void)
-{
-	/* 2'b00 High speed (PHY clock is at 30MHz or 60MHz) */
-	return (mmio_read_32(DSTS) & 2) == 0 ? 1 : 0;
-}
-
-static void reset_endpoints(void)
-{
-	int i;
-	unsigned int data;
-
-	INFO("enter reset_endpoints.\n");
-	for (i = 0; i < NUM_ENDPOINTS; i++) {
-		endpoints[i].active = 0;
-		endpoints[i].busy = 0;
-		endpoints[i].rc = -1;
-		endpoints[i].done = 1;
-	}
-
-	/* EP0 IN ACTIVE NEXT=1 */
-	mmio_write_32(DIEPCTL0, 0x8800);
-
-	/* EP0 OUT ACTIVE */
-	mmio_write_32(DOEPCTL0, 0x8000);
-
-	/* Clear any pending OTG Interrupts */
-	mmio_write_32(GOTGINT, ~0);
-
-	/* Clear any pending interrupts */
-	mmio_write_32(GINTSTS, ~0);
-	mmio_write_32(DIEPINT0, ~0);
-	mmio_write_32(DOEPINT0, ~0);
-	mmio_write_32(DIEPINT1, ~0);
-	mmio_write_32(DOEPINT1, ~0);
-
-	/* IN EP interrupt mask */
-	mmio_write_32(DIEPMSK, 0x0D);
-	/* OUT EP interrupt mask */
-	mmio_write_32(DOEPMSK, 0x0D);
-	/* Enable interrupts on Ep0 */
-	mmio_write_32(DAINTMSK, 0x00010001);
-
-	/* EP0 OUT Transfer Size:64 Bytes, 1 Packet, 3 Setup Packet, Read to receive setup packet*/
-	data = DOEPTSIZ0_SUPCNT(3) | DOEPTSIZ0_PKTCNT |
-		(64 << DOEPTSIZ0_XFERSIZE_SHIFT);
-	mmio_write_32(DOEPTSIZ0, data);
-	/* Note: the forced pointer-to-integer conversions below are intentional. */
-	dma_desc_ep0.status.b.bs = 0x3;
-	dma_desc_ep0.status.b.mtrf = 0;
-	dma_desc_ep0.status.b.sr = 0;
-	dma_desc_ep0.status.b.l = 1;
-	dma_desc_ep0.status.b.ioc = 1;
-	dma_desc_ep0.status.b.sp = 0;
-	dma_desc_ep0.status.b.bytes = 64;
-	dma_desc_ep0.buf = (unsigned long)&ctrl_req;
-	dma_desc_ep0.status.b.sts = 0;
-	dma_desc_ep0.status.b.bs = 0x0;
-	mmio_write_32(DOEPDMA0, ((unsigned long)&(dma_desc_ep0)));
-	VERBOSE("%s, &ctrl_req:%llx:%x, &dma_desc_ep0:%llx:%x\n",
-		__func__, (unsigned long)&ctrl_req, (unsigned long)&ctrl_req,
-		(unsigned long)&dma_desc_ep0, (unsigned long)&dma_desc_ep0);
-	/* EP0 OUT ENABLE CLEARNAK */
-	data = mmio_read_32(DOEPCTL0);
-	mmio_write_32(DOEPCTL0, (data | 0x84000000));
-
-	VERBOSE("exit reset_endpoints. \n");
-}
-
-static int usb_drv_request_endpoint(int type, int dir)
-{
-	int ep = 1;    /*FIXME*/
-	unsigned int newbits, data;
-
-	newbits = (type << 18) | 0x10000000;
-
-	/*
-	 * (type << 18):Endpoint Type (EPType)
-	 * 0x10000000:Endpoint Enable (EPEna)
-	 * 0x000C0000: Endpoint Type (EPType); hardcoded to 00 for control.
-	 * (ep<<22):TxFIFO Number (TxFNum)
-	 * 0x20000:NAK Status (NAKSts);The core is transmitting NAK handshakes on this endpoint.
-	 */
-	if (dir) {  // IN: to host
-		data = mmio_read_32(DIEPCTL(ep));
-		data &= ~0x000c0000;
-		data |= newbits | (ep << 22) | 0x20000;
-		mmio_write_32(DIEPCTL(ep), data);
-	} else {    // OUT: to device
-		data = mmio_read_32(DOEPCTL(ep));
-		data &= ~0x000c0000;
-		data |= newbits;
-		mmio_write_32(DOEPCTL(ep), data);
-	}
-	endpoints[ep].active = 1;	/* true */
-
-	return ep | dir;
-}
-
-void usb_drv_release_endpoint(int ep)
-{
-	ep = ep % NUM_ENDPOINTS;
-	if (ep < 1 || ep >= NUM_ENDPOINTS)
-		return;
-
-	endpoints[ep].active = 0;
-}
-
-void usb_config(void)
-{
-	unsigned int data;
-
-	INFO("enter usb_config\n");
-
-	mmio_write_32(GDFIFOCFG, DATA_FIFO_CONFIG);
-	mmio_write_32(GRXFSIZ, RX_SIZE);
-	mmio_write_32(GNPTXFSIZ, ENDPOINT_TX_SIZE);
-
-	mmio_write_32(DIEPTXF1, DATA_IN_ENDPOINT_TX_FIFO1);
-	mmio_write_32(DIEPTXF2, DATA_IN_ENDPOINT_TX_FIFO2);
-	mmio_write_32(DIEPTXF3, DATA_IN_ENDPOINT_TX_FIFO3);
-	mmio_write_32(DIEPTXF4, DATA_IN_ENDPOINT_TX_FIFO4);
-	mmio_write_32(DIEPTXF5, DATA_IN_ENDPOINT_TX_FIFO5);
-	mmio_write_32(DIEPTXF6, DATA_IN_ENDPOINT_TX_FIFO6);
-	mmio_write_32(DIEPTXF7, DATA_IN_ENDPOINT_TX_FIFO7);
-	mmio_write_32(DIEPTXF8, DATA_IN_ENDPOINT_TX_FIFO8);
-	mmio_write_32(DIEPTXF9, DATA_IN_ENDPOINT_TX_FIFO9);
-	mmio_write_32(DIEPTXF10, DATA_IN_ENDPOINT_TX_FIFO10);
-	mmio_write_32(DIEPTXF11, DATA_IN_ENDPOINT_TX_FIFO11);
-	mmio_write_32(DIEPTXF12, DATA_IN_ENDPOINT_TX_FIFO12);
-	mmio_write_32(DIEPTXF13, DATA_IN_ENDPOINT_TX_FIFO13);
-	mmio_write_32(DIEPTXF14, DATA_IN_ENDPOINT_TX_FIFO14);
-	mmio_write_32(DIEPTXF15, DATA_IN_ENDPOINT_TX_FIFO15);
-
-	/*Init global csr register.*/
-
-	/*
-	 * set Periodic TxFIFO Empty Level,
-	 * Non-Periodic TxFIFO Empty Level,
-	 * Enable DMA, Unmask Global Intr
-	 */
-	INFO("USB: DMA mode.\n");
-	mmio_write_32(GAHBCFG, GAHBCFG_CTRL_MASK);
-
-	/* select 8bit UTMI+, ULPI interface */
-	INFO("USB ULPI PHY\n");
-	mmio_write_32(GUSBCFG, 0x2400);
-
-	/* Detect usb work mode,host or device? */
-	do {
-		data = mmio_read_32(GINTSTS);
-	} while (data & GINTSTS_CURMODE_HOST);
-	VERBOSE("Enter device mode\n");
-	udelay(3);
-
-	/*Init global and device mode csr register.*/
-	/*set Non-Zero-Length status out handshake */
-	data = (0x20 << DCFG_EPMISCNT_SHIFT) | DCFG_NZ_STS_OUT_HSHK;
-	mmio_write_32(DCFG, data);
-
-	/* Interrupt unmask: IN event, OUT event, bus reset */
-	data = GINTSTS_OEPINT | GINTSTS_IEPINT | GINTSTS_ENUMDONE |
-	       GINTSTS_USBRST | GINTSTS_USBSUSP | GINTSTS_ERLYSUSP |
-	       GINTSTS_GOUTNAKEFF;
-	mmio_write_32(GINTMSK, data);
-
-	do {
-		data = mmio_read_32(GINTSTS) & GINTSTS_ENUMDONE;
-	} while (data);
-	VERBOSE("USB Enum Done.\n");
-
-	/* Clear any pending OTG Interrupts */
-	mmio_write_32(GOTGINT, ~0);
-	/* Clear any pending interrupts */
-	mmio_write_32(GINTSTS, ~0);
-	mmio_write_32(GINTMSK, ~0);
-	data = mmio_read_32(GOTGINT);
-	data &= ~0x3000;
-	mmio_write_32(GOTGINT, data);
-	/*endpoint settings cfg*/
-	reset_endpoints();
-
-	udelay(1);
-
-	/*init finish. and ready to transfer data*/
-
-	/* Soft Disconnect */
-	mmio_write_32(DCTL, 0x802);
-	udelay(10000);
-
-	/* Soft Reconnect */
-	mmio_write_32(DCTL, 0x800);
-	VERBOSE("exit usb_config.\n");
-}
-
-void usb_drv_set_address(int address)
-{
-	unsigned int cfg;
-
-	cfg = mmio_read_32(DCFG);
-	cfg &= ~0x7F0;			/* 0x7F0: device address field */
-	cfg |= address << 4;
-	mmio_write_32(DCFG, cfg);
-}
-
-static void ep_send(int ep, const void *ptr, int len)
-{
-	unsigned int data;
-
-	endpoints[ep].busy = 1;		// true
-	endpoints[ep].size = len;
-
-	/* EPx IN ACTIVE */
-	data = mmio_read_32(DIEPCTL(ep)) | DXEPCTL_USBACTEP;
-	mmio_write_32(DIEPCTL(ep), data);
-
-	/* set DMA Address */
-	if (!len) {
-		/* send one empty packet */
-		dma_desc_in.buf = 0;
-	} else {
-		dma_desc_in.buf = (unsigned long)ptr;
-	}
-	dma_desc_in.status.b.bs = 0x3;
-	dma_desc_in.status.b.l = 1;
-	dma_desc_in.status.b.ioc = 1;
-	dma_desc_in.status.b.sp = 1;
-	dma_desc_in.status.b.sts = 0;
-	dma_desc_in.status.b.bs = 0x0;
-	dma_desc_in.status.b.bytes = len;
-	mmio_write_32(DIEPDMA(ep), (unsigned long)&dma_desc_in);
-
-	data = mmio_read_32(DIEPCTL(ep));
-	data |= DXEPCTL_EPENA | DXEPCTL_CNAK | DXEPCTL_NEXTEP(ep + 1);
-	mmio_write_32(DIEPCTL(ep), data);
-}
-
-void usb_drv_stall(int endpoint, char stall, char in)
-{
-	unsigned int data;
-
-	/*
-	 * STALL Handshake (Stall)
-	 */
-
-	data = mmio_read_32(DIEPCTL(endpoint));
-	if (in) {
-		if (stall)
-			mmio_write_32(DIEPCTL(endpoint), data | 0x00200000);
-		else
-			mmio_write_32(DIEPCTL(endpoint), data & ~0x00200000);
-	} else {
-		if (stall)
-			mmio_write_32(DOEPCTL(endpoint), data | 0x00200000);
-		else
-			mmio_write_32(DOEPCTL(endpoint), data & ~0x00200000);
-	}
-}
-
-int usb_drv_send_nonblocking(int endpoint, const void *ptr, int len)
-{
-	VERBOSE("%s, endpoint = %d, ptr = 0x%x, Len=%d.\n",
-		__func__, endpoint, ptr, len);
-	ep_send(endpoint % NUM_ENDPOINTS, ptr, len);
-	return 0;
-}
-
-void usb_drv_cancel_all_transfers(void)
-{
-	reset_endpoints();
-}
-
-int hiusb_epx_tx(unsigned ep, void *buf, unsigned len)
-{
-	int blocksize, packets;
-	unsigned int epints;
-	unsigned int cycle = 0;
-	unsigned int data;
-
-	endpoints[ep].busy = 1;		//true
-	endpoints[ep].size = len;
-
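-	/* While the global IN NAK status is flagged in GINTSTS (bit 6), keep writing DCTL bit 8, which appears to be the DWC OTG "clear global non-periodic IN NAK" control. */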
-	while (mmio_read_32(GINTSTS) & 0x40) {
-		data = mmio_read_32(DCTL);
-		data |= 0x100;
-		mmio_write_32(DCTL, data);
-	}
-
-	data = mmio_read_32(DIEPCTL(ep));
-	data |= 0x08000000;
-	mmio_write_32(DIEPCTL(ep), data);
-
-	/* EPx IN ACTIVE */
-	mmio_write_32(DIEPCTL(ep), data | 0x8000);
-	if (!ep) {
-		blocksize = 64;
-	} else {
-		blocksize = usb_drv_port_speed() ? USB_BLOCK_HIGH_SPEED_SIZE : 64;
-	}
-	packets = (len + blocksize - 1) / blocksize;
-
-	if (!len) {
-		/* one empty packet */
-		mmio_write_32(DIEPTSIZ(ep), 1 << 19);
-		/* NULL */
-		dma_desc_in.status.b.bs = 0x3;
-		dma_desc_in.status.b.l = 1;
-		dma_desc_in.status.b.ioc = 1;
-		dma_desc_in.status.b.sp = last_one;
-		dma_desc_in.status.b.bytes = 0;
-		dma_desc_in.buf = 0;
-		dma_desc_in.status.b.sts = 0;
-		dma_desc_in.status.b.bs = 0x0;
-		mmio_write_32(DIEPDMA(ep), (unsigned long)&dma_desc_in);
-	} else {
-		mmio_write_32(DIEPTSIZ(ep), len | (packets << 19));
-		dma_desc_in.status.b.bs = 0x3;
-		dma_desc_in.status.b.l = 1;
-		dma_desc_in.status.b.ioc = 1;
-		dma_desc_in.status.b.sp = last_one;
-		dma_desc_in.status.b.bytes = len;
-		dma_desc_in.buf = (unsigned long)buf;
-		dma_desc_in.status.b.sts = 0;
-		dma_desc_in.status.b.bs = 0x0;
-		mmio_write_32(DIEPDMA(ep), (unsigned long)&dma_desc_in);
-	}
-
-	cycle = 0;
-	while (1) {
-		data = mmio_read_32(DIEPINT(ep));
-		if ((data & 0x2000) || (cycle > 10000)) {
-			if (cycle > 10000) {
-				NOTICE("Phase 2:ep(%d) status, DIEPCTL(%d) is [0x%x],"
-				       "DTXFSTS(%d) is [0x%x], DIEPINT(%d) is [0x%x],"
-				       "DIEPTSIZ(%d) is [0x%x] GINTSTS is [0x%x]\n",
-					ep, ep, data,
-					ep, mmio_read_32(DTXFSTS(ep)),
-					ep, mmio_read_32(DIEPINT(ep)),
-					ep, mmio_read_32(DIEPTSIZ(ep)),
-					mmio_read_32(GINTSTS));
-			}
-			break;
-		}
-
-		cycle++;
-		udelay(10);
-	}
-	VERBOSE("ep(%d) enable, DIEPCTL(%d) is [0x%x], DTXFSTS(%d) is [0x%x],"
-		"DIEPINT(%d) is [0x%x], DIEPTSIZ(%d) is [0x%x] \n",
-		ep, ep, mmio_read_32(DIEPCTL(ep)),
-		ep, mmio_read_32(DTXFSTS(ep)),
-		ep, mmio_read_32(DIEPINT(ep)),
-		ep, mmio_read_32(DIEPTSIZ(ep)));
-
-	__asm__ volatile("dsb	sy\n"
-			 "isb	sy\n");
-	data = mmio_read_32(DIEPCTL(ep));
-	data |= 0x84000000;
-	/* epena & cnak*/
-	mmio_write_32(DIEPCTL(ep), data);
-	__asm__ volatile("dsb	sy\n"
-			 "isb	sy\n");
-
-	cycle = 0;
-	while (1) {
-		epints = mmio_read_32(DIEPINT(ep)) & 1;
-		if ((mmio_read_32(GINTSTS) & 0x40000) && epints) {
-			VERBOSE("Tx succ:ep(%d), DTXFSTS(%d) is [0x%x] \n",
-				ep, ep, mmio_read_32(DTXFSTS(ep)));
-			mmio_write_32(DIEPINT(ep), epints);
-			if (endpoints[ep].busy) {
-				endpoints[ep].busy = 0;//false
-				endpoints[ep].rc = 0;
-				endpoints[ep].done = 1;//true
-			}
-			break;
-		}
-		cycle++;
-		udelay(10);
-		VERBOSE("loop for intr: ep(%d), DIEPCTL(%d) is [0x%x], "
-			"DTXFSTS(%d) is [0x%x], DIEPINT(%d) is [0x%x] \n",
-			ep, ep, mmio_read_32(DIEPCTL(ep)),
-			ep, mmio_read_32(DTXFSTS(ep)),
-			ep, mmio_read_32(DIEPINT(ep)));
-
-		if (cycle > 1000000) {
-			WARN("Wait IOC intr over 10s! USB will reset\n");
-			usb_need_reset = 1;
-			return 1;
-		}
-	}
-
-	cycle = 0;
-	while (1) {
-		if ((mmio_read_32(DIEPINT(ep)) & 0x2000) || (cycle > 100000)) {
-			if (cycle > 100000) {
-				WARN("total wait cycles: [%d]\n", cycle);
-			}
-			break;
-		}
-
-		cycle++;
-		udelay(10);
-	}
-
-	return 0;
-}
-
-int hiusb_epx_rx(unsigned ep, void *buf, unsigned len)
-{
-	unsigned int blocksize = 0, data;
-	int packets;
-
-	VERBOSE("ep%d rx, len = 0x%x, buf = 0x%x.\n", ep, len, buf);
-
-	endpoints[ep].busy = 1;//true
-	/* EPx UNSTALL */
-	data = mmio_read_32(DOEPCTL(ep)) & ~0x00200000;
-	mmio_write_32(DOEPCTL(ep), data);
-	/* EPx OUT ACTIVE */
-	data = mmio_read_32(DOEPCTL(ep)) | 0x8000;
-	mmio_write_32(DOEPCTL(ep), data);
-
-	blocksize = usb_drv_port_speed() ? USB_BLOCK_HIGH_SPEED_SIZE : 64;
-	packets = (len + blocksize - 1) / blocksize;
-
-#define MAX_RX_PACKET 0x3FF
-
-	/*Max recv packets is 1023*/
-	if (packets > MAX_RX_PACKET) {
-		endpoints[ep].size = MAX_RX_PACKET * blocksize;
-		len = MAX_RX_PACKET * blocksize;
-	} else {
-		endpoints[ep].size = len;
-	}
-
-	if (!len) {
-		/* one empty packet */
-		mmio_write_32(DOEPTSIZ(ep), 1 << 19);
-		/* NULL buffer (dummy address) */
-		dma_desc.status.b.bs = 0x3;
-		dma_desc.status.b.mtrf = 0;
-		dma_desc.status.b.sr = 0;
-		dma_desc.status.b.l = 1;
-		dma_desc.status.b.ioc = 1;
-		dma_desc.status.b.sp = 0;
-		dma_desc.status.b.bytes = 0;
-		dma_desc.buf = 0;
-		dma_desc.status.b.sts = 0;
-		dma_desc.status.b.bs = 0x0;
-
-		mmio_write_32(DOEPDMA(ep), (unsigned long)&dma_desc);
-	} else {
-		if (len >= blocksize * 64) {
-			rx_desc_bytes = blocksize*64;
-		} else {
-			rx_desc_bytes = len;
-		}
-		VERBOSE("rx len %d, rx_desc_bytes %d \n",len,rx_desc_bytes);
-		dma_desc.status.b.bs = 0x3;
-		dma_desc.status.b.mtrf = 0;
-		dma_desc.status.b.sr = 0;
-		dma_desc.status.b.l = 1;
-		dma_desc.status.b.ioc = 1;
-		dma_desc.status.b.sp = 0;
-		dma_desc.status.b.bytes = rx_desc_bytes;
-		dma_desc.buf = (unsigned long)buf;
-		dma_desc.status.b.sts = 0;
-		dma_desc.status.b.bs = 0x0;
-
-		mmio_write_32(DOEPDMA(ep), (unsigned long)&dma_desc);
-	}
-	/* EPx OUT ENABLE CLEARNAK */
-	data = mmio_read_32(DOEPCTL(ep));
-	data |= 0x84000000;
-	mmio_write_32(DOEPCTL(ep), data);
-	return 0;
-}
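
The same DMA-descriptor set-up sequence is open-coded several times in this driver (hiusb_epx_tx() with and without data, hiusb_epx_rx(), and again for EP0 in usb_poll() below). The following is a minimal sketch of a helper that could factor it out; it is illustrative only, not part of the patch, and the helper name is hypothetical. The field names simply follow the dwc_otg_dev_dma_desc usage above.

/* Hypothetical helper (illustrative): fill one DWC OTG device DMA descriptor
 * and hand it back to the controller, mirroring the inline sequences above. */
static void dwc_otg_fill_dma_desc(struct dwc_otg_dev_dma_desc *desc,
				  uintptr_t buf, unsigned int bytes,
				  unsigned int short_packet)
{
	desc->status.b.bs = 0x3;	/* mark the descriptor busy while filling it */
	desc->status.b.mtrf = 0;
	desc->status.b.sr = 0;
	desc->status.b.l = 1;		/* last descriptor in the chain */
	desc->status.b.ioc = 1;		/* interrupt on completion */
	desc->status.b.sp = short_packet;
	desc->status.b.bytes = bytes;
	desc->buf = buf;
	desc->status.b.sts = 0;
	desc->status.b.bs = 0x0;	/* release the descriptor to the controller */
}

With such a helper, the IN path would reduce to dwc_otg_fill_dma_desc(&dma_desc_in, (uintptr_t)buf, len, last_one) followed by the DIEPDMA write.
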
-
-int usb_queue_req(struct usb_endpoint *ept, struct usb_request *req)
-{
-	if (ept->in)
-		hiusb_epx_tx(ept->num, req->buf, req->length);
-	else
-		hiusb_epx_rx(ept->num, req->buf, req->length);
-
-	return 0;
-}
-
-static void rx_cmd(void)
-{
-	struct usb_request *req = &rx_req;
-	req->buf = cmdbuf;
-	req->length = RX_REQ_LEN;
-	req->complete = usb_rx_cmd_complete;
-	usb_queue_req(&ep1out, req);
-}
-
-static void rx_data(void)
-{
-	struct usb_request *req = &rx_req;
-
-	req->buf = (void *)((unsigned long) rx_addr);
-	req->length = rx_length;
-	req->complete = usb_rx_data_complete;
-	usb_queue_req(&ep1out, req);
-}
-
-void tx_status(const char *status)
-{
-	struct usb_request *req = &tx_req;
-	int len = strlen(status);
-
-	memcpy(req->buf, status, (unsigned int)len);
-	req->length = (unsigned int)len;
-	req->complete = 0;
-	usb_queue_req(&ep1in, req);
-}
-
-void fastboot_tx_status(const char *status)
-{
-	tx_status(status);
-	rx_cmd();
-}
-
-void tx_dump_page(const char *ptr, int len)
-{
-	struct usb_request *req = &tx_req;
-
-	memcpy(req->buf, ptr, (unsigned int)len);
-	req->length = (unsigned int)len;
-	req->complete = 0;
-	usb_queue_req(&ep1in, req);
-}
-
-
-static void usb_rx_data_complete(unsigned actual, int status)
-{
-
-	if(status != 0)
-		return;
-
-	if(actual > rx_length) {
-		actual = rx_length;
-	}
-
-	rx_addr += actual;
-	rx_length -= actual;
-
-	if(rx_length > 0) {
-		rx_data();
-	} else {
-		tx_status("OKAY");
-		rx_cmd();
-	}
-}
-
-static void usb_status(unsigned online, unsigned highspeed)
-{
-	if (online) {
-		INFO("usb: online (%s)\n", highspeed ? "highspeed" : "fullspeed");
-		rx_cmd();
-	}
-}
-
-void usb_handle_control_request(setup_packet* req)
-{
-	const void* addr = NULL;
-	int size = -1;
-	int i;
-	int maxpacket;
-	unsigned int data;
-	char *serialno;
-	struct usb_endpoint_descriptor epx;
-	struct usb_config_bundle const_bundle = {
-		.config = {
-			.bLength	= sizeof(struct usb_config_descriptor),
-			.bDescriptorType	= USB_DT_CONFIG,
-			.wTotalLength	= sizeof(struct usb_config_descriptor) +
-				sizeof(struct usb_interface_descriptor) +
-				sizeof(struct usb_endpoint_descriptor) *
-				USB_NUM_ENDPOINTS,
-			.bNumInterfaces		= 1,
-			.bConfigurationValue	= 1,
-			.iConfiguration		= 0,
-			.bmAttributes		= USB_CONFIG_ATT_ONE,
-			.bMaxPower		= 0x80
-		},
-		.interface = {
-			.bLength	= sizeof(struct usb_interface_descriptor),
-			.bDescriptorType	= USB_DT_INTERFACE,
-			.bInterfaceNumber	= 0,
-			.bAlternateSetting	= 0,
-			.bNumEndpoints		= USB_NUM_ENDPOINTS,
-			.bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
-			.bInterfaceSubClass	= 0x42,
-			.bInterfaceProtocol	= 0x03,
-			.iInterface		= 0
-		}
-	};
-
-	/* avoid hanging when accessing unaligned memory */
-	struct usb_endpoint_descriptor const_ep1 = {
-		.bLength	= sizeof(struct usb_endpoint_descriptor),
-		.bDescriptorType	= USB_DT_ENDPOINT,
-		.bEndpointAddress	= 0x81,
-		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
-		.wMaxPacketSize		= 0,
-		.bInterval		= 0
-	};
-
-	struct usb_endpoint_descriptor const_ep2 = {
-		.bLength	= sizeof(struct usb_endpoint_descriptor),
-		.bDescriptorType	= USB_DT_ENDPOINT,
-		.bEndpointAddress	= 0x01,
-		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
-		.wMaxPacketSize		= 0,
-		.bInterval		= 1
-	};
-
-	struct usb_device_descriptor const_device = {
-		.bLength		= sizeof(struct usb_device_descriptor),
-		.bDescriptorType	= USB_DT_DEVICE,
-		.bcdUSB			= 0x0200,
-		.bDeviceClass		= 0,
-		.bDeviceSubClass	= 0,
-		.bDeviceProtocol	= 0,
-		.bMaxPacketSize0	= 0x40,
-		.idVendor		= 0x18d1,
-		.idProduct		= 0xd00d,
-		.bcdDevice		= 0x0100,
-		.iManufacturer		= 1,
-		.iProduct		= 2,
-		.iSerialNumber		= 3,
-		.bNumConfigurations	= 1
-	};
-
-	memcpy(&config_bundle, &const_bundle, sizeof(struct usb_config_bundle));
-	memcpy(&config_bundle.ep1, &const_ep1, sizeof(struct usb_endpoint_descriptor));
-	memcpy(&config_bundle.ep2, &const_ep2, sizeof(struct usb_endpoint_descriptor));
-	memcpy(&device_descriptor, &const_device,
-		sizeof(struct usb_device_descriptor));
-
-	switch (req->request) {
-	case USB_REQ_GET_STATUS:
-		if (req->type == USB_DIR_IN)
-			ctrl_resp[0] = 1;
-		else
-			ctrl_resp[0] = 0;
-		ctrl_resp[1] = 0;
-		addr = ctrl_resp;
-		size = 2;
-		break;
-
-	case USB_REQ_CLEAR_FEATURE:
-		if ((req->type == USB_RECIP_ENDPOINT) &&
-		    (req->value == USB_ENDPOINT_HALT))
-			usb_drv_stall(req->index & 0xf, 0, req->index >> 7);
-		size = 0;
-		break;
-
-	case USB_REQ_SET_FEATURE:
-		size = 0;
-		break;
-
-	case USB_REQ_SET_ADDRESS:
-		size = 0;
-		usb_drv_cancel_all_transfers();     // all endpoints reset
-		usb_drv_set_address(req->value);   // set device address
-		break;
-
-	case USB_REQ_GET_DESCRIPTOR:
-		VERBOSE("USB_REQ_GET_DESCRIPTOR: 0x%x\n", req->value >> 8);
-		switch (req->value >> 8) {
-		case USB_DT_DEVICE:
-			addr = &device_descriptor;
-			size = sizeof(device_descriptor);
-			VERBOSE("Get device descriptor.\n");
-			break;
-
-		case USB_DT_OTHER_SPEED_CONFIG:
-		case USB_DT_CONFIG:
-			if ((req->value >> 8) == USB_DT_CONFIG) {
-				maxpacket = usb_drv_port_speed() ? USB_BLOCK_HIGH_SPEED_SIZE : 64;
-				config_bundle.config.bDescriptorType = USB_DT_CONFIG;
-			} else {
-				maxpacket = usb_drv_port_speed() ? 64 : USB_BLOCK_HIGH_SPEED_SIZE;
-				config_bundle.config.bDescriptorType = USB_DT_OTHER_SPEED_CONFIG;
-			}
-			/* avoid a hang when accessing the unaligned structure */
-			memcpy(&epx, &config_bundle.ep1, sizeof(struct usb_endpoint_descriptor));
-			epx.wMaxPacketSize = maxpacket;
-			memcpy(&config_bundle.ep1, &epx, sizeof(struct usb_endpoint_descriptor));
-			memcpy(&epx, &config_bundle.ep2, sizeof(struct usb_endpoint_descriptor));
-			epx.wMaxPacketSize = maxpacket;
-			memcpy(&config_bundle.ep2, &epx, sizeof(struct usb_endpoint_descriptor));
-			addr = &config_bundle;
-			size = sizeof(config_bundle);
-			VERBOSE("Get config descriptor.\n");
-			break;
-
-		case USB_DT_STRING:
-			switch (req->value & 0xff) {
-			case 0:
-				addr = &lang_descriptor;
-				size = lang_descriptor.bLength;
-				break;
-			case 1:
-				addr = &string_devicename;
-				size = 14;
-				break;
-			case 2:
-				addr = &string_devicename;
-				size = string_devicename.bLength;
-				break;
-			case 3:
-				serialno = load_serialno();
-				if (serialno == NULL) {
-					addr = &serial_string_descriptor;
-					size = serial_string_descriptor.bLength;
-				} else {
-					i = 0;
-					memcpy((void *)&serial_string,
-					       (void *)&serial_string_descriptor,
-					       sizeof(serial_string));
-					while (1) {
-						serial_string.wString[i] = serialno[i];
-						if (serialno[i] == '\0')
-							break;
-						i++;
-					}
-					addr = &serial_string;
-					size = serial_string.bLength;
-				}
-				break;
-			default:
-				break;
-			}
-			break;
-
-		default:
-			break;
-		}
-		break;
-
-	case USB_REQ_GET_CONFIGURATION:
-		ctrl_resp[0] = 1;
-		addr = ctrl_resp;
-		size = 1;
-		break;
-
-	case USB_REQ_SET_CONFIGURATION:
-		usb_drv_cancel_all_transfers();     // call reset_endpoints to reset all EPs
-
-		usb_drv_request_endpoint(USB_ENDPOINT_XFER_BULK, USB_DIR_OUT);
-		usb_drv_request_endpoint(USB_ENDPOINT_XFER_BULK, USB_DIR_IN);
-		/*
-		 * 0x10088800:
-		 * 1:EP enable; 8:EP type:BULK; 8:USB Active Endpoint; 8:Next Endpoint
-		 */
-		data = mmio_read_32(DIEPCTL1) | 0x10088800;
-		mmio_write_32(DIEPCTL1, data);
-		data = mmio_read_32(DIEPCTL(1)) | 0x08000000;
-		mmio_write_32(DIEPCTL(1), data);
-
-		/* Enable interrupts on all endpoints */
-		mmio_write_32(DAINTMSK, 0xffffffff);
-
-		usb_status(req->value? 1 : 0, usb_drv_port_speed() ? 1 : 0);
-		size = 0;
-		VERBOSE("Set config descriptor.\n");
-
-		/* USB enumeration has succeeded; set the flag */
-		g_usb_enum_flag = 1;
-		break;
-
-	default:
-		break;
-	}
-
-	if (!size) {
-		usb_drv_send_nonblocking(0, 0, 0);  // send an empty packet
-	} else if (size == -1) { // unhandled request: stall both directions of EP0
-		usb_drv_stall(0, 1, 1);     // IN
-		usb_drv_stall(0, 1, 0);     // OUT
-	} else { // handled request: clear any stall on EP0, then send the response
-		usb_drv_stall(0, 0, 1);     // IN
-		usb_drv_stall(0, 0, 0);     // OUT
-
-		usb_drv_send_nonblocking(0, addr, size > req->length ? req->length : size);
-	}
-}
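
usb_handle_control_request() reads five fields of setup_packet: type, request, value, index and length. The real typedef lives in the platform headers and is not part of this hunk; as a hypothetical sketch, the layout implied by those accesses is the standard 8-byte USB SETUP packet:

/* Hypothetical sketch only -- the actual definition is elsewhere in the tree. */
typedef struct {
	uint8_t  type;		/* bmRequestType */
	uint8_t  request;	/* bRequest */
	uint16_t value;		/* wValue */
	uint16_t index;		/* wIndex */
	uint16_t length;	/* wLength */
} setup_packet;
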
-
-/* IRQ handler */
-static void usb_poll(void)
-{
-	uint32_t ints;
-	uint32_t epints, data;
-
-	ints = mmio_read_32(GINTSTS);		/* interrupt status */
-
-
-	if ((ints & 0xc3010) == 0)
-		return;
-	/*
-	 * bus reset
-	 * The core sets this bit to indicate that a reset is detected on the USB.
-	 */
-	if (ints & GINTSTS_USBRST) {
-		VERBOSE("bus reset intr\n");
-		/*set Non-Zero-Length status out handshake */
-		/*
-		 * DCFG:This register configures the core in Device mode after power-on
-		 * or after certain control commands or enumeration. Do not make changes
-		 * to this register after initial programming.
-		 * Send a STALL handshake on a nonzero-length status OUT transaction and
-		 * do not send the received OUT packet to the application.
-		 */
-		mmio_write_32(DCFG, 0x800004);
-		reset_endpoints();
-	}
-	/*
-	 * enumeration done, we now know the speed
-	 * The core sets this bit to indicate that speed enumeration is complete. The
-	 * application must read the Device Status (DSTS) register to obtain the
-	 * enumerated speed.
-	 */
-	if (ints & GINTSTS_ENUMDONE) {
-		/* Set up the maximum packet sizes accordingly */
-		uint32_t maxpacket = usb_drv_port_speed() ? USB_BLOCK_HIGH_SPEED_SIZE : 64;  // high speed maxpacket=512
-		VERBOSE("enum done intr. Maxpacket:%d\n", maxpacket);
-		//Set Maximum In Packet Size (MPS)
-		data = mmio_read_32(DIEPCTL1) & ~0x000003ff;
-		mmio_write_32(DIEPCTL1, data | maxpacket);
-		//Set Maximum Out Packet Size (MPS)
-		data = mmio_read_32(DOEPCTL1) & ~0x000003ff;
-		mmio_write_32(DOEPCTL1, data | maxpacket);
-	}
-
-	/*
-	 * IN EP event
-	 * The core sets this bit to indicate that an interrupt is pending on one of the IN
-	 * endpoints of the core (in Device mode). The application must read the
-	 * Device All Endpoints Interrupt (DAINT) register to determine the exact
-	 * number of the IN endpoint on which the interrupt occurred, and then read
-	 * the corresponding Device IN Endpoint-n Interrupt (DIEPINTn) register to
-	 * determine the exact cause of the interrupt. The application must clear the
-	 * appropriate status bit in the corresponding DIEPINTn register to clear this bit.
-	 */
-	if (ints & GINTSTS_IEPINT) {
-		epints = mmio_read_32(DIEPINT0);
-		mmio_write_32(DIEPINT0, epints);
-
-		//VERBOSE("IN EP event,ints:0x%x, DIEPINT0:%x, DAINT:%x, DAINTMSK:%x.\n",
-		//	ints, epints, mmio_read_32(DAINT), mmio_read_32(DAINTMSK));
-		if (epints & 0x1) { /* Transfer Completed Interrupt (XferCompl) */
-			VERBOSE("TX completed.DIEPTSIZ(0) = 0x%x.\n", mmio_read_32(DIEPTSIZ0));
-			/*FIXME,Maybe you can use bytes*/
-			/*int bytes = endpoints[0].size - (DIEPTSIZ(0) & 0x3FFFF);*/ //actual transfer
-			if (endpoints[0].busy) {
-				endpoints[0].busy = 0;//false
-				endpoints[0].rc = 0;
-				endpoints[0].done = 1;//true
-			}
-		}
-		if (epints & 0x4) { /* AHB error */
-			WARN("AHB error on IN EP0.\n");
-		}
-
-		if (epints & 0x8) { /* Timeout */
-			WARN("Timeout on IN EP0.\n");
-			if (endpoints[0].busy) {
-				endpoints[0].busy = 0;//false
-				endpoints[0].rc = 1;
-				endpoints[0].done = 1;//true
-			}
-		}
-	}
-
-	/*
-	 * OUT EP event
-	 * The core sets this bit to indicate that an interrupt is pending on one of the
-	 * OUT endpoints of the core (in Device mode). The application must read the
-	 * Device All Endpoints Interrupt (DAINT) register to determine the exact
-	 * number of the OUT endpoint on which the interrupt occurred, and then read
-	 * the corresponding Device OUT Endpoint-n Interrupt (DOEPINTn) register
-	 * to determine the exact cause of the interrupt. The application must clear the
-	 * appropriate status bit in the corresponding DOEPINTn register to clear this bit.
-	 */
-	if (ints & GINTSTS_OEPINT) {
-		/* indicates the status of an endpoint
-		 * with respect to USB- and AHB-related events. */
-		epints = mmio_read_32(DOEPINT(0));
-		//VERBOSE("OUT EP event,ints:0x%x, DOEPINT0:%x, DAINT:%x, DAINTMSK:%x.\n",
-		//	ints, epints, mmio_read_32(DAINT), mmio_read_32(DAINTMSK));
-		if (epints) {
-			mmio_write_32(DOEPINT(0), epints);
-			/* Transfer completed */
-			if (epints & DXEPINT_XFERCOMPL) {
-				/*FIXME,need use bytes*/
-				VERBOSE("EP0 RX completed. DOEPTSIZ(0) = 0x%x.\n",
-					mmio_read_32(DOEPTSIZ(0)));
-				if (endpoints[0].busy) {
-					endpoints[0].busy = 0;
-					endpoints[0].rc = 0;
-					endpoints[0].done = 1;
-				}
-			}
-			if (epints & DXEPINT_AHBERR) { /* AHB error */
-				WARN("AHB error on OUT EP0.\n");
-			}
-
-			/*
-			 * IN Token Received When TxFIFO is Empty (INTknTXFEmp)
-			 * Indicates that an IN token was received when the associated TxFIFO (periodic/nonperiodic)
-			 * was empty. This interrupt is asserted on the endpoint for which the IN token
-			 * was received.
-			 */
-			if (epints & DXEPINT_SETUP) { /* SETUP phase done */
-				VERBOSE("Setup phase \n");
-				data = mmio_read_32(DIEPCTL(0)) | DXEPCTL_SNAK;
-				mmio_write_32(DIEPCTL(0), data);
-				data = mmio_read_32(DOEPCTL(0)) | DXEPCTL_SNAK;
-				mmio_write_32(DOEPCTL(0), data);
-				/*clear IN EP intr*/
-				mmio_write_32(DIEPINT(0), ~0);
-				usb_handle_control_request((setup_packet *)&ctrl_req);
-			}
-
-			/* Make sure EP0 OUT is set up to accept the next request */
-			/* memset(p_ctrlreq, 0, NUM_ENDPOINTS*8); */
-			data = DOEPTSIZ0_SUPCNT(3) | DOEPTSIZ0_PKTCNT |
-				(64 << DOEPTSIZ0_XFERSIZE_SHIFT);
-			mmio_write_32(DOEPTSIZ0, data);
-			/*
-			 * IN Token Received When TxFIFO is Empty (INTknTXFEmp)
-			 * Indicates that an IN token was received when the associated TxFIFO (periodic/nonperiodic)
-			 * was empty. This interrupt is asserted on the endpoint for which the IN token
-			 * was received.
-			 */
-			// Note: the explicit cast below is intentional.
-			// DOEPDMA holds the start address of the external memory for storing or fetching endpoint data.
-			dma_desc_ep0.status.b.bs = 0x3;
-			dma_desc_ep0.status.b.mtrf = 0;
-			dma_desc_ep0.status.b.sr = 0;
-			dma_desc_ep0.status.b.l = 1;
-			dma_desc_ep0.status.b.ioc = 1;
-			dma_desc_ep0.status.b.sp = 0;
-			dma_desc_ep0.status.b.bytes = 64;
-			dma_desc_ep0.buf = (uintptr_t)&ctrl_req;
-			dma_desc_ep0.status.b.sts = 0;
-			dma_desc_ep0.status.b.bs = 0x0;
-			mmio_write_32(DOEPDMA0, (uintptr_t)&dma_desc_ep0);
-			// endpoint enable; clear NAK
-			mmio_write_32(DOEPCTL0, 0x84000000);
-		}
-
-		epints = mmio_read_32(DOEPINT1);
-		if(epints) {
-			mmio_write_32(DOEPINT1, epints);
-			VERBOSE("OUT EP1: epints :0x%x,DOEPTSIZ1 :0x%x.\n",epints, mmio_read_32(DOEPTSIZ1));
-			/* Transfer Completed Interrupt (XferCompl);Transfer completed */
-			if (epints & DXEPINT_XFERCOMPL) {
-				/* ((readl(DOEPTSIZ(1))) & 0x7FFFF is Transfer Size (XferSize) */
-				/*int bytes = (p_endpoints + 1)->size - ((readl(DOEPTSIZ(1))) & 0x7FFFF);*/
-				int bytes = rx_desc_bytes - dma_desc.status.b.bytes;
-				VERBOSE("OUT EP1: recv %d bytes \n",bytes);
-				if (endpoints[1].busy) {
-					endpoints[1].busy = 0;
-					endpoints[1].rc = 0;
-					endpoints[1].done = 1;
-					rx_req.complete(bytes, 0);
-				}
-			}
-
-			if (epints & DXEPINT_AHBERR) { /* AHB error */
-				WARN("AHB error on OUT EP1.\n");
-			}
-			if (epints & DXEPINT_SETUP) { /* SETUP phase done */
-				WARN("SETUP phase done  on OUT EP1.\n");
-			}
-		}
-	}
-	/* write to clear interrupts */
-	mmio_write_32(GINTSTS, ints);
-}
-
-#define EYE_PATTERN	0x70533483
-
-/*
-* pico phy exit siddq, nano phy enter siddq,
-* and open the clock of pico phy and dvc,
-*/
-static void dvc_and_picophy_init_chip(void)
-{
-	unsigned int data;
-
-	/* enable USB clock */
-	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_USBOTG);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
-	} while ((data & PERI_CLK0_USBOTG) == 0);
-
-
-	/* out of reset */
-	mmio_write_32(PERI_SC_PERIPH_RSTDIS0,
-		      PERI_RST0_USBOTG_BUS | PERI_RST0_POR_PICOPHY |
-		      PERI_RST0_USBOTG | PERI_RST0_USBOTG_32K);
-	do {
-		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
-		data &= PERI_RST0_USBOTG_BUS | PERI_RST0_POR_PICOPHY |
-			PERI_RST0_USBOTG | PERI_RST0_USBOTG_32K;
-	} while (data);
-
-	mmio_write_32(PERI_SC_PERIPH_CTRL8, EYE_PATTERN);
-
-	/* configure USB PHY */
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL4);
-	/* make PHY out of low power mode */
-	data &= ~PERI_CTRL4_PICO_SIDDQ;
-	/* detect VBUS by external circuit, switch D+ to 1.5KOhm pullup */
-	data |= PERI_CTRL4_PICO_VBUSVLDEXTSEL | PERI_CTRL4_PICO_VBUSVLDEXT;
-	data &= ~PERI_CTRL4_FPGA_EXT_PHY_SEL;
-	/* select PHY */
-	data &= ~PERI_CTRL4_OTG_PHY_SEL;
-	mmio_write_32(PERI_SC_PERIPH_CTRL4, data);
-
-	udelay(1000);
-
-	data = mmio_read_32(PERI_SC_PERIPH_CTRL5);
-	data &= ~PERI_CTRL5_PICOPHY_BC_MODE;
-	mmio_write_32(PERI_SC_PERIPH_CTRL5, data);
-
-	udelay(20000);
-}
-
-int init_usb(void)
-{
-	static int init_flag = 0;
-	uint32_t	data;
-
-	if (init_flag == 0) {
-		memset(&ctrl_req, 0, sizeof(setup_packet));
-		memset(&ctrl_resp, 0, 2);
-		memset(&endpoints, 0, sizeof(struct ep_type) * NUM_ENDPOINTS);
-		memset(&dma_desc, 0, sizeof(struct dwc_otg_dev_dma_desc));
-		memset(&dma_desc_ep0, 0, sizeof(struct dwc_otg_dev_dma_desc));
-		memset(&dma_desc_in, 0, sizeof(struct dwc_otg_dev_dma_desc));
-	}
-
-	VERBOSE("Pico PHY and DVC init start.\n");
-
-	dvc_and_picophy_init_chip();
-	VERBOSE("Pico PHY and DVC init done.\n");
-
-	/* wait for OTG AHB master idle */
-	do {
-		data = mmio_read_32(GRSTCTL) & GRSTCTL_AHBIDLE;
-	} while (data == 0);
-	VERBOSE("Reset usb controller\n");
-
-	/* OTG: Assert software reset */
-	mmio_write_32(GRSTCTL, GRSTCTL_CSFTRST);
-
-	/* wait for OTG to ack reset */
-	while (mmio_read_32(GRSTCTL) & GRSTCTL_CSFTRST);
-
-	/* wait for OTG AHB master idle */
-	while ((mmio_read_32(GRSTCTL) & GRSTCTL_AHBIDLE) == 0);
-
-	VERBOSE("Reset usb controller done\n");
-
-	usb_config();
-	VERBOSE("exit usb_init()\n");
-	return 0;
-}
-
-#define LOCK_STATE_LOCKED		0
-#define LOCK_STATE_UNLOCKED		1
-#define LOCK_STATE_RELOCKED		2
-
-#define FB_MAX_FILE_SIZE		(256 * 1024 * 1024)
-
-static struct ptentry *flash_ptn = NULL;
-
-static void fb_getvar(char *cmdbuf)
-{
-	char response[64];
-	char part_name[32];
-	int bytes;
-	struct ptentry *ptn = 0;
-
-	if (!strncmp(cmdbuf + 7, "max-download-size", 17)) {
-		bytes = sprintf(response, "OKAY0x%08x",
-				FB_MAX_FILE_SIZE);
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "partition-type:", 15)) {
-		bytes = sprintf(part_name, "%s", cmdbuf + 22);
-		ptn = find_ptn(part_name);
-		if (ptn == NULL) {
-			bytes = sprintf(response, "FAIL%s",
-					"invalid partition");
-			response[bytes] = '\0';
-			flash_ptn = NULL;
-		} else {
-			flash_ptn = ptn;
-			if (!strncmp(cmdbuf + 22, "system", 6) || !strncmp(cmdbuf + 22, "userdata", 8) ||
-				!strncmp(cmdbuf + 22, "cache", 5)) {
-				bytes = sprintf(response, "OKAYext4");
-				response[bytes] = '\0';
-			} else {
-				bytes = sprintf(response, "OKAYraw");
-				response[bytes] = '\0';
-			}
-		}
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "partition-size:", 15)) {
-		bytes = sprintf(part_name, "%s", cmdbuf + 22);
-		ptn = find_ptn(part_name);
-		if (ptn == NULL) {
-			bytes = sprintf(response, "FAIL%s",
-					"invalid partition");
-			response[bytes] = '\0';
-			flash_ptn = NULL;
-		} else {
-			bytes = sprintf(response, "OKAY%llx", ptn->length);
-			response[bytes] = '\0';
-			flash_ptn = ptn;
-		}
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "serialno", 8)) {
-		bytes = sprintf(response, "OKAY%s",
-				load_serialno());
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "version-bootloader", 18)) {
-		bytes = sprintf(response, "OKAY%s", VERSION_BOOTLOADER);
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "version-baseband", 16)) {
-		bytes = sprintf(response, "OKAYN/A");
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else if (!strncmp(cmdbuf + 7, "product", 8)) {
-		bytes = sprintf(response, "OKAYhikey");
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else {
-		bytes = sprintf(response, "FAIL%s",
-					"unknown var");
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	}
-}
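
Every branch of fb_getvar() repeats the same four steps: format the reply with sprintf(), terminate it, transmit it with tx_status() and re-arm the command endpoint with rx_cmd(). A hypothetical helper (illustrative only, not in the patch) capturing that pattern:

/* Hypothetical helper: send one fastboot reply and wait for the next command. */
static void fb_reply(const char *fmt, const char *arg)
{
	char response[64];
	int bytes;

	bytes = sprintf(response, fmt, arg);
	response[bytes] = '\0';
	tx_status(response);	/* queue the reply on the bulk IN endpoint */
	rx_cmd();		/* re-arm the bulk OUT endpoint */
}

The "product" branch, for example, would collapse to fb_reply("OKAY%s", "hikey").
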
-
-/* FIXME: do not support endptr yet */
-static unsigned long strtoul(const char *nptr, char **endptr, int base)
-{
-	unsigned long step, data;
-	int i;
-
-	if (base == 0)
-		step = 10;
-	else if ((base < 2) || (base > 36)) {
-		VERBOSE("%s: invalid base %d\n", __func__, base);
-		return 0;
-	} else
-		step = base;
-
-	for (i = 0, data = 0; ; i++) {
-		if (nptr[i] == '\0')
-			break;
-		else if (!isalpha(nptr[i]) && !isdigit(nptr[i])) {
-			VERBOSE("%s: invalid string %s at %d [%x]\n",
-				__func__, nptr, i, nptr[i]);
-			return 0;
-		} else {
-			data *= step;
-			if (isupper(nptr[i]))
-				data += nptr[i] - 'A' + 10;
-			else if (islower(nptr[i]))
-				data += nptr[i] - 'a' + 10;
-			else if (isdigit(nptr[i]))
-				data += nptr[i] - '0';
-		}
-	}
-	return data;
-}
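
fb_download() below uses this parser with base 16 to read the ASCII hex length that follows the fastboot "download:" prefix. A worked example under the rules above (illustrative only):

unsigned long len = strtoul("10000000", NULL, 16);
/* digits are accumulated left to right, multiplying by 16 at each step:
 * 1 * 16^7 = 0x10000000 = 256 MiB, which is exactly FB_MAX_FILE_SIZE. */
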
-
-static void fb_serialno(char *cmdbuf)
-{
-	struct random_serial_num random;
-
-	generate_serialno(&random);
-	flush_random_serialno((unsigned long)&random, sizeof(random));
-}
-
-static int fb_assigned_sn(char *cmdbuf)
-{
-	struct random_serial_num random;
-	int ret;
-
-	ret = assign_serialno(cmdbuf, &random);
-	if (ret < 0)
-		return ret;
-	flush_random_serialno((unsigned long)&random, sizeof(random));
-	return 0;
-}
-
-#define FB_DOWNLOAD_BASE	0x20000000
-
-static unsigned long fb_download_base, fb_download_size;
-
-static void fb_download(char *cmdbuf)
-{
-	char response[64];
-	int bytes;
-
-	if (!flash_ptn) {
-		bytes = sprintf(response, "FAIL%s",
-				"invalid partition");
-		response[bytes] = '\0';
-		tx_status(response);
-		rx_cmd();
-	} else {
-		rx_addr = FB_DOWNLOAD_BASE;
-		rx_length = strtoul(cmdbuf + 9, NULL, 16);
-		fb_download_base = rx_addr;
-		fb_download_size = rx_length;
-		if (rx_length > FB_MAX_FILE_SIZE) {
-			bytes = sprintf(response, "FAIL%s",
-					"file is too large");
-			response[bytes] = '\0';
-			tx_status(response);
-			rx_cmd();
-		} else {
-			bytes = sprintf(response, "DATA%08x",
-					rx_length);
-			VERBOSE("start:0x%x, length:0x%x, res:%s\n",
-				rx_addr, rx_length, response);
-			response[bytes] = '\0';
-			tx_status(response);
-			rx_data();
-		}
-	}
-}
-
-static void fb_flash(char *cmdbuf)
-{
-	flush_user_images(cmdbuf + 6, fb_download_base, fb_download_size);
-	tx_status("OKAY");
-	rx_cmd();
-}
-
-static void fb_reboot(char *cmdbuf)
-{
-	/* Send the system reset request */
-	mmio_write_32(AO_SC_SYS_STAT0, 0x48698284);
-
-	wfi();
-	panic();
-}
-
-static void usb_rx_cmd_complete(unsigned actual, int stat)
-{
-	if(stat != 0) return;
-
-	if(actual > 4095)
-		actual = 4095;
-	cmdbuf[actual] = 0;
-
-	INFO("cmd :%s\n",cmdbuf);
-
-	if(memcmp(cmdbuf, (void *)"reboot", 6) == 0) {
-		tx_status("OKAY");
-		fb_reboot(cmdbuf);
-		return;
-	} else if (!memcmp(cmdbuf, (void *)"getvar:", 7)) {
-		fb_getvar(cmdbuf);
-		return;
-	} else if (!memcmp(cmdbuf, (void *)"download:", 9)) {
-		fb_download(cmdbuf);
-		return;
-	} else if(memcmp(cmdbuf, (void *)"erase:", 6) == 0) {
-		/* FIXME: erase is not supported but we return success */
-		tx_status("OKAY");
-		rx_cmd();
-		return;
-	} else if(memcmp(cmdbuf, (void *)"flash:", 6) == 0) {
-		INFO("recog updatefile\n");
-		fb_flash(cmdbuf);
-		return;
-	} else if(memcmp(cmdbuf, (void *)"boot", 4) == 0) {
-		INFO(" - OKAY\n");
-
-		return;
-	} else if (memcmp(cmdbuf, (void *)"oem serialno", 12) == 0) {
-		if (*(cmdbuf + 12) == '\0') {
-			fb_serialno(cmdbuf);
-			tx_status("OKAY");
-			rx_cmd();
-			return;
-		} else if (memcmp(cmdbuf + 12, (void *)" set", 4) == 0) {
-			if (fb_assigned_sn(cmdbuf + 16) == 0) {
-				tx_status("OKAY");
-				rx_cmd();
-				return;
-			}
-		}
-	} else if (memcmp(cmdbuf, (void *)"oem led", 7) == 0) {
-		if ((*(cmdbuf + 7) >= '1') && (*(cmdbuf + 7) <= '4')) {
-			int led;
-			led = *(cmdbuf + 7) - '0';
-			if (memcmp(cmdbuf + 8, (void *)" on", 3) == 0) {
-				gpio_set_value(31 + led, 1);
-				tx_status("OKAY");
-				rx_cmd();
-				return;
-			} else if (memcmp(cmdbuf + 8, (void *)" off", 4) == 0) {
-				gpio_set_value(31 + led, 0);
-				tx_status("OKAY");
-				rx_cmd();
-				return;
-			}
-		}
-	}
-
-	tx_status("FAILinvalid command");
-	rx_cmd();
-}
-
-static void usbloader_init(void)
-{
-	VERBOSE("enter usbloader_init\n");
-
-	/*usb sw and hw init*/
-	init_usb();
-
-	/*alloc and init sth for transfer*/
-	ep1in.num = BULK_IN_EP;
-	ep1in.in = 1;
-	ep1in.req = NULL;
-	ep1in.maxpkt = MAX_PACKET_LEN;
-	ep1in.next = &ep1in;
-	ep1out.num = BULK_OUT_EP;
-	ep1out.in = 0;
-	ep1out.req = NULL;
-	ep1out.maxpkt = MAX_PACKET_LEN;
-	ep1out.next = &ep1out;
-	cmdbuf = (char *)(rx_req.buf);
-
-	VERBOSE("exit usbloader_init\n");
-}
-
-void usb_reinit()
-{
-	if (usb_need_reset)
-	{
-		usb_need_reset = 0;
-		init_usb();
-	}
-}
-
-void usb_download(void)
-{
-	usbloader_init();
-	INFO("Enter downloading mode. Please run fastboot command on Host.\n");
-	for (;;) {
-		usb_poll();
-		usb_reinit();
-	}
-}
diff --git a/plat/hisilicon/hikey/aarch64/hikey_common.c b/plat/hisilicon/hikey/aarch64/hikey_common.c
new file mode 100644
index 0000000..d5b63cc
--- /dev/null
+++ b/plat/hisilicon/hikey/aarch64/hikey_common.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_tables.h>
+
+#include "../hikey_def.h"
+
+#define MAP_DDR		MAP_REGION_FLAT(DDR_BASE,			\
+					DDR_SIZE,			\
+					MT_DEVICE | MT_RW | MT_NS)
+
+#define MAP_DEVICE	MAP_REGION_FLAT(DEVICE_BASE,			\
+					DEVICE_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#define MAP_TSP_MEM	MAP_REGION_FLAT(TSP_SEC_MEM_BASE,		\
+					TSP_SEC_MEM_SIZE,		\
+					MT_MEMORY | MT_RW | MT_SECURE)
+
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#define MAP_OPTEE_PAGEABLE	MAP_REGION_FLAT(		\
+					HIKEY_OPTEE_PAGEABLE_LOAD_BASE,	\
+					HIKEY_OPTEE_PAGEABLE_LOAD_SIZE,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
+#endif
+#endif
+
+#define MAP_ROM_PARAM	MAP_REGION_FLAT(XG2RAM0_BASE,			\
+					BL1_XG2RAM0_OFFSET,		\
+					MT_DEVICE | MT_RO | MT_SECURE)
+
+#define MAP_SRAM	MAP_REGION_FLAT(SRAM_BASE,			\
+					SRAM_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * BL1 needs to access the areas of MMC_SRAM.
+ * BL1 loads BL2 from eMMC into SRAM before DDR is initialized.
+ */
+#define MAP_MMC_SRAM	MAP_REGION_FLAT(HIKEY_BL1_MMC_DESC_BASE,	\
+					HIKEY_BL1_MMC_DESC_SIZE +	\
+					HIKEY_BL1_MMC_DATA_SIZE,	\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+/*
+ * Table of regions for different BL stages to map using the MMU.
+ * This doesn't include Trusted RAM as the 'mem_layout' argument passed to
+ * hikey_init_mmu_elx() will give the available subset of that,
+ */
+#ifdef IMAGE_BL1
+static const mmap_region_t hikey_mmap[] = {
+	MAP_DEVICE,
+	MAP_ROM_PARAM,
+	MAP_MMC_SRAM,
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL2
+static const mmap_region_t hikey_mmap[] = {
+	MAP_DDR,
+	MAP_DEVICE,
+	MAP_TSP_MEM,
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+	MAP_OPTEE_PAGEABLE,
+#endif
+#endif
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL31
+static const mmap_region_t hikey_mmap[] = {
+	MAP_DEVICE,
+	MAP_SRAM,
+	MAP_TSP_MEM,
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL32
+static const mmap_region_t hikey_mmap[] = {
+	MAP_DEVICE,
+	MAP_DDR,
+	{0}
+};
+#endif
+
+/*
+ * Macro generating the code for the function that sets up the page tables
+ * as per the platform memory map and initializes the MMU, for the given
+ * exception level.
+ */
+#define HIKEY_CONFIGURE_MMU_EL(_el)				\
+	void hikey_init_mmu_el##_el(unsigned long total_base,	\
+				  unsigned long total_size,	\
+				  unsigned long ro_start,	\
+				  unsigned long ro_limit,	\
+				  unsigned long coh_start,	\
+				  unsigned long coh_limit)	\
+	{							\
+	       mmap_add_region(total_base, total_base,		\
+			       total_size,			\
+			       MT_MEMORY | MT_RW | MT_SECURE);	\
+	       mmap_add_region(ro_start, ro_start,		\
+			       ro_limit - ro_start,		\
+			       MT_MEMORY | MT_RO | MT_SECURE);	\
+	       mmap_add_region(coh_start, coh_start,		\
+			       coh_limit - coh_start,		\
+			       MT_DEVICE | MT_RW | MT_SECURE);	\
+	       mmap_add(hikey_mmap);				\
+	       init_xlat_tables();				\
+								\
+	       enable_mmu_el##_el(0);				\
+	}
+
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+HIKEY_CONFIGURE_MMU_EL(1)
+HIKEY_CONFIGURE_MMU_EL(3)
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return HIKEY_NS_IMAGE_OFFSET;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 1200000;
+}
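
For reference, HIKEY_CONFIGURE_MMU_EL(3) above expands to roughly the following function (illustrative expansion only; the macro is what actually compiles). BL1 calls it from bl1_plat_arch_setup() in hikey_bl1_setup.c below.

/* Illustrative expansion of HIKEY_CONFIGURE_MMU_EL(3). */
void hikey_init_mmu_el3(unsigned long total_base, unsigned long total_size,
			unsigned long ro_start, unsigned long ro_limit,
			unsigned long coh_start, unsigned long coh_limit)
{
	mmap_add_region(total_base, total_base, total_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(ro_start, ro_start, ro_limit - ro_start,
			MT_MEMORY | MT_RO | MT_SECURE);
	mmap_add_region(coh_start, coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
	mmap_add(hikey_mmap);		/* the per-image region table above */
	init_xlat_tables();
	enable_mmu_el3(0);
}
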
diff --git a/plat/hisilicon/hikey/aarch64/hikey_helpers.S b/plat/hisilicon/hikey/aarch64/hikey_helpers.S
new file mode 100644
index 0000000..680c0a1
--- /dev/null
+++ b/plat/hisilicon/hikey/aarch64/hikey_helpers.S
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include "../hikey_def.h"
+
+	.globl	plat_my_core_pos
+	.globl	platform_mem_init
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_report_exception
+	.globl	plat_reset_handler
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 * void platform_mem_init(void);
+	 *
+	 * We don't need to carry out any memory initialization
+	 * on HIKEY. The Secure RAM is accessible straight away.
+	 * -----------------------------------------------------
+	 */
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0, x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, CRASH_CONSOLE_BASE
+	mov_imm	x1, PL011_UART_CLK_IN_HZ
+	mov_imm	x2, PL011_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, CRASH_CONSOLE_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * void plat_report_exception(unsigned int type)
+	 * Function to report an unhandled exception
+	 * with platform-specific means.
+	 * On HIKEY platform, it updates the LEDs
+	 * to indicate where we are
+	 * ---------------------------------------------
+	 */
+func plat_report_exception
+	mov	x8, x30
+
+	/* Turn on LED according to x0 (0 -- f) */
+	ldr	x2, =0xf7020000
+	and	x1, x0, #1
+	str	w1, [x2, #4]
+	and	x1, x0, #2
+	str	w1, [x2, #8]
+	and	x1, x0, #4
+	str	w1, [x2, #16]
+	and	x1, x0, #8
+	str	w1, [x2, #32]
+
+	mrs	x2, currentel
+	and	x2, x2, #0xc0
+	/* Check EL1 */
+	cmp	x2, #0x04
+	beq	plat_report_el1
+
+	adr	x4, plat_err_str
+	bl	asm_print_str
+
+	adr	x4, esr_el3_str
+	bl	asm_print_str
+
+	mrs	x4, esr_el3
+	bl	asm_print_hex
+
+	adr	x4, elr_el3_str
+	bl	asm_print_str
+
+	mrs	x4, elr_el3
+	bl	asm_print_hex
+	b	plat_report_end
+
+plat_report_el1:
+	adr	x4, plat_err_str
+	bl	asm_print_str
+
+	adr	x4, esr_el1_str
+	bl	asm_print_str
+
+	mrs	x4, esr_el1
+	bl	asm_print_hex
+
+	adr	x4, elr_el1_str
+	bl	asm_print_str
+
+	mrs	x4, elr_el1
+	bl	asm_print_hex
+plat_report_end:
+	mov	x30, x8
+	ret
+endfunc plat_report_exception
+
+	/* -----------------------------------------------------
+	 * void plat_reset_handler(void);
+	 * -----------------------------------------------------
+	 */
+func plat_reset_handler
+	ret
+endfunc plat_reset_handler
+
+.section .rodata.rev_err_str, "aS"
+plat_err_str:
+	.asciz "\nPlatform exception reporting:"
+esr_el3_str:
+	.asciz "\nESR_EL3: "
+elr_el3_str:
+	.asciz "\nELR_EL3: "
+esr_el1_str:
+	.asciz "\nESR_EL1: "
+elr_el1_str:
+	.asciz "\nELR_EL1: "
diff --git a/plat/hisilicon/hikey/hikey_bl1_setup.c b/plat/hisilicon/hikey/hikey_bl1_setup.c
new file mode 100644
index 0000000..df0ad8e
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_bl1_setup.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <dw_mmc.h>
+#include <emmc.h>
+#include <errno.h>
+#include <gpio.h>
+#include <hi6220.h>
+#include <hi6553.h>
+#include <mmio.h>
+#include <pl061_gpio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <sp804_delay_timer.h>
+#include <string.h>
+#include <tbbr/tbbr_img_desc.h>
+
+#include "../../bl1/bl1_private.h"
+#include "hikey_def.h"
+#include "hikey_private.h"
+
+/*
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted RAM
+ */
+extern unsigned long __COHERENT_RAM_START__;
+extern unsigned long __COHERENT_RAM_END__;
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL1_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL1_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+/* Data structure which holds the extents of the trusted RAM for BL1 */
+static meminfo_t bl1_tzram_layout;
+
+enum {
+	BOOT_NORMAL = 0,
+	BOOT_USB_DOWNLOAD,
+	BOOT_UART_DOWNLOAD,
+};
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+#if LOAD_IMAGE_V2
+/*******************************************************************************
+ * Function that takes a memory layout into which BL2 has been loaded and
+ * populates a new memory layout for BL2 that ensures that BL1's data sections
+ * resident in secure RAM are not visible to BL2.
+ ******************************************************************************/
+void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
+			     meminfo_t *bl2_mem_layout)
+{
+
+	assert(bl1_mem_layout != NULL);
+	assert(bl2_mem_layout != NULL);
+
+	/*
+	 * Cannot remove BL1 RW data from the scope of memory visible to BL2
+	 * as is done on ARM platforms, because the regions overlap on HiKey.
+	 */
+	bl2_mem_layout->total_base = BL2_BASE;
+	bl2_mem_layout->total_size = BL32_SRAM_LIMIT - BL2_BASE;
+
+	flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
+}
+#endif /* LOAD_IMAGE_V2 */
+
+/*
+ * Perform any BL1 specific platform actions.
+ */
+void bl1_early_platform_setup(void)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = BL1_RW_BASE;
+	bl1_tzram_layout.total_size = BL1_RW_SIZE;
+
+#if !LOAD_IMAGE_V2
+	/* Calculate how much RAM BL1 is using and how much remains free */
+	bl1_tzram_layout.free_base = BL1_RW_BASE;
+	bl1_tzram_layout.free_size = BL1_RW_SIZE;
+	reserve_mem(&bl1_tzram_layout.free_base,
+		    &bl1_tzram_layout.free_size,
+		    BL1_RAM_BASE,
+		    BL1_RAM_LIMIT - BL1_RAM_BASE); /* bl1_size */
+#endif
+
+	INFO("BL1: 0x%lx - 0x%lx [size = %lu]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
+	     BL1_RAM_LIMIT - BL1_RAM_BASE); /* bl1_size */
+}
+
+/*
+ * Perform the very early platform specific architecture setup here. At the
+ * moment this only does basic initialization. Later architectural setup
+ * (bl1_arch_setup()) does not do anything platform specific.
+ */
+void bl1_plat_arch_setup(void)
+{
+	hikey_init_mmu_el3(bl1_tzram_layout.total_base,
+			   bl1_tzram_layout.total_size,
+			   BL1_RO_BASE,
+			   BL1_RO_LIMIT,
+			   BL1_COHERENT_RAM_BASE,
+			   BL1_COHERENT_RAM_LIMIT);
+}
+
+static void hikey_sp804_init(void)
+{
+	uint32_t data;
+
+	/* select the clock of dual timer0 */
+	data = mmio_read_32(AO_SC_TIMER_EN0);
+	while (data & 3) {
+		data &= ~3;
+		data |= 3 << 16;
+		mmio_write_32(AO_SC_TIMER_EN0, data);
+		data = mmio_read_32(AO_SC_TIMER_EN0);
+	}
+	/* enable the pclk of dual timer0 */
+	data = mmio_read_32(AO_SC_PERIPH_CLKSTAT4);
+	while (!(data & PCLK_TIMER1) || !(data & PCLK_TIMER0)) {
+		mmio_write_32(AO_SC_PERIPH_CLKEN4, PCLK_TIMER1 | PCLK_TIMER0);
+		data = mmio_read_32(AO_SC_PERIPH_CLKSTAT4);
+	}
+	/* reset dual timer0 */
+	data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
+	mmio_write_32(AO_SC_PERIPH_RSTEN4, PCLK_TIMER1 | PCLK_TIMER0);
+	do {
+		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
+	} while (!(data & PCLK_TIMER1) || !(data & PCLK_TIMER0));
+	/* unreset dual timer0 */
+	mmio_write_32(AO_SC_PERIPH_RSTDIS4, PCLK_TIMER1 | PCLK_TIMER0);
+	do {
+		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
+	} while ((data & PCLK_TIMER1) || (data & PCLK_TIMER0));
+
+	sp804_timer_init(SP804_TIMER0_BASE, 10, 192);
+}
+
+static void hikey_gpio_init(void)
+{
+	pl061_gpio_init();
+	pl061_gpio_register(GPIO0_BASE, 0);
+	pl061_gpio_register(GPIO1_BASE, 1);
+	pl061_gpio_register(GPIO2_BASE, 2);
+	pl061_gpio_register(GPIO3_BASE, 3);
+	pl061_gpio_register(GPIO4_BASE, 4);
+	pl061_gpio_register(GPIO5_BASE, 5);
+	pl061_gpio_register(GPIO6_BASE, 6);
+	pl061_gpio_register(GPIO7_BASE, 7);
+	pl061_gpio_register(GPIO8_BASE, 8);
+	pl061_gpio_register(GPIO9_BASE, 9);
+	pl061_gpio_register(GPIO10_BASE, 10);
+	pl061_gpio_register(GPIO11_BASE, 11);
+	pl061_gpio_register(GPIO12_BASE, 12);
+	pl061_gpio_register(GPIO13_BASE, 13);
+	pl061_gpio_register(GPIO14_BASE, 14);
+	pl061_gpio_register(GPIO15_BASE, 15);
+	pl061_gpio_register(GPIO16_BASE, 16);
+	pl061_gpio_register(GPIO17_BASE, 17);
+	pl061_gpio_register(GPIO18_BASE, 18);
+	pl061_gpio_register(GPIO19_BASE, 19);
+
+	/* Power on indicator LED (USER_LED1). */
+	gpio_set_direction(32, GPIO_DIR_OUT);	/* LED1 */
+	gpio_set_value(32, GPIO_LEVEL_HIGH);
+	gpio_set_direction(33, GPIO_DIR_OUT);	/* LED2 */
+	gpio_set_value(33, GPIO_LEVEL_LOW);
+	gpio_set_direction(34, GPIO_DIR_OUT);	/* LED3 */
+	gpio_set_direction(35, GPIO_DIR_OUT);	/* LED4 */
+}
+
+static void hikey_pmussi_init(void)
+{
+	uint32_t data;
+
+	/* Initialize PWR_HOLD GPIO */
+	gpio_set_direction(0, GPIO_DIR_OUT);
+	gpio_set_value(0, GPIO_LEVEL_LOW);
+
+	/*
+	 * After reset, PMUSSI stays in reset mode.
+	 * Now make it out of reset.
+	 */
+	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
+		      AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N);
+	do {
+		data = mmio_read_32(AO_SC_PERIPH_RSTSTAT4);
+	} while (data & AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N);
+
+	/* Set PMUSSI clock latency for read operation. */
+	data = mmio_read_32(AO_SC_MCU_SUBSYS_CTRL3);
+	data &= ~AO_SC_MCU_SUBSYS_CTRL3_RCLK_MASK;
+	data |= AO_SC_MCU_SUBSYS_CTRL3_RCLK_3;
+	mmio_write_32(AO_SC_MCU_SUBSYS_CTRL3, data);
+
+	/* enable PMUSSI clock */
+	data = AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_CCPU |
+	       AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_MCU;
+	mmio_write_32(AO_SC_PERIPH_CLKEN5, data);
+	data = AO_SC_PERIPH_CLKEN4_PCLK_PMUSSI;
+	mmio_write_32(AO_SC_PERIPH_CLKEN4, data);
+
+	gpio_set_value(0, GPIO_LEVEL_HIGH);
+}
+
+static void hikey_hi6553_init(void)
+{
+	uint8_t data;
+
+	mmio_write_8(HI6553_PERI_EN_MARK, 0x1e);
+	mmio_write_8(HI6553_NP_REG_ADJ1, 0);
+	data = DISABLE6_XO_CLK_CONN | DISABLE6_XO_CLK_NFC |
+		DISABLE6_XO_CLK_RF1 | DISABLE6_XO_CLK_RF2;
+	mmio_write_8(HI6553_DISABLE6_XO_CLK, data);
+
+	/* configure BUCK0 & BUCK1 */
+	mmio_write_8(HI6553_BUCK01_CTRL2, 0x5e);
+	mmio_write_8(HI6553_BUCK0_CTRL7, 0x10);
+	mmio_write_8(HI6553_BUCK1_CTRL7, 0x10);
+	mmio_write_8(HI6553_BUCK0_CTRL5, 0x1e);
+	mmio_write_8(HI6553_BUCK1_CTRL5, 0x1e);
+	mmio_write_8(HI6553_BUCK0_CTRL1, 0xfc);
+	mmio_write_8(HI6553_BUCK1_CTRL1, 0xfc);
+
+	/* configure BUCK2 */
+	mmio_write_8(HI6553_BUCK2_REG1, 0x4f);
+	mmio_write_8(HI6553_BUCK2_REG5, 0x99);
+	mmio_write_8(HI6553_BUCK2_REG6, 0x45);
+	mdelay(1);
+	mmio_write_8(HI6553_VSET_BUCK2_ADJ, 0x22);
+	mdelay(1);
+
+	/* configure BUCK3 */
+	mmio_write_8(HI6553_BUCK3_REG3, 0x02);
+	mmio_write_8(HI6553_BUCK3_REG5, 0x99);
+	mmio_write_8(HI6553_BUCK3_REG6, 0x41);
+	mmio_write_8(HI6553_VSET_BUCK3_ADJ, 0x02);
+	mdelay(1);
+
+	/* configure BUCK4 */
+	mmio_write_8(HI6553_BUCK4_REG2, 0x9a);
+	mmio_write_8(HI6553_BUCK4_REG5, 0x99);
+	mmio_write_8(HI6553_BUCK4_REG6, 0x45);
+
+	/* configure LDO20 */
+	mmio_write_8(HI6553_LDO20_REG_ADJ, 0x50);
+
+	mmio_write_8(HI6553_NP_REG_CHG, 0x0f);
+	mmio_write_8(HI6553_CLK_TOP0, 0x06);
+	mmio_write_8(HI6553_CLK_TOP3, 0xc0);
+	mmio_write_8(HI6553_CLK_TOP4, 0x00);
+
+	/* configure LDO7 & LDO10 for SD slot */
+	/* enable LDO7 */
+	data = mmio_read_8(HI6553_LDO7_REG_ADJ);
+	data = (data & 0xf8) | 0x2;
+	mmio_write_8(HI6553_LDO7_REG_ADJ, data);
+	mdelay(5);
+	mmio_write_8(HI6553_ENABLE2_LDO1_8, 1 << 6);
+	mdelay(5);
+	/* enable LDO10 */
+	data = mmio_read_8(HI6553_LDO10_REG_ADJ);
+	data = (data & 0xf8) | 0x5;
+	mmio_write_8(HI6553_LDO10_REG_ADJ, data);
+	mdelay(5);
+	mmio_write_8(HI6553_ENABLE3_LDO9_16, 1 << 1);
+	mdelay(5);
+	/* enable LDO15 */
+	data = mmio_read_8(HI6553_LDO15_REG_ADJ);
+	data = (data & 0xf8) | 0x4;
+	mmio_write_8(HI6553_LDO15_REG_ADJ, data);
+	mmio_write_8(HI6553_ENABLE3_LDO9_16, 1 << 6);
+	mdelay(5);
+	/* enable LDO19 */
+	data = mmio_read_8(HI6553_LDO19_REG_ADJ);
+	data |= 0x7;
+	mmio_write_8(HI6553_LDO19_REG_ADJ, data);
+	mmio_write_8(HI6553_ENABLE4_LDO17_22, 1 << 2);
+	mdelay(5);
+	/* enable LDO21 */
+	data = mmio_read_8(HI6553_LDO21_REG_ADJ);
+	data = (data & 0xf8) | 0x3;
+	mmio_write_8(HI6553_LDO21_REG_ADJ, data);
+	mmio_write_8(HI6553_ENABLE4_LDO17_22, 1 << 4);
+	mdelay(5);
+	/* enable LDO22 */
+	data = mmio_read_8(HI6553_LDO22_REG_ADJ);
+	data = (data & 0xf8) | 0x7;
+	mmio_write_8(HI6553_LDO22_REG_ADJ, data);
+	mmio_write_8(HI6553_ENABLE4_LDO17_22, 1 << 5);
+	mdelay(5);
+
+	/* select 32.764KHz */
+	mmio_write_8(HI6553_CLK19M2_600_586_EN, 0x01);
+
+	/* Disable vbus_det interrupts */
+	data = mmio_read_8(HI6553_IRQ2_MASK);
+	data = data | 0x3;
+	mmio_write_8(HI6553_IRQ2_MASK, data);
+}
+
+static void init_mmc0_pll(void)
+{
+	unsigned int data;
+
+	/* select SYSPLL as the source of MMC0 */
+	/* select SYSPLL as the source of MUX1 (SC_CLK_SEL0) */
+	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 5 | 1 << 21);
+	do {
+		data = mmio_read_32(PERI_SC_CLK_SEL0);
+	} while (!(data & (1 << 5)));
+	/* select MUX1 as the source of MUX2 (SC_CLK_SEL0) */
+	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 29);
+	do {
+		data = mmio_read_32(PERI_SC_CLK_SEL0);
+	} while (data & (1 << 13));
+
+	mmio_write_32(PERI_SC_PERIPH_CLKEN0, (1 << 0));
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (!(data & (1 << 0)));
+
+	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
+	data |= 1 << 1;
+	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
+
+	do {
+		mmio_write_32(PERI_SC_CLKCFG8BIT1, (1 << 7) | 0xb);
+		data = mmio_read_32(PERI_SC_CLKCFG8BIT1);
+	} while ((data & 0xb) != 0xb);
+}
+
+static void reset_mmc0_clk(void)
+{
+	unsigned int data;
+
+	/* disable mmc0 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKDIS0, PERI_CLK0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (data & PERI_CLK0_MMC0);
+	/* enable mmc0 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (!(data & PERI_CLK0_MMC0));
+	/* reset mmc0 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTEN0, PERI_RST0_MMC0);
+
+	/* bypass mmc0 clock phase */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL2);
+	data |= 3;
+	mmio_write_32(PERI_SC_PERIPH_CTRL2, data);
+
+	/* disable low power */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL13);
+	data |= 1 << 3;
+	mmio_write_32(PERI_SC_PERIPH_CTRL13, data);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (!(data & PERI_RST0_MMC0));
+
+	/* unreset mmc0 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (data & PERI_RST0_MMC0);
+}
+
+static void init_media_clk(void)
+{
+	unsigned int data, value;
+
+	data = mmio_read_32(PMCTRL_MEDPLLCTRL);
+	data |= 1;
+	mmio_write_32(PMCTRL_MEDPLLCTRL, data);
+
+	for (;;) {
+		data = mmio_read_32(PMCTRL_MEDPLLCTRL);
+		value = 1 << 28;
+		if ((data & value) == value)
+			break;
+	}
+
+	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
+	data = 1 << 10;
+	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
+}
+
+static void init_mmc1_pll(void)
+{
+	uint32_t data;
+
+	/* select SYSPLL as the source of MMC1 */
+	/* select SYSPLL as the source of MUX1 (SC_CLK_SEL0) */
+	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 11 | 1 << 27);
+	do {
+		data = mmio_read_32(PERI_SC_CLK_SEL0);
+	} while (!(data & (1 << 11)));
+	/* select MUX1 as the source of MUX2 (SC_CLK_SEL0) */
+	mmio_write_32(PERI_SC_CLK_SEL0, 1 << 30);
+	do {
+		data = mmio_read_32(PERI_SC_CLK_SEL0);
+	} while (data & (1 << 14));
+
+	mmio_write_32(PERI_SC_PERIPH_CLKEN0, (1 << 1));
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (!(data & (1 << 1)));
+
+	data = mmio_read_32(PERI_SC_PERIPH_CLKEN12);
+	data |= 1 << 2;
+	mmio_write_32(PERI_SC_PERIPH_CLKEN12, data);
+
+	do {
+		/* 1.2GHz / 50 = 24MHz */
+		mmio_write_32(PERI_SC_CLKCFG8BIT2, 0x31 | (1 << 7));
+		data = mmio_read_32(PERI_SC_CLKCFG8BIT2);
+	} while ((data & 0x31) != 0x31);
+}
+
+static void reset_mmc1_clk(void)
+{
+	unsigned int data;
+
+	/* disable mmc1 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKDIS0, PERI_CLK0_MMC1);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (data & PERI_CLK0_MMC1);
+	/* enable mmc1 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC1);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (!(data & PERI_CLK0_MMC1));
+	/* reset mmc1 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTEN0, PERI_RST0_MMC1);
+
+	/* bypass mmc1 clock phase */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL2);
+	data |= 3 << 2;
+	mmio_write_32(PERI_SC_PERIPH_CTRL2, data);
+
+	/* disable low power */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL13);
+	data |= 1 << 4;
+	mmio_write_32(PERI_SC_PERIPH_CTRL13, data);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (!(data & PERI_RST0_MMC1));
+
+	/* unreset mmc1 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC1);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (data & PERI_RST0_MMC1);
+}
+
+/* Initialize PLL of both eMMC and SD controllers. */
+static void hikey_mmc_pll_init(void)
+{
+	init_mmc0_pll();
+	reset_mmc0_clk();
+	init_media_clk();
+
+	dsb();
+
+	init_mmc1_pll();
+	reset_mmc1_clk();
+}
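
The clock and reset bring-up functions above (init_mmc0_pll(), reset_mmc0_clk(), init_mmc1_pll(), reset_mmc1_clk()) all follow the same write-then-poll shape. A hypothetical helper, shown only to make the pattern explicit:

/* Hypothetical helper: request a change and spin until the status register
 * reflects it, as the MMC clock/reset code above does inline. */
static void write_and_poll_set(uintptr_t ctrl_reg, uintptr_t stat_reg,
			       uint32_t mask)
{
	uint32_t data;

	mmio_write_32(ctrl_reg, mask);
	do {
		data = mmio_read_32(stat_reg);
	} while ((data & mask) != mask);
}

Enabling the MMC0 bus clock, for instance, corresponds to write_and_poll_set(PERI_SC_PERIPH_CLKEN0, PERI_SC_PERIPH_CLKSTAT0, PERI_CLK0_MMC0).
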
+
+static void hikey_rtc_init(void)
+{
+	uint32_t data;
+
+	data = mmio_read_32(AO_SC_PERIPH_CLKEN4);
+	data |= AO_SC_PERIPH_RSTDIS4_RESET_RTC0_N;
+	mmio_write_32(AO_SC_PERIPH_CLKEN4, data);
+}
+
+/*
+ * Function which will perform any remaining platform-specific setup that can
+ * occur after the MMU and data cache have been enabled.
+ */
+void bl1_platform_setup(void)
+{
+	dw_mmc_params_t params;
+
+	assert((HIKEY_BL1_MMC_DESC_BASE >= SRAM_BASE) &&
+	       ((SRAM_BASE + SRAM_SIZE) >=
+		(HIKEY_BL1_MMC_DATA_BASE + HIKEY_BL1_MMC_DATA_SIZE)));
+	hikey_sp804_init();
+	hikey_gpio_init();
+	hikey_pmussi_init();
+	hikey_hi6553_init();
+
+	hikey_rtc_init();
+
+	hikey_mmc_pll_init();
+
+	memset(&params, 0, sizeof(dw_mmc_params_t));
+	params.reg_base = DWMMC0_BASE;
+	params.desc_base = HIKEY_BL1_MMC_DESC_BASE;
+	params.desc_size = 1 << 20;
+	params.clk_rate = 24 * 1000 * 1000;
+	params.bus_width = EMMC_BUS_WIDTH_8;
+	params.flags = EMMC_FLAG_CMD23;
+	dw_mmc_init(&params);
+
+	hikey_io_setup();
+}
+
+/*
+ * The following function checks if Firmware update is needed,
+ * by checking if TOC in FIP image is valid or not.
+ */
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	int32_t boot_mode;
+	unsigned int ret;
+
+	boot_mode = mmio_read_32(ONCHIPROM_PARAM_BASE);
+	switch (boot_mode) {
+	case BOOT_NORMAL:
+		ret = BL2_IMAGE_ID;
+		break;
+	case BOOT_USB_DOWNLOAD:
+	case BOOT_UART_DOWNLOAD:
+		ret = NS_BL1U_IMAGE_ID;
+		break;
+	default:
+		WARN("Invalid boot mode found: %d\n", boot_mode);
+		panic();
+	}
+	return ret;
+}
+
+image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
+{
+	unsigned int index = 0;
+
+	while (bl1_tbbr_image_descs[index].image_id != INVALID_IMAGE_ID) {
+		if (bl1_tbbr_image_descs[index].image_id == image_id)
+			return &bl1_tbbr_image_descs[index];
+
+		index++;
+	}
+
+	return NULL;
+}
+
+void bl1_plat_set_ep_info(unsigned int image_id,
+		entry_point_info_t *ep_info)
+{
+	unsigned int data = 0;
+
+	if (image_id == BL2_IMAGE_ID)
+		return;
+	inv_dcache_range(NS_BL1U_BASE, NS_BL1U_SIZE);
+	__asm__ volatile ("mrs	%0, cpacr_el1" : "=r"(data));
+	do {
+		data |= 3 << 20;
+		__asm__ volatile ("msr	cpacr_el1, %0" : : "r"(data));
+		__asm__ volatile ("mrs	%0, cpacr_el1" : "=r"(data));
+	} while ((data & (3 << 20)) != (3 << 20));
+	INFO("cpacr_el1:0x%x\n", data);
+
+	ep_info->args.arg0 = 0xffff & read_mpidr();
+	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
+}
diff --git a/plat/hisilicon/hikey/hikey_bl2_mem_params_desc.c b/plat/hisilicon/hikey/hikey_bl2_mem_params_desc.c
new file mode 100644
index 0000000..50ca015
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_bl2_mem_params_desc.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * Following descriptor provides BL image/ep information that gets used
+ * by BL2 to load the images and also subset of this information is
+ * passed to next BL image. The image loading sequence is managed by
+ * populating the images in required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with
+ * the next executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+	/* Fill SCP_BL2 related information if it exists */
+    {
+	    .image_id = SCP_BL2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = SCP_BL2_BASE,
+	    .image_info.image_max_size = SCP_BL2_SIZE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+	/* Fill EL3 payload related information (BL31 is EL3 payload)*/
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = EL3_PAYLOAD_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t,
+		    IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+#else /* EL3_PAYLOAD_BASE */
+
+	/* Fill BL31 related information */
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = BL31_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+	    .ep_info.args.arg1 = HIKEY_BL31_PLAT_PARAM_VAL,
+#endif
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+	    .image_info.image_base = BL31_BASE,
+	    .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+	    .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+    },
+
+# ifdef BL32_BASE
+	/* Fill BL32 related information */
+    {
+	    .image_id = BL32_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+	    .ep_info.pc = BL32_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 1 related information.
+	 * A typical use for extra1 image is with OP-TEE where it is the pager image.
+	 */
+    {
+	    .image_id = BL32_EXTRA1_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 2 related information.
+	 * A typical use for extra2 image is with OP-TEE where it is the paged image.
+	 */
+    {
+	    .image_id = BL32_EXTRA2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+	    .image_info.image_base = HIKEY_OPTEE_PAGEABLE_LOAD_BASE,
+	    .image_info.image_max_size = HIKEY_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+# endif /* BL32_BASE */
+
+	/* Fill BL33 related information */
+    {
+	    .image_id = BL33_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+	    .ep_info.pc = PRELOADED_BL33_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+	    .ep_info.pc = HIKEY_NS_IMAGE_OFFSET,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = HIKEY_NS_IMAGE_OFFSET,
+	    .image_info.image_max_size = 0x200000 /* 2MB */,
+# endif /* PRELOADED_BL33_BASE */
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/hisilicon/hikey/hikey_bl2_setup.c b/plat/hisilicon/hikey/hikey_bl2_setup.c
new file mode 100644
index 0000000..86c205d
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_bl2_setup.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <dw_mmc.h>
+#include <emmc.h>
+#include <errno.h>
+#include <hi6220.h>
+#include <hisi_mcu.h>
+#include <hisi_sram_map.h>
+#include <mmio.h>
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#include <optee_utils.h>
+#endif
+#endif
+#include <platform_def.h>
+#include <sp804_delay_timer.h>
+#include <string.h>
+
+#include "hikey_def.h"
+#include "hikey_private.h"
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL2_RO_BASE (unsigned long)(&__RO_START__)
+#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+#if !LOAD_IMAGE_V2
+
+/*******************************************************************************
+ * This structure represents the superset of information that is passed to
+ * BL31 when control is handed over from BL2, i.e. the bl31_params plus
+ * other platform-specific parameters.
+ ******************************************************************************/
+typedef struct bl2_to_bl31_params_mem {
+	bl31_params_t		bl31_params;
+	image_info_t		bl31_image_info;
+	image_info_t		bl32_image_info;
+	image_info_t		bl33_image_info;
+	entry_point_info_t	bl33_ep_info;
+	entry_point_info_t	bl32_ep_info;
+	entry_point_info_t	bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	return &bl2_tzram_layout;
+}
+
+void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
+{
+	scp_bl2_meminfo->total_base = SCP_BL2_BASE;
+	scp_bl2_meminfo->total_size = SCP_BL2_SIZE;
+	scp_bl2_meminfo->free_base = SCP_BL2_BASE;
+	scp_bl2_meminfo->free_size = SCP_BL2_SIZE;
+}
+#endif /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+int plat_hikey_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#else
+int bl2_plat_handle_scp_bl2(struct image_info *scp_bl2_image_info)
+#endif
+{
+	/* Enable MCU SRAM */
+	hisi_mcu_enable_sram();
+
+	/* Load MCU binary into SRAM */
+	hisi_mcu_load_image(scp_bl2_image_info->image_base,
+			    scp_bl2_image_info->image_size);
+	/* Let the MCU start running */
+	hisi_mcu_start_run();
+
+	INFO("%s: MCU PC is at 0x%x\n",
+	     __func__, mmio_read_32(AO_SC_MCU_SUBSYS_STAT2));
+	INFO("%s: AO_SC_PERIPH_CLKSTAT4 is 0x%x\n",
+	     __func__, mmio_read_32(AO_SC_PERIPH_CLKSTAT4));
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+uint32_t hikey_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL3-2 image.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+#ifndef AARCH32
+uint32_t hikey_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#else
+uint32_t hikey_get_spsr_for_bl33_entry(void)
+{
+	unsigned int hyp_status, mode, spsr;
+
+	hyp_status = GET_VIRT_EXT(read_id_pfr1());
+
+	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#endif /* AARCH32 */
+
+#if LOAD_IMAGE_V2
+int hikey_bl2_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+#ifdef SPD_opteed
+	bl_mem_params_node_t *pager_mem_params = NULL;
+	bl_mem_params_node_t *paged_mem_params = NULL;
+#endif
+	assert(bl_mem_params);
+
+	switch (image_id) {
+#ifdef AARCH64
+	case BL32_IMAGE_ID:
+#ifdef SPD_opteed
+		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+		assert(pager_mem_params);
+
+		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+		assert(paged_mem_params);
+
+		err = parse_optee_header(&bl_mem_params->ep_info,
+				&pager_mem_params->image_info,
+				&paged_mem_params->image_info);
+		if (err != 0) {
+			WARN("OPTEE header parse error.\n");
+		}
+#endif
+		bl_mem_params->ep_info.spsr = hikey_get_spsr_for_bl32_entry();
+		break;
+#endif
+
+	case BL33_IMAGE_ID:
+		/* BL33 expects to receive the primary CPU MPID (through r0) */
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = hikey_get_spsr_for_bl33_entry();
+		break;
+
+#ifdef SCP_BL2_BASE
+	case SCP_BL2_IMAGE_ID:
+		/* The subsequent handling of SCP_BL2 is platform specific */
+		err = plat_hikey_bl2_handle_scp_bl2(&bl_mem_params->image_info);
+		if (err) {
+			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
+		}
+		break;
+#endif
+	}
+
+	return err;
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	return hikey_bl2_handle_post_image_load(image_id);
+}
+
+#else /* LOAD_IMAGE_V2 */
+
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+	bl31_params_t *bl2_to_bl31_params = NULL;
+
+	/*
+	 * Initialise the memory for all the arguments that need to
+	 * be passed to BL3-1.
+	 */
+	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
+
+	/* Assign memory for TF related information */
+	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+	/* Fill BL3-1 related information */
+	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	/* Fill BL3-2 related information if it exists */
+#ifdef BL32_BASE
+	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+		VERSION_1, 0);
+	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+#endif
+
+	/* Fill BL3-3 related information */
+	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+		PARAM_EP, VERSION_1, 0);
+
+	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
+	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	return bl2_to_bl31_params;
+}
+
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+	bl31_params_mem.bl31_ep_info.args.arg1 = HIKEY_BL31_PLAT_PARAM_VAL;
+#endif
+
+	return &bl31_params_mem.bl31_ep_info;
+}
+
+void bl2_plat_set_bl31_ep_info(image_info_t *image,
+			       entry_point_info_t *bl31_ep_info)
+{
+	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				       DISABLE_ALL_EXCEPTIONS);
+}
+
+/*******************************************************************************
+ * Before calling this function BL32 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL32 and set SPSR and security state.
+ * On Hikey we only set the security state of the entrypoint.
+ ******************************************************************************/
+#ifdef BL32_BASE
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+					entry_point_info_t *bl32_ep_info)
+{
+	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 */
+	bl32_ep_info->spsr = 0;
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL32
+ ******************************************************************************/
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+	/*
+	 * Populate the extents of memory available for loading BL32.
+	 */
+	bl32_meminfo->total_base = BL32_BASE;
+	bl32_meminfo->free_base = BL32_BASE;
+	bl32_meminfo->total_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+	bl32_meminfo->free_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+}
+#endif /* BL32_BASE */
+
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+			       entry_point_info_t *bl33_ep_info)
+{
+	unsigned long el_status;
+	unsigned int mode;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	if (el_status)
+		mode = MODE_EL2;
+	else
+		mode = MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
+				       DISABLE_ALL_EXCEPTIONS);
+	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+}
+
+void bl2_plat_flush_bl31_params(void)
+{
+	flush_dcache_range((unsigned long)&bl31_params_mem,
+			   sizeof(bl2_to_bl31_params_mem_t));
+}
+
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+	bl33_meminfo->total_base = DDR_BASE;
+	bl33_meminfo->total_size = DDR_SIZE;
+	bl33_meminfo->free_base = DDR_BASE;
+	bl33_meminfo->free_size = DDR_SIZE;
+}
+#endif /* LOAD_IMAGE_V2 */
+
+static void reset_dwmmc_clk(void)
+{
+	unsigned int data;
+
+	/* disable mmc0 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKDIS0, PERI_CLK0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (data & PERI_CLK0_MMC0);
+	/* enable mmc0 bus clock */
+	mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_CLKSTAT0);
+	} while (!(data & PERI_CLK0_MMC0));
+	/* reset mmc0 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTEN0, PERI_RST0_MMC0);
+
+	/* bypass mmc0 clock phase */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL2);
+	data |= 3;
+	mmio_write_32(PERI_SC_PERIPH_CTRL2, data);
+
+	/* disable low power */
+	data = mmio_read_32(PERI_SC_PERIPH_CTRL13);
+	data |= 1 << 3;
+	mmio_write_32(PERI_SC_PERIPH_CTRL13, data);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (!(data & PERI_RST0_MMC0));
+
+	/* unreset mmc0 clock domain */
+	mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC0);
+	do {
+		data = mmio_read_32(PERI_SC_PERIPH_RSTSTAT0);
+	} while (data & PERI_RST0_MMC0);
+}
+
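+/*
+ * Publish board identification data: the CPU MIDR and fixed board type/ID
+ * values are written to well-known AXI memory locations, and two flag words
+ * are set (presumably consumed by the MCU or later boot stages).
+ */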
+static void hikey_boardid_init(void)
+{
+	u_register_t midr;
+
+	midr = read_midr();
+	mmio_write_32(MEMORY_AXI_CHIP_ADDR, midr);
+	INFO("[BDID] [%x] midr: 0x%x\n", MEMORY_AXI_CHIP_ADDR,
+	     (unsigned int)midr);
+
+	mmio_write_32(MEMORY_AXI_BOARD_TYPE_ADDR, 0);
+	mmio_write_32(MEMORY_AXI_BOARD_ID_ADDR, 0x2b);
+
+	mmio_write_32(ACPU_ARM64_FLAGA, 0x1234);
+	mmio_write_32(ACPU_ARM64_FLAGB, 0x5678);
+}
+
+static void hikey_sd_init(void)
+{
+	/* switch pinmux to SD */
+	mmio_write_32(IOMG_SD_CLK, IOMG_MUX_FUNC0);
+	mmio_write_32(IOMG_SD_CMD, IOMG_MUX_FUNC0);
+	mmio_write_32(IOMG_SD_DATA0, IOMG_MUX_FUNC0);
+	mmio_write_32(IOMG_SD_DATA1, IOMG_MUX_FUNC0);
+	mmio_write_32(IOMG_SD_DATA2, IOMG_MUX_FUNC0);
+	mmio_write_32(IOMG_SD_DATA3, IOMG_MUX_FUNC0);
+
+	mmio_write_32(IOCG_SD_CLK, IOCG_INPUT_16MA);
+	mmio_write_32(IOCG_SD_CMD, IOCG_INPUT_12MA);
+	mmio_write_32(IOCG_SD_DATA0, IOCG_INPUT_12MA);
+	mmio_write_32(IOCG_SD_DATA1, IOCG_INPUT_12MA);
+	mmio_write_32(IOCG_SD_DATA2, IOCG_INPUT_12MA);
+	mmio_write_32(IOCG_SD_DATA3, IOCG_INPUT_12MA);
+
+	/* set SD Card detect as nopull */
+	mmio_write_32(IOCG_GPIO8, 0);
+}
+
+static void hikey_jumper_init(void)
+{
+	/* set jumper detect as nopull */
+	mmio_write_32(IOCG_GPIO24, 0);
+	/* set jumper detect as GPIO */
+	mmio_write_32(IOMG_GPIO24, IOMG_MUX_FUNC0);
+}
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	dw_mmc_params_t params;
+
+	/* Initialize the console to provide early debug support */
+	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Setup the BL2 memory layout */
+	bl2_tzram_layout = *mem_layout;
+
+	/* Clear SRAM since it'll be used by MCU right now. */
+	memset((void *)SRAM_BASE, 0, SRAM_SIZE);
+
+	sp804_timer_init(SP804_TIMER0_BASE, 10, 192);
+	dsb();
+	hikey_ddr_init();
+
+	hikey_boardid_init();
+	init_acpu_dvfs();
+	hikey_sd_init();
+	hikey_jumper_init();
+
+	reset_dwmmc_clk();
+	memset(&params, 0, sizeof(dw_mmc_params_t));
+	params.reg_base = DWMMC0_BASE;
+	params.desc_base = HIKEY_MMC_DESC_BASE;
+	params.desc_size = 1 << 20;
+	params.clk_rate = 24 * 1000 * 1000;
+	params.bus_width = EMMC_BUS_WIDTH_8;
+	params.flags = EMMC_FLAG_CMD23;
+	dw_mmc_init(&params);
+
+	hikey_io_setup();
+}
+
+void bl2_plat_arch_setup(void)
+{
+	hikey_init_mmu_el1(bl2_tzram_layout.total_base,
+			   bl2_tzram_layout.total_size,
+			   BL2_RO_BASE,
+			   BL2_RO_LIMIT,
+			   BL2_COHERENT_RAM_BASE,
+			   BL2_COHERENT_RAM_LIMIT);
+}
+
+void bl2_platform_setup(void)
+{
+}
diff --git a/plat/hisilicon/hikey/hikey_bl31_setup.c b/plat/hisilicon/hikey/hikey_bl31_setup.c
new file mode 100644
index 0000000..412b593
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_bl31_setup.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <gicv2.h>
+#include <hi6220.h>
+#include <hisi_ipc.h>
+#include <hisi_pwrc.h>
+#include <mmio.h>
+#include <platform_def.h>
+
+#include "hikey_def.h"
+#include "hikey_private.h"
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+static entry_point_info_t bl32_ep_info;
+static entry_point_info_t bl33_ep_info;
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+const unsigned int g0_interrupt_array[] = {
+	IRQ_SEC_PHY_TIMER,
+	IRQ_SEC_SGI_0
+};
+
+/*
+ * Ideally the `hikey_gic_data` structure should be declared `const`, but it
+ * is kept modifiable so that the GICD and GICC base addresses can be
+ * overridden at runtime if required.
+ */
+gicv2_driver_data_t hikey_gic_data = {
+	.gicd_base = PLAT_ARM_GICD_BASE,
+	.gicc_base = PLAT_ARM_GICC_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+};
+
+static const int cci_map[] = {
+	CCI400_SL_IFACE3_CLUSTER_IX,
+	CCI400_SL_IFACE4_CLUSTER_IX
+};
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	return NULL;
+}
+
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+			       void *plat_params_from_bl2)
+#else
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+			       void *plat_params_from_bl2)
+#endif
+{
+	/* Initialize the console to provide early debug support */
+	console_init(CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Initialize CCI driver */
+	cci_init(CCI400_BASE, cci_map, ARRAY_SIZE(cci_map));
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+#if LOAD_IMAGE_V2
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy the BL33 and BL32 (if present) entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params) {
+		if (bl_params->image_id == BL32_IMAGE_ID)
+			bl32_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_ep_info.pc == 0)
+		panic();
+
+#else /* LOAD_IMAGE_V2 */
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	assert(from_bl2 != NULL);
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+
+	/*
+	 * Copy BL3-2 and BL3-3 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	bl32_ep_info = *from_bl2->bl32_ep_info;
+	bl33_ep_info = *from_bl2->bl33_ep_info;
+#endif /* LOAD_IMAGE_V2 */
+}
+
+void bl31_plat_arch_setup(void)
+{
+	hikey_init_mmu_el3(BL31_BASE,
+			   BL31_LIMIT - BL31_BASE,
+			   BL31_RO_BASE,
+			   BL31_RO_LIMIT,
+			   BL31_COHERENT_RAM_BASE,
+			   BL31_COHERENT_RAM_LIMIT);
+}
+
+/* Initialize EDMAC controller with non-secure mode. */
+static void hikey_edma_init(void)
+{
+	int i;
+	uint32_t non_secure;
+
+	non_secure = EDMAC_SEC_CTRL_INTR_SEC | EDMAC_SEC_CTRL_GLOBAL_SEC;
+	mmio_write_32(EDMAC_SEC_CTRL, non_secure);
+
+	for (i = 0; i < EDMAC_CHANNEL_NUMS; i++) {
+		mmio_write_32(EDMAC_AXI_CONF(i), (1 << 6) | (1 << 18));
+	}
+}
+
+void bl31_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	gicv2_driver_init(&hikey_gic_data);
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+
+	hikey_edma_init();
+
+	hisi_ipc_init();
+	hisi_pwrc_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+}
diff --git a/plat/hisilicon/hikey/hikey_ddr.c b/plat/hisilicon/hikey/hikey_ddr.c
new file mode 100644
index 0000000..d00e953
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_ddr.c
@@ -0,0 +1,968 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errno.h>
+#include <hi6220.h>
+#include <hi6553.h>
+#include <mmio.h>
+#include <sp804_delay_timer.h>
+
+enum {
+	DDR_FREQ_533M = 0,
+	DDR_FREQ_800M,
+};
+
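+/*
+ * Best-effort description (the addresses below are undocumented magic
+ * registers): enable the PLL block at 0xf7032000 and poll bit 28, presumably
+ * a lock indication; switch the clock source selection at 0xf7800000; then
+ * read the resulting syspll frequency back from PERI_SC_PERIPH_STAT1.
+ */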
+static void init_pll(void)
+{
+	unsigned int data;
+
+	data = mmio_read_32((0xf7032000 + 0x000));
+	data |= 0x1;
+	mmio_write_32((0xf7032000 + 0x000), data);
+	dsb();
+	do {
+		data = mmio_read_32((0xf7032000 + 0x000));
+	} while (!(data & (1 << 28)));
+
+	data = mmio_read_32((0xf7800000 + 0x000));
+	data &= ~0x007;
+	data |= 0x004;
+	mmio_write_32((0xf7800000 + 0x000), data);
+	dsb();
+	do {
+		data = mmio_read_32((0xf7800000 + 0x014));
+		data &= 0x007;
+	} while (data != 0x004);
+
+	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
+	data = mmio_read_32(PERI_SC_PERIPH_STAT1);
+	mmio_write_32(0xf7032000 + 0x02c, 0x5110103e);
+	data = mmio_read_32(0xf7032000 + 0x050);
+	data |= 1 << 28;
+	mmio_write_32(0xf7032000 + 0x050, data);
+	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
+	mdelay(1);
+	data = mmio_read_32(PERI_SC_PERIPH_STAT1);
+	NOTICE("syspll frequency:%dHz\n", data);
+}
+
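+/*
+ * Best-effort description: reprogram the CPU/DDR clock source and divider
+ * fields through the undocumented registers at 0xf7032000 and 0xf6504000,
+ * polling the corresponding status bits after each step, and finally clear
+ * the temporary override bits that were set at the start of the sequence.
+ */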
+static void init_freq(void)
+{
+	unsigned int data, tmp;
+	unsigned int cpuext_cfg, ddr_cfg;
+
+	mmio_write_32((0xf7032000 + 0x374), 0x4a);
+	mmio_write_32((0xf7032000 + 0x368), 0xda);
+	mmio_write_32((0xf7032000 + 0x36c), 0x01);
+	mmio_write_32((0xf7032000 + 0x370), 0x01);
+	mmio_write_32((0xf7032000 + 0x360), 0x60);
+	mmio_write_32((0xf7032000 + 0x364), 0x60);
+
+	mmio_write_32((0xf7032000 + 0x114), 0x1000);
+
+	data = mmio_read_32((0xf7032000 + 0x110));
+	data |= (3 << 12);
+	mmio_write_32((0xf7032000 + 0x110), data);
+
+	data = mmio_read_32((0xf7032000 + 0x110));
+	data |= (1 << 4);
+	mmio_write_32((0xf7032000 + 0x110), data);
+
+
+	data = mmio_read_32((0xf7032000 + 0x110));
+	data &= ~0x7;
+	data |= 0x5;
+	mmio_write_32((0xf7032000 + 0x110), data);
+	dsb();
+	mdelay(10);
+
+
+	do {
+		data = mmio_read_32((0xf6504000 + 0x008));
+		data &= (3 << 20);
+	} while (data != (3 << 20));
+	dsb();
+	mdelay(10);
+
+
+	data = mmio_read_32((0xf6504000 + 0x054));
+	data &= ~((1 << 0) | (1 << 11));
+	mmio_write_32((0xf6504000 + 0x054), data);
+	mdelay(10);
+
+	data = mmio_read_32((0xf7032000 + 0x104));
+	data &= ~(3 << 8);
+	data |= (1 << 8);
+	mmio_write_32((0xf7032000 + 0x104), data);
+
+	data = mmio_read_32((0xf7032000 + 0x100));
+	data |= (1 << 0);
+	mmio_write_32((0xf7032000 + 0x100), data);
+	dsb();
+
+	do {
+		data = mmio_read_32((0xf7032000 + 0x100));
+		data &= (1 << 2);
+	} while (data != (1 << 2));
+
+	data = mmio_read_32((0xf6504000 + 0x06c));
+	data &= ~0xffff;
+	data |= 0x56;
+	mmio_write_32((0xf6504000 + 0x06c), data);
+
+	data = mmio_read_32((0xf6504000 + 0x06c));
+	data &= ~(0xffffff << 8);
+	data |= 0xc7a << 8;
+	mmio_write_32((0xf6504000 + 0x06c), data);
+
+	data = mmio_read_32((0xf6504000 + 0x058));
+	data &= ((1 << 13) - 1);
+	data |= 0xccb;
+	mmio_write_32((0xf6504000 + 0x058), data);
+
+	mmio_write_32((0xf6504000 + 0x060), 0x1fff);
+	mmio_write_32((0xf6504000 + 0x064), 0x1ffffff);
+	mmio_write_32((0xf6504000 + 0x068), 0x7fffffff);
+	mmio_write_32((0xf6504000 + 0x05c), 0x1);
+
+	data = mmio_read_32((0xf6504000 + 0x054));
+	data &= ~(0xf << 12);
+	data |= 1 << 12;
+	mmio_write_32((0xf6504000 + 0x054), data);
+	dsb();
+
+
+	data = mmio_read_32((0xf7032000 + 0x000));
+	data &= ~(1 << 0);
+	mmio_write_32((0xf7032000 + 0x000), data);
+
+	mmio_write_32((0xf7032000 + 0x004), 0x5110207d);
+	mmio_write_32((0xf7032000 + 0x134), 0x10000005);
+	data = mmio_read_32((0xf7032000 + 0x134));
+
+
+	data = mmio_read_32((0xf7032000 + 0x000));
+	data |= (1 << 0);
+	mmio_write_32((0xf7032000 + 0x000), data);
+
+	mmio_write_32((0xf7032000 + 0x368), 0x100da);
+	data = mmio_read_32((0xf7032000 + 0x378));
+	data &= ~((1 << 7) - 1);
+	data |= 0x6b;
+	mmio_write_32((0xf7032000 + 0x378), data);
+	dsb();
+	do {
+		data = mmio_read_32((0xf7032000 + 0x378));
+		tmp = data & 0x7f;
+		data = (data & (0x7f << 8)) >> 8;
+		if (data != tmp)
+			continue;
+		data = mmio_read_32((0xf7032000 + 0x37c));
+	} while (!(data & 1));
+
+	data = mmio_read_32((0xf7032000 + 0x104));
+	data &= ~((3 << 0) |
+			(3 << 8));
+	cpuext_cfg = 1;
+	ddr_cfg = 1;
+	data |= cpuext_cfg | (ddr_cfg << 8);
+	mmio_write_32((0xf7032000 + 0x104), data);
+	dsb();
+
+	do {
+		data = mmio_read_32((0xf7032000 + 0x104));
+		tmp = (data & (3 << 16)) >> 16;
+		if (cpuext_cfg != tmp)
+			continue;
+		tmp = (data & (3 << 24)) >> 24;
+		if (ddr_cfg != tmp)
+			continue;
+		data = mmio_read_32((0xf7032000 + 0x000));
+		data &= 1 << 28;
+	} while (!data);
+
+	data = mmio_read_32((0xf7032000 + 0x100));
+	data &= ~(1 << 0);
+	mmio_write_32((0xf7032000 + 0x100), data);
+	dsb();
+	do {
+		data = mmio_read_32((0xf7032000 + 0x100));
+		data &= (1 << 1);
+	} while (data != (1 << 1));
+	mdelay(1000);
+
+	data = mmio_read_32((0xf6504000 + 0x054));
+	data &= ~(1 << 28);
+	mmio_write_32((0xf6504000 + 0x054), data);
+	dsb();
+
+	data = mmio_read_32((0xf7032000 + 0x110));
+	data &= ~((1 << 4) |
+			(3 << 12));
+	mmio_write_32((0xf7032000 + 0x110), data);
+}
+
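+/*
+ * Appears to run LPDDR3 command/address training (CAT): sweep the address
+ * bit delay line (bdl) setting from 0 to 0x1f, trigger a PHY training pass
+ * via register 0x004 and check the result in register 0x008. Returns 0 on
+ * success, -EINVAL if no delay setting passes.
+ */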
+int cat_533mhz_800mhz(void)
+{
+	unsigned int data, i;
+	unsigned int bdl[5];
+
+
+	data = mmio_read_32((0xf712c000 + 0x1c8));
+	data &= 0xfffff0f0;
+	data |= 0x100f0f;
+	mmio_write_32((0xf712c000 + 0x1c8), data);
+
+	for (i = 0; i < 0x20; i++) {
+		mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
+		data = (i << 0x10) + i;
+		mmio_write_32((0xf712c000 + 0x140), data);
+		mmio_write_32((0xf712c000 + 0x144), data);
+		mmio_write_32((0xf712c000 + 0x148), data);
+		mmio_write_32((0xf712c000 + 0x14c), data);
+		mmio_write_32((0xf712c000 + 0x150), data);
+
+
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data |= 0x80000;
+		mmio_write_32((0xf712c000 + 0x070), data);
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data &= 0xfff7ffff;
+		mmio_write_32((0xf712c000 + 0x070), data);
+
+
+		mmio_write_32((0xf712c000 + 0x004), 0x8000);
+		mmio_write_32((0xf712c000 + 0x004), 0x0);
+		mmio_write_32((0xf712c000 + 0x004), 0x801);
+		do {
+			data = mmio_read_32((0xf712c000 + 0x004));
+		} while (data & 1);
+
+		data = mmio_read_32((0xf712c000 + 0x008));
+		if (!(data & 0x400)) {
+			mdelay(10);
+			return 0;
+		}
+		WARN("lpddr3 cat fail\n");
+		data = mmio_read_32((0xf712c000 + 0x1d4));
+		if ((data & 0x1f00) && ((data & 0x1f) == 0)) {
+			bdl[0] = mmio_read_32((0xf712c000 + 0x140));
+			bdl[1] = mmio_read_32((0xf712c000 + 0x144));
+			bdl[2] = mmio_read_32((0xf712c000 + 0x148));
+			bdl[3] = mmio_read_32((0xf712c000 + 0x14c));
+			bdl[4] = mmio_read_32((0xf712c000 + 0x150));
+			if ((!(bdl[0] & 0x1f001f)) || (!(bdl[1] & 0x1f001f)) ||
+					(!(bdl[2] & 0x1f001f)) || (!(bdl[3] & 0x1f001f)) ||
+					(!(bdl[4] & 0x1f001f))) {
+				WARN("lpddr3 cat deskew error\n");
+				if (i == 0x1f) {
+					WARN("addrnbdl is max\n");
+					return -EINVAL;
+				}
+				mmio_write_32((0xf712c000 + 0x008), 0x400);
+			} else {
+				WARN("lpddr3 cat other error1\n");
+				return -EINVAL;
+			}
+		} else {
+			WARN("lpddr3 cat other error2\n");
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
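+/*
+ * Appears to perform read data training: run a PHY training pass, then keep
+ * incrementing the four per-byte-lane bdl delay values and re-running the
+ * training until the "ds" stage passes, finishing with a final verification
+ * ("rbs av") pass.
+ */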
+static void ddrx_rdet(void)
+{
+	unsigned int data, rdet, bdl[4];
+
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= 0xf800ffff;
+	data |= 0x8f0000;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+
+	data = mmio_read_32((0xf712c000 + 0x0dc));
+	data &= 0xfffffff0;
+	data |= 0xf;
+	mmio_write_32((0xf712c000 + 0x0dc), data);
+
+
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data |= 0x80000;
+	mmio_write_32((0xf712c000 + 0x070), data);
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data &= 0xfff7ffff;
+	mmio_write_32((0xf712c000 + 0x070), data);
+
+	mmio_write_32((0xf712c000 + 0x004), 0x8000);
+	mmio_write_32((0xf712c000 + 0x004), 0);
+
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= ~0xf0000000;
+	data |= 0x80000000;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+
+	mmio_write_32((0xf712c000 + 0x004), 0x101);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (!(data & 1));
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x100)
+		WARN("rdet lbs fail\n");
+
+	bdl[0] = mmio_read_32((0xf712c000 + 0x22c)) & 0x7f;
+	bdl[1] = mmio_read_32((0xf712c000 + 0x2ac)) & 0x7f;
+	bdl[2] = mmio_read_32((0xf712c000 + 0x32c)) & 0x7f;
+	bdl[3] = mmio_read_32((0xf712c000 + 0x3ac)) & 0x7f;
+	do {
+		data = mmio_read_32((0xf712c000 + 0x22c));
+		data &= ~0x7f;
+		data |= bdl[0];
+		mmio_write_32((0xf712c000 + 0x22c), data);
+		data = mmio_read_32((0xf712c000 + 0x2ac));
+		data &= ~0x7f;
+		data |= bdl[1];
+		mmio_write_32((0xf712c000 + 0x2ac), data);
+		data = mmio_read_32((0xf712c000 + 0x32c));
+		data &= ~0x7f;
+		data |= bdl[2];
+		mmio_write_32((0xf712c000 + 0x32c), data);
+		data = mmio_read_32((0xf712c000 + 0x3ac));
+		data &= ~0x7f;
+		data |= bdl[3];
+		mmio_write_32((0xf712c000 + 0x3ac), data);
+
+
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data |= 0x80000;
+		mmio_write_32((0xf712c000 + 0x070), data);
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data &= 0xfff7ffff;
+		mmio_write_32((0xf712c000 + 0x070), data);
+
+		mmio_write_32((0xf712c000 + 0x004), 0x8000);
+		mmio_write_32((0xf712c000 + 0x004), 0);
+
+		data = mmio_read_32((0xf712c000 + 0x0d0));
+		data &= ~0xf0000000;
+		data |= 0x40000000;
+		mmio_write_32((0xf712c000 + 0x0d0), data);
+		mmio_write_32((0xf712c000 + 0x004), 0x101);
+		do {
+			data = mmio_read_32((0xf712c000 + 0x004));
+		} while (data & 1);
+
+		data = mmio_read_32((0xf712c000 + 0x008));
+		rdet = data & 0x100;
+		if (rdet) {
+			INFO("rdet ds fail\n");
+			mmio_write_32((0xf712c000 + 0x008), 0x100);
+		}
+		bdl[0]++;
+		bdl[1]++;
+		bdl[2]++;
+		bdl[3]++;
+	} while (rdet);
+
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= ~0xf0000000;
+	data |= 0x30000000;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+
+	mmio_write_32((0xf712c000 + 0x004), 0x101);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x100)
+		INFO("rdet rbs av fail\n");
+}
+
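+/*
+ * Appears to perform write data training, mirroring ddrx_rdet(): the
+ * per-lane dq phase values are stepped down by 0x100 when a zero bdl is
+ * detected, and the training pass is repeated until the "ds" stage passes,
+ * followed by a final verification pass.
+ */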
+static void ddrx_wdet(void)
+{
+	unsigned int data, wdet, zero_bdl = 0, dq[4];
+	int i;
+
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= ~0xf;
+	data |= 0xf;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data |= 0x80000;
+	mmio_write_32((0xf712c000 + 0x070), data);
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data &= ~0x80000;
+	mmio_write_32((0xf712c000 + 0x070), data);
+
+	mmio_write_32((0xf712c000 + 0x004), 0x8000);
+	mmio_write_32((0xf712c000 + 0x004), 0);
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= ~0xf000;
+	data |= 0x8000;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+	mmio_write_32((0xf712c000 + 0x004), 0x201);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x200)
+		INFO("wdet lbs fail\n");
+
+	dq[0] = mmio_read_32((0xf712c000 + 0x234)) & 0x1f00;
+	dq[1] = mmio_read_32((0xf712c000 + 0x2b4)) & 0x1f00;
+	dq[2] = mmio_read_32((0xf712c000 + 0x334)) & 0x1f00;
+	dq[3] = mmio_read_32((0xf712c000 + 0x3b4)) & 0x1f00;
+
+	do {
+		mmio_write_32((0xf712c000 + 0x234), dq[0]);
+		mmio_write_32((0xf712c000 + 0x2b4), dq[1]);
+		mmio_write_32((0xf712c000 + 0x334), dq[2]);
+		mmio_write_32((0xf712c000 + 0x3b4), dq[3]);
+
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data |= 0x80000;
+		mmio_write_32((0xf712c000 + 0x070), data);
+		data = mmio_read_32((0xf712c000 + 0x070));
+		data &= ~0x80000;
+		mmio_write_32((0xf712c000 + 0x070), data);
+		mmio_write_32((0xf712c000 + 0x004), 0x8000);
+		mmio_write_32((0xf712c000 + 0x004), 0);
+
+		data = mmio_read_32((0xf712c000 + 0x0d0));
+		data &= ~0xf000;
+		data |= 0x4000;
+		mmio_write_32((0xf712c000 + 0x0d0), data);
+		mmio_write_32((0xf712c000 + 0x004), 0x201);
+		do {
+			data = mmio_read_32((0xf712c000 + 0x004));
+		} while (data & 1);
+
+		data = mmio_read_32((0xf712c000 + 0x008));
+		wdet = data & 0x200;
+		if (wdet) {
+			INFO("wdet ds fail\n");
+			mmio_write_32((0xf712c000 + 0x008), 0x200);
+		}
+		mdelay(10);
+
+		for (i = 0; i < 4; i++) {
+			data = mmio_read_32((0xf712c000 + 0x210 + i * 0x80));
+			if ((!(data & 0x1f)) || (!(data & 0x1f00)) ||
+			    (!(data & 0x1f0000)) || (!(data & 0x1f000000)))
+				zero_bdl = 1;
+			data = mmio_read_32((0xf712c000 + 0x214 + i * 0x80));
+			if ((!(data & 0x1f)) || (!(data & 0x1f00)) ||
+			    (!(data & 0x1f0000)) || (!(data & 0x1f000000)))
+				zero_bdl = 1;
+			data = mmio_read_32((0xf712c000 + 0x218 + i * 0x80));
+			if (!(data & 0x1f))
+				zero_bdl = 1;
+			if (zero_bdl) {
+				if (i == 0)
+					dq[0] = dq[0] - 0x100;
+				if (i == 1)
+					dq[1] = dq[1] - 0x100;
+				if (i == 2)
+					dq[2] = dq[2] - 0x100;
+				if (i == 3)
+					dq[3] = dq[3] - 0x100;
+			}
+		}
+	} while (wdet);
+
+	data = mmio_read_32((0xf712c000 + 0x0d0));
+	data &= ~0xf000;
+	data |= 0x3000;
+	mmio_write_32((0xf712c000 + 0x0d0), data);
+	mmio_write_32((0xf712c000 + 0x004), 0x201);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x200)
+		INFO("wdet rbs av fail\n");
+}
+
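+/*
+ * Program the DDR controller and PHY timing registers for the 533MHz
+ * operating point and kick off PHY initialization, reporting the outcome
+ * from the PHY status register. The values are vendor tuning constants
+ * taken as-is.
+ */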
+static void set_ddrc_533mhz(void)
+{
+	unsigned int data;
+
+	mmio_write_32((0xf7032000 + 0x580), 0x3);
+	mmio_write_32((0xf7032000 + 0x5a8), 0x11111);
+	data = mmio_read_32((0xf7032000 + 0x104));
+	data |= 0x100;
+	mmio_write_32((0xf7032000 + 0x104), data);
+
+	mmio_write_32((0xf7030000 + 0x050), 0x30);
+	mmio_write_32((0xf7030000 + 0x240), 0x5ffff);
+	mmio_write_32((0xf7030000 + 0x344), 0xf5ff);
+	mmio_write_32((0xf712c000 + 0x00c), 0x400);
+	mmio_write_32((0xf712c000 + 0x018), 0x7);
+	mmio_write_32((0xf712c000 + 0x090), 0x6400000);
+	mmio_write_32((0xf712c000 + 0x258), 0x640);
+	mmio_write_32((0xf712c000 + 0x2d8), 0x640);
+	mmio_write_32((0xf712c000 + 0x358), 0x640);
+	mmio_write_32((0xf712c000 + 0x3d8), 0x640);
+	mmio_write_32((0xf712c000 + 0x018), 0x0);
+	mmio_write_32((0xf712c000 + 0x0b0), 0xf00000f);
+	mmio_write_32((0xf712c000 + 0x0b4), 0xf);
+	mmio_write_32((0xf712c000 + 0x088), 0x3fff801);
+	mmio_write_32((0xf712c000 + 0x070), 0x8940000);
+
+	data = mmio_read_32((0xf712c000 + 0x078));
+	data |= 4;
+	mmio_write_32((0xf712c000 + 0x078), data);
+	mmio_write_32((0xf712c000 + 0x01c), 0x8000080);
+	data = mmio_read_32((0xf712c000 + 0x020));
+	data &= 0xfffffffe;
+	mmio_write_32((0xf712c000 + 0x020), data);
+	mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
+	mmio_write_32((0xf712c000 + 0x010), 0x500000f);
+	mmio_write_32((0xf712c000 + 0x014), 0x10);
+	data = mmio_read_32((0xf712c000 + 0x1e4));
+	data &= 0xffffff00;
+	mmio_write_32((0xf712c000 + 0x1e4), data);
+	mmio_write_32((0xf712c000 + 0x030), 0x9dd87855);
+	mmio_write_32((0xf712c000 + 0x034), 0xa7138bb);
+	mmio_write_32((0xf712c000 + 0x038), 0x20091477);
+	mmio_write_32((0xf712c000 + 0x03c), 0x84534e16);
+	mmio_write_32((0xf712c000 + 0x040), 0x3008817);
+	mmio_write_32((0xf712c000 + 0x064), 0x106c3);
+	mmio_write_32((0xf712c000 + 0x068), 0xff0a0000);
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data &= 0xffff0000;
+	data |= 0x305;
+	mmio_write_32((0xf712c000 + 0x070), data);
+	data = mmio_read_32((0xf712c000 + 0x048));
+	data |= 0x40000000;
+	mmio_write_32((0xf712c000 + 0x048), data);
+	data = mmio_read_32((0xf712c000 + 0x020));
+	data &= ~0x10;
+	mmio_write_32((0xf712c000 + 0x020), data);
+	data = mmio_read_32((0xf712c000 + 0x080));
+	data &= ~0x2000;
+	mmio_write_32((0xf712c000 + 0x080), data);
+	mmio_write_32((0xf712c000 + 0x270), 0x3);
+	mmio_write_32((0xf712c000 + 0x2f0), 0x3);
+	mmio_write_32((0xf712c000 + 0x370), 0x3);
+	mmio_write_32((0xf712c000 + 0x3f0), 0x3);
+	mmio_write_32((0xf712c000 + 0x048), 0xd0420900);
+
+	mmio_write_32((0xf7128000 + 0x040), 0x0);
+	mmio_write_32((0xf712c000 + 0x004), 0x140f);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x7fe) {
+		NOTICE("failed to init lpddr3 rank0 dram phy\n");
+		return;
+	}
+	NOTICE("succeed to init lpddr3 rank0 dram phy\n");
+}
+
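+/*
+ * Same as set_ddrc_533mhz(), but with the timing constants for the 800MHz
+ * operating point.
+ */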
+static void set_ddrc_800mhz(void)
+{
+	unsigned int data;
+
+	mmio_write_32((0xf7032000 + 0x580), 0x2);
+	mmio_write_32((0xf7032000 + 0x5a8), 0x1003);
+	data = mmio_read_32((0xf7032000 + 0x104));
+	data &= 0xfffffcff;
+	mmio_write_32((0xf7032000 + 0x104), data);
+
+	mmio_write_32((0xf7030000 + 0x050), 0x30);
+	mmio_write_32((0xf7030000 + 0x240), 0x5ffff);
+	mmio_write_32((0xf7030000 + 0x344), 0xf5ff);
+	mmio_write_32((0xf712c000 + 0x00c), 0x400);
+	mmio_write_32((0xf712c000 + 0x018), 0x7);
+	mmio_write_32((0xf712c000 + 0x090), 0x5400000);
+	mmio_write_32((0xf712c000 + 0x258), 0x540);
+	mmio_write_32((0xf712c000 + 0x2d8), 0x540);
+	mmio_write_32((0xf712c000 + 0x358), 0x540);
+	mmio_write_32((0xf712c000 + 0x3d8), 0x540);
+	mmio_write_32((0xf712c000 + 0x018), 0x0);
+	mmio_write_32((0xf712c000 + 0x0b0), 0xf00000f);
+	mmio_write_32((0xf712c000 + 0x0b4), 0xf);
+	mmio_write_32((0xf712c000 + 0x088), 0x3fff801);
+	mmio_write_32((0xf712c000 + 0x070), 0x8940000);
+
+	data = mmio_read_32((0xf712c000 + 0x078));
+	data |= 4;
+	mmio_write_32((0xf712c000 + 0x078), data);
+	mmio_write_32((0xf712c000 + 0x01c), 0x8000080);
+	data = mmio_read_32((0xf712c000 + 0x020));
+	data &= 0xfffffffe;
+	mmio_write_32((0xf712c000 + 0x020), data);
+	mmio_write_32((0xf712c000 + 0x1d4), 0xc0000);
+	mmio_write_32((0xf712c000 + 0x010), 0x500000f);
+	mmio_write_32((0xf712c000 + 0x014), 0x10);
+	data = mmio_read_32((0xf712c000 + 0x1e4));
+	data &= 0xffffff00;
+	mmio_write_32((0xf712c000 + 0x1e4), data);
+	mmio_write_32((0xf712c000 + 0x030), 0xe663ab77);
+	mmio_write_32((0xf712c000 + 0x034), 0xea952db);
+	mmio_write_32((0xf712c000 + 0x038), 0x200d1cb1);
+	mmio_write_32((0xf712c000 + 0x03c), 0xc67d0721);
+	mmio_write_32((0xf712c000 + 0x040), 0x3008aa1);
+	mmio_write_32((0xf712c000 + 0x064), 0x11a43);
+	mmio_write_32((0xf712c000 + 0x068), 0xff0a0000);
+	data = mmio_read_32((0xf712c000 + 0x070));
+	data &= 0xffff0000;
+	data |= 0x507;
+	mmio_write_32((0xf712c000 + 0x070), data);
+	data = mmio_read_32((0xf712c000 + 0x048));
+	data |= 0x40000000;
+	mmio_write_32((0xf712c000 + 0x048), data);
+	data = mmio_read_32((0xf712c000 + 0x020));
+	data &= 0xffffffef;
+	mmio_write_32((0xf712c000 + 0x020), data);
+	data = mmio_read_32((0xf712c000 + 0x080));
+	data &= 0xffffdfff;
+	mmio_write_32((0xf712c000 + 0x080), data);
+	mmio_write_32((0xf712c000 + 0x270), 0x3);
+	mmio_write_32((0xf712c000 + 0x2f0), 0x3);
+	mmio_write_32((0xf712c000 + 0x370), 0x3);
+	mmio_write_32((0xf712c000 + 0x3f0), 0x3);
+	mmio_write_32((0xf712c000 + 0x048), 0xd0420900);
+
+	mmio_write_32((0xf7128000 + 0x040), 0x2001);
+	mmio_write_32((0xf712c000 + 0x004), 0x140f);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x7fe) {
+		WARN("failed to init lpddr3 rank0 dram phy\n");
+		return;
+	}
+}
+
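+/*
+ * Frequency-independent DDR controller setup: QoS/port defaults, a
+ * refresh-related value that depends on the ddr800 flag, and finally
+ * taking the controller out of its initialization state.
+ */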
+static void ddrc_common_init(int ddr800)
+{
+	unsigned int data;
+
+	mmio_write_32((0xf7120000 + 0x020), 0x1);
+	mmio_write_32((0xf7120000 + 0x100), 0x1700);
+	mmio_write_32((0xf7120000 + 0x104), 0x71040004);
+	mmio_write_32((0xf7121400 + 0x104), 0xf);
+	mmio_write_32((0xf7121800 + 0x104), 0xf);
+	mmio_write_32((0xf7121800 + 0x104), 0xf);
+	mmio_write_32((0xf7121c00 + 0x104), 0xf);
+	mmio_write_32((0xf7122000 + 0x104), 0xf);
+	mmio_write_32((0xf7128000 + 0x02c), 0x6);
+	mmio_write_32((0xf7128000 + 0x020), 0x1);
+	mmio_write_32((0xf7128000 + 0x028), 0x310201);
+	mmio_write_32((0xf712c000 + 0x1e4), 0xfe007600);
+	mmio_write_32((0xf7128000 + 0x01c), 0xaf001);
+
+
+	data = mmio_read_32((0xf7128000 + 0x280));
+	data |= 1 << 7;
+	mmio_write_32((0xf7128000 + 0x280), data);
+	mmio_write_32((0xf7128000 + 0x244), 0x3);
+
+	if (ddr800)
+		mmio_write_32((0xf7128000 + 0x240), 167 * 400000 / 1024);
+	else
+		mmio_write_32((0xf7128000 + 0x240), 167 * 533000 / 1024);
+
+	data = mmio_read_32((0xf712c000 + 0x080));
+	data &= 0xffff;
+	data |= 0x4002000;
+	mmio_write_32((0xf712c000 + 0x080), data);
+	mmio_write_32((0xf7128000 + 0x000), 0x0);
+	do {
+		data = mmio_read_32((0xf7128000 + 0x294));
+	} while (data & 1);
+	mmio_write_32((0xf7128000 + 0x000), 0x2);
+}
+
+
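+/*
+ * Issue a mode register read through the controller and use the returned
+ * value (presumably the die count/density) to select the row/column
+ * address configuration. Returns -EINVAL if the field reads back as zero.
+ */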
+static int dienum_det_and_rowcol_cfg(void)
+{
+	unsigned int data;
+
+	mmio_write_32((0xf7128000 + 0x210), 0x87);
+	mmio_write_32((0xf7128000 + 0x218), 0x10000);
+	mmio_write_32((0xf7128000 + 0x00c), 0x1);
+	do {
+		data = mmio_read_32((0xf7128000 + 0x00c));
+	} while (data & 1);
+	data = mmio_read_32((0xf7128000 + 0x4a8)) & 0xfc;
+	switch (data) {
+	case 0x18:
+		mmio_write_32((0xf7128000 + 0x060), 0x132);
+		mmio_write_32((0xf7128000 + 0x064), 0x132);
+		mmio_write_32((0xf7120000 + 0x100), 0x1600);
+		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
+		break;
+	case 0x1c:
+		mmio_write_32((0xf7128000 + 0x060), 0x142);
+		mmio_write_32((0xf7128000 + 0x064), 0x142);
+		mmio_write_32((0xf7120000 + 0x100), 0x1700);
+		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
+		break;
+	case 0x58:
+		mmio_write_32((0xf7128000 + 0x060), 0x133);
+		mmio_write_32((0xf7128000 + 0x064), 0x133);
+		mmio_write_32((0xf7120000 + 0x100), 0x1700);
+		mmio_write_32((0xf7120000 + 0x104), 0x71040004);
+		break;
+	default:
+		break;
+	}
+	if (!data)
+		return -EINVAL;
+	return 0;
+}
+
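+/*
+ * Read LPDDR3 mode registers MR5/MR6/MR7 (manufacturer ID and revision IDs)
+ * through the controller, log the detected vendor and return the three
+ * values packed into a single word.
+ */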
+static int detect_ddr_chip_info(void)
+{
+	unsigned int data, mr5, mr6, mr7;
+
+	mmio_write_32((0xf7128000 + 0x210), 0x57);
+	mmio_write_32((0xf7128000 + 0x218), 0x10000);
+	mmio_write_32((0xf7128000 + 0x00c), 0x1);
+
+	do {
+		data = mmio_read_32((0xf7128000 + 0x00c));
+	} while (data & 1);
+
+	data = mmio_read_32((0xf7128000 + 0x4a8));
+	mr5 = data & 0xff;
+	switch (mr5) {
+	case 1:
+		INFO("Samsung DDR\n");
+		break;
+	case 6:
+		INFO("Hynix DDR\n");
+		break;
+	case 3:
+		INFO("Elpida DDR\n");
+		break;
+	default:
+		INFO("DDR from other vendors\n");
+		break;
+	}
+
+	mmio_write_32((0xf7128000 + 0x210), 0x67);
+	mmio_write_32((0xf7128000 + 0x218), 0x10000);
+	mmio_write_32((0xf7128000 + 0x00c), 0x1);
+	do {
+		data = mmio_read_32((0xf7128000 + 0x00c));
+	} while (data & 1);
+	data = mmio_read_32((0xf7128000 + 0x4a8));
+	mr6 = data & 0xff;
+	mmio_write_32((0xf7128000 + 0x210), 0x77);
+	mmio_write_32((0xf7128000 + 0x218), 0x10000);
+	mmio_write_32((0xf7128000 + 0x00c), 0x1);
+	do {
+		data = mmio_read_32((0xf7128000 + 0x00c));
+	} while (data & 1);
+	data = mmio_read_32((0xf7128000 + 0x4a8));
+	mr7 = data & 0xff;
+	data = mr5 + (mr6 << 8) + (mr7 << 16);
+	return data;
+}
+
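+/*
+ * Full LPDDR3 bring-up for the requested frequency: program the
+ * frequency-specific controller settings and DRAM timing parameters, run
+ * PHY initialization plus read/write training for rank 0, then attempt
+ * what appears to be the equivalent initialization pass for rank 1.
+ */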
+int lpddr3_freq_init(int freq)
+{
+	unsigned int data;
+
+	if (freq == DDR_FREQ_800M) {
+		set_ddrc_800mhz();
+		INFO("%s, set ddrc 800mhz\n", __func__);
+	} else {
+		set_ddrc_533mhz();
+		INFO("%s, set ddrc 533mhz\n", __func__);
+	}
+
+	mmio_write_32((0xf712c000 + 0x004), 0xf1);
+	if (freq == DDR_FREQ_800M)
+		mmio_write_32((0xf7128000 + 0x050), 0x100023);
+	else
+		mmio_write_32((0xf7128000 + 0x050), 0x100123);
+	mmio_write_32((0xf7128000 + 0x060), 0x133);
+	mmio_write_32((0xf7128000 + 0x064), 0x133);
+	mmio_write_32((0xf7128000 + 0x200), 0xa1000);
+
+	if (freq == DDR_FREQ_800M) {
+		mmio_write_32((0xf7128000 + 0x100), 0x755a9d12);
+		mmio_write_32((0xf7128000 + 0x104), 0x1753b055);
+		mmio_write_32((0xf7128000 + 0x108), 0x7401505f);
+		mmio_write_32((0xf7128000 + 0x10c), 0x578ca244);
+		mmio_write_32((0xf7128000 + 0x110), 0x10700000);
+		mmio_write_32((0xf7128000 + 0x114), 0x13141306);
+	} else {
+		mmio_write_32((0xf7128000 + 0x100), 0xb77b6718);
+		mmio_write_32((0xf7128000 + 0x104), 0x1e82a071);
+		mmio_write_32((0xf7128000 + 0x108), 0x9501c07e);
+		mmio_write_32((0xf7128000 + 0x10c), 0xaf50c255);
+		mmio_write_32((0xf7128000 + 0x110), 0x10b00000);
+		mmio_write_32((0xf7128000 + 0x114), 0x13181908);
+	}
+	mmio_write_32((0xf7128000 + 0x118), 0x44);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x7fe) {
+		NOTICE("fail to init ddr3 rank0\n");
+		return -EFAULT;
+	}
+	INFO("init ddr3 rank0\n");
+	ddrx_rdet();
+	ddrx_wdet();
+
+	data = mmio_read_32((0xf712c000 + 0x048));
+	data |= 1;
+	mmio_write_32((0xf712c000 + 0x048), data);
+	mmio_write_32((0xf712c000 + 0x004), 0x21);
+	do {
+		data = mmio_read_32((0xf712c000 + 0x004));
+	} while (data & 1);
+
+	data = mmio_read_32((0xf712c000 + 0x008));
+	if (data & 0x7fe)
+		NOTICE("ddr3 rank1 init failure\n");
+	else
+		INFO("ddr3 rank1 init pass\n");
+
+	data = mmio_read_32((0xf712c000 + 0x048));
+	data &= ~0xf;
+	mmio_write_32((0xf712c000 + 0x048), data);
+	return 0;
+}
+
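+/*
+ * Enable the DDR-related clock/power controls at 0xf7032000 + 0x030/0x010,
+ * wait for their status bits to report readiness, then run the LPDDR3
+ * initialization at the requested frequency.
+ */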
+static void init_ddr(int freq)
+{
+	unsigned int data;
+	int ret;
+
+
+	data = mmio_read_32((0xf7032000 + 0x030));
+	data |= 1;
+	mmio_write_32((0xf7032000 + 0x030), data);
+	data = mmio_read_32((0xf7032000 + 0x010));
+	data |= 1;
+	mmio_write_32((0xf7032000 + 0x010), data);
+
+	udelay(100);
+	do {
+		data = mmio_read_32((0xf7032000 + 0x030));
+		data &= 3 << 28;
+	} while (data != (3 << 28));
+	do {
+		data = mmio_read_32((0xf7032000 + 0x010));
+		data &= 3 << 28;
+	} while (data != (3 << 28));
+
+	ret = lpddr3_freq_init(freq);
+	if (ret)
+		return;
+}
+
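+/*
+ * Program per-port QoS priorities and arbitration settings in the DDR
+ * controller's QoS block (0xf7120000/0xf7124000). The individual values are
+ * presumably vendor tuning constants and are taken as-is.
+ */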
+static void init_ddrc_qos(void)
+{
+	unsigned int port, data;
+
+	mmio_write_32((0xf7124000 + 0x088), 1);
+
+	port = 0;
+	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x1210);
+	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x11111111);
+	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x11111111);
+	mmio_write_32((0xf7120000 + 0x400 + 0 * 0x10), 0x001d0007);
+
+	for (port = 3; port <= 4; port++) {
+		mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x1210);
+		mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x77777777);
+		mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x77777777);
+	}
+
+	port = 1;
+	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x30000);
+	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x1234567);
+	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x1234567);
+
+	mmio_write_32((0xf7124000 + 0x1f0), 0);
+	mmio_write_32((0xf7124000 + 0x0bc), 0x3020100);
+	mmio_write_32((0xf7124000 + 0x0d0), 0x3020100);
+	mmio_write_32((0xf7124000 + 0x1f4), 0x01000100);
+	mmio_write_32((0xf7124000 + 0x08c + 0 * 4), 0xd0670402);
+	mmio_write_32((0xf7124000 + 0x068 + 0 * 4), 0x31);
+	mmio_write_32((0xf7124000 + 0x000), 0x7);
+
+	data = mmio_read_32((0xf7124000 + 0x09c));
+	data &= ~0xff0000;
+	data |= 0x400000;
+	mmio_write_32((0xf7124000 + 0x09c), data);
+	data = mmio_read_32((0xf7124000 + 0x0ac));
+	data &= ~0xff0000;
+	data |= 0x400000;
+	mmio_write_32((0xf7124000 + 0x0ac), data);
+	port = 2;
+	mmio_write_32((0xf7120000 + 0x200 + port * 0x10), 0x30000);
+	mmio_write_32((0xf7120000 + 0x204 + port * 0x10), 0x1234567);
+	mmio_write_32((0xf7120000 + 0x208 + port * 0x10), 0x1234567);
+
+
+	mmio_write_32((0xf7124000 + 0x09c), 0xff7fff);
+	mmio_write_32((0xf7124000 + 0x0a0), 0xff);
+	mmio_write_32((0xf7124000 + 0x0ac), 0xff7fff);
+	mmio_write_32((0xf7124000 + 0x0b0), 0xff);
+	mmio_write_32((0xf7124000 + 0x0bc), 0x3020100);
+	mmio_write_32((0xf7124000 + 0x0d0), 0x3020100);
+}
+
+static void ddr_phy_reset(void)
+{
+	mmio_write_32(0xf7030340, 0xa000);
+	mmio_write_32(0xf7030344, 0xa000);
+}
+
+void hikey_ddr_init(void)
+{
+	uint32_t data;
+
+	init_pll();
+	init_freq();
+
+	/*
+	 * Initialize DDR at 533MHz first; otherwise, DDR initialization
+	 * at 800MHz may fail on some boards.
+	 */
+	ddr_phy_reset();
+	init_ddr(DDR_FREQ_533M);
+	/* Init DDR with 800MHz. */
+	ddr_phy_reset();
+	init_ddr(DDR_FREQ_800M);
+
+
+	ddrc_common_init(1);
+	dienum_det_and_rowcol_cfg();
+	detect_ddr_chip_info();
+
+	data = mmio_read_32(0xf7032000 + 0x010);
+	data &= ~0x1;
+	mmio_write_32(0xf7032000 + 0x010, data);
+	data = mmio_read_32(0xf7032000 + 0x010);
+
+	/*
+	 * Test memory access. Do not use address 0x0 because the compiler
+	 * may assume it is not a valid address and generate incorrect code
+	 * (GCC 4.9.1 without -fno-delete-null-pointer-checks for instance).
+	 */
+	mmio_write_32(0x4, 0xa5a55a5a);
+	INFO("ddr test value:0x%x\n", mmio_read_32(0x4));
+	init_ddrc_qos();
+}
diff --git a/plat/hisilicon/hikey/hikey_def.h b/plat/hisilicon/hikey/hikey_def.h
new file mode 100644
index 0000000..668b459
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_def.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HIKEY_DEF_H__
+#define __HIKEY_DEF_H__
+
+#include <common_def.h>
+#include <tbbr_img_def.h>
+
+/* Always assume DDR is 1GB size. */
+#define DDR_BASE			0x0
+#define DDR_SIZE			0x40000000
+
+#define DEVICE_BASE			0xF4000000
+#define DEVICE_SIZE			0x05800000
+
+#define XG2RAM0_BASE			0xF9800000
+#define XG2RAM0_SIZE			0x00400000
+
+/* Memory location options for TSP */
+#define HIKEY_SRAM_ID		0
+#define HIKEY_DRAM_ID		1
+
+/*
+ * DDR for OP-TEE (32MB from 0x3E000000-0x3FFFFFFF) is divided in several
+ * regions
+ *   - Secure DDR (default is the top 16MB) used by OP-TEE
+ *   - Non-secure DDR used by OP-TEE (shared memory and padding) (4MB)
+ *   - Secure DDR (4MB aligned on 4MB) for OP-TEE's "Secure Data Path" feature
+ *   - Non-secure DDR (8MB) reserved for OP-TEE's future use
+ */
+#define DDR_SEC_SIZE			0x01000000
+#define DDR_SEC_BASE			(DDR_BASE + DDR_SIZE - DDR_SEC_SIZE) /* 0x3F000000 */
+
+#define DDR_SDP_SIZE			0x00400000
+#define DDR_SDP_BASE			(DDR_SEC_BASE - 0x400000 /* align */ - \
+					DDR_SDP_SIZE)
+
+#define SRAM_BASE			0xFFF80000
+#define SRAM_SIZE			0x00012000
+
+/*
+ * BL1 is stored in XG2RAM0_HIRQ that is 784KB large (0xF980_0000~0xF98C_4000).
+ */
+#define ONCHIPROM_PARAM_BASE		(XG2RAM0_BASE + 0x700)
+#define LOADER_RAM_BASE			(XG2RAM0_BASE + 0x800)
+#define BL1_XG2RAM0_OFFSET		0x1000
+
+/*
+ * PL011 related constants
+ */
+#define PL011_UART0_BASE		0xF8015000
+#define PL011_UART3_BASE		0xF7113000
+#define PL011_BAUDRATE			115200
+#define PL011_UART_CLK_IN_HZ		19200000
+
+#define HIKEY_USB_DESC_BASE		(DDR_BASE + 0x00800000)
+#define HIKEY_USB_DESC_SIZE		0x00100000
+#define HIKEY_USB_DATA_BASE		(DDR_BASE + 0x10000000)
+#define HIKEY_USB_DATA_SIZE		0x10000000
+#define HIKEY_FB_BUFFER_BASE		(HIKEY_USB_DATA_BASE)
+#define HIKEY_FB_BUFFER_SIZE		HIKEY_USB_DATA_SIZE
+#define HIKEY_FB_DOWNLOAD_BASE		(HIKEY_FB_BUFFER_BASE +		\
+					 HIKEY_FB_BUFFER_SIZE)
+#define HIKEY_FB_DOWNLOAD_SIZE		HIKEY_USB_DATA_SIZE
+
+#define HIKEY_USB_DESC_IN_BASE		(DDR_BASE + 0x00800000)
+#define HIKEY_USB_DESC_IN_SIZE		0x00040000
+#define HIKEY_USB_DESC_EP0_OUT_BASE	(HIKEY_USB_DESC_IN_BASE +	\
+					 HIKEY_USB_DESC_IN_SIZE)
+#define HIKEY_USB_DESC_EP0_OUT_SIZE	0x00040000
+#define HIKEY_USB_DESC_EPX_OUT_BASE	(HIKEY_USB_DESC_EP0_OUT_BASE +	\
+					 HIKEY_USB_DESC_EP0_OUT_SIZE)
+#define HIKEY_USB_DESC_EPX_OUT_SIZE	0x00080000
+
+#define HIKEY_MMC_DESC_BASE		(DDR_BASE + 0x03000000)
+#define HIKEY_MMC_DESC_SIZE		0x00100000
+
+/*
+ * HIKEY_MMC_DATA_BASE & HIKEY_MMC_DATA_SIZE are shared between fastboot
+ * and the eMMC driver so that an extra memory copy can be avoided.
+ * The region is therefore used twice: first as a temporary buffer by the
+ * eMMC driver in BL1, then by the MCU in BL2. It must be cleared before
+ * it is used in BL2.
+ */
+#define HIKEY_MMC_DATA_BASE		(DDR_BASE + 0x10000000)
+#define HIKEY_MMC_DATA_SIZE		0x20000000
+#define HIKEY_NS_IMAGE_OFFSET		(DDR_BASE + 0x35000000)
+#define HIKEY_BL1_MMC_DESC_BASE		(SRAM_BASE)
+#define HIKEY_BL1_MMC_DESC_SIZE		0x00001000
+#define HIKEY_BL1_MMC_DATA_BASE		(HIKEY_BL1_MMC_DESC_BASE +	\
+					 HIKEY_BL1_MMC_DESC_SIZE)
+#define HIKEY_BL1_MMC_DATA_SIZE		0x0000B000
+
+#define EMMC_BASE			0
+#define HIKEY_FIP_BASE			(EMMC_BASE + (4 << 20))
+#define HIKEY_FIP_MAX_SIZE		(8 << 20)
+#define HIKEY_EMMC_RPMB_BASE		(EMMC_BASE + 0)
+#define HIKEY_EMMC_RPMB_MAX_SIZE	(128 << 10)
+#define HIKEY_EMMC_USERDATA_BASE	(EMMC_BASE + 0)
+#define HIKEY_EMMC_USERDATA_MAX_SIZE	(4 << 30)
+
+/*
+ * GIC400 interrupt handling related constants
+ */
+#define IRQ_SEC_PHY_TIMER			29
+#define IRQ_SEC_SGI_0				8
+#define IRQ_SEC_SGI_1				9
+#define IRQ_SEC_SGI_2				10
+#define IRQ_SEC_SGI_3				11
+#define IRQ_SEC_SGI_4				12
+#define IRQ_SEC_SGI_5				13
+#define IRQ_SEC_SGI_6				14
+#define IRQ_SEC_SGI_7				15
+#define IRQ_SEC_SGI_8				16
+
+#endif /* __HIKEY_DEF_H__ */
diff --git a/plat/hisilicon/hikey/hikey_image_load.c b/plat/hisilicon/hikey/hikey_image_load.c
new file mode 100644
index 0000000..32ca36d
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_image_load.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/hisilicon/hikey/hikey_io_storage.c b/plat/hisilicon/hikey/hikey_io_storage.c
new file mode 100644
index 0000000..57c66d5
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_io_storage.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <emmc.h>
+#include <errno.h>
+#include <firmware_image_package.h>
+#include <io_block.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <semihosting.h>	/* For FOPEN_MODE_... */
+#include <string.h>
+#include "hikey_private.h"
+
+#define EMMC_BLOCK_SHIFT			9
+
+/* Page 1024, since only a few pages before 2048 are used as partition table */
+#define SERIALNO_EMMC_OFFSET			(1024 * 512)
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+static const io_dev_connector_t *emmc_dev_con;
+static uintptr_t emmc_dev_handle;
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+
+static int check_emmc(const uintptr_t spec);
+static int check_fip(const uintptr_t spec);
+
+static const io_block_spec_t emmc_fip_spec = {
+	.offset		= HIKEY_FIP_BASE,
+	.length		= HIKEY_FIP_MAX_SIZE,
+};
+
+static const io_block_dev_spec_t emmc_dev_spec = {
+	/* It's used as temp buffer in block driver. */
+#ifdef IMAGE_BL1
+	.buffer		= {
+		.offset	= HIKEY_BL1_MMC_DATA_BASE,
+		.length	= HIKEY_BL1_MMC_DATA_SIZE,
+	},
+#else
+	.buffer		= {
+		.offset	= HIKEY_MMC_DATA_BASE,
+		.length	= HIKEY_MMC_DATA_SIZE,
+	},
+#endif
+	.ops		= {
+		.read	= emmc_read_blocks,
+		.write	= emmc_write_blocks,
+	},
+	.block_size	= EMMC_BLOCK_SIZE,
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl32_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl32_extra1_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+};
+
+static const io_uuid_spec_t bl32_extra2_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&emmc_dev_handle,
+		(uintptr_t)&emmc_fip_spec,
+		check_emmc
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		check_fip
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&scp_bl2_uuid_spec,
+		check_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		check_fip
+	},
+	[BL32_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_uuid_spec,
+		check_fip
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra1_uuid_spec,
+		check_fip
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra2_uuid_spec,
+		check_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		check_fip
+	}
+};
+
+static int check_emmc(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_handle;
+
+	result = io_dev_init(emmc_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(emmc_dev_handle, spec, &local_handle);
+		if (result == 0)
+			io_close(local_handle);
+	}
+	return result;
+}
+
+static int check_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
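+/*
+ * Register the eMMC block driver and the FIP driver, and open the device
+ * handles that plat_get_image_source() hands back for image loading.
+ */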
+void hikey_io_setup(void)
+{
+	int result;
+
+	result = register_io_dev_block(&emmc_dev_con);
+	assert(result == 0);
+
+	result = register_io_dev_fip(&fip_dev_con);
+	assert(result == 0);
+
+	result = io_dev_open(emmc_dev_con, (uintptr_t)&emmc_dev_spec,
+			     &emmc_dev_handle);
+	assert(result == 0);
+
+	result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
+	assert(result == 0);
+
+	/* Ignore improbable errors in release builds */
+	(void)result;
+}
+
+/* Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce the platform load policy.
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	int result;
+	const struct plat_io_policy *policy;
+
+	assert(image_id < ARRAY_SIZE(policies));
+
+	policy = &policies[image_id];
+	result = policy->check(policy->image_spec);
+	assert(result == 0);
+
+	*image_spec = policy->image_spec;
+	*dev_handle = *(policy->dev_handle);
+
+	return result;
+}
diff --git a/plat/hisilicon/hikey/hikey_pm.c b/plat/hisilicon/hikey/hikey_pm.c
new file mode 100644
index 0000000..d4dd683
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_pm.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cci.h>
+#include <debug.h>
+#include <gicv2.h>
+#include <hi6220.h>
+#include <hisi_ipc.h>
+#include <hisi_pwrc.h>
+#include <hisi_sram_map.h>
+#include <mmio.h>
+#include <psci.h>
+#include <sp804_delay_timer.h>
+
+#include "hikey_def.h"
+
+#define CORE_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) \
+	((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+static uintptr_t hikey_sec_entrypoint;
+
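+/*
+ * Power up the CPU identified by 'mpidr': power the target cluster on first
+ * if it differs from the calling CPU's cluster, program the warm-boot entry
+ * point for the core, then ask the MCU over IPC to switch the core on.
+ */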
+static int hikey_pwr_domain_on(u_register_t mpidr)
+{
+	int cpu, cluster;
+	int curr_cluster;
+
+	cluster = MPIDR_AFFLVL1_VAL(mpidr);
+	cpu = MPIDR_AFFLVL0_VAL(mpidr);
+	curr_cluster = MPIDR_AFFLVL1_VAL(read_mpidr());
+	if (cluster != curr_cluster)
+		hisi_ipc_cluster_on(cpu, cluster);
+
+	hisi_pwrc_set_core_bx_addr(cpu, cluster, hikey_sec_entrypoint);
+	hisi_pwrc_enable_debug(cpu, cluster);
+	hisi_ipc_cpu_on(cpu, cluster);
+
+	return 0;
+}
+
+static void hikey_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr;
+	int cpu, cluster;
+
+	mpidr = read_mpidr();
+	cluster = MPIDR_AFFLVL1_VAL(mpidr);
+	cpu = MPIDR_AFFLVL0_VAL(mpidr);
+
+	/*
+	 * Enable CCI coherency for this cluster.
+	 * No need for locks as no other cpu is active at the moment.
+	 */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+
+	/* Zero the jump address in the mailbox for this cpu */
+	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);
+
+	/* Program the GIC per-cpu distributor or re-distributor interface */
+	gicv2_pcpu_distif_init();
+	/* Enable the GIC cpu interface */
+	gicv2_cpuif_enable();
+}
+
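+/*
+ * Power down the calling CPU: disable its GIC CPU interface and notify the
+ * MCU; when the composite state also turns the cluster off, take the cluster
+ * out of CCI coherency and request cluster power-down as well.
+ */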
+void hikey_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr;
+	int cpu, cluster;
+
+	mpidr = read_mpidr();
+	cluster = MPIDR_AFFLVL1_VAL(mpidr);
+	cpu = MPIDR_AFFLVL0_VAL(mpidr);
+
+	gicv2_cpuif_disable();
+	hisi_ipc_cpu_off(cpu, cluster);
+
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+		hisi_ipc_cluster_off(cpu, cluster);
+	}
+}
+
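+/*
+ * Prepare the calling CPU for suspend: program its warm-boot entry point,
+ * disable its GIC CPU interface and signal the MCU. Cluster and system level
+ * actions are taken only when the composite state requests them.
+ */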
+static void hikey_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	u_register_t mpidr = read_mpidr_el1();
+	unsigned int cpu = mpidr & MPIDR_CPU_MASK;
+	unsigned int cluster =
+		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	/* Program the jump address for the target cpu */
+	hisi_pwrc_set_core_bx_addr(cpu, cluster, hikey_sec_entrypoint);
+
+	gicv2_cpuif_disable();
+
+	if (SYSTEM_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		hisi_ipc_cpu_suspend(cpu, cluster);
+
+	/* Perform the common cluster specific operations */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+		if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+			hisi_pwrc_set_cluster_wfi(1);
+			hisi_pwrc_set_cluster_wfi(0);
+			hisi_ipc_psci_system_off();
+		} else
+			hisi_ipc_cluster_suspend(cpu, cluster);
+	}
+}
+
+static void hikey_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr;
+	unsigned int cluster, cpu;
+
+	/* Nothing to be done on waking up from retention from CPU level */
+	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	/* Get the mpidr for this cpu */
+	mpidr = read_mpidr_el1();
+	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
+	cpu = mpidr & MPIDR_CPU_MASK;
+
+	/* Enable CCI coherency for cluster */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+
+	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);
+
+	if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		gicv2_distif_init();
+		gicv2_pcpu_distif_init();
+		gicv2_cpuif_enable();
+	} else {
+		gicv2_pcpu_distif_init();
+		gicv2_cpuif_enable();
+	}
+}
+
+static void hikey_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	int i;
+
+	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+static void __dead2 hikey_system_off(void)
+{
+	NOTICE("%s: off system\n", __func__);
+
+	/* Pull down GPIO_0_0 to trigger PMIC shutdown */
+	mmio_write_32(0xF8001810, 0x2); /* Pinmux */
+	mmio_write_8(0xF8011400, 1);	/* Pin direction */
+	mmio_write_8(0xF8011004, 0);	/* Pin output value */
+
+	/* Wait for 2s to power off system by PMIC */
+	sp804_timer_init(SP804_TIMER0_BASE, 10, 192);
+	mdelay(2000);
+
+	/*
+	 * PMIC shutdown depends on two conditions: GPIO_0_0 (PWR_HOLD) low,
+	 * and VBUS_DET < 3.6V. For HiKey, VBUS_DET is connected to VDD_4V2
+	 * through Jumper 1-2. So, to complete shutdown, user needs to manually
+	 * remove Jumper 1-2.
+	 */
+	NOTICE("+------------------------------------------+\n");
+	NOTICE("| IMPORTANT: Remove Jumper 1-2 to shutdown |\n");
+	NOTICE("| DANGER:    SoC is still burning. DANGER! |\n");
+	NOTICE("| Board will be reboot to avoid overheat   |\n");
+	NOTICE("+------------------------------------------+\n");
+
+	/* Send the system reset request */
+	mmio_write_32(AO_SC_SYS_STAT0, 0x48698284);
+
+	wfi();
+	panic();
+}
+
+static void __dead2 hikey_system_reset(void)
+{
+	/* Send the system reset request */
+	mmio_write_32(AO_SC_SYS_STAT0, 0x48698284);
+	isb();
+	dsb();
+
+	wfi();
+	panic();
+}
+
+int hikey_validate_power_state(unsigned int power_state,
+			       psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It's possible to enter standby only at power level 0.
+		 * Ignore any other power level.
+		 */
+		if (pwr_lvl != MPIDR_AFFLVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+					PLAT_MAX_RET_STATE;
+	} else {
+		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_MAX_OFF_STATE;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+static int hikey_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non secure entrypoint lies within the non
+	 * secure DRAM.
+	 */
+	if ((entrypoint > DDR_BASE) && (entrypoint < (DDR_BASE + DDR_SIZE)))
+		return PSCI_E_SUCCESS;
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+static const plat_psci_ops_t hikey_psci_ops = {
+	.cpu_standby			= NULL,
+	.pwr_domain_on			= hikey_pwr_domain_on,
+	.pwr_domain_on_finish		= hikey_pwr_domain_on_finish,
+	.pwr_domain_off			= hikey_pwr_domain_off,
+	.pwr_domain_suspend		= hikey_pwr_domain_suspend,
+	.pwr_domain_suspend_finish	= hikey_pwr_domain_suspend_finish,
+	.system_off			= hikey_system_off,
+	.system_reset			= hikey_system_reset,
+	.validate_power_state		= hikey_validate_power_state,
+	.validate_ns_entrypoint		= hikey_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= hikey_get_sys_suspend_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	hikey_sec_entrypoint = sec_entrypoint;
+
+	/*
+	 * Initialize PSCI ops struct
+	 */
+	*psci_ops = &hikey_psci_ops;
+	return 0;
+}
diff --git a/plat/hisilicon/hikey/hikey_private.h b/plat/hisilicon/hikey/hikey_private.h
new file mode 100644
index 0000000..a7709b2
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_private.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HIKEY_PRIVATE_H__
+#define __HIKEY_PRIVATE_H__
+
+#include <bl_common.h>
+
+#define RANDOM_MAX		0x7fffffffffffffff
+#define RANDOM_MAGIC		0x9a4dbeaf
+
+struct random_serial_num {
+	uint64_t	magic;
+	uint64_t	data;
+	char		serialno[32];
+};
+
+/*
+ * Function and variable prototypes
+ */
+void hikey_init_mmu_el1(unsigned long total_base,
+			unsigned long total_size,
+			unsigned long ro_start,
+			unsigned long ro_limit,
+			unsigned long coh_start,
+			unsigned long coh_limit);
+void hikey_init_mmu_el3(unsigned long total_base,
+			unsigned long total_size,
+			unsigned long ro_start,
+			unsigned long ro_limit,
+			unsigned long coh_start,
+			unsigned long coh_limit);
+
+void hikey_ddr_init(void);
+void hikey_io_setup(void);
+
+int hikey_get_partition_size(const char *arg, int left, char *response);
+int hikey_get_partition_type(const char *arg, int left, char *response);
+
+int hikey_erase(const char *arg);
+int hikey_flash(const char *arg);
+int hikey_oem(const char *arg);
+int hikey_reboot(const char *arg);
+
+const char *hikey_init_serialno(void);
+int hikey_read_serialno(struct random_serial_num *serialno);
+int hikey_write_serialno(struct random_serial_num *serialno);
+
+void init_acpu_dvfs(void);
+
+#endif /* __HIKEY_PRIVATE_H__ */
diff --git a/plat/hisilicon/hikey/hikey_topology.c b/plat/hisilicon/hikey/hikey_topology.c
new file mode 100644
index 0000000..95948b8
--- /dev/null
+++ b/plat/hisilicon/hikey/hikey_topology.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/*
+ * The HiKey power domain tree descriptor. The cluster power domains
+ * are arranged so that when the PSCI generic code creates the power
+ * domain tree, the indices of the CPU power domain nodes it allocates
+ * match the linear indices returned by plat_core_pos_by_mpidr().
+ */
+const unsigned char hikey_power_domain_tree_desc[] = {
+	/* Number of root nodes */
+	1,
+	/* Number of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* Number of children for the first cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+	/* Number of children for the second cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+};
+
+/*******************************************************************************
+ * This function returns the HiKey topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return hikey_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+		return -1;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return -1;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the two clusters present on the platform.
+	 */
+	if (cpu_id >= PLATFORM_CORE_COUNT_PER_CLUSTER)
+		return -1;
+
+	return (cpu_id + (cluster_id * 4));
+}
diff --git a/plat/hisilicon/hikey/hisi_dvfs.c b/plat/hisilicon/hikey/hisi_dvfs.c
new file mode 100644
index 0000000..0d4f893
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_dvfs.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <hi6220.h>
+#include <hi6553.h>
+#include <hisi_sram_map.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+
+#define ACPU_FREQ_MAX_NUM		5
+#define ACPU_OPP_NUM			7
+
+#define ACPU_VALID_VOLTAGE_MAGIC	(0x5A5AC5C5)
+
+#define ACPU_WAIT_TIMEOUT		(200)
+#define ACPU_WAIT_FOR_WFI_TIMEOUT	(2000)
+#define ACPU_DFS_STATE_CNT		(0x10000)
+
+struct acpu_dvfs_sram_stru {
+	unsigned int magic;
+	unsigned int support_freq_num;
+	unsigned int support_freq_max;
+	unsigned int start_prof;
+	unsigned int vol[ACPU_OPP_NUM];
+};
+
+struct acpu_volt_cal_para {
+	unsigned int freq;
+	unsigned int ul_vol;
+	unsigned int dl_vol;
+	unsigned int core_ref_hpm;
+};
+
+struct ddr_volt_cal_para {
+	unsigned int freq;
+	unsigned int ul_vol;
+	unsigned int dl_vol;
+	unsigned int ddr_ref_hpm;
+};
+
+struct acpu_dvfs_opp_para {
+	unsigned int freq;
+	unsigned int acpu_clk_profile0;
+	unsigned int acpu_clk_profile1;
+	unsigned int acpu_vol_profile;
+	unsigned int acpu_pll_freq;
+	unsigned int acpu_pll_frac;
+};
+
+unsigned int efuse_acpu_freq[] = {
+	1200000, 1250000, 1300000, 1350000,
+	1400000, 1450000, 1500000, 1550000,
+	1600000, 1650000, 1700000, 1750000,
+	1800000, 1850000, 1900000, 1950000,
+};
+
+struct acpu_dvfs_opp_para hi6220_acpu_profile[] = {
+	{ 208000,  0x61E5, 0x022, 0x3A, 0x5220102B, 0x05555555 },
+	{ 432000,  0x10A6, 0x121, 0x3A, 0x5120102D, 0x10000005 },
+	{ 729000,  0x2283, 0x100, 0x4A, 0x51101026, 0x10000005 },
+	{ 960000,  0x1211, 0x100, 0x5B, 0x51101032, 0x10000005 },
+	{ 1200000, 0x1211, 0x100, 0x6B, 0x5110207D, 0x10000005 },
+	{ 1400000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
+	{ 1500000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
+};
+
+struct acpu_dvfs_opp_para *acpu_dvfs_profile = hi6220_acpu_profile;
+struct acpu_dvfs_sram_stru *acpu_dvfs_sram_buf =
+	(struct acpu_dvfs_sram_stru *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;
+
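+/*
+ * Read-modify-write helpers: write_reg_mask() replaces the bits selected by
+ * 'mask' with 'val'; read_reg_mask() returns the field selected by
+ * 'mask' << 'offset', shifted back down to bit 0.
+ */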
+static inline void write_reg_mask(uintptr_t addr,
+				  uint32_t val, uint32_t mask)
+{
+	uint32_t reg;
+
+	reg = mmio_read_32(addr);
+	reg = (reg & ~(mask)) | val;
+	mmio_write_32(addr, reg);
+}
+
+static inline uint32_t read_reg_mask(uintptr_t addr,
+				     uint32_t mask, uint32_t offset)
+{
+	uint32_t reg;
+
+	reg = mmio_read_32(addr);
+	reg &= (mask << offset);
+	return (reg >> offset);
+}
+
+static int acpu_dvfs_syspll_cfg(unsigned int prof_id)
+{
+	uint32_t reg0 = 0;
+	uint32_t count = 0;
+	uint32_t clk_div_status = 0;
+
+	/*
+	 * step 1:
+	 *  - ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
+	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
+	 */
+	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x3 << 12, 0x3 << 12);
+	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 4,  0x1 << 4);
+
+	/*
+	 * step 2:
+	 *  - ACPUSYSPLLCFG.acpu_syspll_div_cfg:
+	 *     208MHz, set to 0x5;
+	 *     500MHz, set to 0x2;
+	 *     other opps set to 0x1
+	 */
+	if (prof_id == 0)
+		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x5 << 0, 0x7 << 0);
+	else if (prof_id == 1)
+		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x2 << 0, 0x7 << 0);
+	else
+		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 0, 0x7 << 0);
+
+	/*
+	 * step 3:
+	 *  - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x3;
+	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0
+	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0
+	 *  - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1
+	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
+	 */
+	clk_div_status = 0x3;
+	do {
+		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3, 20);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: clk div status timeout!\n", __func__);
+			return -1;
+		}
+	} while (clk_div_status != reg0);
+
+	write_reg_mask(ACPU_SC_VD_CTRL, 0x0, (0x1 << 0) | (0x1 << 11));
+	write_reg_mask(PMCTRL_ACPUCLKDIV, 0x1 << 8, 0x3 << 8);
+	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x1 << 0, 0x1 << 0);
+
+	return 0;
+}
+
+static void acpu_dvfs_clk_div_cfg(unsigned int prof_id,
+				  unsigned int *cpuext_cfg,
+				  unsigned int *acpu_ddr_cfg)
+{
+	if (prof_id == 0) {
+		write_reg_mask(PMCTRL_ACPUCLKDIV,
+			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
+		*cpuext_cfg = 0x1;
+		*acpu_ddr_cfg = 0x1;
+	} else if (prof_id == 1) {
+		write_reg_mask(PMCTRL_ACPUCLKDIV,
+			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
+		*cpuext_cfg = 0x1;
+		*acpu_ddr_cfg = 0x1;
+	} else {
+		/* ddr has not been inited */
+		write_reg_mask(PMCTRL_ACPUCLKDIV,
+			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x0 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
+			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
+		*cpuext_cfg = 0x1;
+		*acpu_ddr_cfg = 0x0;
+	}
+}
+
+static int acpu_dvfs_freq_ascend(unsigned int cur_prof, unsigned int tar_prof)
+{
+	unsigned int reg0 = 0;
+	unsigned int reg1 = 0;
+	unsigned int reg2 = 0;
+	unsigned int count = 0;
+	unsigned int cpuext_cfg_val = 0;
+	unsigned int acpu_ddr_cfg_val = 0;
+	int ret = 0;
+
+	/*
+	 * step 1:
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
+	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
+	 *
+	 * step 2:
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x5 (208MHz)
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x2 (500MHz)
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x1 (Other OPPs)
+	 *
+	 * step 3:
+	 *  - ACPU_SC_CPU_STAT.clk_div_status_vd = 0x3;
+	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x0;
+	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x0;
+	 *  - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1;
+	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
+	 */
+	ret = acpu_dvfs_syspll_cfg(cur_prof);
+	if (ret)
+		return -1;
+
+	/*
+	 * step 4:
+	 *  - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
+			SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: syspll sw status timeout\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x1);
+
+	/* Enable VD functionality if > 800MHz */
+	if (acpu_dvfs_profile[tar_prof].freq > 800000) {
+
+		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
+			HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);
+
+		/*
+		 * step 5:
+		 *  - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
+		 *  - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
+		 */
+		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
+			HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
+		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
+			ACPU_SC_VD_MASK_PATTERN_VAL,
+			ACPU_SC_VD_MASK_PATTERN_MASK);
+
+		/*
+		 * step 6:
+		 *  - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
+		 *  - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
+		 *  - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
+		 *  - ACPU_SC_VD_DLY_FIXED_CTRL  = 0x1;
+		 */
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
+		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);
+
+		/*
+		 * step 7:
+		 *  - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
+		 *  - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
+		 *  - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
+		 *  - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
+		 *
+		 * step 8:
+		 *  - ACPU_SC_VD_CTRL.tune = 0x7;
+		 */
+		write_reg_mask(ACPU_SC_VD_CTRL,
+			ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
+			ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
+	}
+
+	/* step 9: ACPUPLLCTRL.acpupll_en_cfg = 0x0 */
+	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0,
+		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
+
+	/* step 10: set PMCTRL_ACPUPLLFREQ and PMCTRL_ACPUPLLFRAC */
+	mmio_write_32(PMCTRL_ACPUPLLFREQ,
+		acpu_dvfs_profile[tar_prof].acpu_pll_freq);
+	mmio_write_32(PMCTRL_ACPUPLLFRAC,
+		acpu_dvfs_profile[tar_prof].acpu_pll_frac);
+
+	/*
+	 * step 11:
+	 *  - wait for 1us;
+	 *  - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
+	 */
+	count = 0;
+	while (count < ACPU_WAIT_TIMEOUT)
+		count++;
+
+	write_reg_mask(PMCTRL_ACPUPLLCTRL,
+		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
+		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
+
+	/* step 12: PMCTRL_ACPUVOLPMUADDR = 0x100da */
+	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);
+
+	/*
+	 * step 13:
+	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (208MHz);
+	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (500MHz);
+	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x20 (798MHz);
+	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1300MHz);
+	 *  - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1500MHz);
+	 */
+	write_reg_mask(PMCTRL_ACPUDESTVOL,
+		acpu_dvfs_profile[tar_prof].acpu_vol_profile,
+		((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));
+
+	/*
+	 * step 14:
+	 *  - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
+	 *  - Polling ACPUVOLTIMEOUT.acpu_vol_timeout == 0x1
+	 *  - Config PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg
+	 *  - Config ACPUCLKDIV.cpuext_clk_div_cfg;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
+			SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
+		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
+			SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
+		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
+			SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
+			return -1;
+		}
+	} while ((reg0 != reg1) || (reg2 != 0x1));
+
+	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);
+
+	/*
+	 * step 15:
+	 *  - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
+	 *  - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
+	 *  - ACPUPLLCTRL.acpupll_timeout = 0x1;
+	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
+			SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
+		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
+			SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
+		reg2 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
+			SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
+			return -1;
+		}
+	} while ((reg1 != cpuext_cfg_val) ||
+		(reg0 != acpu_ddr_cfg_val) ||
+		(reg2 != 0x1));
+
+	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
+		0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);
+
+	/*
+	 * step 16:
+	 *  - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
+	 *  - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
+	 *  - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
+	 *  - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
+	 *  - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
+	 *  - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
+	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
+	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
+	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
+			SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpu pll sw status timeout.\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x1);
+
+	if (acpu_dvfs_profile[tar_prof].freq > 800000)
+		write_reg_mask(ACPU_SC_VD_CTRL,
+			ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);
+
+	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
+		(0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
+		(0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));
+
+	return 0;
+}
+
+static int acpu_dvfs_freq_descend(unsigned int cur_prof, unsigned int tar_prof)
+{
+	unsigned int reg0 = 0;
+	unsigned int reg1 = 0;
+	unsigned int reg2 = 0;
+	unsigned int count = 0;
+	unsigned int cpuext_cfg_val = 0;
+	unsigned int acpu_ddr_cfg_val = 0;
+	int ret = 0;
+
+	ret = acpu_dvfs_syspll_cfg(tar_prof);
+	if (ret)
+		return -1;
+
+	/*
+	 * step 4:
+	 *  - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1, 2);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: syspll sw status timeout.\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x1);
+
+	/*
+	 * Step 5:
+	 *  - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x0
+	 */
+	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0, 0x1 << 0);
+
+	/*
+	 * step 6
+	 *  - Config PMCTRL_ACPUPLLFREQ and ACPUPLLFRAC
+	 */
+	mmio_write_32(PMCTRL_ACPUPLLFREQ, acpu_dvfs_profile[tar_prof].acpu_pll_freq);
+	mmio_write_32(PMCTRL_ACPUPLLFRAC, acpu_dvfs_profile[tar_prof].acpu_pll_frac);
+
+	/*
+	 * step 7:
+	 *  - Wait 1us;
+	 *  - Config PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
+	 */
+	count = 0;
+	while (count < ACPU_WAIT_TIMEOUT)
+		count++;
+
+	write_reg_mask(PMCTRL_ACPUPLLCTRL,
+		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
+		0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);
+
+	/* Enable VD functionality if > 800MHz */
+	if (acpu_dvfs_profile[tar_prof].freq > 800000) {
+
+		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
+			HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);
+
+		/*
+		 * step 9:
+		 *  - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
+		 *  - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
+		 */
+		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
+			HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
+		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
+			ACPU_SC_VD_MASK_PATTERN_VAL,
+			ACPU_SC_VD_MASK_PATTERN_MASK);
+
+		/*
+		 * step 10:
+		 *  - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
+		 *  - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
+		 *  - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
+		 *  - ACPU_SC_VD_DLY_FIXED_CTRL  = 0x1;
+		 */
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
+		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
+		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);
+
+		/*
+		 * step 11:
+		 *  - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
+		 *  - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
+		 *  - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
+		 *  - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
+		 *
+		 * step 12:
+		 *  - ACPU_SC_VD_CTRL.tune = 0x7;
+		 */
+		write_reg_mask(ACPU_SC_VD_CTRL,
+			ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
+			ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
+	}
+
+	/*
+	 * step 13:
+	 *  - Polling PMCTRL_ACPUPLLCTRL.acpupll_timeout == 0x1;
+	 *  - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
+			SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpupll timeout.\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x1);
+
+	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
+		0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);
+
+	/*
+	 * step 14:
+	 *  - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
+	 *  - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
+	 *  - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
+	 *  - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
+	 *  - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
+	 *  - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
+	 *  - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
+	 *  - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
+			SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpupll sw status timeout.\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x1);
+
+	if (acpu_dvfs_profile[tar_prof].freq > 800000)
+		write_reg_mask(ACPU_SC_VD_CTRL,
+			ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);
+
+	/*
+	 * step 15:
+	 *  - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
+	 *  - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
+	 */
+	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
+		(0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
+		(0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));
+
+	/*
+	 * step 16:
+	 *  - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x0;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3,
+			ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD_SHIFT);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: clk div status timeout.\n", __func__);
+			return -1;
+		}
+	} while (reg0 != 0x0);
+
+	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);
+
+	/*
+	 * step 17:
+	 *  - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
+	 *  - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
+	 *  - PMCTRL_ACPUVOLPMUADDR = 0x1006C;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
+			SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
+		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
+			SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
+			return -1;
+		}
+	} while ((reg0 != cpuext_cfg_val) || (reg1 != acpu_ddr_cfg_val));
+
+	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);
+
+	/*
+	 * step 18:
+	 *  - Config PMCTRL_ACPUDESTVOL.acpu_dest_vol with the voltage of
+	 *    the target profile;
+	 */
+	write_reg_mask(PMCTRL_ACPUDESTVOL,
+		acpu_dvfs_profile[tar_prof].acpu_vol_profile,
+		((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));
+
+	/*
+	 * step 19:
+	 *  - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
+	 *  - ACPUVOLTIMEOUT.acpu_vol_timeout = 0x1;
+	 */
+	count = 0;
+	do {
+		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
+			SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
+		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
+			SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
+		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
+			SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
+		if ((count++) > ACPU_DFS_STATE_CNT) {
+			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
+			return -1;
+		}
+	} while ((reg0 != reg1) || (reg2 != 0x1));
+
+	return 0;
+}
+
+int acpu_dvfs_target(unsigned int curr_prof, unsigned int target_prof)
+{
+	int ret = 0;
+
+	if (curr_prof == target_prof) {
+		INFO("%s: target_prof is equal curr_prof: is %d!\n",
+			__func__, curr_prof);
+		return 0;
+	}
+
+	if ((curr_prof >= ACPU_FREQ_MAX_NUM) ||
+	    (target_prof >= ACPU_FREQ_MAX_NUM)) {
+		INFO("%s: invalid parameter %d %d\n",
+			__func__, curr_prof, target_prof);
+		return -1;
+	}
+
+	if (target_prof > acpu_dvfs_sram_buf->support_freq_num)
+		target_prof = acpu_dvfs_sram_buf->support_freq_num;
+
+	if (target_prof < curr_prof)
+		ret = acpu_dvfs_freq_descend(curr_prof, target_prof);
+	else if (target_prof > curr_prof)
+		ret = acpu_dvfs_freq_ascend(curr_prof, target_prof);
+
+	if (ret) {
+		ERROR("%s: acpu_dvfs_target failed!\n", __func__);
+		return -1;
+	}
+
+	/* Complete acpu dvfs setting and set magic number */
+	acpu_dvfs_sram_buf->start_prof = target_prof;
+	acpu_dvfs_sram_buf->magic = ACPU_VALID_VOLTAGE_MAGIC;
+
+	mmio_write_32(DDR_DFS_FREQ_ADDR, 800000);
+	return 0;
+}
+
+static int acpu_dvfs_set_freq(void)
+{
+	unsigned int i;
+	unsigned int curr_prof;
+	unsigned int target_prof;
+	unsigned int max_freq = 0;
+
+	max_freq = acpu_dvfs_sram_buf->support_freq_max;
+
+	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++) {
+
+		if (max_freq == hi6220_acpu_profile[i].freq) {
+			target_prof = i;
+			break;
+		}
+	}
+
+	if (i == acpu_dvfs_sram_buf->support_freq_num) {
+		ERROR("%s: cannot found max freq profile\n", __func__);
+		return -1;
+	}
+
+	curr_prof = 0;
+	target_prof = i;
+
+	/* if max freq is 208MHz, do nothing */
+	if (curr_prof == target_prof)
+		return 0;
+
+	if (acpu_dvfs_target(curr_prof, target_prof)) {
+		ERROR("%s: set acpu freq failed!", __func__);
+		return -1;
+	}
+
+	INFO("%s: support freq num is %d\n",
+		__func__, acpu_dvfs_sram_buf->support_freq_num);
+	INFO("%s: start prof is 0x%x\n",
+		__func__,  acpu_dvfs_sram_buf->start_prof);
+	INFO("%s: magic is 0x%x\n",
+		__func__, acpu_dvfs_sram_buf->magic);
+	INFO("%s: voltage:\n", __func__);
+	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++)
+		INFO("  - %d: 0x%x\n", i, acpu_dvfs_sram_buf->vol[i]);
+
+	NOTICE("%s: set acpu freq success!", __func__);
+	return 0;
+}
+
+struct acpu_dvfs_volt_setting {
+	unsigned int magic;
+	unsigned int support_freq_num;
+	unsigned int support_freq_max;
+	unsigned int start_prof;
+	unsigned int vol[7];
+	unsigned int hmp_dly_threshold[7];
+};
+
+static void acpu_dvfs_volt_init(void)
+{
+	struct acpu_dvfs_volt_setting *volt;
+
+	/*
+	 * - set default voltage;
+	 * - set pmu address;
+	 * - set voltage up and down step;
+	 * - set voltage stable time;
+	 */
+	mmio_write_32(PMCTRL_ACPUDFTVOL, 0x4a);
+	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0xda);
+	mmio_write_32(PMCTRL_ACPUVOLUPSTEP, 0x1);
+	mmio_write_32(PMCTRL_ACPUVOLDNSTEP, 0x1);
+	mmio_write_32(PMCTRL_ACPUPMUVOLUPTIME, 0x60);
+	mmio_write_32(PMCTRL_ACPUPMUVOLDNTIME, 0x60);
+	mmio_write_32(PMCTRL_ACPUCLKOFFCFG, 0x1000);
+
+	volt = (void *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;
+	volt->magic = 0x5a5ac5c5;
+	volt->support_freq_num = 5;
+	volt->support_freq_max = 1200000;
+	volt->start_prof = 4;
+	volt->vol[0] = 0x49;
+	volt->vol[1] = 0x49;
+	volt->vol[2] = 0x50;
+	volt->vol[3] = 0x60;
+	volt->vol[4] = 0x78;
+	volt->vol[5] = 0x78;
+	volt->vol[6] = 0x78;
+
+	volt->hmp_dly_threshold[0] = 0x0;
+	volt->hmp_dly_threshold[1] = 0x0;
+	volt->hmp_dly_threshold[2] = 0x0;
+	volt->hmp_dly_threshold[3] = 0x0e8b0e45;
+	volt->hmp_dly_threshold[4] = 0x10691023;
+	volt->hmp_dly_threshold[5] = 0x10691023;
+	volt->hmp_dly_threshold[6] = 0x10691023;
+
+	INFO("%s: success!\n", __func__);
+}
+
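+/*
+ * Publish the per-OPP voltage table to the shared SRAM buffer, program the
+ * PMU voltage interface defaults and raise the ACPU clock to the maximum
+ * supported frequency (capped at 1.2GHz).
+ */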
+void init_acpu_dvfs(void)
+{
+	unsigned int i = 0;
+
+	INFO("%s: pmic version %d\n", __func__,
+	     mmio_read_8(HI6553_VERSION_REG));
+
+	/* init parameters */
+	mmio_write_32(ACPU_CHIP_MAX_FREQ, efuse_acpu_freq[8]);
+	INFO("%s: ACPU_CHIP_MAX_FREQ=0x%x.\n",
+		__func__, mmio_read_32(ACPU_CHIP_MAX_FREQ));
+
+	/* Copy the per-OPP voltages and cap the supported frequency at 1.2GHz */
+	for (i = 0; i < ACPU_FREQ_MAX_NUM; i++)
+		acpu_dvfs_sram_buf->vol[i] = hi6220_acpu_profile[i].acpu_vol_profile;
+
+	acpu_dvfs_sram_buf->support_freq_num = ACPU_FREQ_MAX_NUM;
+	acpu_dvfs_sram_buf->support_freq_max = 1200000;
+
+	/* init acpu dvfs */
+	acpu_dvfs_volt_init();
+	acpu_dvfs_set_freq();
+}
diff --git a/plat/hisilicon/hikey/hisi_ipc.c b/plat/hisilicon/hikey/hisi_ipc.c
new file mode 100644
index 0000000..0469a08
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_ipc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <hisi_ipc.h>
+#include <hisi_sram_map.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+static int ipc_init;
+
+static unsigned int cpu_ipc_num[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT_PER_CLUSTER] = {
+	{
+		HISI_IPC_MCU_INT_SRC_ACPU0_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU1_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU2_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU3_PD,
+	},
+	{
+		HISI_IPC_MCU_INT_SRC_ACPU4_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU5_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU6_PD,
+		HISI_IPC_MCU_INT_SRC_ACPU7_PD,
+	}
+};
+
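+/*
+ * Scan the shared power-down flags of 'cluster': return 0 as soon as any
+ * CPU other than 'cpu' has its flag nibble set to 0x8, otherwise return 1.
+ */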
+int hisi_cpus_pd_in_cluster_besides_curr(unsigned int cpu,
+					 unsigned int cluster)
+{
+	unsigned int val = 0, cpu_val = 0;
+	int i;
+
+	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
+	val = val >> (cluster * 16);
+
+	for (i = 0; i < PLATFORM_CORE_COUNT_PER_CLUSTER; i++) {
+
+		if (cpu == i)
+			continue;
+
+		cpu_val = (val >> (i * 4)) & 0xF;
+		if (cpu_val == 0x8)
+			return 0;
+	}
+
+	return 1;
+}
+
+int hisi_cpus_powered_off_besides_curr(unsigned int cpu)
+{
+	unsigned int val;
+
+	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
+	return (val == (0x8 << (cpu * 4)));
+}
+
+static void hisi_ipc_send(unsigned int ipc_num)
+{
+	if (!ipc_init) {
+		printf("error ipc base is null!!!\n");
+		return;
+	}
+
+	mmio_write_32(HISI_IPC_CPU_RAW_INT_ADDR, 1 << ipc_num);
+}
+
+void hisi_ipc_spin_lock(unsigned int signal)
+{
+	unsigned int hs_ctrl;
+
+	if (signal >= HISI_IPC_INT_SRC_NUM)
+		return;
+
+	do {
+		hs_ctrl = mmio_read_32(HISI_IPC_ACPU_CTRL(signal));
+	} while (hs_ctrl);
+}
+
+void hisi_ipc_spin_unlock(unsigned int signal)
+{
+	if (signal >= HISI_IPC_INT_SRC_NUM)
+		return;
+
+	mmio_write_32(HISI_IPC_ACPU_CTRL(signal), 0);
+}
+
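+/*
+ * Record a power request for 'cpu' in the shared power-down flags (bit 0 of
+ * the core's nibble for HISI_IPC_PM_ON, bit 1 otherwise) under the CPUIDLE
+ * semaphore, then raise the core's IPC interrupt to the MCU.
+ */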
+void hisi_ipc_cpu_on_off(unsigned int cpu, unsigned int cluster,
+			 unsigned int mode)
+{
+	unsigned int val = 0;
+	unsigned int offset;
+
+	if (mode == HISI_IPC_PM_ON)
+		offset = cluster * 16 + cpu * 4;
+	else
+		offset = cluster * 16 + cpu * 4 + 1;
+
+	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
+	val |= (0x01 << offset);
+	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, val);
+	isb();
+	dsb();
+	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
+}
+
+void hisi_ipc_cpu_on(unsigned int cpu, unsigned int cluster)
+{
+	hisi_ipc_cpu_on_off(cpu, cluster, HISI_IPC_PM_ON);
+}
+
+void hisi_ipc_cpu_off(unsigned int cpu, unsigned int cluster)
+{
+	hisi_ipc_cpu_on_off(cpu, cluster, HISI_IPC_PM_OFF);
+}
+
+void hisi_ipc_cluster_on_off(unsigned int cpu, unsigned int cluster,
+			     unsigned int mode)
+{
+	unsigned int val = 0;
+	unsigned int offset;
+
+	if (mode == HISI_IPC_PM_ON)
+		offset = cluster * 4;
+	else
+		offset = cluster * 4 + 1;
+
+	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+	val = mmio_read_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR);
+	val |= (0x01 << offset);
+	mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, val);
+	isb();
+	dsb();
+	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
+}
+
+void hisi_ipc_cluster_on(unsigned int cpu, unsigned int cluster)
+{
+	hisi_ipc_cluster_on_off(cpu, cluster, HISI_IPC_PM_ON);
+}
+
+void hisi_ipc_cluster_off(unsigned int cpu, unsigned int cluster)
+{
+	hisi_ipc_cluster_on_off(cpu, cluster, HISI_IPC_PM_OFF);
+}
+
+void hisi_ipc_cpu_suspend(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int val = 0;
+	unsigned int offset;
+
+	offset = cluster * 16 + cpu * 4 + 2;
+
+	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+	val = mmio_read_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR);
+	val |= (0x01 << offset);
+	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, val);
+	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
+}
+
+void hisi_ipc_cluster_suspend(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int val;
+	unsigned int offset;
+
+	offset = cluster * 4 + 1;
+
+	hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
+	if (hisi_cpus_pd_in_cluster_besides_curr(cpu, cluster)) {
+		val = mmio_read_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR);
+		val |= (0x01 << offset);
+		mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, val);
+	}
+	hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);
+
+	hisi_ipc_send(cpu_ipc_num[cluster][cpu]);
+}
+
+void hisi_ipc_psci_system_off(void)
+{
+	hisi_ipc_send(HISI_IPC_MCU_INT_SRC_ACPU_PD);
+}
+
+int hisi_ipc_init(void)
+{
+	ipc_init = 1;
+
+	mmio_write_32(ACPU_CORE_POWERDOWN_FLAGS_ADDR, 0x8);
+	mmio_write_32(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR, 0x8);
+	return 0;
+}
diff --git a/plat/hisilicon/hikey/hisi_mcu.c b/plat/hisilicon/hikey/hisi_mcu.c
new file mode 100644
index 0000000..359b94d
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_mcu.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <hi6220.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+
+#define MCU_SECTION_MAX		30
+
+enum MCU_IMAGE_SEC_TYPE_ENUM {
+	MCU_IMAGE_SEC_TYPE_TEXT = 0,	/* text section */
+	MCU_IMAGE_SEC_TYPE_DATA,	/* data section */
+	MCU_IMAGE_SEC_TYPE_BUTT
+};
+
+enum MCU_IMAGE_SEC_LOAD_ENUM {
+	MCU_IMAGE_SEC_LOAD_STATIC = 0,
+	MCU_IMAGE_SEC_LOAD_DYNAMIC,
+	MCU_IMAGE_SEC_LOAD_BUFFER,
+	MCU_IMAGE_SEC_LOAD_MODEM_ENTRY,
+	MCU_IMAGE_SEC_LOAD_BUTT
+};
+
+struct mcu_image_sec {
+	unsigned short serial;
+	char type;
+	char load_attr;
+	uint32_t src_offset;		/* offset in image */
+	uint32_t dst_offset;		/* offset in memory */
+	uint32_t size;
+};
+
+struct mcu_image_head {
+	char time_stamp[24];
+	uint32_t image_size;
+	uint32_t secs_num;
+	struct mcu_image_sec secs[MCU_SECTION_MAX];
+};
+
+#define SOC_SRAM_M3_BASE_ADDR		(0xF6000000)
+
+#define MCU_SRAM_SIZE			(0x0000C000)
+#define MCU_CACHE_SIZE			(0x00004000)
+#define MCU_CODE_SIZE			(MCU_SRAM_SIZE - MCU_CACHE_SIZE)
+
+#define MCU_SYS_MEM_ADDR		(0x05E00000)
+#define MCU_SYS_MEM_SIZE		(0x00100000)
+
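+/*
+ * Translate an address in the MCU's view of memory into the AP address map:
+ * MCU code space maps onto the M3 SRAM, the region just above the MCU SRAM
+ * maps onto the MCU system memory in DDR, anything else is used unchanged.
+ */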
+static uint32_t mcu2ap_addr(uint32_t mcu_addr)
+{
+	if (mcu_addr < MCU_CODE_SIZE)
+		return (mcu_addr + SOC_SRAM_M3_BASE_ADDR);
+	else if ((mcu_addr >= MCU_SRAM_SIZE) &&
+		 (mcu_addr < MCU_SRAM_SIZE + MCU_SYS_MEM_SIZE))
+		return mcu_addr - MCU_SRAM_SIZE + MCU_SYS_MEM_ADDR;
+	else
+		return mcu_addr;
+}
+
+static int is_binary_header_invalid(struct mcu_image_head *head,
+				    unsigned int length)
+{
+	/* invalid cases */
+	if ((head->image_size == 0) ||
+	    (head->image_size > length) ||
+	    (head->secs_num > MCU_SECTION_MAX) ||
+	    (head->secs_num == 0))
+		return 1;
+
+	return 0;
+}
+
+static int is_binary_section_invalid(struct mcu_image_sec *sec,
+				     struct mcu_image_head *head)
+{
+	unsigned long ap_dst_offset = 0;
+
+	if ((sec->serial >= head->secs_num) ||
+	    (sec->src_offset + sec->size > head->image_size))
+		return 1;
+
+	if ((sec->type >= MCU_IMAGE_SEC_TYPE_BUTT) ||
+	    (sec->load_attr >= MCU_IMAGE_SEC_LOAD_BUTT))
+		return 1;
+
+	ap_dst_offset = mcu2ap_addr(sec->dst_offset);
+	if ((ap_dst_offset >= SOC_SRAM_M3_BASE_ADDR) &&
+	    (ap_dst_offset < SOC_SRAM_M3_BASE_ADDR + 0x20000 - sec->size))
+		return 0;
+	else if ((ap_dst_offset >= MCU_SYS_MEM_ADDR) &&
+		 (ap_dst_offset < MCU_SYS_MEM_ADDR + MCU_SYS_MEM_SIZE - sec->size))
+		return 0;
+	else if ((ap_dst_offset >= 0xfff8e000) &&
+		 (ap_dst_offset < 0xfff91c00 - sec->size))
+		return 0;
+
+	ERROR("%s: mcu destination address invalid.\n", __func__);
+	ERROR("%s: number=%d, dst offset=%d size=%d\n",
+		__func__, sec->serial, sec->dst_offset, sec->size);
+	return 1;
+}
+
+void hisi_mcu_enable_sram(void)
+{
+	mmio_write_32(AO_SC_PERIPH_CLKEN4,
+		      AO_SC_PERIPH_CLKEN4_HCLK_IPC_S |
+		      AO_SC_PERIPH_CLKEN4_HCLK_IPC_NS);
+
+	/* set register to enable dvfs which is used by mcu */
+	mmio_write_32(PERI_SC_RESERVED8_ADDR, 0x0A001022);
+
+	/* mcu mem is powered on, need de-assert reset */
+	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
+		      AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N);
+
+	/* enable mcu hclk */
+	mmio_write_32(AO_SC_PERIPH_CLKEN4,
+		      AO_SC_PERIPH_CLKEN4_HCLK_MCU |
+		      AO_SC_PERIPH_CLKEN4_CLK_MCU_DAP);
+}
+
+void hisi_mcu_start_run(void)
+{
+	unsigned int val;
+
+	/* set mcu ddr remap configuration */
+	mmio_write_32(AO_SC_MCU_SUBSYS_CTRL2, MCU_SYS_MEM_ADDR);
+
+	/* De-assert the MCU resets so that the MCU starts running */
+	mmio_write_32(AO_SC_PERIPH_RSTDIS4,
+		AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N |
+		AO_SC_PERIPH_RSTDIS4_RESET_MCU_SYS_N |
+		AO_SC_PERIPH_RSTDIS4_RESET_MCU_POR_N |
+		AO_SC_PERIPH_RSTDIS4_RESET_MCU_DAP_N);
+
+	val = mmio_read_32(AO_SC_SYS_CTRL2);
+	mmio_write_32(AO_SC_SYS_CTRL2,
+		val | AO_SC_SYS_CTRL2_GLB_SRST_STAT_CLEAR);
+
+	INFO("%s: AO_SC_SYS_CTRL2=%x\n", __func__,
+		mmio_read_32(AO_SC_SYS_CTRL2));
+}
+
+int hisi_mcu_load_image(uintptr_t image_base, uint32_t image_size)
+{
+	unsigned int i;
+	struct mcu_image_head *head;
+	char *buf;
+
+	head = (struct mcu_image_head *)image_base;
+	if (is_binary_header_invalid(head, image_size)) {
+		ERROR("Invalid %s image header.\n", head->time_stamp);
+		return -1;
+	}
+
+	buf = (char *)head;
+	for (i = 0; i < head->secs_num; i++) {
+
+		int *src, *dst;
+
+		/* check the sections */
+		if (is_binary_section_invalid(&head->secs[i], head)) {
+			ERROR("Invalid mcu section.\n");
+			return -1;
+		}
+
+		/* check if the section is static-loaded */
+		if (head->secs[i].load_attr != MCU_IMAGE_SEC_LOAD_STATIC)
+			continue;
+
+		/* copy the sections */
+		src = (int *)(intptr_t)(buf + head->secs[i].src_offset);
+		dst = (int *)(intptr_t)mcu2ap_addr(head->secs[i].dst_offset);
+
+		memcpy((void *)dst, (void *)src, head->secs[i].size);
+
+		INFO("%s: mcu sections %d:\n", __func__, i);
+		INFO("%s:  src  = 0x%x\n",
+		     __func__, (unsigned int)(uintptr_t)src);
+		INFO("%s:  dst  = 0x%x\n",
+		     __func__, (unsigned int)(uintptr_t)dst);
+		INFO("%s:  size = %d\n", __func__, head->secs[i].size);
+
+		INFO("%s:  [SRC 0x%x] 0x%x 0x%x 0x%x 0x%x\n",
+		     __func__, (unsigned int)(uintptr_t)src,
+		     src[0], src[1], src[2], src[3]);
+		INFO("%s:  [DST 0x%x] 0x%x 0x%x 0x%x 0x%x\n",
+		     __func__, (unsigned int)(uintptr_t)dst,
+		     dst[0], dst[1], dst[2], dst[3]);
+	}
+
+	return 0;
+}
diff --git a/plat/hisilicon/hikey/hisi_pwrc.c b/plat/hisilicon/hikey/hisi_pwrc.c
new file mode 100644
index 0000000..b635fb1
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_pwrc.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+
+#include <hi6220_regs_acpu.h>
+#include <hi6220_regs_ao.h>
+#include <hisi_ipc.h>
+#include <hisi_pwrc.h>
+#include <hisi_sram_map.h>
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <platform_def.h>
+
+#define CLUSTER_CORE_COUNT		(4)
+#define CLUSTER_CORE_MASK		((1 << CLUSTER_CORE_COUNT) - 1)
+
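+/*
+ * Store the warm-boot entry point for the given core in the SRAM mailbox
+ * polled by pm_asm_code; writing 0 clears the entry so the core stays in
+ * its holding pen.
+ */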
+void hisi_pwrc_set_core_bx_addr(unsigned int core, unsigned int cluster,
+				uintptr_t entry_point)
+{
+	uintptr_t *core_entry = (uintptr_t *)PWRCTRL_ACPU_ASM_D_ARM_PARA_AD;
+	unsigned int i;
+
+	if (!core_entry) {
+		INFO("%s: core entry point is null!\n", __func__);
+		return;
+	}
+
+	i = cluster * CLUSTER_CORE_COUNT + core;
+	mmio_write_64((uintptr_t)(core_entry + i), entry_point);
+}
+
+void hisi_pwrc_set_cluster_wfi(unsigned int cluster)
+{
+	unsigned int reg = 0;
+
+	if (cluster == 0) {
+		reg = mmio_read_32(ACPU_SC_SNOOP_PWD);
+		reg |= PD_DETECT_START0;
+		mmio_write_32(ACPU_SC_SNOOP_PWD, reg);
+	} else if (cluster == 1) {
+		reg = mmio_read_32(ACPU_SC_SNOOP_PWD);
+		reg |= PD_DETECT_START1;
+		mmio_write_32(ACPU_SC_SNOOP_PWD, reg);
+	}
+}
+
+void hisi_pwrc_enable_debug(unsigned int core, unsigned int cluster)
+{
+	unsigned int val, enable;
+
+	enable = 1U << (core + PDBGUP_CLUSTER1_SHIFT * cluster);
+
+	/* Enable debug module */
+	val = mmio_read_32(ACPU_SC_PDBGUP_MBIST);
+	mmio_write_32(ACPU_SC_PDBGUP_MBIST, val | enable);
+	do {
+		/* RAW barrier */
+		val = mmio_read_32(ACPU_SC_PDBGUP_MBIST);
+	} while (!(val & enable));
+}
+
+int hisi_pwrc_setup(void)
+{
+	unsigned int reg, sec_entrypoint;
+	extern char pm_asm_code[], pm_asm_code_end[];
+	extern char v7_asm[], v7_asm_end[];
+
+	sec_entrypoint = PWRCTRL_ACPU_ASM_CODE_BASE;
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(0), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(1), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(2), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(3), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(4), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(5), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(6), sec_entrypoint >> 2);
+	mmio_write_32(ACPU_SC_CPUx_RVBARADDR(7), sec_entrypoint >> 2);
+
+	memset((void *)PWRCTRL_ACPU_ASM_SPACE_ADDR, 0, 0x400);
+	memcpy((void *)PWRCTRL_ACPU_ASM_SPACE_ADDR, (void *)v7_asm,
+	       v7_asm_end - v7_asm);
+
+	memcpy((void *)PWRCTRL_ACPU_ASM_CODE_BASE, (void *)pm_asm_code,
+	       pm_asm_code_end - pm_asm_code);
+
+	reg = mmio_read_32(AO_SC_SYS_CTRL1);
+	/* Remap SRAM address for ACPU */
+	reg |= AO_SC_SYS_CTRL1_REMAP_SRAM_AARM |
+	       AO_SC_SYS_CTRL1_REMAP_SRAM_AARM_MSK;
+
+	/* Enable reset signal for watchdog */
+	reg |= AO_SC_SYS_CTRL1_AARM_WD_RST_CFG |
+	       AO_SC_SYS_CTRL1_AARM_WD_RST_CFG_MSK;
+	mmio_write_32(AO_SC_SYS_CTRL1, reg);
+
+	return 0;
+}
diff --git a/plat/hisilicon/hikey/hisi_pwrc_sram.S b/plat/hisilicon/hikey/hisi_pwrc_sram.S
new file mode 100644
index 0000000..62542f2
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_pwrc_sram.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_a53.h>
+#include <hi6220.h>
+#include <hisi_sram_map.h>
+
+	.global pm_asm_code
+	.global pm_asm_code_end
+	.global v7_asm
+	.global v7_asm_end
+
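+/*
+ * Warm-boot entry executed from SRAM: clear the OS lock, apply the
+ * Cortex-A53 CPUACTLR/ACTLR tweaks, then spin on this core's mailbox slot
+ * until a jump address is published and branch to it.
+ */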
+func pm_asm_code _align=3
+	mov	x0, 0
+	msr	oslar_el1, x0
+
+	mrs	x0, CORTEX_A53_CPUACTLR_EL1
+	bic	x0, x0, #(CORTEX_A53_CPUACTLR_EL1_RADIS | \
+				CORTEX_A53_CPUACTLR_EL1_L1RADIS)
+	orr	x0, x0, #0x180000
+	orr	x0, x0, #0xe000
+	msr	CORTEX_A53_CPUACTLR_EL1, x0
+
+	mrs	x3, actlr_el3
+	orr	x3, x3, #ACTLR_EL3_L2ECTLR_BIT
+	msr	actlr_el3, x3
+
+	mrs	x3, actlr_el2
+	orr	x3, x3, #ACTLR_EL2_L2ECTLR_BIT
+	msr	actlr_el2, x3
+
+	ldr	x3, =PWRCTRL_ACPU_ASM_D_ARM_PARA_AD
+	mrs	x0, mpidr_el1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+pen:	ldr	x4, [x3, x0, LSL #3]
+	cbz	x4, pen
+
+	mov	x0, #0x0
+	mov	x1, #0x0
+	mov	x2, #0x0
+	mov	x3, #0x0
+	br	x4
+
+	.ltorg
+
+pm_asm_code_end:
+endfunc pm_asm_code
+
+	/*
+	 * By default, all cores on Hi6220 reset in AArch32 mode. The
+	 * hardcoded AArch32 instructions below request a warm reset into
+	 * AArch64 mode and then wait for it in WFI.
+	 */
+	.align	3
+	.section .rodata.v7_asm, "aS"
+v7_asm:
+	.word	0xE1A00000	// nop
+	.word	0xE3A02003	// mov r2, #3
+	.word	0xEE0C2F50	// mcr 15, 0, r2, cr12, cr0, {2}
+	.word	0xE320F003	// wfi
+
+	.ltorg
+v7_asm_end:
diff --git a/plat/hisilicon/hikey/hisi_sip_svc.c b/plat/hisilicon/hikey/hisi_sip_svc.c
new file mode 100644
index 0000000..b3109d6
--- /dev/null
+++ b/plat/hisilicon/hikey/hisi_sip_svc.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <hisi_sip_svc.h>
+#include <pmf.h>
+#include <runtime_svc.h>
+#include <stdint.h>
+#include <uuid.h>
+
+/* Hisi SiP Service UUID */
+DEFINE_SVC_UUID(hisi_sip_svc_uid,
+		0xe599df74, 0x7682, 0x40aa, 0x9f, 0xf8,
+		0xc0, 0x85, 0x52, 0xbc, 0x39, 0x3f);
+
+static int hisi_sip_setup(void)
+{
+	if (pmf_setup() != 0)
+		return 1;
+	return 0;
+}
+
+/*
+ * This function handles Hisi defined SiP Calls
+ */
+static uintptr_t hisi_sip_handler(unsigned int smc_fid,
+			u_register_t x1,
+			u_register_t x2,
+			u_register_t x3,
+			u_register_t x4,
+			void *cookie,
+			void *handle,
+			u_register_t flags)
+{
+	int call_count = 0;
+
+	/*
+	 * Dispatch PMF calls to PMF SMC handler and return its return
+	 * value
+	 */
+	if (is_pmf_fid(smc_fid)) {
+		return pmf_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+				handle, flags);
+	}
+
+	switch (smc_fid) {
+	case HISI_SIP_SVC_CALL_COUNT:
+		/* PMF calls */
+		call_count += PMF_NUM_SMC_CALLS;
+
+		/* State switch call */
+		call_count += 1;
+
+		SMC_RET1(handle, call_count);
+
+	case HISI_SIP_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, hisi_sip_svc_uid);
+
+	case HISI_SIP_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, HISI_SIP_SVC_VERSION_MAJOR, HISI_SIP_SVC_VERSION_MINOR);
+
+	default:
+		WARN("Unimplemented HISI SiP Service Call: 0x%x \n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	hisi_sip_svc,
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	hisi_sip_setup,
+	hisi_sip_handler
+);
diff --git a/plat/hisilicon/hikey/include/hi6220.h b/plat/hisilicon/hikey/include/hi6220.h
new file mode 100644
index 0000000..fe7720a
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_H__
+#define __HI6220_H__
+
+#include <hi6220_regs_acpu.h>
+#include <hi6220_regs_ao.h>
+#include <hi6220_regs_peri.h>
+#include <hi6220_regs_pin.h>
+#include <hi6220_regs_pmctrl.h>
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL2 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL2_L2ACTLR_BIT		(1 << 6)
+#define ACTLR_EL2_L2ECTLR_BIT		(1 << 5)
+#define ACTLR_EL2_L2CTLR_BIT		(1 << 4)
+#define ACTLR_EL2_CPUECTLR_BIT		(1 << 1)
+#define ACTLR_EL2_CPUACTLR_BIT		(1 << 0)
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL3 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL3_L2ACTLR_BIT		(1 << 6)
+#define ACTLR_EL3_L2ECTLR_BIT		(1 << 5)
+#define ACTLR_EL3_L2CTLR_BIT		(1 << 4)
+#define ACTLR_EL3_CPUECTLR_BIT		(1 << 1)
+#define ACTLR_EL3_CPUACTLR_BIT		(1 << 0)
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define CCI400_BASE				0xF6E90000
+#define CCI400_SL_IFACE3_CLUSTER_IX		3
+#define CCI400_SL_IFACE4_CLUSTER_IX		4
+
+#define DWMMC0_BASE				0xF723D000
+
+#define DWUSB_BASE				0xF72C0000
+
+#define EDMAC_BASE				0xf7370000
+#define EDMAC_SEC_CTRL				(EDMAC_BASE + 0x694)
+#define EDMAC_AXI_CONF(x)			(EDMAC_BASE + 0x820 + (x << 6))
+#define EDMAC_SEC_CTRL_INTR_SEC			(1 << 1)
+#define EDMAC_SEC_CTRL_GLOBAL_SEC		(1 << 0)
+#define EDMAC_CHANNEL_NUMS			16
+
+#define PMUSSI_BASE				0xF8000000
+
+#define SP804_TIMER0_BASE			0xF8008000
+
+#define GPIO0_BASE				0xF8011000
+#define GPIO1_BASE				0xF8012000
+#define GPIO2_BASE				0xF8013000
+#define GPIO3_BASE				0xF8014000
+#define GPIO4_BASE				0xF7020000
+#define GPIO5_BASE				0xF7021000
+#define GPIO6_BASE				0xF7022000
+#define GPIO7_BASE				0xF7023000
+#define GPIO8_BASE				0xF7024000
+#define GPIO9_BASE				0xF7025000
+#define GPIO10_BASE				0xF7026000
+#define GPIO11_BASE				0xF7027000
+#define GPIO12_BASE				0xF7028000
+#define GPIO13_BASE				0xF7029000
+#define GPIO14_BASE				0xF702A000
+#define GPIO15_BASE				0xF702B000
+#define GPIO16_BASE				0xF702C000
+#define GPIO17_BASE				0xF702D000
+#define GPIO18_BASE				0xF702E000
+#define GPIO19_BASE				0xF702F000
+
+#endif	/* __HI6220_H__ */
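
Most of hi6220.h is a flat register map, but the EDMAC_* group deserves a short illustration: EDMAC_SEC_CTRL gates the whole DMA engine while EDMAC_AXI_CONF(x) selects one per-channel register at a 0x40 stride. The sketch below assumes the *_SEC bits mark the engine as secure when set; the helper name and the AXI configuration value are illustrative, not taken from this patch.

    #include <stdint.h>
    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: hand the DMA engine and its channels to the
     * normal world by clearing the secure-control bits. */
    static void hikey_edma_nonsecure(void)
    {
            uint32_t val;
            unsigned int i;

            val = mmio_read_32(EDMAC_SEC_CTRL);
            val &= ~(EDMAC_SEC_CTRL_GLOBAL_SEC | EDMAC_SEC_CTRL_INTR_SEC);
            mmio_write_32(EDMAC_SEC_CTRL, val);

            /* One AXI configuration register per channel, 0x40 bytes apart;
             * the value written here is a placeholder, not a known-good one. */
            for (i = 0; i < EDMAC_CHANNEL_NUMS; i++)
                    mmio_write_32(EDMAC_AXI_CONF(i), (1 << 6) | (1 << 18));
    }
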
diff --git a/plat/hisilicon/hikey/include/hi6220_regs_acpu.h b/plat/hisilicon/hikey/include/hi6220_regs_acpu.h
new file mode 100644
index 0000000..dde9e65
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220_regs_acpu.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_REGS_ACPU_H__
+#define __HI6220_REGS_ACPU_H__
+
+#define ACPU_CTRL_BASE				0xF6504000
+
+#define ACPU_SC_CPU_CTRL			(ACPU_CTRL_BASE + 0x000)
+#define ACPU_SC_CPU_STAT			(ACPU_CTRL_BASE + 0x008)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFIL2		(1 << 0)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFIL2_SHIFT		(0)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI0			(1 << 1)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI0_SHIFT		(1)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI1			(1 << 2)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI1_SHIFT		(2)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI2			(1 << 3)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI2_SHIFT		(3)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI3			(1 << 4)
+#define ACPU_SC_CPU_STAT_SC_STANDBYWFI3_SHIFT		(4)
+#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFIL2		(1 << 8)
+#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFIL2_SHIFT	(8)
+#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFI		(1 << 9)
+#define ACPU_SC_CPU_STAT_A53_1_STANDBYWFI_SHIFT		(9)
+#define ACPU_SC_CPU_STAT_L2FLSHUDONE0			(1 << 16)
+#define ACPU_SC_CPU_STAT_L2FLSHUDONE0_SHIFT		(16)
+#define ACPU_SC_CPU_STAT_L2FLSHUDONE1			(1 << 17)
+#define ACPU_SC_CPU_STAT_L2FLSHUDONE1_SHIFT		(17)
+#define ACPU_SC_CPU_STAT_CCI400_ACTIVE			(1 << 18)
+#define ACPU_SC_CPU_STAT_CCI400_ACTIVE_SHIFT		(18)
+#define ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD		(1 << 20)
+#define ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD_SHIFT	(20)
+
+#define ACPU_SC_CLKEN				(ACPU_CTRL_BASE + 0x00c)
+#define HPM_L2_1_CLKEN				(1 << 9)
+#define G_CPU_1_CLKEN				(1 << 8)
+#define HPM_L2_CLKEN				(1 << 1)
+#define G_CPU_CLKEN				(1 << 0)
+
+#define ACPU_SC_CLKDIS				(ACPU_CTRL_BASE + 0x010)
+#define ACPU_SC_CLK_STAT			(ACPU_CTRL_BASE + 0x014)
+#define ACPU_SC_RSTEN				(ACPU_CTRL_BASE + 0x018)
+#define SRST_PRESET1_RSTEN			(1 << 11)
+#define SRST_PRESET0_RSTEN			(1 << 10)
+#define SRST_CLUSTER1_RSTEN			(1 << 9)
+#define SRST_CLUSTER0_RSTEN			(1 << 8)
+#define SRST_L2_HPM_1_RSTEN			(1 << 5)
+#define SRST_AARM_L2_1_RSTEN			(1 << 4)
+#define SRST_L2_HPM_0_RSTEN			(1 << 3)
+#define SRST_AARM_L2_0_RSTEN			(1 << 1)
+#define SRST_CLUSTER1				(SRST_PRESET1_RSTEN | \
+						 SRST_CLUSTER1_RSTEN | \
+						 SRST_L2_HPM_1_RSTEN | \
+						 SRST_AARM_L2_1_RSTEN)
+#define SRST_CLUSTER0				(SRST_PRESET0_RSTEN | \
+						 SRST_CLUSTER0_RSTEN | \
+						 SRST_L2_HPM_0_RSTEN | \
+						 SRST_AARM_L2_0_RSTEN)
+
+#define ACPU_SC_RSTDIS				(ACPU_CTRL_BASE + 0x01c)
+#define ACPU_SC_RST_STAT			(ACPU_CTRL_BASE + 0x020)
+#define ACPU_SC_PDBGUP_MBIST			(ACPU_CTRL_BASE + 0x02c)
+#define PDBGUP_CLUSTER1_SHIFT			8
+
+#define ACPU_SC_VD_CTRL				(ACPU_CTRL_BASE + 0x054)
+#define ACPU_SC_VD_MASK_PATTERN_CTRL		(ACPU_CTRL_BASE + 0x058)
+#define ACPU_SC_VD_MASK_PATTERN_VAL		(0xCCB << 12)
+#define ACPU_SC_VD_MASK_PATTERN_MASK		((0x1 << 13) - 1)
+
+#define ACPU_SC_VD_DLY_FIXED_CTRL		(ACPU_CTRL_BASE + 0x05c)
+#define ACPU_SC_VD_DLY_TABLE0_CTRL		(ACPU_CTRL_BASE + 0x060)
+#define ACPU_SC_VD_DLY_TABLE1_CTRL		(ACPU_CTRL_BASE + 0x064)
+#define ACPU_SC_VD_DLY_TABLE2_CTRL		(ACPU_CTRL_BASE + 0x068)
+#define ACPU_SC_VD_HPM_CTRL			(ACPU_CTRL_BASE + 0x06c)
+#define ACPU_SC_A53_CLUSTER_MTCMOS_EN		(ACPU_CTRL_BASE + 0x088)
+#define PW_MTCMOS_EN_A53_1_EN			(1 << 1)
+#define PW_MTCMOS_EN_A53_0_EN			(1 << 0)
+
+#define ACPU_SC_A53_CLUSTER_MTCMOS_STA		(ACPU_CTRL_BASE + 0x090)
+#define ACPU_SC_A53_CLUSTER_ISO_EN		(ACPU_CTRL_BASE + 0x098)
+#define PW_ISO_A53_1_EN				(1 << 1)
+#define PW_ISO_A53_0_EN				(1 << 0)
+
+#define ACPU_SC_A53_CLUSTER_ISO_DIS		(ACPU_CTRL_BASE + 0x09c)
+#define ACPU_SC_A53_CLUSTER_ISO_STA		(ACPU_CTRL_BASE + 0x0a0)
+#define ACPU_SC_A53_1_MTCMOS_TIMER		(ACPU_CTRL_BASE + 0x0b4)
+#define ACPU_SC_A53_0_MTCMOS_TIMER		(ACPU_CTRL_BASE + 0x0bc)
+#define ACPU_SC_A53_x_MTCMOS_TIMER(x)		((x) ? ACPU_SC_A53_1_MTCMOS_TIMER : ACPU_SC_A53_0_MTCMOS_TIMER)
+
+#define ACPU_SC_SNOOP_PWD			(ACPU_CTRL_BASE + 0xe4)
+#define PD_DETECT_START1			(1 << 16)
+#define PD_DETECT_START0			(1 << 0)
+
+#define ACPU_SC_CPU0_CTRL			(ACPU_CTRL_BASE + 0x100)
+#define CPU_CTRL_AARCH64_MODE			(1 << 7)
+
+#define ACPU_SC_CPU0_STAT			(ACPU_CTRL_BASE + 0x104)
+#define ACPU_SC_CPU0_CLKEN			(ACPU_CTRL_BASE + 0x108)
+#define CPU_CLKEN_HPM				(1 << 1)
+
+#define ACPU_SC_CPU0_CLK_STAT			(ACPU_CTRL_BASE + 0x110)
+
+#define ACPU_SC_CPU0_RSTEN			(ACPU_CTRL_BASE + 0x114)
+#define ACPU_SC_CPU0_RSTDIS			(ACPU_CTRL_BASE + 0x118)
+#define ACPU_SC_CPU0_MTCMOS_EN			(ACPU_CTRL_BASE + 0x120)
+#define CPU_MTCMOS_PW				(1 << 0)
+
+#define ACPU_SC_CPU0_PW_ISOEN			(ACPU_CTRL_BASE + 0x130)
+#define CPU_PW_ISO				(1 << 0)
+
+#define ACPU_SC_CPU0_PW_ISODIS			(ACPU_CTRL_BASE + 0x134)
+#define ACPU_SC_CPU0_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x138)
+#define ACPU_SC_CPU0_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x154)
+#define CPU_MTCMOS_TIMER_STA			(1 << 0)
+
+#define ACPU_SC_CPU0_RVBARADDR			(ACPU_CTRL_BASE + 0x158)
+#define ACPU_SC_CPU1_CTRL			(ACPU_CTRL_BASE + 0x200)
+#define ACPU_SC_CPU1_STAT			(ACPU_CTRL_BASE + 0x204)
+#define ACPU_SC_CPU1_CLKEN			(ACPU_CTRL_BASE + 0x208)
+#define ACPU_SC_CPU1_CLK_STAT			(ACPU_CTRL_BASE + 0x210)
+#define ACPU_SC_CPU1_RSTEN			(ACPU_CTRL_BASE + 0x214)
+#define ACPU_SC_CPU1_RSTDIS			(ACPU_CTRL_BASE + 0x218)
+#define ACPU_SC_CPU1_MTCMOS_EN			(ACPU_CTRL_BASE + 0x220)
+#define ACPU_SC_CPU1_PW_ISODIS			(ACPU_CTRL_BASE + 0x234)
+#define ACPU_SC_CPU1_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x238)
+#define ACPU_SC_CPU1_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x254)
+#define ACPU_SC_CPU1_RVBARADDR			(ACPU_CTRL_BASE + 0x258)
+#define ACPU_SC_CPU2_CTRL			(ACPU_CTRL_BASE + 0x300)
+#define ACPU_SC_CPU2_STAT			(ACPU_CTRL_BASE + 0x304)
+#define ACPU_SC_CPU2_CLKEN			(ACPU_CTRL_BASE + 0x308)
+#define ACPU_SC_CPU2_CLK_STAT			(ACPU_CTRL_BASE + 0x310)
+#define ACPU_SC_CPU2_RSTEN			(ACPU_CTRL_BASE + 0x314)
+#define ACPU_SC_CPU2_RSTDIS			(ACPU_CTRL_BASE + 0x318)
+#define ACPU_SC_CPU2_MTCMOS_EN			(ACPU_CTRL_BASE + 0x320)
+#define ACPU_SC_CPU2_PW_ISODIS			(ACPU_CTRL_BASE + 0x334)
+#define ACPU_SC_CPU2_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x338)
+#define ACPU_SC_CPU2_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x354)
+#define ACPU_SC_CPU2_RVBARADDR			(ACPU_CTRL_BASE + 0x358)
+#define ACPU_SC_CPU3_CTRL			(ACPU_CTRL_BASE + 0x400)
+#define ACPU_SC_CPU3_STAT			(ACPU_CTRL_BASE + 0x404)
+#define ACPU_SC_CPU3_CLKEN			(ACPU_CTRL_BASE + 0x408)
+#define ACPU_SC_CPU3_CLK_STAT			(ACPU_CTRL_BASE + 0x410)
+#define ACPU_SC_CPU3_RSTEN			(ACPU_CTRL_BASE + 0x414)
+#define ACPU_SC_CPU3_RSTDIS			(ACPU_CTRL_BASE + 0x418)
+#define ACPU_SC_CPU3_MTCMOS_EN			(ACPU_CTRL_BASE + 0x420)
+#define ACPU_SC_CPU3_PW_ISODIS			(ACPU_CTRL_BASE + 0x434)
+#define ACPU_SC_CPU3_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x438)
+#define ACPU_SC_CPU3_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x454)
+#define ACPU_SC_CPU3_RVBARADDR			(ACPU_CTRL_BASE + 0x458)
+#define ACPU_SC_CPU4_CTRL			(ACPU_CTRL_BASE + 0x500)
+#define ACPU_SC_CPU4_STAT			(ACPU_CTRL_BASE + 0x504)
+#define ACPU_SC_CPU4_CLKEN			(ACPU_CTRL_BASE + 0x508)
+#define ACPU_SC_CPU4_CLK_STAT			(ACPU_CTRL_BASE + 0x510)
+#define ACPU_SC_CPU4_RSTEN			(ACPU_CTRL_BASE + 0x514)
+#define ACPU_SC_CPU4_RSTDIS			(ACPU_CTRL_BASE + 0x518)
+#define ACPU_SC_CPU4_MTCMOS_EN			(ACPU_CTRL_BASE + 0x520)
+#define ACPU_SC_CPU4_PW_ISODIS			(ACPU_CTRL_BASE + 0x534)
+#define ACPU_SC_CPU4_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x538)
+#define ACPU_SC_CPU4_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x554)
+#define ACPU_SC_CPU4_RVBARADDR			(ACPU_CTRL_BASE + 0x558)
+#define ACPU_SC_CPU5_CTRL			(ACPU_CTRL_BASE + 0x600)
+#define ACPU_SC_CPU5_STAT			(ACPU_CTRL_BASE + 0x604)
+#define ACPU_SC_CPU5_CLKEN			(ACPU_CTRL_BASE + 0x608)
+#define ACPU_SC_CPU5_CLK_STAT			(ACPU_CTRL_BASE + 0x610)
+#define ACPU_SC_CPU5_RSTEN			(ACPU_CTRL_BASE + 0x614)
+#define ACPU_SC_CPU5_RSTDIS			(ACPU_CTRL_BASE + 0x618)
+#define ACPU_SC_CPU5_MTCMOS_EN			(ACPU_CTRL_BASE + 0x620)
+#define ACPU_SC_CPU5_PW_ISODIS			(ACPU_CTRL_BASE + 0x634)
+#define ACPU_SC_CPU5_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x638)
+#define ACPU_SC_CPU5_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x654)
+#define ACPU_SC_CPU5_RVBARADDR			(ACPU_CTRL_BASE + 0x658)
+#define ACPU_SC_CPU6_CTRL			(ACPU_CTRL_BASE + 0x700)
+#define ACPU_SC_CPU6_STAT			(ACPU_CTRL_BASE + 0x704)
+#define ACPU_SC_CPU6_CLKEN			(ACPU_CTRL_BASE + 0x708)
+#define ACPU_SC_CPU6_CLK_STAT			(ACPU_CTRL_BASE + 0x710)
+#define ACPU_SC_CPU6_RSTEN			(ACPU_CTRL_BASE + 0x714)
+#define ACPU_SC_CPU6_RSTDIS			(ACPU_CTRL_BASE + 0x718)
+#define ACPU_SC_CPU6_MTCMOS_EN			(ACPU_CTRL_BASE + 0x720)
+#define ACPU_SC_CPU6_PW_ISODIS			(ACPU_CTRL_BASE + 0x734)
+#define ACPU_SC_CPU6_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x738)
+#define ACPU_SC_CPU6_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x754)
+#define ACPU_SC_CPU6_RVBARADDR			(ACPU_CTRL_BASE + 0x758)
+#define ACPU_SC_CPU7_CTRL			(ACPU_CTRL_BASE + 0x800)
+#define ACPU_SC_CPU7_STAT			(ACPU_CTRL_BASE + 0x804)
+#define ACPU_SC_CPU7_CLKEN			(ACPU_CTRL_BASE + 0x808)
+#define ACPU_SC_CPU7_CLK_STAT			(ACPU_CTRL_BASE + 0x810)
+#define ACPU_SC_CPU7_RSTEN			(ACPU_CTRL_BASE + 0x814)
+#define ACPU_SC_CPU7_RSTDIS			(ACPU_CTRL_BASE + 0x818)
+#define ACPU_SC_CPU7_MTCMOS_EN			(ACPU_CTRL_BASE + 0x820)
+#define ACPU_SC_CPU7_PW_ISODIS			(ACPU_CTRL_BASE + 0x834)
+#define ACPU_SC_CPU7_PW_ISO_STAT		(ACPU_CTRL_BASE + 0x838)
+#define ACPU_SC_CPU7_MTCMOS_TIMER_STAT		(ACPU_CTRL_BASE + 0x854)
+#define ACPU_SC_CPU7_RVBARADDR			(ACPU_CTRL_BASE + 0x858)
+#define ACPU_SC_CPUx_CTRL(x)			((x < 8) ? (ACPU_SC_CPU0_CTRL + 0x100 * x) : ACPU_SC_CPU0_CTRL)
+#define ACPU_SC_CPUx_STAT(x)			((x < 8) ? (ACPU_SC_CPU0_STAT + 0x100 * x) : ACPU_SC_CPU0_STAT)
+#define ACPU_SC_CPUx_CLKEN(x)			((x < 8) ? (ACPU_SC_CPU0_CLKEN + 0x100 * x) : ACPU_SC_CPU0_CLKEN)
+#define ACPU_SC_CPUx_CLK_STAT(x)		((x < 8) ? (ACPU_SC_CPU0_CLK_STAT + 0x100 * x) : ACPU_SC_CPU0_CLK_STAT)
+#define ACPU_SC_CPUx_RSTEN(x)			((x < 8) ? (ACPU_SC_CPU0_RSTEN + 0x100 * x) : ACPU_SC_CPU0_RSTEN)
+#define ACPU_SC_CPUx_RSTDIS(x)			((x < 8) ? (ACPU_SC_CPU0_RSTDIS + 0x100 * x) : ACPU_SC_CPU0_RSTDIS)
+#define ACPU_SC_CPUx_MTCMOS_EN(x)		((x < 8) ? (ACPU_SC_CPU0_MTCMOS_EN + 0x100 * x) : ACPU_SC_CPU0_MTCMOS_EN)
+#define ACPU_SC_CPUx_PW_ISODIS(x)		((x < 8) ? (ACPU_SC_CPU0_PW_ISODIS + 0x100 * x) : ACPU_SC_CPU0_PW_ISODIS)
+#define ACPU_SC_CPUx_PW_ISO_STAT(x)		((x < 8) ? (ACPU_SC_CPU0_PW_ISO_STAT + 0x100 * x) : ACPU_SC_CPU0_PW_ISO_STAT)
+#define ACPU_SC_CPUx_MTCMOS_TIMER_STAT(x)	((x < 8) ? (ACPU_SC_CPU0_MTCMOS_TIMER_STAT + 0x100 * x) : ACPU_SC_CPU0_MTCMOS_TIMER_STAT)
+#define ACPU_SC_CPUx_RVBARADDR(x)		((x < 8) ? (ACPU_SC_CPU0_RVBARADDR + 0x100 * x) : ACPU_SC_CPU0_RVBARADDR)
+
+#define ACPU_SC_CPU_STAT_CLKDIV_VD_MASK		(3 << 20)
+
+#define ACPU_SC_VD_CTRL_TUNE_EN_DIF		(1 << 0)
+#define ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT	(0)
+#define ACPU_SC_VD_CTRL_TUNE			(1 << 1)
+#define ACPU_SC_VD_CTRL_TUNE_SHIFT		(1)
+#define ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF	(1 << 7)
+#define ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT	(7)
+#define ACPU_SC_VD_CTRL_CALIBRATE_EN_INI	(1 << 8)
+#define ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT	(8)
+#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_CLR		(1 << 9)
+#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_CLR_SHIFT	(9)
+#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN		(1 << 10)
+#define ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT	(10)
+#define ACPU_SC_VD_CTRL_TUNE_EN_INT		(1 << 11)
+#define ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT	(11)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE0		(1 << 12)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE0_MASK	(0xf << 12)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT	(12)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE1		(1 << 16)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE1_MASK	(0xf << 16)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT	(16)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE2		(1 << 20)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE2_MASK	(0xf << 20)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT	(20)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE3		(1 << 24)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE3_MASK	(0xf << 24)
+#define ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT	(24)
+#define ACPU_SC_VD_CTRL_FORCE_CLK_EN		(1 << 28)
+#define ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT	(28)
+#define ACPU_SC_VD_CTRL_DIV_EN_DIF		(1 << 29)
+#define ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT	(29)
+
+#define ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL			\
+	((0x1 << ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT) |	\
+	 (0x3 << ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT) |	\
+	 (0x5 << ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT) |	\
+	 (0x6 << ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT) |	\
+	 (0x7 << ACPU_SC_VD_CTRL_TUNE_SHIFT))
+
+#define ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK		\
+	((0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE0_SHIFT) |	\
+	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE1_SHIFT) |	\
+	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE2_SHIFT) |	\
+	 (0xF << ACPU_SC_VD_CTRL_SHIFT_TABLE3_SHIFT) |	\
+	 (0x3F << ACPU_SC_VD_CTRL_TUNE_SHIFT))
+
+#define ACPU_SC_VD_HPM_CTRL_OSC_DIV		(1 << 0)
+#define ACPU_SC_VD_HPM_CTRL_OSC_DIV_SHIFT	(0)
+#define ACPU_SC_VD_HPM_CTRL_OSC_DIV_MASK	(0x000000FF)
+#define ACPU_SC_VD_HPM_CTRL_DLY_EXP		(1 << 8)
+#define ACPU_SC_VD_HPM_CTRL_DLY_EXP_SHIFT	(8)
+#define ACPU_SC_VD_HPM_CTRL_DLY_EXP_MASK	(0x001FFF00)
+
+#define HPM_OSC_DIV_VAL \
+	(0x56 << ACPU_SC_VD_HPM_CTRL_OSC_DIV_SHIFT)
+#define HPM_OSC_DIV_MASK \
+	(ACPU_SC_VD_HPM_CTRL_OSC_DIV_MASK)
+
+#define HPM_DLY_EXP_VAL \
+	(0xC7A << ACPU_SC_VD_HPM_CTRL_DLY_EXP_SHIFT)
+#define HPM_DLY_EXP_MASK \
+	(ACPU_SC_VD_HPM_CTRL_DLY_EXP_MASK)
+
+#define ACPU_SC_VD_EN_ASIC_VAL					\
+	((0x0 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
+
+#define ACPU_SC_VD_EN_SFT_VAL					\
+	((0x0 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
+	 (0x0 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
+	 (0x0 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
+
+#define ACPU_SC_VD_EN_MASK					\
+	((0x1 << ACPU_SC_VD_CTRL_FORCE_CLK_EN_SHIFT) |		\
+	 (0x1 << ACPU_SC_VD_CTRL_CLK_DIS_CNT_EN_SHIFT) |	\
+	 (0x1 << ACPU_SC_VD_CTRL_CALIBRATE_EN_INI_SHIFT) |	\
+	 (0x1 << ACPU_SC_VD_CTRL_CALIBRATE_EN_DIF_SHIFT) |	\
+	 (0x1 << ACPU_SC_VD_CTRL_DIV_EN_DIF_SHIFT) |		\
+	 (0x1 << ACPU_SC_VD_CTRL_TUNE_EN_INT_SHIFT) |		\
+	 (0x1 << ACPU_SC_VD_CTRL_TUNE_EN_DIF_SHIFT))
+
+#endif /* __HI6220_REGS_ACPU_H__ */
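
The CPU0 to CPU7 register blocks above repeat every 0x100 bytes, which is exactly what the ACPU_SC_CPUx_*() accessors encode, with out-of-range indices falling back to core 0. A minimal illustration of that pattern, assuming CPU_CTRL_AARCH64_MODE behaves as its name suggests (the helper itself is hypothetical):

    #include <stdint.h>
    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: request AArch64 state for the given core's next
     * reset by setting the mode bit in its per-core control register. */
    static void acpu_set_aarch64(unsigned int core)
    {
            uint32_t val = mmio_read_32(ACPU_SC_CPUx_CTRL(core));

            mmio_write_32(ACPU_SC_CPUx_CTRL(core), val | CPU_CTRL_AARCH64_MODE);
    }
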
diff --git a/plat/hisilicon/hikey/include/hi6220_regs_ao.h b/plat/hisilicon/hikey/include/hi6220_regs_ao.h
new file mode 100644
index 0000000..79a5404
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220_regs_ao.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_AO_H__
+#define __HI6220_AO_H__
+
+#define AO_CTRL_BASE				0xF7800000
+
+#define AO_SC_SYS_CTRL0				(AO_CTRL_BASE + 0x000)
+#define AO_SC_SYS_CTRL1				(AO_CTRL_BASE + 0x004)
+#define AO_SC_SYS_CTRL2				(AO_CTRL_BASE + 0x008)
+#define AO_SC_SYS_STAT0				(AO_CTRL_BASE + 0x010)
+#define AO_SC_SYS_STAT1				(AO_CTRL_BASE + 0x014)
+#define AO_SC_MCU_IMCTRL			(AO_CTRL_BASE + 0x018)
+#define AO_SC_MCU_IMSTAT			(AO_CTRL_BASE + 0x01C)
+#define AO_SC_SECONDRY_INT_EN0			(AO_CTRL_BASE + 0x044)
+#define AO_SC_SECONDRY_INT_STATR0		(AO_CTRL_BASE + 0x048)
+#define AO_SC_SECONDRY_INT_STATM0		(AO_CTRL_BASE + 0x04C)
+#define AO_SC_MCU_WKUP_INT_EN6			(AO_CTRL_BASE + 0x054)
+#define AO_SC_MCU_WKUP_INT_STATR6		(AO_CTRL_BASE + 0x058)
+#define AO_SC_MCU_WKUP_INT_STATM6		(AO_CTRL_BASE + 0x05C)
+#define AO_SC_MCU_WKUP_INT_EN5			(AO_CTRL_BASE + 0x064)
+#define AO_SC_MCU_WKUP_INT_STATR5		(AO_CTRL_BASE + 0x068)
+#define AO_SC_MCU_WKUP_INT_STATM5		(AO_CTRL_BASE + 0x06C)
+#define AO_SC_MCU_WKUP_INT_EN4			(AO_CTRL_BASE + 0x094)
+#define AO_SC_MCU_WKUP_INT_STATR4		(AO_CTRL_BASE + 0x098)
+#define AO_SC_MCU_WKUP_INT_STATM4		(AO_CTRL_BASE + 0x09C)
+#define AO_SC_MCU_WKUP_INT_EN0			(AO_CTRL_BASE + 0x0A8)
+#define AO_SC_MCU_WKUP_INT_STATR0		(AO_CTRL_BASE + 0x0AC)
+#define AO_SC_MCU_WKUP_INT_STATM0		(AO_CTRL_BASE + 0x0B0)
+#define AO_SC_MCU_WKUP_INT_EN1			(AO_CTRL_BASE + 0x0B4)
+#define AO_SC_MCU_WKUP_INT_STATR1		(AO_CTRL_BASE + 0x0B8)
+#define AO_SC_MCU_WKUP_INT_STATM1		(AO_CTRL_BASE + 0x0BC)
+#define AO_SC_INT_STATR				(AO_CTRL_BASE + 0x0C4)
+#define AO_SC_INT_STATM				(AO_CTRL_BASE + 0x0C8)
+#define AO_SC_INT_CLEAR				(AO_CTRL_BASE + 0x0CC)
+#define AO_SC_INT_EN_SET			(AO_CTRL_BASE + 0x0D0)
+#define AO_SC_INT_EN_DIS			(AO_CTRL_BASE + 0x0D4)
+#define AO_SC_INT_EN_STAT			(AO_CTRL_BASE + 0x0D8)
+#define AO_SC_INT_STATR1			(AO_CTRL_BASE + 0x0E4)
+#define AO_SC_INT_STATM1			(AO_CTRL_BASE + 0x0E8)
+#define AO_SC_INT_CLEAR1			(AO_CTRL_BASE + 0x0EC)
+#define AO_SC_INT_EN_SET1			(AO_CTRL_BASE + 0x0F0)
+#define AO_SC_INT_EN_DIS1			(AO_CTRL_BASE + 0x0F4)
+#define AO_SC_INT_EN_STAT1			(AO_CTRL_BASE + 0x0F8)
+#define AO_SC_TIMER_EN0				(AO_CTRL_BASE + 0x1D0)
+#define AO_SC_TIMER_EN1				(AO_CTRL_BASE + 0x1D4)
+#define AO_SC_TIMER_EN4				(AO_CTRL_BASE + 0x1F0)
+#define AO_SC_TIMER_EN5				(AO_CTRL_BASE + 0x1F4)
+#define AO_SC_MCU_SUBSYS_CTRL0			(AO_CTRL_BASE + 0x400)
+#define AO_SC_MCU_SUBSYS_CTRL1			(AO_CTRL_BASE + 0x404)
+#define AO_SC_MCU_SUBSYS_CTRL2			(AO_CTRL_BASE + 0x408)
+#define AO_SC_MCU_SUBSYS_CTRL3			(AO_CTRL_BASE + 0x40C)
+#define AO_SC_MCU_SUBSYS_CTRL4			(AO_CTRL_BASE + 0x410)
+#define AO_SC_MCU_SUBSYS_CTRL5			(AO_CTRL_BASE + 0x414)
+#define AO_SC_MCU_SUBSYS_CTRL6			(AO_CTRL_BASE + 0x418)
+#define AO_SC_MCU_SUBSYS_CTRL7			(AO_CTRL_BASE + 0x41C)
+#define AO_SC_MCU_SUBSYS_STAT0			(AO_CTRL_BASE + 0x440)
+#define AO_SC_MCU_SUBSYS_STAT1			(AO_CTRL_BASE + 0x444)
+#define AO_SC_MCU_SUBSYS_STAT2			(AO_CTRL_BASE + 0x448)
+#define AO_SC_MCU_SUBSYS_STAT3			(AO_CTRL_BASE + 0x44C)
+#define AO_SC_MCU_SUBSYS_STAT4			(AO_CTRL_BASE + 0x450)
+#define AO_SC_MCU_SUBSYS_STAT5			(AO_CTRL_BASE + 0x454)
+#define AO_SC_MCU_SUBSYS_STAT6			(AO_CTRL_BASE + 0x458)
+#define AO_SC_MCU_SUBSYS_STAT7			(AO_CTRL_BASE + 0x45C)
+#define AO_SC_PERIPH_CLKEN4			(AO_CTRL_BASE + 0x630)
+#define AO_SC_PERIPH_CLKDIS4			(AO_CTRL_BASE + 0x634)
+#define AO_SC_PERIPH_CLKSTAT4			(AO_CTRL_BASE + 0x638)
+#define AO_SC_PERIPH_CLKEN5			(AO_CTRL_BASE + 0x63C)
+#define AO_SC_PERIPH_CLKDIS5			(AO_CTRL_BASE + 0x640)
+#define AO_SC_PERIPH_CLKSTAT5			(AO_CTRL_BASE + 0x644)
+#define AO_SC_PERIPH_RSTEN4			(AO_CTRL_BASE + 0x6F0)
+#define AO_SC_PERIPH_RSTDIS4			(AO_CTRL_BASE + 0x6F4)
+#define AO_SC_PERIPH_RSTSTAT4			(AO_CTRL_BASE + 0x6F8)
+#define AO_SC_PERIPH_RSTEN5			(AO_CTRL_BASE + 0x6FC)
+#define AO_SC_PERIPH_RSTDIS5			(AO_CTRL_BASE + 0x700)
+#define AO_SC_PERIPH_RSTSTAT5			(AO_CTRL_BASE + 0x704)
+#define AO_SC_PW_CLKEN0				(AO_CTRL_BASE + 0x800)
+#define AO_SC_PW_CLKDIS0			(AO_CTRL_BASE + 0x804)
+#define AO_SC_PW_CLK_STAT0			(AO_CTRL_BASE + 0x808)
+#define AO_SC_PW_RSTEN0				(AO_CTRL_BASE + 0x810)
+#define AO_SC_PW_RSTDIS0			(AO_CTRL_BASE + 0x814)
+#define AO_SC_PW_RST_STAT0			(AO_CTRL_BASE + 0x818)
+#define AO_SC_PW_ISOEN0				(AO_CTRL_BASE + 0x820)
+#define AO_SC_PW_ISODIS0			(AO_CTRL_BASE + 0x824)
+#define AO_SC_PW_ISO_STAT0			(AO_CTRL_BASE + 0x828)
+#define AO_SC_PW_MTCMOS_EN0			(AO_CTRL_BASE + 0x830)
+#define AO_SC_PW_MTCMOS_DIS0			(AO_CTRL_BASE + 0x834)
+#define AO_SC_PW_MTCMOS_STAT0			(AO_CTRL_BASE + 0x838)
+#define AO_SC_PW_MTCMOS_ACK_STAT0		(AO_CTRL_BASE + 0x83C)
+#define AO_SC_PW_MTCMOS_TIMEOUT_STAT0		(AO_CTRL_BASE + 0x840)
+#define AO_SC_PW_STAT0				(AO_CTRL_BASE + 0x850)
+#define AO_SC_PW_STAT1				(AO_CTRL_BASE + 0x854)
+#define AO_SC_SYSTEST_STAT			(AO_CTRL_BASE + 0x880)
+#define AO_SC_SYSTEST_SLICER_CNT0		(AO_CTRL_BASE + 0x890)
+#define AO_SC_SYSTEST_SLICER_CNT1		(AO_CTRL_BASE + 0x894)
+#define AO_SC_PW_CTRL1				(AO_CTRL_BASE + 0x8C8)
+#define AO_SC_PW_CTRL				(AO_CTRL_BASE + 0x8CC)
+#define AO_SC_MCPU_VOTEEN			(AO_CTRL_BASE + 0x8D0)
+#define AO_SC_MCPU_VOTEDIS			(AO_CTRL_BASE + 0x8D4)
+#define AO_SC_MCPU_VOTESTAT			(AO_CTRL_BASE + 0x8D8)
+#define AO_SC_MCPU_VOTE_MSK0			(AO_CTRL_BASE + 0x8E0)
+#define AO_SC_MCPU_VOTE_MSK1			(AO_CTRL_BASE + 0x8E4)
+#define AO_SC_MCPU_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x8E8)
+#define AO_SC_MCPU_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x8EC)
+#define AO_SC_PERI_VOTEEN			(AO_CTRL_BASE + 0x8F0)
+#define AO_SC_PERI_VOTEDIS			(AO_CTRL_BASE + 0x8F4)
+#define AO_SC_PERI_VOTESTAT			(AO_CTRL_BASE + 0x8F8)
+#define AO_SC_PERI_VOTE_MSK0			(AO_CTRL_BASE + 0x900)
+#define AO_SC_PERI_VOTE_MSK1			(AO_CTRL_BASE + 0x904)
+#define AO_SC_PERI_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x908)
+#define AO_SC_PERI_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x90C)
+#define AO_SC_ACPU_VOTEEN			(AO_CTRL_BASE + 0x910)
+#define AO_SC_ACPU_VOTEDIS			(AO_CTRL_BASE + 0x914)
+#define AO_SC_ACPU_VOTESTAT			(AO_CTRL_BASE + 0x918)
+#define AO_SC_ACPU_VOTE_MSK0			(AO_CTRL_BASE + 0x920)
+#define AO_SC_ACPU_VOTE_MSK1			(AO_CTRL_BASE + 0x924)
+#define AO_SC_ACPU_VOTESTAT0_MSK		(AO_CTRL_BASE + 0x928)
+#define AO_SC_ACPU_VOTESTAT1_MSK		(AO_CTRL_BASE + 0x92C)
+#define AO_SC_MCU_VOTEEN			(AO_CTRL_BASE + 0x930)
+#define AO_SC_MCU_VOTEDIS			(AO_CTRL_BASE + 0x934)
+#define AO_SC_MCU_VOTESTAT			(AO_CTRL_BASE + 0x938)
+#define AO_SC_MCU_VOTE_MSK0			(AO_CTRL_BASE + 0x940)
+#define AO_SC_MCU_VOTE_MSK1			(AO_CTRL_BASE + 0x944)
+#define AO_SC_MCU_VOTESTAT0_MSK			(AO_CTRL_BASE + 0x948)
+#define AO_SC_MCU_VOTESTAT1_MSK			(AO_CTRL_BASE + 0x94C)
+#define AO_SC_MCU_VOTE1EN			(AO_CTRL_BASE + 0x960)
+#define AO_SC_MCU_VOTE1DIS			(AO_CTRL_BASE + 0x964)
+#define AO_SC_MCU_VOTE1STAT			(AO_CTRL_BASE + 0x968)
+#define AO_SC_MCU_VOTE1_MSK0			(AO_CTRL_BASE + 0x970)
+#define AO_SC_MCU_VOTE1_MSK1			(AO_CTRL_BASE + 0x974)
+#define AO_SC_MCU_VOTE1STAT0_MSK		(AO_CTRL_BASE + 0x978)
+#define AO_SC_MCU_VOTE1STAT1_MSK		(AO_CTRL_BASE + 0x97C)
+#define AO_SC_MCU_VOTE2EN			(AO_CTRL_BASE + 0x980)
+#define AO_SC_MCU_VOTE2DIS			(AO_CTRL_BASE + 0x984)
+#define AO_SC_MCU_VOTE2STAT			(AO_CTRL_BASE + 0x988)
+#define AO_SC_MCU_VOTE2_MSK0			(AO_CTRL_BASE + 0x990)
+#define AO_SC_MCU_VOTE2_MSK1			(AO_CTRL_BASE + 0x994)
+#define AO_SC_MCU_VOTE2STAT0_MSK		(AO_CTRL_BASE + 0x998)
+#define AO_SC_MCU_VOTE2STAT1_MSK		(AO_CTRL_BASE + 0x99C)
+#define AO_SC_VOTE_CTRL				(AO_CTRL_BASE + 0x9A0)
+#define AO_SC_VOTE_STAT				(AO_CTRL_BASE + 0x9A4)
+#define AO_SC_ECONUM				(AO_CTRL_BASE + 0xF00)
+#define AO_SCCHIPID				(AO_CTRL_BASE + 0xF10)
+#define AO_SCSOCID				(AO_CTRL_BASE + 0xF1C)
+#define AO_SC_SOC_FPGA_RTL_DEF			(AO_CTRL_BASE + 0xFE0)
+#define AO_SC_SOC_FPGA_PR_DEF			(AO_CTRL_BASE + 0xFE4)
+#define AO_SC_SOC_FPGA_RES_DEF0			(AO_CTRL_BASE + 0xFE8)
+#define AO_SC_SOC_FPGA_RES_DEF1			(AO_CTRL_BASE + 0xFEC)
+#define AO_SC_XTAL_CTRL0			(AO_CTRL_BASE + 0x102)
+#define AO_SC_XTAL_CTRL1			(AO_CTRL_BASE + 0x102)
+#define AO_SC_XTAL_CTRL3			(AO_CTRL_BASE + 0x103)
+#define AO_SC_XTAL_CTRL5			(AO_CTRL_BASE + 0x103)
+#define AO_SC_XTAL_STAT0			(AO_CTRL_BASE + 0x106)
+#define AO_SC_XTAL_STAT1			(AO_CTRL_BASE + 0x107)
+#define AO_SC_EFUSE_CHIPID0			(AO_CTRL_BASE + 0x108)
+#define AO_SC_EFUSE_CHIPID1			(AO_CTRL_BASE + 0x108)
+#define AO_SC_EFUSE_SYS_CTRL			(AO_CTRL_BASE + 0x108)
+#define AO_SC_DEBUG_CTRL1			(AO_CTRL_BASE + 0x128)
+#define AO_SC_DBG_STAT				(AO_CTRL_BASE + 0x12B)
+#define AO_SC_ARM_DBG_KEY0			(AO_CTRL_BASE + 0x12B)
+#define AO_SC_RESERVED31			(AO_CTRL_BASE + 0x13A)
+#define AO_SC_RESERVED32			(AO_CTRL_BASE + 0x13A)
+#define AO_SC_RESERVED33			(AO_CTRL_BASE + 0x13A)
+#define AO_SC_RESERVED34			(AO_CTRL_BASE + 0x13A)
+#define AO_SC_RESERVED35			(AO_CTRL_BASE + 0x13B)
+#define AO_SC_RESERVED36			(AO_CTRL_BASE + 0x13B)
+#define AO_SC_RESERVED37			(AO_CTRL_BASE + 0x13B)
+#define AO_SC_RESERVED38			(AO_CTRL_BASE + 0x13B)
+#define AO_SC_ALWAYSON_SYS_CTRL0		(AO_CTRL_BASE + 0x148)
+#define AO_SC_ALWAYSON_SYS_CTRL1		(AO_CTRL_BASE + 0x148)
+#define AO_SC_ALWAYSON_SYS_CTRL2		(AO_CTRL_BASE + 0x148)
+#define AO_SC_ALWAYSON_SYS_CTRL3		(AO_CTRL_BASE + 0x148)
+#define AO_SC_ALWAYSON_SYS_CTRL10		(AO_CTRL_BASE + 0x14A)
+#define AO_SC_ALWAYSON_SYS_CTRL11		(AO_CTRL_BASE + 0x14A)
+#define AO_SC_ALWAYSON_SYS_STAT0		(AO_CTRL_BASE + 0x14C)
+#define AO_SC_ALWAYSON_SYS_STAT1		(AO_CTRL_BASE + 0x14C)
+#define AO_SC_ALWAYSON_SYS_STAT2		(AO_CTRL_BASE + 0x14C)
+#define AO_SC_ALWAYSON_SYS_STAT3		(AO_CTRL_BASE + 0x14C)
+#define AO_SC_PWUP_TIME0			(AO_CTRL_BASE + 0x188)
+#define AO_SC_PWUP_TIME1			(AO_CTRL_BASE + 0x188)
+#define AO_SC_PWUP_TIME2			(AO_CTRL_BASE + 0x188)
+#define AO_SC_PWUP_TIME3			(AO_CTRL_BASE + 0x188)
+#define AO_SC_PWUP_TIME4			(AO_CTRL_BASE + 0x189)
+#define AO_SC_PWUP_TIME5			(AO_CTRL_BASE + 0x189)
+#define AO_SC_PWUP_TIME6			(AO_CTRL_BASE + 0x189)
+#define AO_SC_PWUP_TIME7			(AO_CTRL_BASE + 0x189)
+#define AO_SC_SECURITY_CTRL1			(AO_CTRL_BASE + 0x1C0)
+#define AO_SC_SYSTEST_SLICER_CNT0		(AO_CTRL_BASE + 0x890)
+#define AO_SC_SYSTEST_SLICER_CNT1		(AO_CTRL_BASE + 0x894)
+
+#define AO_SC_SYS_CTRL0_MODE_NORMAL				0x004
+#define AO_SC_SYS_CTRL0_MODE_MASK				0x007
+
+#define AO_SC_SYS_CTRL1_AARM_WD_RST_CFG				(1 << 0)
+#define AO_SC_SYS_CTRL1_REMAP_SRAM_AARM				(1 << 1)
+#define AO_SC_SYS_CTRL1_EFUSEC_REMAP				(1 << 2)
+#define AO_SC_SYS_CTRL1_EXT_PLL_SEL				(1 << 3)
+#define AO_SC_SYS_CTRL1_MCU_WDG0_RSTMCU_CFG			(1 << 4)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_DE_BOUNCE_CFG			(1 << 6)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_CFG			(1 << 7)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_DE_BOUNCE_CFG			(1 << 8)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_CFG			(1 << 9)
+#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG			(1 << 10)
+#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG1			(1 << 11)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_SFT			(1 << 12)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_SFT			(1 << 13)
+#define AO_SC_SYS_CTRL1_MCU_CLKEN_HARDCFG			(1 << 15)
+#define AO_SC_SYS_CTRL1_AARM_WD_RST_CFG_MSK			(1 << 16)
+#define AO_SC_SYS_CTRL1_REMAP_SRAM_AARM_MSK			(1 << 17)
+#define AO_SC_SYS_CTRL1_EFUSEC_REMAP_MSK			(1 << 18)
+#define AO_SC_SYS_CTRL1_EXT_PLL_SEL_MSK				(1 << 19)
+#define AO_SC_SYS_CTRL1_MCU_WDG0_RSTMCU_CFG_MSK			(1 << 20)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_DE_BOUNCE_CFG_MSK		(1 << 22)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_CFG_MSK			(1 << 23)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_DE_BOUNCE_CFG_MSK		(1 << 24)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_CFG_MSK			(1 << 25)
+#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG_MSK			(1 << 26)
+#define AO_SC_SYS_CTRL1_BUS_DFS_FORE_HD_CFG1_MSK		(1 << 27)
+#define AO_SC_SYS_CTRL1_USIM0_HPD_OE_SFT_MSK			(1 << 28)
+#define AO_SC_SYS_CTRL1_USIM1_HPD_OE_SFT_MSK			(1 << 29)
+#define AO_SC_SYS_CTRL1_MCU_CLKEN_HARDCFG_MSK			(1 << 31)
+
+#define AO_SC_SYS_CTRL2_MCU_SFT_RST_STAT_CLEAR			(1 << 26)
+#define AO_SC_SYS_CTRL2_MCU_WDG0_RST_STAT_CLEAR			(1 << 27)
+#define AO_SC_SYS_CTRL2_TSENSOR_RST_STAT_CLEAR			(1 << 28)
+#define AO_SC_SYS_CTRL2_ACPU_WDG_RST_STAT_CLEAR			(1 << 29)
+#define AO_SC_SYS_CTRL2_MCU_WDG1_RST_STAT_CLEAR			(1 << 30)
+#define AO_SC_SYS_CTRL2_GLB_SRST_STAT_CLEAR			(1 << 31)
+
+#define AO_SC_SYS_STAT0_MCU_RST_STAT				(1 << 25)
+#define AO_SC_SYS_STAT0_MCU_SOFTRST_STAT			(1 << 26)
+#define AO_SC_SYS_STAT0_MCU_WDGRST_STAT				(1 << 27)
+#define AO_SC_SYS_STAT0_TSENSOR_HARDRST_STAT			(1 << 28)
+#define AO_SC_SYS_STAT0_ACPU_WD_GLB_RST_STAT			(1 << 29)
+#define AO_SC_SYS_STAT0_CM3_WDG1_RST_STAT			(1 << 30)
+#define AO_SC_SYS_STAT0_GLB_SRST_STAT				(1 << 31)
+
+#define AO_SC_SYS_STAT1_MODE_STATUS				(1 << 0)
+#define AO_SC_SYS_STAT1_BOOT_SEL_LOCK				(1 << 16)
+#define AO_SC_SYS_STAT1_FUNC_MODE_LOCK				(1 << 17)
+#define AO_SC_SYS_STAT1_BOOT_MODE_LOCK				(1 << 19)
+#define AO_SC_SYS_STAT1_FUN_JTAG_MODE_OUT			(1 << 20)
+#define AO_SC_SYS_STAT1_SECURITY_BOOT_FLG			(1 << 27)
+#define AO_SC_SYS_STAT1_EFUSE_NANDBOOT_MSK			(1 << 28)
+#define AO_SC_SYS_STAT1_EFUSE_NAND_BITWIDE			(1 << 29)
+
+#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_ECTR_N			(1 << 0)
+#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_SYS_N			(1 << 1)
+#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_POR_N			(1 << 2)
+#define AO_SC_PERIPH_RSTDIS4_RESET_MCU_DAP_N			(1 << 3)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_TIMER0_N		(1 << 4)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_TIMER1_N		(1 << 5)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_WDT0_N			(1 << 6)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_CM3_WDT1_N			(1 << 7)
+#define AO_SC_PERIPH_RSTDIS4_HRESET_IPC_S_N			(1 << 8)
+#define AO_SC_PERIPH_RSTDIS4_HRESET_IPC_NS_N			(1 << 9)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_EFUSEC_N			(1 << 10)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT0_N			(1 << 12)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT1_N			(1 << 13)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_WDT2_N			(1 << 14)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER0_N			(1 << 15)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER1_N			(1 << 16)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER2_N			(1 << 17)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER3_N			(1 << 18)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER4_N			(1 << 19)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER5_N			(1 << 20)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER6_N			(1 << 21)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER7_N			(1 << 22)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_TIMER8_N			(1 << 23)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_UART0_N			(1 << 24)
+#define AO_SC_PERIPH_RSTDIS4_RESET_RTC0_N			(1 << 25)
+#define AO_SC_PERIPH_RSTDIS4_RESET_RTC1_N			(1 << 26)
+#define AO_SC_PERIPH_RSTDIS4_PRESET_PMUSSI_N			(1 << 27)
+#define AO_SC_PERIPH_RSTDIS4_RESET_JTAG_AUTH_N			(1 << 28)
+#define AO_SC_PERIPH_RSTDIS4_RESET_CS_DAPB_ON_N			(1 << 29)
+#define AO_SC_PERIPH_RSTDIS4_MDM_SUBSYS_GLB			(1 << 30)
+
+#define AO_SC_PERIPH_CLKEN4_HCLK_MCU				(1 << 0)
+#define AO_SC_PERIPH_CLKEN4_CLK_MCU_DAP				(1 << 3)
+#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_TIMER0			(1 << 4)
+#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_TIMER1			(1 << 5)
+#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_WDT0			(1 << 6)
+#define AO_SC_PERIPH_CLKEN4_PCLK_CM3_WDT1			(1 << 7)
+#define AO_SC_PERIPH_CLKEN4_HCLK_IPC_S				(1 << 8)
+#define AO_SC_PERIPH_CLKEN4_HCLK_IPC_NS				(1 << 9)
+#define AO_SC_PERIPH_CLKEN4_PCLK_EFUSEC				(1 << 10)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TZPC				(1 << 11)
+#define AO_SC_PERIPH_CLKEN4_PCLK_WDT0				(1 << 12)
+#define AO_SC_PERIPH_CLKEN4_PCLK_WDT1				(1 << 13)
+#define AO_SC_PERIPH_CLKEN4_PCLK_WDT2				(1 << 14)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER0				(1 << 15)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER1				(1 << 16)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER2				(1 << 17)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER3				(1 << 18)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER4				(1 << 19)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER5				(1 << 20)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER6				(1 << 21)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER7				(1 << 22)
+#define AO_SC_PERIPH_CLKEN4_PCLK_TIMER8				(1 << 23)
+#define AO_SC_PERIPH_CLKEN4_CLK_UART0				(1 << 24)
+#define AO_SC_PERIPH_CLKEN4_CLK_RTC0				(1 << 25)
+#define AO_SC_PERIPH_CLKEN4_CLK_RTC1				(1 << 26)
+#define AO_SC_PERIPH_CLKEN4_PCLK_PMUSSI				(1 << 27)
+#define AO_SC_PERIPH_CLKEN4_CLK_JTAG_AUTH			(1 << 28)
+#define AO_SC_PERIPH_CLKEN4_CLK_CS_DAPB_ON			(1 << 29)
+#define AO_SC_PERIPH_CLKEN4_CLK_PDM				(1 << 30)
+#define AO_SC_PERIPH_CLKEN4_CLK_SSI_PAD				(1 << 31)
+
+#define AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_CCPU			(1 << 0)
+#define AO_SC_PERIPH_CLKEN5_PCLK_EFUSEC_CCPU			(1 << 1)
+#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_CCPU			(1 << 2)
+#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_NS_CCPU			(1 << 3)
+#define AO_SC_PERIPH_CLKEN5_PCLK_PMUSSI_MCU			(1 << 16)
+#define AO_SC_PERIPH_CLKEN5_PCLK_EFUSEC_MCU			(1 << 17)
+#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_MCU			(1 << 18)
+#define AO_SC_PERIPH_CLKEN5_HCLK_IPC_NS_MCU			(1 << 19)
+
+#define AO_SC_MCU_SUBSYS_CTRL3_RCLK_3				0x003
+#define AO_SC_MCU_SUBSYS_CTRL3_RCLK_MASK			0x007
+#define AO_SC_MCU_SUBSYS_CTRL3_CSSYS_CTRL_PROT			(1 << 3)
+#define AO_SC_MCU_SUBSYS_CTRL3_TCXO_AFC_OEN_CRG			(1 << 4)
+#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_USIM1		(1 << 8)
+#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_USIM0		(1 << 9)
+#define AO_SC_MCU_SUBSYS_CTRL3_AOB_IO_SEL18_SD			(1 << 10)
+#define AO_SC_MCU_SUBSYS_CTRL3_MCU_SUBSYS_CTRL3_RESERVED	(1 << 11)
+
+#define PCLK_TIMER1						(1 << 16)
+#define PCLK_TIMER0						(1 << 15)
+
+#endif /* __HI6220_AO_H__ */
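
AO_SC_SYS_CTRL1 follows the familiar HiSilicon write-mask layout: each ..._CFG bit in the lower half is paired with a ..._CFG_MSK bit sixteen positions higher, and a field only changes when its mask bit is set in the same write, so no read-modify-write is required. Assuming that convention holds for this block, a single-field update looks like the hypothetical helper below:

    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: set one SYS_CTRL1 field without disturbing the rest. */
    static void ao_set_aarm_wd_rst_cfg(void)
    {
            mmio_write_32(AO_SC_SYS_CTRL1,
                          AO_SC_SYS_CTRL1_AARM_WD_RST_CFG |
                          AO_SC_SYS_CTRL1_AARM_WD_RST_CFG_MSK);
    }
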
diff --git a/plat/hisilicon/hikey/include/hi6220_regs_peri.h b/plat/hisilicon/hikey/include/hi6220_regs_peri.h
new file mode 100644
index 0000000..d2c0460
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220_regs_peri.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_PERI_H__
+#define __HI6220_PERI_H__
+
+#define PERI_BASE				0xF7030000
+
+#define PERI_SC_PERIPH_CTRL1			(PERI_BASE + 0x000)
+#define PERI_SC_PERIPH_CTRL2			(PERI_BASE + 0x004)
+#define PERI_SC_PERIPH_CTRL3			(PERI_BASE + 0x008)
+#define PERI_SC_PERIPH_CTRL4			(PERI_BASE + 0x00c)
+#define PERI_SC_PERIPH_CTRL5			(PERI_BASE + 0x010)
+#define PERI_SC_PERIPH_CTRL6			(PERI_BASE + 0x014)
+#define PERI_SC_PERIPH_CTRL8			(PERI_BASE + 0x018)
+#define PERI_SC_PERIPH_CTRL9			(PERI_BASE + 0x01c)
+#define PERI_SC_PERIPH_CTRL10			(PERI_BASE + 0x020)
+#define PERI_SC_PERIPH_CTRL12			(PERI_BASE + 0x024)
+#define PERI_SC_PERIPH_CTRL13			(PERI_BASE + 0x028)
+#define PERI_SC_PERIPH_CTRL14			(PERI_BASE + 0x02c)
+
+#define PERI_SC_DDR_CTRL0			(PERI_BASE + 0x050)
+#define PERI_SC_PERIPH_STAT1			(PERI_BASE + 0x094)
+
+#define PERI_SC_PERIPH_CLKEN0			(PERI_BASE + 0x200)
+#define PERI_SC_PERIPH_CLKDIS0			(PERI_BASE + 0x204)
+#define PERI_SC_PERIPH_CLKSTAT0			(PERI_BASE + 0x208)
+#define PERI_SC_PERIPH_CLKEN1			(PERI_BASE + 0x210)
+#define PERI_SC_PERIPH_CLKDIS1			(PERI_BASE + 0x214)
+#define PERI_SC_PERIPH_CLKSTAT1			(PERI_BASE + 0x218)
+#define PERI_SC_PERIPH_CLKEN2			(PERI_BASE + 0x220)
+#define PERI_SC_PERIPH_CLKDIS2			(PERI_BASE + 0x224)
+#define PERI_SC_PERIPH_CLKSTAT2			(PERI_BASE + 0x228)
+#define PERI_SC_PERIPH_CLKEN3			(PERI_BASE + 0x230)
+#define PERI_SC_PERIPH_CLKDIS3			(PERI_BASE + 0x234)
+#define PERI_SC_PERIPH_CLKSTAT3			(PERI_BASE + 0x238)
+#define PERI_SC_PERIPH_CLKEN8			(PERI_BASE + 0x240)
+#define PERI_SC_PERIPH_CLKDIS8			(PERI_BASE + 0x244)
+#define PERI_SC_PERIPH_CLKSTAT8			(PERI_BASE + 0x248)
+#define PERI_SC_PERIPH_CLKEN9			(PERI_BASE + 0x250)
+#define PERI_SC_PERIPH_CLKDIS9			(PERI_BASE + 0x254)
+#define PERI_SC_PERIPH_CLKSTAT9			(PERI_BASE + 0x258)
+#define PERI_SC_PERIPH_CLKEN10			(PERI_BASE + 0x260)
+#define PERI_SC_PERIPH_CLKDIS10			(PERI_BASE + 0x264)
+#define PERI_SC_PERIPH_CLKSTAT10		(PERI_BASE + 0x268)
+#define PERI_SC_PERIPH_CLKEN12			(PERI_BASE + 0x270)
+#define PERI_SC_PERIPH_CLKDIS12			(PERI_BASE + 0x274)
+#define PERI_SC_PERIPH_CLKSTAT12		(PERI_BASE + 0x278)
+
+#define PERI_SC_PERIPH_RSTEN0			(PERI_BASE + 0x300)
+#define PERI_SC_PERIPH_RSTDIS0			(PERI_BASE + 0x304)
+#define PERI_SC_PERIPH_RSTSTAT0			(PERI_BASE + 0x308)
+#define PERI_SC_PERIPH_RSTEN1			(PERI_BASE + 0x310)
+#define PERI_SC_PERIPH_RSTDIS1			(PERI_BASE + 0x314)
+#define PERI_SC_PERIPH_RSTSTAT1			(PERI_BASE + 0x318)
+#define PERI_SC_PERIPH_RSTEN2			(PERI_BASE + 0x320)
+#define PERI_SC_PERIPH_RSTDIS2			(PERI_BASE + 0x324)
+#define PERI_SC_PERIPH_RSTSTAT2			(PERI_BASE + 0x328)
+#define PERI_SC_PERIPH_RSTEN3			(PERI_BASE + 0x330)
+#define PERI_SC_PERIPH_RSTDIS3			(PERI_BASE + 0x334)
+#define PERI_SC_PERIPH_RSTSTAT3			(PERI_BASE + 0x338)
+#define PERI_SC_PERIPH_RSTEN8			(PERI_BASE + 0x340)
+#define PERI_SC_PERIPH_RSTDIS8			(PERI_BASE + 0x344)
+#define PERI_SC_PERIPH_RSTSTAT8			(PERI_BASE + 0x338)
+
+#define PERI_SC_CLK_SEL0			(PERI_BASE + 0x400)
+#define PERI_SC_CLKCFG8BIT1			(PERI_BASE + 0x494)
+#define PERI_SC_CLKCFG8BIT2			(PERI_BASE + 0x498)
+#define PERI_SC_RESERVED8_ADDR			(PERI_BASE + 0xd04)
+
+/* PERI_SC_PERIPH_CTRL1 */
+#define PERI_CTRL1_ETR_AXI_CSYSREQ_N		(1 << 0)
+#define PERI_CTRL1_HIFI_INT_MASK		(1 << 1)
+#define PERI_CTRL1_HIFI_ALL_INT_MASK		(1 << 2)
+#define PERI_CTRL1_ETR_AXI_CSYSREQ_N_MSK	(1 << 16)
+#define PERI_CTRL1_HIFI_INT_MASK_MSK		(1 << 17)
+#define PERI_CTRL1_HIFI_ALL_INT_MASK_MSK	(1 << 18)
+
+/* PERI_SC_PERIPH_CTRL2	*/
+#define PERI_CTRL2_MMC_CLK_PHASE_BYPASS_EN_MMC0	(1 << 0)
+#define PERI_CTRL2_MMC_CLK_PHASE_BYPASS_EN_MMC1	(1 << 2)
+#define PERI_CTRL2_NAND_SYS_MEM_SEL		(1 << 6)
+#define PERI_CTRL2_G3D_DDRT_AXI_SEL		(1 << 7)
+#define PERI_CTRL2_GU_MDM_BBP_TESTPIN_SEL	(1 << 8)
+#define PERI_CTRL2_CODEC_SSI_MASTER_CHECK	(1 << 9)
+#define PERI_CTRL2_FUNC_TEST_SOFT		(1 << 12)
+#define PERI_CTRL2_CSSYS_TS_ENABLE		(1 << 15)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_EMA		(1 << 16)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_EMAW		(1 << 20)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_EMAS		(1 << 22)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_RET1N		(1 << 26)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_RET2N		(1 << 27)
+#define PERI_CTRL2_HIFI_RAMCTRL_S_PGEN		(1 << 28)
+
+/* PERI_SC_PERIPH_CTRL3 */
+#define PERI_CTRL3_HIFI_DDR_HARQMEM_ADDR	(1 << 0)
+#define PERI_CTRL3_HIFI_HARQMEMRMP_EN		(1 << 12)
+#define PERI_CTRL3_HARQMEM_SYS_MED_SEL		(1 << 13)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP1		(1 << 14)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP2		(1 << 16)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP3		(1 << 18)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP4		(1 << 20)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP5		(1 << 22)
+#define PERI_CTRL3_SOC_AP_OCCUPY_GRP6		(1 << 24)
+
+/* PERI_SC_PERIPH_CTRL4 */
+#define PERI_CTRL4_PICO_FSELV			(1 << 0)
+#define PERI_CTRL4_FPGA_EXT_PHY_SEL		(1 << 3)
+#define PERI_CTRL4_PICO_REFCLKSEL		(1 << 4)
+#define PERI_CTRL4_PICO_SIDDQ			(1 << 6)
+#define PERI_CTRL4_PICO_SUSPENDM_SLEEPM		(1 << 7)
+#define PERI_CTRL4_PICO_OGDISABLE		(1 << 8)
+#define PERI_CTRL4_PICO_COMMONONN		(1 << 9)
+#define PERI_CTRL4_PICO_VBUSVLDEXT		(1 << 10)
+#define PERI_CTRL4_PICO_VBUSVLDEXTSEL		(1 << 11)
+#define PERI_CTRL4_PICO_VATESTENB		(1 << 12)
+#define PERI_CTRL4_PICO_SUSPENDM		(1 << 14)
+#define PERI_CTRL4_PICO_SLEEPM			(1 << 15)
+#define PERI_CTRL4_BC11_C			(1 << 16)
+#define PERI_CTRL4_BC11_B			(1 << 17)
+#define PERI_CTRL4_BC11_A			(1 << 18)
+#define PERI_CTRL4_BC11_GND			(1 << 19)
+#define PERI_CTRL4_BC11_FLOAT			(1 << 20)
+#define PERI_CTRL4_OTG_PHY_SEL			(1 << 21)
+#define PERI_CTRL4_USB_OTG_SS_SCALEDOWN_MODE	(1 << 22)
+#define PERI_CTRL4_OTG_DM_PULLDOWN		(1 << 24)
+#define PERI_CTRL4_OTG_DP_PULLDOWN		(1 << 25)
+#define PERI_CTRL4_OTG_IDPULLUP			(1 << 26)
+#define PERI_CTRL4_OTG_DRVBUS			(1 << 27)
+#define PERI_CTRL4_OTG_SESSEND			(1 << 28)
+#define PERI_CTRL4_OTG_BVALID			(1 << 29)
+#define PERI_CTRL4_OTG_AVALID			(1 << 30)
+#define PERI_CTRL4_OTG_VBUSVALID		(1 << 31)
+
+/* PERI_SC_PERIPH_CTRL5 */
+#define PERI_CTRL5_USBOTG_RES_SEL		(1 << 3)
+#define PERI_CTRL5_PICOPHY_ACAENB		(1 << 4)
+#define PERI_CTRL5_PICOPHY_BC_MODE		(1 << 5)
+#define PERI_CTRL5_PICOPHY_CHRGSEL		(1 << 6)
+#define PERI_CTRL5_PICOPHY_VDATSRCEND		(1 << 7)
+#define PERI_CTRL5_PICOPHY_VDATDETENB		(1 << 8)
+#define PERI_CTRL5_PICOPHY_DCDENB		(1 << 9)
+#define PERI_CTRL5_PICOPHY_IDDIG		(1 << 10)
+#define PERI_CTRL5_DBG_MUX			(1 << 11)
+
+/* PERI_SC_PERIPH_CTRL6 */
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMA	(1 << 0)
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMAW	(1 << 4)
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_EMAS	(1 << 6)
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_RET1N	(1 << 10)
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_RET2N	(1 << 11)
+#define PERI_CTRL6_CSSYSOFF_RAMCTRL_S_PGEN	(1 << 12)
+
+/* PERI_SC_PERIPH_CTRL8 */
+#define PERI_CTRL8_PICOPHY_TXRISETUNE0		(1 << 0)
+#define PERI_CTRL8_PICOPHY_TXPREEMPAMPTUNE0	(1 << 2)
+#define PERI_CTRL8_PICOPHY_TXRESTUNE0		(1 << 4)
+#define PERI_CTRL8_PICOPHY_TXHSSVTUNE0		(1 << 6)
+#define PERI_CTRL8_PICOPHY_COMPDISTUNE0		(1 << 8)
+#define PERI_CTRL8_PICOPHY_TXPREEMPPULSETUNE0	(1 << 11)
+#define PERI_CTRL8_PICOPHY_OTGTUNE0		(1 << 12)
+#define PERI_CTRL8_PICOPHY_SQRXTUNE0		(1 << 16)
+#define PERI_CTRL8_PICOPHY_TXVREFTUNE0		(1 << 20)
+#define PERI_CTRL8_PICOPHY_TXFSLSTUNE0		(1 << 28)
+
+/* PERI_SC_PERIPH_CTRL9	*/
+#define PERI_CTRL9_PICOPLY_TESTCLKEN		(1 << 0)
+#define PERI_CTRL9_PICOPLY_TESTDATAOUTSEL	(1 << 1)
+#define PERI_CTRL9_PICOPLY_TESTADDR		(1 << 4)
+#define PERI_CTRL9_PICOPLY_TESTDATAIN		(1 << 8)
+
+/*
+ * PERI_SC_PERIPH_CLKEN0
+ * PERI_SC_PERIPH_CLKDIS0
+ * PERI_SC_PERIPH_CLKSTAT0
+ */
+#define PERI_CLK0_MMC0				(1 << 0)
+#define PERI_CLK0_MMC1				(1 << 1)
+#define PERI_CLK0_MMC2				(1 << 2)
+#define PERI_CLK0_NANDC				(1 << 3)
+#define PERI_CLK0_USBOTG			(1 << 4)
+#define PERI_CLK0_PICOPHY			(1 << 5)
+#define PERI_CLK0_PLL				(1 << 6)
+
+/*
+ * PERI_SC_PERIPH_CLKEN1
+ * PERI_SC_PERIPH_CLKDIS1
+ * PERI_SC_PERIPH_CLKSTAT1
+ */
+#define PERI_CLK1_HIFI				(1 << 0)
+#define PERI_CLK1_DIGACODEC			(1 << 5)
+
+/*
+ * PERI_SC_PERIPH_CLKEN2
+ * PERI_SC_PERIPH_CLKDIS2
+ * PERI_SC_PERIPH_CLKSTAT2
+ */
+#define PERI_CLK2_IPF				(1 << 0)
+#define PERI_CLK2_SOCP				(1 << 1)
+#define PERI_CLK2_DMAC				(1 << 2)
+#define PERI_CLK2_SECENG			(1 << 3)
+#define PERI_CLK2_HPM0				(1 << 5)
+#define PERI_CLK2_HPM1				(1 << 6)
+#define PERI_CLK2_HPM2				(1 << 7)
+#define PERI_CLK2_HPM3				(1 << 8)
+
+/*
+ * PERI_SC_PERIPH_CLKEN3
+ * PERI_SC_PERIPH_CLKDIS3
+ * PERI_SC_PERIPH_CLKSTAT3
+ */
+#define PERI_CLK3_CSSYS				(1 << 0)
+#define PERI_CLK3_I2C0				(1 << 1)
+#define PERI_CLK3_I2C1				(1 << 2)
+#define PERI_CLK3_I2C2				(1 << 3)
+#define PERI_CLK3_I2C3				(1 << 4)
+#define PERI_CLK3_UART1				(1 << 5)
+#define PERI_CLK3_UART2				(1 << 6)
+#define PERI_CLK3_UART3				(1 << 7)
+#define PERI_CLK3_UART4				(1 << 8)
+#define PERI_CLK3_SSP				(1 << 9)
+#define PERI_CLK3_PWM				(1 << 10)
+#define PERI_CLK3_BLPWM				(1 << 11)
+#define PERI_CLK3_TSENSOR			(1 << 12)
+#define PERI_CLK3_GPS				(1 << 15)
+#define PERI_CLK3_TCXO_PAD0			(1 << 16)
+#define PERI_CLK3_TCXO_PAD1			(1 << 17)
+#define PERI_CLK3_DAPB				(1 << 18)
+#define PERI_CLK3_HKADC				(1 << 19)
+#define PERI_CLK3_CODEC_SSI			(1 << 20)
+#define PERI_CLK3_TZPC_DEP			(1 << 21)
+
+/*
+ * PERI_SC_PERIPH_CLKEN8
+ * PERI_SC_PERIPH_CLKDIS8
+ * PERI_SC_PERIPH_CLKSTAT8
+ */
+#define PERI_CLK8_RS0				(1 << 0)
+#define PERI_CLK8_RS2				(1 << 1)
+#define PERI_CLK8_RS3				(1 << 2)
+#define PERI_CLK8_MS0				(1 << 3)
+#define PERI_CLK8_MS2				(1 << 5)
+#define PERI_CLK8_XG2RAM0			(1 << 6)
+#define PERI_CLK8_X2SRAM			(1 << 7)
+#define PERI_CLK8_SRAM				(1 << 8)
+#define PERI_CLK8_ROM				(1 << 9)
+#define PERI_CLK8_HARQ				(1 << 10)
+#define PERI_CLK8_MMU				(1 << 11)
+#define PERI_CLK8_DDRC				(1 << 12)
+#define PERI_CLK8_DDRPHY			(1 << 13)
+#define PERI_CLK8_DDRPHY_REF			(1 << 14)
+#define PERI_CLK8_X2X_SYSNOC			(1 << 15)
+#define PERI_CLK8_X2X_CCPU			(1 << 16)
+#define PERI_CLK8_DDRT				(1 << 17)
+#define PERI_CLK8_DDRPACK_RS			(1 << 18)
+
+/*
+ * PERI_SC_PERIPH_CLKEN9
+ * PERI_SC_PERIPH_CLKDIS9
+ * PERI_SC_PERIPH_CLKSTAT9
+ */
+#define PERI_CLK9_CARM_DAP			(1 << 0)
+#define PERI_CLK9_CARM_ATB			(1 << 1)
+#define PERI_CLK9_CARM_LBUS			(1 << 2)
+#define PERI_CLK9_CARM_KERNEL			(1 << 3)
+
+/*
+ * PERI_SC_PERIPH_CLKEN10
+ * PERI_SC_PERIPH_CLKDIS10
+ * PERI_SC_PERIPH_CLKSTAT10
+ */
+#define PERI_CLK10_IPF_CCPU			(1 << 0)
+#define PERI_CLK10_SOCP_CCPU			(1 << 1)
+#define PERI_CLK10_SECENG_CCPU			(1 << 2)
+#define PERI_CLK10_HARQ_CCPU			(1 << 3)
+#define PERI_CLK10_IPF_MCU			(1 << 16)
+#define PERI_CLK10_SOCP_MCU			(1 << 17)
+#define PERI_CLK10_SECENG_MCU			(1 << 18)
+#define PERI_CLK10_HARQ_MCU			(1 << 19)
+
+/*
+ * PERI_SC_PERIPH_CLKEN12
+ * PERI_SC_PERIPH_CLKDIS12
+ * PERI_SC_PERIPH_CLKSTAT12
+ */
+#define PERI_CLK12_HIFI_SRC			(1 << 0)
+#define PERI_CLK12_MMC0_SRC			(1 << 1)
+#define PERI_CLK12_MMC1_SRC			(1 << 2)
+#define PERI_CLK12_MMC2_SRC			(1 << 3)
+#define PERI_CLK12_SYSPLL_DIV			(1 << 4)
+#define PERI_CLK12_TPIU_SRC			(1 << 5)
+#define PERI_CLK12_MMC0_HF			(1 << 6)
+#define PERI_CLK12_MMC1_HF			(1 << 7)
+#define PERI_CLK12_PLL_TEST_SRC			(1 << 8)
+#define PERI_CLK12_CODEC_SOC			(1 << 9)
+#define PERI_CLK12_MEDIA			(1 << 10)
+
+/*
+ * PERI_SC_PERIPH_RSTEN0
+ * PERI_SC_PERIPH_RSTDIS0
+ * PERI_SC_PERIPH_RSTSTAT0
+ */
+#define PERI_RST0_MMC0				(1 << 0)
+#define PERI_RST0_MMC1				(1 << 1)
+#define PERI_RST0_MMC2				(1 << 2)
+#define PERI_RST0_NANDC				(1 << 3)
+#define PERI_RST0_USBOTG_BUS			(1 << 4)
+#define PERI_RST0_POR_PICOPHY			(1 << 5)
+#define PERI_RST0_USBOTG			(1 << 6)
+#define PERI_RST0_USBOTG_32K			(1 << 7)
+
+/*
+ * PERI_SC_PERIPH_RSTEN1
+ * PERI_SC_PERIPH_RSTDIS1
+ * PERI_SC_PERIPH_RSTSTAT1
+ */
+#define PERI_RST1_HIFI				(1 << 0)
+#define PERI_RST1_DIGACODEC			(1 << 5)
+
+/*
+ * PERI_SC_PERIPH_RSTEN2
+ * PERI_SC_PERIPH_RSTDIS2
+ * PERI_SC_PERIPH_RSTSTAT2
+ */
+#define PERI_RST2_IPF				(1 << 0)
+#define PERI_RST2_SOCP				(1 << 1)
+#define PERI_RST2_DMAC				(1 << 2)
+#define PERI_RST2_SECENG			(1 << 3)
+#define PERI_RST2_ABB				(1 << 4)
+#define PERI_RST2_HPM0				(1 << 5)
+#define PERI_RST2_HPM1				(1 << 6)
+#define PERI_RST2_HPM2				(1 << 7)
+#define PERI_RST2_HPM3				(1 << 8)
+
+/*
+ * PERI_SC_PERIPH_RSTEN3
+ * PERI_SC_PERIPH_RSTDIS3
+ * PERI_SC_PERIPH_RSTSTAT3
+ */
+#define PERI_RST3_CSSYS				(1 << 0)
+#define PERI_RST3_I2C0				(1 << 1)
+#define PERI_RST3_I2C1				(1 << 2)
+#define PERI_RST3_I2C2				(1 << 3)
+#define PERI_RST3_I2C3				(1 << 4)
+#define PERI_RST3_UART1				(1 << 5)
+#define PERI_RST3_UART2				(1 << 6)
+#define PERI_RST3_UART3				(1 << 7)
+#define PERI_RST3_UART4				(1 << 8)
+#define PERI_RST3_SSP				(1 << 9)
+#define PERI_RST3_PWM				(1 << 10)
+#define PERI_RST3_BLPWM				(1 << 11)
+#define PERI_RST3_TSENSOR			(1 << 12)
+#define PERI_RST3_DAPB				(1 << 18)
+#define PERI_RST3_HKADC				(1 << 19)
+#define PERI_RST3_CODEC				(1 << 20)
+
+/*
+ * PERI_SC_PERIPH_RSTEN8
+ * PERI_SC_PERIPH_RSTDIS8
+ * PERI_SC_PERIPH_RSTSTAT8
+ */
+#define PERI_RST8_RS0				(1 << 0)
+#define PERI_RST8_RS2				(1 << 1)
+#define PERI_RST8_RS3				(1 << 2)
+#define PERI_RST8_MS0				(1 << 3)
+#define PERI_RST8_MS2				(1 << 5)
+#define PERI_RST8_XG2RAM0			(1 << 6)
+#define PERI_RST8_X2SRAM_TZMA			(1 << 7)
+#define PERI_RST8_SRAM				(1 << 8)
+#define PERI_RST8_HARQ				(1 << 10)
+#define PERI_RST8_DDRC				(1 << 12)
+#define PERI_RST8_DDRC_APB			(1 << 13)
+#define PERI_RST8_DDRPACK_APB			(1 << 14)
+#define PERI_RST8_DDRT				(1 << 17)
+
+#endif /* __HI6220_PERI_H__ */
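
The CLKEN/CLKDIS/CLKSTAT and RSTEN/RSTDIS/RSTSTAT groups above are write-one-to-set and write-one-to-clear pairs with a status register, so a single peripheral can be gated or ungated without touching its neighbours. Purely as an illustration (the helper name and the busy-wait policy are assumptions), bringing MMC0 out of clock gating and reset with these definitions could look like:

    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: ungate the MMC0 clock, then release its reset. */
    static void hikey_mmc0_enable(void)
    {
            mmio_write_32(PERI_SC_PERIPH_CLKEN0, PERI_CLK0_MMC0);
            while ((mmio_read_32(PERI_SC_PERIPH_CLKSTAT0) & PERI_CLK0_MMC0) == 0)
                    ;

            mmio_write_32(PERI_SC_PERIPH_RSTDIS0, PERI_RST0_MMC0);
            while (mmio_read_32(PERI_SC_PERIPH_RSTSTAT0) & PERI_RST0_MMC0)
                    ;
    }
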
diff --git a/plat/hisilicon/hikey/include/hi6220_regs_pin.h b/plat/hisilicon/hikey/include/hi6220_regs_pin.h
new file mode 100644
index 0000000..7de4c3c
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220_regs_pin.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_PIN_H__
+#define __HI6220_PIN_H__
+
+#define IOMG_BASE				0xF7010000
+
+#define IOMG_SD_CLK				(IOMG_BASE + 0x0C)
+#define IOMG_SD_CMD				(IOMG_BASE + 0x10)
+#define IOMG_SD_DATA0				(IOMG_BASE + 0x14)
+#define IOMG_SD_DATA1				(IOMG_BASE + 0x18)
+#define IOMG_SD_DATA2				(IOMG_BASE + 0x1C)
+#define IOMG_SD_DATA3				(IOMG_BASE + 0x20)
+#define IOMG_GPIO24				(IOMG_BASE + 0x140)
+
+#define IOMG_MUX_FUNC0				0
+#define IOMG_MUX_FUNC1				1
+#define IOMG_MUX_FUNC2				2
+
+#define IOCG1_BASE				0xF7010800
+#define IOCG2_BASE				0xF8001800
+
+#define IOCG_SD_CLK				(IOCG1_BASE + 0x0C)
+#define IOCG_SD_CMD				(IOCG1_BASE + 0x10)
+#define IOCG_SD_DATA0				(IOCG1_BASE + 0x14)
+#define IOCG_SD_DATA1				(IOCG1_BASE + 0x18)
+#define IOCG_SD_DATA2				(IOCG1_BASE + 0x1C)
+#define IOCG_SD_DATA3				(IOCG1_BASE + 0x20)
+#define IOCG_GPIO24				(IOCG1_BASE + 0x150)
+#define IOCG_GPIO8				(IOCG2_BASE + 0x30)
+
+#define IOCG_DRIVE_8MA				(2 << 4)
+#define IOCG_DRIVE_10MA				(3 << 4)
+#define IOCG_INPUT_16MA				0x64
+#define IOCG_INPUT_12MA				0x54
+#define IOCG_PULLDOWN				(1 << 1)
+#define IOCG_PULLUP				(1 << 0)
+
+#endif	/* __HI6220_PIN_H__ */
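
The IOMG_* registers select a pad's function and the IOCG_* registers set its drive strength and pulls. A short sketch of that split for the SD clock and command pads; the helper and the particular drive values are illustrative choices, not settings taken from this series:

    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: mux the SD clk/cmd pads and configure their pads. */
    static void hikey_sd_pinmux(void)
    {
            mmio_write_32(IOMG_SD_CLK, IOMG_MUX_FUNC0);
            mmio_write_32(IOMG_SD_CMD, IOMG_MUX_FUNC0);

            mmio_write_32(IOCG_SD_CLK, IOCG_DRIVE_10MA);
            mmio_write_32(IOCG_SD_CMD, IOCG_DRIVE_8MA | IOCG_PULLUP);
    }
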
diff --git a/plat/hisilicon/hikey/include/hi6220_regs_pmctrl.h b/plat/hisilicon/hikey/include/hi6220_regs_pmctrl.h
new file mode 100644
index 0000000..dc09b20
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6220_regs_pmctrl.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6220_REGS_PMCTRL_H__
+#define __HI6220_REGS_PMCTRL_H__
+
+#define PMCTRL_BASE				0xF7032000
+
+#define PMCTRL_ACPUPLLCTRL			(PMCTRL_BASE + 0x000)
+#define PMCTRL_ACPUPLLFREQ			(PMCTRL_BASE + 0x004)
+#define PMCTRL_DDRPLL1CTRL			(PMCTRL_BASE + 0x010)
+#define PMCTRL_DDRPLL0CTRL			(PMCTRL_BASE + 0x030)
+#define PMCTRL_MEDPLLCTRL			(PMCTRL_BASE + 0x038)
+#define PMCTRL_ACPUPLLSEL			(PMCTRL_BASE + 0x100)
+#define PMCTRL_ACPUCLKDIV			(PMCTRL_BASE + 0x104)
+#define PMCTRL_ACPUSYSPLLCFG			(PMCTRL_BASE + 0x110)
+#define PMCTRL_ACPUCLKOFFCFG			(PMCTRL_BASE + 0x114)
+#define PMCTRL_ACPUPLLFRAC			(PMCTRL_BASE + 0x134)
+#define PMCTRL_ACPUPMUVOLUPTIME			(PMCTRL_BASE + 0x360)
+#define PMCTRL_ACPUPMUVOLDNTIME			(PMCTRL_BASE + 0x364)
+#define PMCTRL_ACPUVOLPMUADDR			(PMCTRL_BASE + 0x368)
+#define PMCTRL_ACPUVOLUPSTEP			(PMCTRL_BASE + 0x36c)
+#define PMCTRL_ACPUVOLDNSTEP			(PMCTRL_BASE + 0x370)
+#define PMCTRL_ACPUDFTVOL			(PMCTRL_BASE + 0x374)
+#define PMCTRL_ACPUDESTVOL			(PMCTRL_BASE + 0x378)
+#define PMCTRL_ACPUVOLTTIMEOUT			(PMCTRL_BASE + 0x37c)
+
+#define PMCTRL_ACPUPLLCTRL_EN_CFG		(1 << 0)
+
+#define PMCTRL_ACPUCLKDIV_CPUEXT_CFG_MASK	(3 << 0)
+#define PMCTRL_ACPUCLKDIV_DDR_CFG_MASK		(3 << 8)
+#define PMCTRL_ACPUCLKDIV_CPUEXT_STAT_MASK	(3 << 16)
+#define PMCTRL_ACPUCLKDIV_DDR_STAT_MASK		(3 << 24)
+
+#define PMCTRL_ACPUPLLSEL_ACPUPLL_CFG		(1 << 0)
+#define PMCTRL_ACPUPLLSEL_ACPUPLL_STAT		(1 << 1)
+#define PMCTRL_ACPUPLLSEL_SYSPLL_STAT		(1 << 2)
+
+#define PMCTRL_ACPUSYSPLL_CLKDIV_CFG_MASK	0x7
+#define PMCTRL_ACPUSYSPLL_CLKEN_CFG		(1 << 4)
+#define PMCTRL_ACPUSYSPLL_CLKDIV_SW		(3 << 12)
+
+#define PMCTRL_ACPUSYSPLLCFG_SYSPLL_CLKEN	(1 << 4)
+#define PMCTRL_ACPUSYSPLLCFG_CLKDIV_MASK	(3 << 12)
+
+#define PMCTRL_ACPUDESTVOL_DEST_VOL_MASK	0x7f
+#define PMCTRL_ACPUDESTVOL_CURR_VOL_MASK	(0x7f << 8)
+
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START   (0)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_END     (0)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_rst_START      (2)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_rst_END        (2)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_time_START     (4)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_time_END       (27)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START  (28)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_END    (28)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_lock_START     (29)
+#define SOC_PMCTRL_ACPUPLLCTRL_acpupll_lock_END       (29)
+
+#define SOC_PMCTRL_ACPUPLLFRAC_ADDR(base)   ((base) + (0x134))
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START   (12)
+
+#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START   (0)
+#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_END     (0)
+#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START  (1)
+#define SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_END    (1)
+#define SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_START   (2)
+#define SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_END     (2)
+
+#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START     (0)
+#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_END       (1)
+#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START   (8)
+#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_END     (9)
+#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START    (16)
+#define SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_END      (17)
+#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START  (24)
+#define SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_END    (25)
+
+#define SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START   (0)
+#define SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END     (6)
+#define SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START  (8)
+#define SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_END    (14)
+
+#define SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START  (0)
+#define SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_END    (0)
+
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_cfg_START      (0)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_cfg_END        (2)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START    (4)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_END      (4)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_cfg_START  (8)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_cfg_END    (9)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_stat_START     (16)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_div_stat_END       (19)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_stat_START   (20)
+#define SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_stat_END     (20)
+
+#endif /* __HI6220_REGS_PMCTRL_H__ */
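
The SOC_PMCTRL_*_START/END pairs describe the bit fields inside the registers defined at the top of this header; PMCTRL_ACPUDESTVOL, for instance, holds the requested voltage code in bits [6:0] and the code currently in use in bits [14:8], matching PMCTRL_ACPUDESTVOL_CURR_VOL_MASK. An illustrative read of the in-use field (the helper is hypothetical):

    #include <stdint.h>
    #include <mmio.h>
    #include <hi6220.h>

    /* Hypothetical helper: return the ACPU voltage code currently in use. */
    static unsigned int acpu_current_voltage_code(void)
    {
            uint32_t reg = mmio_read_32(PMCTRL_ACPUDESTVOL);

            return (reg & PMCTRL_ACPUDESTVOL_CURR_VOL_MASK) >>
                    SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START;
    }
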
diff --git a/plat/hisilicon/hikey/include/hi6553.h b/plat/hisilicon/hikey/include/hi6553.h
new file mode 100644
index 0000000..a80d36d
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hi6553.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI6553_H__
+#define __HI6553_H__
+
+#include <hi6220.h>
+#include <mmio.h>
+
+#define HI6553_DISABLE6_XO_CLK			(PMUSSI_BASE + (0x036 << 2))
+
+#define DISABLE6_XO_CLK_BB			(1 << 0)
+#define DISABLE6_XO_CLK_CONN			(1 << 1)
+#define DISABLE6_XO_CLK_NFC			(1 << 2)
+#define DISABLE6_XO_CLK_RF1			(1 << 3)
+#define DISABLE6_XO_CLK_RF2			(1 << 4)
+
+#define HI6553_VERSION_REG			(PMUSSI_BASE + (0x000 << 2))
+#define HI6553_IRQ2_MASK			(PMUSSI_BASE + (0x008 << 2))
+#define HI6553_ENABLE2_LDO1_8			(PMUSSI_BASE + (0x029 << 2))
+#define HI6553_DISABLE2_LDO1_8			(PMUSSI_BASE + (0x02a << 2))
+#define HI6553_ONOFF_STATUS2_LDO1_8		(PMUSSI_BASE + (0x02b << 2))
+#define HI6553_ENABLE3_LDO9_16			(PMUSSI_BASE + (0x02c << 2))
+#define HI6553_DISABLE3_LDO9_16			(PMUSSI_BASE + (0x02d << 2))
+#define HI6553_ONOFF_STATUS3_LDO9_16		(PMUSSI_BASE + (0x02e << 2))
+#define HI6553_ENABLE4_LDO17_22			(PMUSSI_BASE + (0x02f << 2))
+#define HI6553_DISABLE4_LDO17_22		(PMUSSI_BASE + (0x030 << 2))
+#define HI6553_ONOFF_STATUS4_LDO17_22		(PMUSSI_BASE + (0x031 << 2))
+#define HI6553_PERI_EN_MARK			(PMUSSI_BASE + (0x040 << 2))
+#define HI6553_BUCK2_REG1			(PMUSSI_BASE + (0x04a << 2))
+#define HI6553_BUCK2_REG5			(PMUSSI_BASE + (0x04e << 2))
+#define HI6553_BUCK2_REG6			(PMUSSI_BASE + (0x04f << 2))
+#define HI6553_BUCK3_REG3			(PMUSSI_BASE + (0x054 << 2))
+#define HI6553_BUCK3_REG5			(PMUSSI_BASE + (0x056 << 2))
+#define HI6553_BUCK3_REG6			(PMUSSI_BASE + (0x057 << 2))
+#define HI6553_BUCK4_REG2			(PMUSSI_BASE + (0x05b << 2))
+#define HI6553_BUCK4_REG5			(PMUSSI_BASE + (0x05e << 2))
+#define HI6553_BUCK4_REG6			(PMUSSI_BASE + (0x05f << 2))
+#define HI6553_CLK_TOP0				(PMUSSI_BASE + (0x063 << 2))
+#define HI6553_CLK_TOP3				(PMUSSI_BASE + (0x066 << 2))
+#define HI6553_CLK_TOP4				(PMUSSI_BASE + (0x067 << 2))
+#define HI6553_VSET_BUCK2_ADJ			(PMUSSI_BASE + (0x06d << 2))
+#define HI6553_VSET_BUCK3_ADJ			(PMUSSI_BASE + (0x06e << 2))
+#define HI6553_LDO7_REG_ADJ			(PMUSSI_BASE + (0x078 << 2))
+#define HI6553_LDO10_REG_ADJ			(PMUSSI_BASE + (0x07b << 2))
+#define HI6553_LDO15_REG_ADJ			(PMUSSI_BASE + (0x080 << 2))
+#define HI6553_LDO19_REG_ADJ			(PMUSSI_BASE + (0x084 << 2))
+#define HI6553_LDO20_REG_ADJ			(PMUSSI_BASE + (0x085 << 2))
+#define HI6553_LDO21_REG_ADJ			(PMUSSI_BASE + (0x086 << 2))
+#define HI6553_LDO22_REG_ADJ			(PMUSSI_BASE + (0x087 << 2))
+#define HI6553_DR_LED_CTRL			(PMUSSI_BASE + (0x098 << 2))
+#define HI6553_DR_OUT_CTRL			(PMUSSI_BASE + (0x099 << 2))
+#define HI6553_DR3_ISET				(PMUSSI_BASE + (0x09a << 2))
+#define HI6553_DR3_START_DEL			(PMUSSI_BASE + (0x09b << 2))
+#define HI6553_DR4_ISET				(PMUSSI_BASE + (0x09c << 2))
+#define HI6553_DR4_START_DEL			(PMUSSI_BASE + (0x09d << 2))
+#define HI6553_DR345_TIM_CONF0			(PMUSSI_BASE + (0x0a0 << 2))
+#define HI6553_NP_REG_ADJ1			(PMUSSI_BASE + (0x0be << 2))
+#define HI6553_NP_REG_CHG			(PMUSSI_BASE + (0x0c0 << 2))
+#define HI6553_BUCK01_CTRL2			(PMUSSI_BASE + (0x0d9 << 2))
+#define HI6553_BUCK0_CTRL1			(PMUSSI_BASE + (0x0dd << 2))
+#define HI6553_BUCK0_CTRL5			(PMUSSI_BASE + (0x0e1 << 2))
+#define HI6553_BUCK0_CTRL7			(PMUSSI_BASE + (0x0e3 << 2))
+#define HI6553_BUCK1_CTRL1			(PMUSSI_BASE + (0x0e8 << 2))
+#define HI6553_BUCK1_CTRL5			(PMUSSI_BASE + (0x0ec << 2))
+#define HI6553_BUCK1_CTRL7			(PMUSSI_BASE + (0x0ef << 2))
+#define HI6553_CLK19M2_600_586_EN		(PMUSSI_BASE + (0x0fe << 2))
+
+#define LED_START_DELAY_TIME			0x00
+#define LED_ELEC_VALUE				0x07
+#define LED_LIGHT_TIME				0xf0
+#define LED_GREEN_ENABLE			(1 << 1)
+#define LED_OUT_CTRL				0x00
+
+#define PMU_HI6552_V300				0x30
+#define PMU_HI6552_V310				0x31
+
+#endif	/* __HI6553_H__ */
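The HI6553 register macros above address each PMU register as PMUSSI_BASE plus the register index shifted left by two, i.e. one 32-bit slot per 8-bit PMU register. Below is a minimal sketch of checking the PMU version against the two revision codes defined above; the 8-bit read width and the helper name are assumptions for illustration, not part of this patch.

#include <hi6553.h>
#include <mmio.h>

/* Illustrative only: report whether the PMU is one of the known revisions. */
static int hi6553_is_known_revision(void)
{
	unsigned int ver = mmio_read_8(HI6553_VERSION_REG);

	return (ver == PMU_HI6552_V300) || (ver == PMU_HI6552_V310);
}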
diff --git a/plat/hisilicon/hikey/include/hisi_ipc.h b/plat/hisilicon/hikey/include/hisi_ipc.h
new file mode 100644
index 0000000..b20742f
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hisi_ipc.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_IPC_H__
+#define __HISI_IPC_H__
+
+#define HISI_IPC_CORE_ACPU		0x0
+
+#define HISI_IPC_MCU_INT_SRC_ACPU0_PD	10
+#define HISI_IPC_MCU_INT_SRC_ACPU1_PD	11
+#define HISI_IPC_MCU_INT_SRC_ACPU2_PD	12
+#define HISI_IPC_MCU_INT_SRC_ACPU3_PD	13
+#define HISI_IPC_MCU_INT_SRC_ACPU_PD	16
+#define HISI_IPC_MCU_INT_SRC_ACPU4_PD	26
+#define HISI_IPC_MCU_INT_SRC_ACPU5_PD	27
+#define HISI_IPC_MCU_INT_SRC_ACPU6_PD	28
+#define HISI_IPC_MCU_INT_SRC_ACPU7_PD	29
+
+#define HISI_IPC_SEM_CPUIDLE		27
+#define HISI_IPC_INT_SRC_NUM		32
+
+#define HISI_IPC_PM_ON			0
+#define HISI_IPC_PM_OFF			1
+
+#define HISI_IPC_OK			(0)
+#define HISI_IPC_ERROR			(-1)
+
+#define HISI_IPC_BASE_ADDR		(0xF7510000)
+#define HISI_IPC_CPU_RAW_INT_ADDR	(0xF7510420)
+#define HISI_IPC_ACPU_CTRL(i)		(0xF7510800 + ((i) << 3))
+
+void hisi_ipc_spin_lock(unsigned int signal);
+void hisi_ipc_spin_unlock(unsigned int signal);
+void hisi_ipc_cpu_on(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_cpu_off(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_cpu_suspend(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_cluster_on(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_cluster_off(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_cluster_suspend(unsigned int cpu, unsigned int cluster);
+void hisi_ipc_psci_system_off(void);
+int hisi_ipc_init(void);
+
+#endif	/* __HISI_IPC_H__ */
diff --git a/plat/hisilicon/hikey/include/hisi_mcu.h b/plat/hisilicon/hikey/include/hisi_mcu.h
new file mode 100644
index 0000000..f5c6ed0
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hisi_mcu.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef	__HISI_MCU_H__
+#define	__HISI_MCU_H__
+
+#include <stdint.h>
+
+extern void hisi_mcu_enable_sram(void);
+extern void hisi_mcu_start_run(void);
+extern int hisi_mcu_load_image(uintptr_t image_base, uint32_t image_size);
+
+#endif	/* __HISI_MCU_H__ */
diff --git a/plat/hisilicon/hikey/include/hisi_pwrc.h b/plat/hisilicon/hikey/include/hisi_pwrc.h
new file mode 100644
index 0000000..cffe70e
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hisi_pwrc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_PWRC_H__
+#define __HISI_PWRC_H__
+
+#ifndef __ASSEMBLY__
+
+void hisi_pwrc_set_cluster_wfi(unsigned int id);
+void hisi_pwrc_set_core_bx_addr(unsigned int core,
+				unsigned int cluster,
+				uintptr_t entry_point);
+void hisi_pwrc_enable_debug(unsigned int core,
+			    unsigned int cluster);
+int hisi_pwrc_setup(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __HISI_PWRC_H__ */
diff --git a/plat/hisilicon/hikey/include/hisi_sip_svc.h b/plat/hisilicon/hikey/include/hisi_sip_svc.h
new file mode 100644
index 0000000..662b6cc
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hisi_sip_svc.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_SIP_SVC_H__
+#define __HISI_SIP_SVC_H__
+
+/* SMC function IDs for SiP Service queries */
+
+#define HISI_SIP_SVC_CALL_COUNT		0x8200ff00
+#define HISI_SIP_SVC_UID			0x8200ff01
+/*					0x8200ff02 is reserved */
+#define HISI_SIP_SVC_VERSION			0x8200ff03
+
+/* HISI SiP Service Calls version numbers */
+#define HISI_SIP_SVC_VERSION_MAJOR		0x0
+#define HISI_SIP_SVC_VERSION_MINOR		0x1
+
+#endif /* __HISI_SIP_SVC_H__ */
diff --git a/plat/hisilicon/hikey/include/hisi_sram_map.h b/plat/hisilicon/hikey/include/hisi_sram_map.h
new file mode 100644
index 0000000..ed90c7b
--- /dev/null
+++ b/plat/hisilicon/hikey/include/hisi_sram_map.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_SRAM_MAP_H__
+#define __HISI_SRAM_MAP_H__
+
+/*
+ * SRAM Memory Region Layout
+ *
+ *  +-----------------------+
+ *  |  Low Power Mode       | 7KB
+ *  +-----------------------+
+ *  |  Secure OS            | 64KB
+ *  +-----------------------+
+ *  |  Software Flag        | 1KB
+ *  +-----------------------+
+ *
+ */
+
+#define SOC_SRAM_OFF_BASE_ADDR		(0xFFF80000)
+
+/* PM Section: 7KB */
+#define SRAM_PM_ADDR			(SOC_SRAM_OFF_BASE_ADDR)
+#define SRAM_PM_SIZE			(0x00001C00)
+
+/* TEE OS Section: 64KB */
+#define SRAM_TEEOS_ADDR			(SRAM_PM_ADDR + SRAM_PM_SIZE)
+#define SRAM_TEEOS_SIZE			(0x00010000)
+
+/* General Use Section: 1KB */
+#define SRAM_GENERAL_ADDR		(SRAM_TEEOS_ADDR + SRAM_TEEOS_SIZE)
+#define SRAM_GENERAL_SIZE		(0x00000400)
+
+/*
+ * General Usage Section Layout:
+ *
+ *  +-----------------------+
+ *  |  AP boot flag         | 64B
+ *  +-----------------------+
+ *  |  DICC flag            | 32B
+ *  +-----------------------+
+ *  |  Soft flag            | 256B
+ *  +-----------------------+
+ *  |  Thermal flag         | 128B
+ *  +-----------------------+
+ *  |  CSHELL               | 4B
+ *  +-----------------------+
+ *  |  Uart Switching       | 4B
+ *  +-----------------------+
+ *  |  ICC                  | 1024B
+ *  +-----------------------+
+ *  |  Memory Management    | 1024B
+ *  +-----------------------+
+ *  |  IFC                  | 32B
+ *  +-----------------------+
+ *  |  HIFI                 | 32B
+ *  +-----------------------+
+ *  |  DDR capacity         | 4B
+ *  +-----------------------+
+ *  |  Reserved             |
+ *  +-----------------------+
+ *
+ */
+
+/* App Core Boot Flags */
+#define MEMORY_AXI_ACPU_START_ADDR		(SRAM_GENERAL_ADDR)
+#define MEMORY_AXI_ACPU_START_SIZE		(64)
+
+#define MEMORY_AXI_SRESET_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0000)
+#define MEMORY_AXI_SECOND_CPU_BOOT_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0004)
+#define MEMORY_AXI_READY_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0008)
+#define MEMORY_AXI_FASTBOOT_ENTRY_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x000C)
+#define MEMORY_AXI_PD_CHARGE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0010)
+#define MEMORY_AXI_DBG_ALARM_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0014)
+#define MEMORY_AXI_CHIP_ADDR			(MEMORY_AXI_ACPU_START_ADDR + 0x0018)
+#define MEMORY_AXI_BOARD_TYPE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x001C)
+#define MEMORY_AXI_BOARD_ID_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0020)
+#define MEMORY_AXI_CHARGETYPE_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0024)
+#define MEMORY_AXI_COLD_START_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0028)
+#define MEMORY_AXI_ANDROID_REBOOT_FLAG_ADDR	(MEMORY_AXI_ACPU_START_ADDR + 0x002C)
+#define MEMORY_AXI_ACPU_WDTRST_REBOOT_FLAG_ADDR	(MEMORY_AXI_ACPU_START_ADDR + 0x0030)
+#define MEMORY_AXI_ABNRST_BITMAP_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0034)
+#define MEMORY_AXI_32K_CLK_TYPE_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x0038)
+#define AXI_MODEM_PANIC_FLAG_ADDR		(MEMORY_AXI_ACPU_START_ADDR + 0x003C)
+#define AXI_MODEM_PANIC_FLAG			(0x68697369)
+#define MEMORY_AXI_ACPU_END_ADDR		(AXI_MODEM_PANIC_FLAG_ADDR + 4)
+
+/* DICC Flags */
+#define MEMORY_AXI_DICC_ADDR			(MEMORY_AXI_ACPU_START_ADDR + MEMORY_AXI_ACPU_START_SIZE)
+#define MEMORY_AXI_DICC_SIZE			(32)
+
+#define MEMORY_AXI_SOFT_FLAG_ADDR		(MEMORY_AXI_DICC_ADDR + MEMORY_AXI_DICC_SIZE)
+#define MEMORY_AXI_SOFT_FLAG_SIZE		(256)
+
+/* Thermal Flags */
+#define MEMORY_AXI_TEMP_PROTECT_ADDR		(MEMORY_AXI_SOFT_FLAG_ADDR + MEMORY_AXI_SOFT_FLAG_SIZE)
+#define MEMORY_AXI_TEMP_PROTECT_SIZE		(128)
+
+/* CSHELL */
+#define MEMORY_AXI_USB_CSHELL_ADDR		(MEMORY_AXI_TEMP_PROTECT_ADDR + MEMORY_AXI_TEMP_PROTECT_SIZE)
+#define MEMORY_AXI_USB_CSHELL_SIZE		(4)
+
+/* Uart and A/C Shell Switch Flags */
+#define MEMORY_AXI_UART_INOUT_ADDR		(MEMORY_AXI_USB_CSHELL_ADDR + MEMORY_AXI_USB_CSHELL_SIZE)
+#define MEMORY_AXI_UART_INOUT_SIZE		(4)
+
+/* IFC Flags */
+#define MEMORY_AXI_IFC_ADDR			(MEMORY_AXI_UART_INOUT_ADDR + MEMORY_AXI_UART_INOUT_SIZE)
+#define MEMORY_AXI_IFC_SIZE			(32)
+
+/* HIFI Data */
+#define MEMORY_AXI_HIFI_ADDR			(MEMORY_AXI_IFC_ADDR + MEMORY_AXI_IFC_SIZE)
+#define MEMORY_AXI_HIFI_SIZE			(32)
+
+/* CONFIG Flags */
+#define MEMORY_AXI_CONFIG_ADDR			(MEMORY_AXI_HIFI_ADDR + MEMORY_AXI_HIFI_SIZE)
+#define MEMORY_AXI_CONFIG_SIZE			(32)
+
+/* DDR Capacity Flags */
+#define MEMORY_AXI_DDR_CAPACITY_ADDR		(MEMORY_AXI_CONFIG_ADDR + MEMORY_AXI_CONFIG_SIZE)
+#define MEMORY_AXI_DDR_CAPACITY_SIZE		(4)
+
+/* USB Shell Flags */
+#define MEMORY_AXI_USB_SHELL_FLAG_ADDR		(MEMORY_AXI_DDR_CAPACITY_ADDR + MEMORY_AXI_DDR_CAPACITY_SIZE)
+#define MEMORY_AXI_USB_SHELL_FLAG_SIZE		(4)
+
+/* MCU WDT Switch Flag */
+#define MEMORY_AXI_MCU_WDT_FLAG_ADDR		(MEMORY_AXI_USB_SHELL_FLAG_ADDR + MEMORY_AXI_USB_SHELL_FLAG_SIZE)
+#define MEMORY_AXI_MCU_WDT_FLAG_SIZE		(4)
+
+/* TLDSP Mailbox MNTN */
+#define SRAM_DSP_MNTN_INFO_ADDR			(MEMORY_AXI_MCU_WDT_FLAG_ADDR + MEMORY_AXI_MCU_WDT_FLAG_SIZE)
+#define SRAM_DSP_MNTN_SIZE			(32)
+
+/* TLDSP ARM Mailbox Protect Flag */
+#define SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_ADDR	(SRAM_DSP_MNTN_INFO_ADDR + SRAM_DSP_MNTN_SIZE)
+#define SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_SIZE	(4)
+
+/* RTT Sleep Flag */
+#define SRAM_RTT_SLEEP_FLAG_ADDR                (SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_ADDR + SRAM_DSP_ARM_MAILBOX_PROTECT_FLAG_SIZE)
+#define SRAM_RTT_SLEEP_FLAG_SIZE                (32)
+
+/* LDSP Awake Flag */
+#define MEMORY_AXI_LDSP_AWAKE_ADDR              (SRAM_RTT_SLEEP_FLAG_ADDR + SRAM_RTT_SLEEP_FLAG_SIZE)
+#define MEMORY_AXI_LDSP_AWAKE_SIZE              (4)
+
+#define NVUPDATE_SUCCESS			0x5555AAAA
+#define NVUPDATE_FAILURE			0xAAAA5555
+
+/*
+ * Low Power Mode Region
+ */
+#define PWRCTRL_ACPU_ASM_SPACE_ADDR		(SRAM_PM_ADDR)
+#define PWRCTRL_ACPU_ASM_SPACE_SIZE		(SRAM_PM_SIZE)
+
+#define PWRCTRL_ACPU_ASM_MEM_BASE		(PWRCTRL_ACPU_ASM_SPACE_ADDR)
+#define PWRCTRL_ACPU_ASM_MEM_SIZE		(PWRCTRL_ACPU_ASM_SPACE_SIZE)
+#define PWRCTRL_ACPU_ASM_CODE_BASE		(PWRCTRL_ACPU_ASM_MEM_BASE + 0x200)
+#define PWRCTRL_ACPU_ASM_DATA_BASE		(PWRCTRL_ACPU_ASM_MEM_BASE + 0xE00)
+#define PWRCTRL_ACPU_ASM_DATA_SIZE		(0xE00)
+
+#define PWRCTRL_ACPU_ASM_D_C0_ADDR		(PWRCTRL_ACPU_ASM_DATA_BASE)
+#define PWRCTRL_ACPU_ASM_D_C0_MMU_PARA_AD	(PWRCTRL_ACPU_ASM_DATA_BASE + 0)
+#define PWRCTRL_ACPU_ASM_D_ARM_PARA_AD		(PWRCTRL_ACPU_ASM_DATA_BASE + 0x20)
+
+#define PWRCTRL_ACPU_ASM_D_COMM_ADDR		(PWRCTRL_ACPU_ASM_DATA_BASE + 0x700)
+
+#define PWRCTRL_ACPU_REBOOT			(PWRCTRL_ACPU_ASM_D_COMM_ADDR)
+#define PWRCTRL_ACPU_REBOOT_SIZE		(0x200)
+#define PWRCTRL_ACPU_ASM_SLICE_BAK_ADDR		(PWRCTRL_ACPU_REBOOT + PWRCTRL_ACPU_REBOOT_SIZE)
+#define PWRCTRL_ACPU_ASM_SLICE_BAK_SIZE		(4)
+#define PWRCTRL_ACPU_ASM_DEBUG_FLAG_ADDR	(PWRCTRL_ACPU_ASM_SLICE_BAK_ADDR + PWRCTRL_ACPU_ASM_SLICE_BAK_SIZE)
+#define PWRCTRL_ACPU_ASM_DEBUG_FLAG_SIZE	(4)
+#define EXCH_A_CORE_POWRCTRL_CONV_ADDR		(PWRCTRL_ACPU_ASM_DEBUG_FLAG_ADDR + PWRCTRL_ACPU_ASM_DEBUG_FLAG_SIZE)
+#define EXCH_A_CORE_POWRCTRL_CONV_SIZE		(4)
+
+/*
+ * Below region memory mapping is:
+ * 4 + 12 + 16 + 28 + 28 + 16 + 28 + 12 + 24 + 20 + 64 +
+ * 4 + 4 + 4 + 4 + 12 + 4 + 4 + 4 + 4 + 16 + 4 + 0x2BC +
+ * 24 + 20 + 12 + 16
+ */
+
+#define MEMORY_AXI_CPU_IDLE_ADDR		(EXCH_A_CORE_POWRCTRL_CONV_ADDR + EXCH_A_CORE_POWRCTRL_CONV_SIZE)
+#define MEMORY_AXI_CPU_IDLE_SIZE		(4)
+
+#define MEMORY_AXI_CUR_FREQ_ADDR		(MEMORY_AXI_CPU_IDLE_ADDR + MEMORY_AXI_CPU_IDLE_SIZE)
+#define MEMORY_AXI_CUR_FREQ_SIZE		(12)
+
+#define MEMORY_AXI_ACPU_FREQ_VOL_ADDR		(MEMORY_AXI_CUR_FREQ_ADDR + MEMORY_AXI_CUR_FREQ_SIZE)
+#define MEMORY_AXI_ACPU_FREQ_VOL_SIZE		(16 + 28 + 28)
+
+#define MEMORY_AXI_DDR_FREQ_VOL_ADDR		(MEMORY_AXI_ACPU_FREQ_VOL_ADDR + MEMORY_AXI_ACPU_FREQ_VOL_SIZE)
+#define MEMORY_AXI_DDR_FREQ_VOL_SIZE		(16 + 28)
+
+#define MEMORY_AXI_ACPU_FIQ_TEST_ADDR		(MEMORY_AXI_DDR_FREQ_VOL_ADDR + MEMORY_AXI_DDR_FREQ_VOL_SIZE)
+#define MEMORY_AXI_ACPU_FIQ_TEST_SIZE		(12)
+
+#define MEMORY_AXI_ACPU_FIQ_CPU_INFO_ADDR	(MEMORY_AXI_ACPU_FIQ_TEST_ADDR + MEMORY_AXI_ACPU_FIQ_TEST_SIZE)
+#define MEMORY_AXI_ACPU_FIQ_CPU_INFO_SIZE	(24)
+
+#define MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_ADDR	(MEMORY_AXI_ACPU_FIQ_CPU_INFO_ADDR + MEMORY_AXI_ACPU_FIQ_CPU_INFO_SIZE)
+#define MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_SIZE	(20)
+
+#define MEMORY_FREQDUMP_ADDR			(MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_ADDR + MEMORY_AXI_ACPU_FIQ_DEBUG_INFO_SIZE)
+#define MEMORY_FREQDUMP_SIZE			(64)
+
+#define MEMORY_AXI_CCPU_LOG_ADDR		(MEMORY_FREQDUMP_ADDR + MEMORY_FREQDUMP_SIZE)
+#define MEMORY_AXI_CCPU_LOG_SIZE		(4)
+
+#define MEMORY_AXI_MCU_LOG_ADDR			(MEMORY_AXI_CCPU_LOG_ADDR + MEMORY_AXI_CCPU_LOG_SIZE)
+#define MEMORY_AXI_MCU_LOG_SIZE			(4)
+
+#define MEMORY_AXI_SEC_CORE_BOOT_ADDR		(MEMORY_AXI_MCU_LOG_ADDR + MEMORY_AXI_MCU_LOG_SIZE)
+#define MEMORY_AXI_SEC_CORE_BOOT_SIZE		(4)
+
+#define MEMORY_AXI_BBP_PS_VOTE_FLAG_ADDR	(MEMORY_AXI_SEC_CORE_BOOT_ADDR + MEMORY_AXI_SEC_CORE_BOOT_SIZE)
+#define MEMORY_AXI_BBP_PS_VOTE_FLAG_SIZE	(0x4)
+
+#define POLICY_AREA_RESERVED			(MEMORY_AXI_BBP_PS_VOTE_FLAG_ADDR + MEMORY_AXI_BBP_PS_VOTE_FLAG_SIZE)
+#define POLICY_AREA_RESERVED_SIZE		(12)
+
+#define DDR_POLICY_VALID_MAGIC			(POLICY_AREA_RESERVED + POLICY_AREA_RESERVED_SIZE)
+#define DDR_POLICY_VALID_MAGIC_SIZE		(4)
+
+#define DDR_POLICY_MAX_NUM			(DDR_POLICY_VALID_MAGIC + DDR_POLICY_VALID_MAGIC_SIZE)
+#define DDR_POLICY_MAX_NUM_SIZE			(4)
+
+#define DDR_POLICY_SUPPORT_NUM			(DDR_POLICY_MAX_NUM + DDR_POLICY_MAX_NUM_SIZE)
+#define DDR_POLICY_SUPPORT_NUM_SIZE		(4)
+
+#define DDR_POLICY_CUR_POLICY			(DDR_POLICY_SUPPORT_NUM + DDR_POLICY_SUPPORT_NUM_SIZE)
+#define DDR_POLICY_CUR_POLICY_SIZE		(4)
+
+#define ACPU_POLICY_VALID_MAGIC			(DDR_POLICY_CUR_POLICY + DDR_POLICY_CUR_POLICY_SIZE)
+#define ACPU_POLICY_VALID_MAGIC_SIZE		(4)
+
+#define ACPU_POLICY_MAX_NUM			(ACPU_POLICY_VALID_MAGIC + ACPU_POLICY_VALID_MAGIC_SIZE)
+#define ACPU_POLICY_MAX_NUM_SIZE		(4)
+
+#define ACPU_POLICY_SUPPORT_NUM			(ACPU_POLICY_MAX_NUM + ACPU_POLICY_MAX_NUM_SIZE)
+#define ACPU_POLICY_SUPPORT_NUM_SIZE		(4)
+
+#define ACPU_POLICY_CUR_POLICY			(ACPU_POLICY_SUPPORT_NUM + ACPU_POLICY_SUPPORT_NUM_SIZE)
+#define ACPU_POLICY_CUR_POLICY_SIZE		(4)
+
+#define LPDDR_OPTION_ADDR			(ACPU_POLICY_CUR_POLICY + ACPU_POLICY_CUR_POLICY_SIZE)
+#define LPDDR_OPTION_SIZE			(4)
+
+#define MEMORY_AXI_DDR_DDL_ADDR			(LPDDR_OPTION_ADDR + LPDDR_OPTION_SIZE)
+#define MEMORY_AXI_DDR_DDL_SIZE			(0x2BC)
+
+#define DDR_TEST_DFS_ADDR			(MEMORY_AXI_DDR_DDL_ADDR + MEMORY_AXI_DDR_DDL_SIZE)
+#define DDR_TEST_DFS_ADDR_SIZE			(4)
+
+#define DDR_TEST_DFS_TIMES_ADDR			(DDR_TEST_DFS_ADDR + DDR_TEST_DFS_ADDR_SIZE)
+#define DDR_TEST_DFS_TIMES_ADDR_SIZE		(4)
+
+#define DDR_TEST_QOS_ADDR			(DDR_TEST_DFS_TIMES_ADDR + DDR_TEST_DFS_TIMES_ADDR_SIZE)
+#define DDR_TEST_QOS_ADDR_SIZE			(4)
+
+#define DDR_TEST_FUN_ADDR			(DDR_TEST_QOS_ADDR + DDR_TEST_QOS_ADDR_SIZE)
+#define DDR_TEST_FUN_ADDR_SIZE			(4)
+
+#define BOARD_TYPE_ADDR				(DDR_TEST_FUN_ADDR + DDR_TEST_FUN_ADDR_SIZE)
+#define BOARD_ADDR_SIZE				(4)
+#define DDR_DFS_FREQ_ADDR			(BOARD_TYPE_ADDR + BOARD_ADDR_SIZE)
+#define DDR_DFS_FREQ_SIZE			(4)
+
+#define DDR_PASR_ADDR				(DDR_DFS_FREQ_ADDR + DDR_DFS_FREQ_SIZE)
+#define DDR_PASR_SIZE				(20)
+
+#define ACPU_DFS_FREQ_ADDR			(DDR_PASR_ADDR + DDR_PASR_SIZE)
+#define ACPU_DFS_FREQ_ADDR_SIZE			(12)
+
+#define ACPU_CHIP_MAX_FREQ			(ACPU_DFS_FREQ_ADDR + ACPU_DFS_FREQ_ADDR_SIZE)
+#define ACPU_CHIP_MAX_FREQ_SIZE			(4)
+
+#define MEMORY_MEDPLL_STATE_ADDR		(ACPU_CHIP_MAX_FREQ + ACPU_CHIP_MAX_FREQ_SIZE)
+#define MEMORY_MEDPLL_STATE_SIZE		(8)
+
+#define MEMORY_CCPU_LOAD_FLAG_ADDR		(MEMORY_MEDPLL_STATE_ADDR + MEMORY_MEDPLL_STATE_SIZE)
+#define MEMORY_CCPU_LOAD_FLAG_SIZE		(4)
+
+#define ACPU_CORE_BITS_ADDR			(MEMORY_CCPU_LOAD_FLAG_ADDR + MEMORY_CCPU_LOAD_FLAG_SIZE)
+#define ACPU_CORE_BITS_SIZE			(4)
+
+#define ACPU_CLUSTER_IDLE_ADDR			(ACPU_CORE_BITS_ADDR + ACPU_CORE_BITS_SIZE)
+#define ACPU_CLUSTER_IDLE_SIZE			(4)
+
+#define ACPU_A53_FLAGS_ADDR			(ACPU_CLUSTER_IDLE_ADDR + ACPU_CLUSTER_IDLE_SIZE)
+#define ACPU_A53_FLAGS_SIZE			(4)
+
+#define ACPU_POWER_STATE_QOS_ADDR		(ACPU_A53_FLAGS_ADDR + ACPU_A53_FLAGS_SIZE)
+#define ACPU_POWER_STATE_QOS_SIZE		(4)
+
+#define ACPU_UNLOCK_CORE_FLAGS_ADDR		(ACPU_POWER_STATE_QOS_ADDR + ACPU_POWER_STATE_QOS_SIZE)
+#define ACPU_UNLOCK_CORE_FLAGS_SIZE		(8)
+
+#define ACPU_SUBSYS_POWERDOWN_FLAGS_ADDR	(ACPU_UNLOCK_CORE_FLAGS_ADDR + ACPU_UNLOCK_CORE_FLAGS_SIZE)
+#define ACPU_SUBSYS_POWERDOWN_FLAGS_SIZE	(4)
+
+#define ACPU_CORE_POWERDOWN_FLAGS_ADDR		(ACPU_SUBSYS_POWERDOWN_FLAGS_ADDR + ACPU_SUBSYS_POWERDOWN_FLAGS_SIZE)
+#define ACPU_CORE_POWERDOWN_FLAGS_SIZE		(4)
+
+#define ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR	(ACPU_CORE_POWERDOWN_FLAGS_ADDR + ACPU_CORE_POWERDOWN_FLAGS_SIZE)
+#define ACPU_CLUSTER_POWERDOWN_FLAGS_SIZE	(4)
+
+#define ACPU_ARM64_FLAGA			(ACPU_CLUSTER_POWERDOWN_FLAGS_ADDR + ACPU_CLUSTER_POWERDOWN_FLAGS_SIZE)
+#define ACPU_ARM64_FLAGA_SIZE			(4)
+
+#define ACPU_ARM64_FLAGB			(ACPU_ARM64_FLAGA + ACPU_ARM64_FLAGA_SIZE)
+#define ACPU_ARM64_FLAGB_SIZE			(4)
+
+#define MCU_EXCEPTION_FLAGS_ADDR		(ACPU_ARM64_FLAGB + ACPU_ARM64_FLAGB_SIZE)
+#define MCU_EXCEPTION_FLAGS_SIZE		(4)
+
+#define ACPU_MASTER_CORE_STATE_ADDR		(MCU_EXCEPTION_FLAGS_ADDR + MCU_EXCEPTION_FLAGS_SIZE)
+#define ACPU_MASTER_CORE_STATE_SIZE		(4)
+
+#define PWRCTRL_AXI_RESERVED_ADDR		(ACPU_MASTER_CORE_STATE_ADDR + ACPU_MASTER_CORE_STATE_SIZE)
+
+#endif /* __HISI_SRAM_MAP_H__ */
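The ADDR/SIZE defines above are chained, so each section begins exactly where the previous one ends. A quick compile-time check of the top-level layout follows; the _Static_assert wrapper and include path are illustrative assumptions, not part of this patch.

#include <hisi_sram_map.h>

/* Illustrative only: the general-use section starts right after the 7KB PM
 * and 64KB TEE OS sections: 0xFFF80000 + 0x1C00 + 0x10000 = 0xFFF91C00. */
_Static_assert(SRAM_GENERAL_ADDR == 0xFFF91C00,
	       "general-use section base mismatch");
_Static_assert(SRAM_GENERAL_SIZE == 0x400,
	       "general-use section should be 1KB");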
diff --git a/plat/hisilicon/hikey/include/plat_macros.S b/plat/hisilicon/hikey/include/plat_macros.S
new file mode 100644
index 0000000..1ad217a
--- /dev/null
+++ b/plat/hisilicon/hikey/include/plat_macros.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <cci.h>
+#include <gic_v2.h>
+#include <hi6220.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
+		" Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+/* ---------------------------------------------
+ * The below macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL31.
+ * ---------------------------------------------
+ */
+.macro plat_crash_print_regs
+	mov_imm	x16, PLAT_ARM_GICD_BASE
+	mov_imm	x17, PLAT_ARM_GICC_BASE
+
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+2:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	1f
+	bl	asm_print_hex
+	adr	x4, spacer
+	bl	asm_print_str
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+	adr	x4, newline
+	bl	asm_print_str
+	b	2b
+1:
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE_OFFSET(	\
+			CCI400_SL_IFACE3_CLUSTER_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE_OFFSET(	\
+			CCI400_SL_IFACE4_CLUSTER_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
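The crash macro above has to run without a C runtime, which is why it is written in assembly. For readers following the flow, this is roughly the equivalent in C; a sketch only, using 32-bit reads instead of the 8-byte loads the assembly uses, and assuming the GICv2 offsets and the NOTICE/mmio helpers available elsewhere in the tree.

#include <debug.h>
#include <gic_v2.h>
#include <mmio.h>
#include <platform_def.h>

/* Illustrative only: dump GICC_HPPIR/AHPPIR/CTLR and the GICD_ISPENDR
 * range (offsets 0x200 - 0x27c from the distributor base). */
static void crash_dump_gic_regs(void)
{
	unsigned int offset;

	NOTICE("gicc_hppir:\t\t0x%x\n",
	       mmio_read_32(PLAT_ARM_GICC_BASE + GICC_HPPIR));
	NOTICE("gicc_ahppir:\t\t0x%x\n",
	       mmio_read_32(PLAT_ARM_GICC_BASE + GICC_AHPPIR));
	NOTICE("gicc_ctlr:\t\t0x%x\n",
	       mmio_read_32(PLAT_ARM_GICC_BASE + GICC_CTLR));

	for (offset = GICD_ISPENDR; offset < 0x280; offset += 4)
		NOTICE("0x%x:\t\t0x%x\n", offset,
		       mmio_read_32(PLAT_ARM_GICD_BASE + offset));
}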
diff --git a/plat/hisilicon/hikey/include/platform_def.h b/plat/hisilicon/hikey/include/platform_def.h
new file mode 100644
index 0000000..f4a3fd4
--- /dev/null
+++ b/plat/hisilicon/hikey/include/platform_def.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include "../hikey_def.h"
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define HIKEY_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+/*
+ * Generic platform constants
+ */
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE		0x800
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_CACHE_LINE_SIZE	64
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CORE_COUNT_PER_CLUSTER	4
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT *	\
+					 PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CORE_COUNT + \
+					 PLATFORM_CLUSTER_COUNT + 1)
+
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+
+#define MAX_IO_DEVICES			3
+#define MAX_IO_HANDLES			4
+/* eMMC RPMB and eMMC User Data */
+#define MAX_IO_BLOCK_DEVICES		2
+
+/* GIC related constants (no GICR in GIC-400) */
+#define PLAT_ARM_GICD_BASE		0xF6801000
+#define PLAT_ARM_GICC_BASE		0xF6802000
+#define PLAT_ARM_GICH_BASE		0xF6804000
+#define PLAT_ARM_GICV_BASE		0xF6806000
+
+
+/*
+ * Platform memory map related constants
+ */
+
+/*
+ * BL1 is stored in XG2RAM0_HIRQ, which is 784KB in size (0xF980_0000~0xF98C_4000).
+ */
+#define ONCHIPROM_PARAM_BASE		(XG2RAM0_BASE + 0x700)
+#define LOADER_RAM_BASE			(XG2RAM0_BASE + 0x800)
+#define BL1_XG2RAM0_OFFSET		0x1000
+
+/*
+ * BL1 specific defines.
+ *
+ * Both loader and BL1_RO region stay in SRAM since they are used to simulate
+ * ROM.
+ * Loader is used to switch Hi6220 SoC from 32-bit to 64-bit mode.
+ *
+ * ++++++++++  0xF980_0000
+ * + loader +
+ * ++++++++++  0xF980_1000
+ * + BL1_RO +
+ * ++++++++++  0xF981_0000
+ * + BL1_RW +
+ * ++++++++++  0xF989_8000
+ */
+#define BL1_RO_BASE			(XG2RAM0_BASE + BL1_XG2RAM0_OFFSET)
+#define BL1_RO_LIMIT			(XG2RAM0_BASE + 0x10000)
+#define BL1_RW_BASE			(BL1_RO_LIMIT)	/* 0xf981_0000 */
+#define BL1_RW_SIZE			(0x00088000)
+#define BL1_RW_LIMIT			(0xF9898000)
+
+/*
+ * BL2 specific defines.
+ */
+#define BL2_BASE			(BL1_RW_BASE + 0x8000)	/* 0xf981_8000 */
+#define BL2_LIMIT			(BL2_BASE + 0x40000)
+
+/*
+ * SCP_BL2 specific defines.
+ * On HiKey, SCP_BL2 is the MCU firmware. It is loaded into a temporary buffer
+ * at 0x0100_0000. BL2 then parses its sections and loads them into
+ * predefined separate buffers.
+ */
+#define SCP_BL2_BASE			(DDR_BASE + 0x01000000)
+#define SCP_BL2_LIMIT			(SCP_BL2_BASE + 0x00100000)
+#define SCP_BL2_SIZE			(SCP_BL2_LIMIT - SCP_BL2_BASE)
+
+/*
+ * BL31 specific defines.
+ */
+#define BL31_BASE			BL2_LIMIT /* 0xf985_8000 */
+#define BL31_LIMIT			0xF9898000
+
+/*
+ * BL3-2 specific defines.
+ */
+
+/*
+ * The TSP currently executes from the TZC-secured area of DRAM or SRAM.
+ */
+#define BL32_SRAM_BASE			BL31_LIMIT
+#define BL32_SRAM_LIMIT			(BL31_LIMIT + 0x80000) /* 512K */
+
+#define BL32_DRAM_BASE			DDR_SEC_BASE
+#define BL32_DRAM_LIMIT			(DDR_SEC_BASE + DDR_SEC_SIZE)
+
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+/* Load pageable part of OP-TEE at end of allocated DRAM space for BL32 */
+#define HIKEY_OPTEE_PAGEABLE_LOAD_BASE	(BL32_DRAM_LIMIT - HIKEY_OPTEE_PAGEABLE_LOAD_SIZE) /* 0x3FC0_0000 */
+#define HIKEY_OPTEE_PAGEABLE_LOAD_SIZE	0x400000 /* 4MB */
+#endif
+#endif
+
+#if (HIKEY_TSP_RAM_LOCATION_ID == HIKEY_DRAM_ID)
+#define TSP_SEC_MEM_BASE		BL32_DRAM_BASE
+#define TSP_SEC_MEM_SIZE		(BL32_DRAM_LIMIT - BL32_DRAM_BASE)
+#define BL32_BASE			BL32_DRAM_BASE
+#define BL32_LIMIT			BL32_DRAM_LIMIT
+#elif (HIKEY_TSP_RAM_LOCATION_ID == HIKEY_SRAM_ID)
+#define TSP_SEC_MEM_BASE		BL32_SRAM_BASE
+#define TSP_SEC_MEM_SIZE		(BL32_SRAM_LIMIT - BL32_SRAM_BASE)
+#define BL32_BASE			BL32_SRAM_BASE
+#define BL32_LIMIT			BL32_SRAM_LIMIT
+#else
+#error "Currently unsupported HIKEY_TSP_LOCATION_ID value"
+#endif
+
+/* BL32 is mandatory in AArch32 */
+#ifndef AARCH32
+#ifdef SPD_none
+#undef BL32_BASE
+#endif /* SPD_none */
+#endif
+
+#define NS_BL1U_BASE			(BL2_BASE)
+#define NS_BL1U_SIZE			(0x00010000)
+#define NS_BL1U_LIMIT			(NS_BL1U_BASE + NS_BL1U_SIZE)
+
+/*
+ * Platform specific page table and MMU setup constants
+ */
+#define ADDR_SPACE_SIZE			(1ull << 32)
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32)
+#define MAX_XLAT_TABLES			3
+#endif
+
+#ifdef IMAGE_BL31
+#define MAX_XLAT_TABLES			4
+#endif
+
+#ifdef IMAGE_BL2
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#define MAX_XLAT_TABLES			4
+#else
+#define MAX_XLAT_TABLES			3
+#endif
+#else
+#define MAX_XLAT_TABLES			3
+#endif
+#endif
+
+#define MAX_MMAP_REGIONS		16
+
+#define HIKEY_NS_IMAGE_OFFSET		(DDR_BASE + 0x35000000)
+
+/*
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different CPUs/clusters and
+ * get written while being protected by different locks, causing corruption of
+ * a valid mailbox address.
+ */
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
+
+#endif /* __PLATFORM_DEF_H__ */
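Several of the constants above are derived from one another, so they can be cross-checked at build time. A hedged sketch follows, assuming XG2RAM0_BASE is 0xF980_0000 as the BL1 comment states (its definition lives in hikey_def.h, outside this hunk); the _Static_assert form is illustrative rather than the convention this tree uses.

#include <platform_def.h>

/* Illustrative only: sanity-check the fixed SRAM layout described above. */
_Static_assert(BL1_RW_BASE + BL1_RW_SIZE == BL1_RW_LIMIT,
	       "BL1 RW region should end at 0xF9898000");
_Static_assert(BL31_LIMIT == BL1_RW_LIMIT,
	       "BL31 should not run past the top of the BL1 RW region");
_Static_assert(CACHE_WRITEBACK_GRANULE == PLATFORM_CACHE_LINE_SIZE,
	       "mailbox alignment granule should match the cache line size");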
diff --git a/plat/hisilicon/hikey/platform.mk b/plat/hisilicon/hikey/platform.mk
new file mode 100644
index 0000000..26218a4
--- /dev/null
+++ b/plat/hisilicon/hikey/platform.mk
@@ -0,0 +1,121 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Enable version2 of image loading
+LOAD_IMAGE_V2	:=	1
+
+# On Hikey, the TSP can execute from TZC secure area in DRAM (default)
+# or SRAM.
+HIKEY_TSP_RAM_LOCATION	:=	dram
+ifeq (${HIKEY_TSP_RAM_LOCATION}, dram)
+  HIKEY_TSP_RAM_LOCATION_ID := HIKEY_DRAM_ID
+else ifeq (${HIKEY_TSP_RAM_LOCATION}, sram)
+  HIKEY_TSP_RAM_LOCATION_ID := HIKEY_SRAM_ID
+else
+  $(error "Currently unsupported HIKEY_TSP_RAM_LOCATION value")
+endif
+
+CONSOLE_BASE			:=	PL011_UART3_BASE
+CRASH_CONSOLE_BASE		:=	PL011_UART3_BASE
+PLAT_PARTITION_MAX_ENTRIES	:=	12
+PLAT_PL061_MAX_GPIOS		:=	160
+COLD_BOOT_SINGLE_CPU		:=	1
+PROGRAMMABLE_RESET_ADDRESS	:=	1
+
+# Process flags
+$(eval $(call add_define,HIKEY_TSP_RAM_LOCATION_ID))
+$(eval $(call add_define,CONSOLE_BASE))
+$(eval $(call add_define,CRASH_CONSOLE_BASE))
+$(eval $(call add_define,PLAT_PL061_MAX_GPIOS))
+$(eval $(call add_define,PLAT_PARTITION_MAX_ENTRIES))
+
+# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
+# in the FIP if the platform requires.
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
+
+ENABLE_PLAT_COMPAT	:=	0
+
+USE_COHERENT_MEM	:=	1
+
+PLAT_INCLUDES		:=	-Iinclude/common/tbbr			\
+				-Iinclude/drivers/synopsys		\
+				-Iplat/hisilicon/hikey/include
+
+PLAT_BL_COMMON_SOURCES	:=	drivers/arm/pl011/pl011_console.S	\
+				lib/aarch64/xlat_tables.c		\
+				plat/hisilicon/hikey/aarch64/hikey_common.c
+
+BL1_SOURCES		+=	bl1/tbbr/tbbr_img_desc.c		\
+				drivers/arm/pl061/pl061_gpio.c		\
+				drivers/arm/sp804/sp804_delay_timer.c	\
+				drivers/delay_timer/delay_timer.c	\
+				drivers/gpio/gpio.c			\
+				drivers/io/io_block.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_storage.c			\
+				drivers/emmc/emmc.c			\
+				drivers/synopsys/emmc/dw_mmc.c		\
+				lib/cpus/aarch64/cortex_a53.S		\
+				plat/hisilicon/hikey/aarch64/hikey_helpers.S \
+				plat/hisilicon/hikey/hikey_bl1_setup.c	\
+				plat/hisilicon/hikey/hikey_io_storage.c
+
+BL2_SOURCES		+=	drivers/arm/sp804/sp804_delay_timer.c	\
+				drivers/delay_timer/delay_timer.c	\
+				drivers/io/io_block.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_storage.c			\
+				drivers/emmc/emmc.c			\
+				drivers/synopsys/emmc/dw_mmc.c		\
+				plat/hisilicon/hikey/aarch64/hikey_helpers.S \
+				plat/hisilicon/hikey/hikey_bl2_setup.c	\
+				plat/hisilicon/hikey/hikey_ddr.c	\
+				plat/hisilicon/hikey/hikey_io_storage.c	\
+				plat/hisilicon/hikey/hisi_dvfs.c	\
+				plat/hisilicon/hikey/hisi_mcu.c
+
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES		+=	plat/hisilicon/hikey/hikey_bl2_mem_params_desc.c \
+				plat/hisilicon/hikey/hikey_image_load.c \
+				common/desc_image_load.c
+
+ifeq (${SPD},opteed)
+BL2_SOURCES		+=	lib/optee/optee_utils.c
+endif
+endif
+
+HIKEY_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				plat/common/plat_gicv2.c
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c			\
+				drivers/arm/sp804/sp804_delay_timer.c	\
+				drivers/delay_timer/delay_timer.c	\
+				lib/cpus/aarch64/cortex_a53.S		\
+				plat/common/aarch64/plat_psci_common.c	\
+				plat/hisilicon/hikey/aarch64/hikey_helpers.S \
+				plat/hisilicon/hikey/hikey_bl31_setup.c	\
+				plat/hisilicon/hikey/hikey_pm.c		\
+				plat/hisilicon/hikey/hikey_topology.c	\
+				plat/hisilicon/hikey/hisi_ipc.c		\
+				plat/hisilicon/hikey/hisi_pwrc.c	\
+				plat/hisilicon/hikey/hisi_pwrc_sram.S	\
+				${HIKEY_GIC_SOURCES}
+ifeq (${ENABLE_PMF}, 1)
+BL31_SOURCES		+=	plat/hisilicon/hikey/hisi_sip_svc.c			\
+				lib/pmf/pmf_smc.c
+endif
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_836870		:=	1
+ERRATA_A53_843419		:=	1
+ERRATA_A53_855873		:=	1
diff --git a/plat/hisilicon/hikey960/aarch64/hikey960_common.c b/plat/hisilicon/hikey960/aarch64/hikey960_common.c
new file mode 100644
index 0000000..b1020a6
--- /dev/null
+++ b/plat/hisilicon/hikey960/aarch64/hikey960_common.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_tables.h>
+
+#include "../hikey960_def.h"
+#include "../hikey960_private.h"
+
+#define MAP_DDR		MAP_REGION_FLAT(DDR_BASE,			\
+					DDR_SIZE,			\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_DEVICE	MAP_REGION_FLAT(DEVICE_BASE,			\
+					DEVICE_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#define MAP_BL1_RW	MAP_REGION_FLAT(BL1_RW_BASE,			\
+					BL1_RW_LIMIT - BL1_RW_BASE,	\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_UFS_DATA	MAP_REGION_FLAT(HIKEY960_UFS_DATA_BASE,		\
+					HIKEY960_UFS_DATA_SIZE,		\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_UFS_DESC	MAP_REGION_FLAT(HIKEY960_UFS_DESC_BASE,		\
+					HIKEY960_UFS_DESC_SIZE,		\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_TSP_MEM	MAP_REGION_FLAT(TSP_SEC_MEM_BASE,		\
+					TSP_SEC_MEM_SIZE,		\
+					MT_MEMORY | MT_RW | MT_SECURE)
+
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#define MAP_OPTEE_PAGEABLE	MAP_REGION_FLAT(		\
+					HIKEY960_OPTEE_PAGEABLE_LOAD_BASE,	\
+					HIKEY960_OPTEE_PAGEABLE_LOAD_SIZE,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
+#endif
+#endif
+
+/*
+ * Table of regions for different BL stages to map using the MMU.
+ * This doesn't include Trusted RAM as the 'mem_layout' argument passed to
+ * hikey960_init_mmu_elx() will give the available subset of that,
+ */
+#ifdef IMAGE_BL1
+static const mmap_region_t hikey960_mmap[] = {
+	MAP_UFS_DATA,
+	MAP_BL1_RW,
+	MAP_UFS_DESC,
+	MAP_DEVICE,
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL2
+static const mmap_region_t hikey960_mmap[] = {
+	MAP_DDR,
+	MAP_DEVICE,
+	MAP_TSP_MEM,
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+	MAP_OPTEE_PAGEABLE,
+#endif
+#endif
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL31
+static const mmap_region_t hikey960_mmap[] = {
+	MAP_DEVICE,
+	MAP_TSP_MEM,
+	{0}
+};
+#endif
+
+#ifdef IMAGE_BL32
+static const mmap_region_t hikey960_mmap[] = {
+	MAP_DEVICE,
+	MAP_DDR,
+	{0}
+};
+#endif
+
+/*
+ * Macro generating the code for the function that sets up the page tables as
+ * per the platform memory map and initializes the MMU for the given exception
+ * level.
+ */
+#define HIKEY960_CONFIGURE_MMU_EL(_el)					\
+	void hikey960_init_mmu_el##_el(unsigned long total_base,	\
+				unsigned long total_size,		\
+				unsigned long ro_start,			\
+				unsigned long ro_limit,			\
+				unsigned long coh_start,		\
+				unsigned long coh_limit)		\
+	{								\
+	       mmap_add_region(total_base, total_base,			\
+			       total_size,				\
+			       MT_MEMORY | MT_RW | MT_SECURE);		\
+	       mmap_add_region(ro_start, ro_start,			\
+			       ro_limit - ro_start,			\
+			       MT_MEMORY | MT_RO | MT_SECURE);		\
+	       mmap_add_region(coh_start, coh_start,			\
+			       coh_limit - coh_start,			\
+			       MT_DEVICE | MT_RW | MT_SECURE);		\
+	       mmap_add(hikey960_mmap);					\
+	       init_xlat_tables();					\
+									\
+	       enable_mmu_el##_el(0);					\
+	}
+
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+HIKEY960_CONFIGURE_MMU_EL(1)
+HIKEY960_CONFIGURE_MMU_EL(3)
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return NS_BL1U_BASE;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 1920000;
+}
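HIKEY960_CONFIGURE_MMU_EL(3) above expands to a function named hikey960_init_mmu_el3 that adds the three image regions plus the static hikey960_mmap table, builds the translation tables, and enables the EL3 MMU. A hypothetical call site is sketched below; the parameter names are placeholders for the image layout symbols a BL31 setup file would pass in, not names taken from this patch.

/* Illustrative only: how a BL31 setup routine might use the generated
 * EL3 variant.  All parameters are placeholders. */
static void enable_bl31_mmu_sketch(unsigned long total_base,
				   unsigned long total_size,
				   unsigned long ro_start,
				   unsigned long ro_limit,
				   unsigned long coh_start,
				   unsigned long coh_limit)
{
	hikey960_init_mmu_el3(total_base, total_size,
			      ro_start, ro_limit,
			      coh_start, coh_limit);
}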
diff --git a/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S b/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S
new file mode 100644
index 0000000..c88f68e
--- /dev/null
+++ b/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_a53.h>
+#include <cortex_a73.h>
+#include "../hikey960_def.h"
+
+	.globl	plat_my_core_pos
+	.globl	platform_mem_init
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_report_exception
+	.globl	plat_reset_handler
+	.globl	set_retention_ticks
+	.globl	clr_retention_ticks
+	.globl	clr_ex
+	.globl	nop
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 * void platform_mem_init(void);
+	 *
+	 * We don't need to carry out any memory initialization
+	 * on HiKey960. The Secure RAM is accessible straight away.
+	 * -----------------------------------------------------
+	 */
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0, x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, CRASH_CONSOLE_BASE
+	mov_imm	x1, PL011_UART_CLK_IN_HZ
+	mov_imm	x2, PL011_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, CRASH_CONSOLE_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------
+	 * void plat_report_exception(unsigned int type)
+	 * Function to report an unhandled exception
+	 * with platform-specific means.
+	 * On the HiKey960 platform, it updates the LEDs
+	 * to indicate where we are
+	 * ---------------------------------------------
+	 */
+func plat_report_exception
+	mov	x8, x30
+
+	/* Turn on LED according to x0 (0 -- f) */
+	ldr	x2, =0xf7020000
+	and	x1, x0, #1
+	str	w1, [x2, #4]
+	and	x1, x0, #2
+	str	w1, [x2, #8]
+	and	x1, x0, #4
+	str	w1, [x2, #16]
+	and	x1, x0, #8
+	str	w1, [x2, #32]
+
+	mrs	x2, currentel
+	and	x2, x2, #0x0c
+	/* Check EL1 */
+	cmp	x2, #0x04
+	beq	plat_report_el1
+
+	adr	x4, plat_err_str
+	bl	asm_print_str
+
+	adr	x4, esr_el3_str
+	bl	asm_print_str
+
+	mrs	x4, esr_el3
+	bl	asm_print_hex
+
+	adr	x4, elr_el3_str
+	bl	asm_print_str
+
+	mrs	x4, elr_el3
+	bl	asm_print_hex
+	b	plat_report_end
+
+plat_report_el1:
+	adr	x4, plat_err_str
+	bl	asm_print_str
+
+	adr	x4, esr_el1_str
+	bl	asm_print_str
+
+	mrs	x4, esr_el1
+	bl	asm_print_hex
+
+	adr	x4, elr_el1_str
+	bl	asm_print_str
+
+	mrs	x4, elr_el1
+	bl	asm_print_hex
+plat_report_end:
+	mov	x30, x8
+	ret
+endfunc plat_report_exception
+
+	/* -----------------------------------------------------
+	 * void plat_reset_handler(void);
+	 * -----------------------------------------------------
+	 */
+func plat_reset_handler
+	ret
+endfunc plat_reset_handler
+
+	/* -----------------------------------------------------
+	 * void set_retention_ticks(unsigned int val);
+	 * Clobber list : x0
+	 * -----------------------------------------------------
+	 */
+func set_retention_ticks
+	mrs	x0, CORTEX_A53_ECTLR_EL1
+	bic	x0, x0, #CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK
+	orr	x0, x0, #RETENTION_ENTRY_TICKS_8
+	msr	CORTEX_A53_ECTLR_EL1, x0
+	isb
+	dsb	sy
+	ret
+endfunc set_retention_ticks
+
+	/* -----------------------------------------------------
+	 * void clr_retention_ticks(unsigned int val);
+	 * Clobber list : x0
+	 * -----------------------------------------------------
+	 */
+func clr_retention_ticks
+	mrs	x0, CORTEX_A53_ECTLR_EL1
+	bic	x0, x0, #CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK
+	msr	CORTEX_A53_ECTLR_EL1, x0
+	isb
+	dsb	sy
+	ret
+endfunc clr_retention_ticks
+
+	/* -----------------------------------------------------
+	 * void clr_ex(void);
+	 * -----------------------------------------------------
+	 */
+func clr_ex
+	clrex
+	ret
+endfunc clr_ex
+
+	/* -----------------------------------------------------
+	 * void nop(void);
+	 * -----------------------------------------------------
+	 */
+func nop
+	nop
+	ret
+endfunc nop
+
+.section .rodata.rev_err_str, "aS"
+plat_err_str:
+	.asciz "\nPlatform exception reporting:"
+esr_el3_str:
+	.asciz "\nESR_EL3: "
+elr_el3_str:
+	.asciz "\nELR_EL3: "
+esr_el1_str:
+	.asciz "\nESR_EL1: "
+elr_el1_str:
+	.asciz "\nELR_EL1: "
diff --git a/plat/hisilicon/hikey960/drivers/ipc/hisi_ipc.c b/plat/hisilicon/hikey960/drivers/ipc/hisi_ipc.c
new file mode 100644
index 0000000..4c664d1
--- /dev/null
+++ b/plat/hisilicon/hikey960/drivers/ipc/hisi_ipc.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <hi3660.h>
+#include <hisi_ipc.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+
+#include "../../hikey960_private.h"
+
+#define IPC_MBX_SOURCE_REG(m)		(IPC_BASE + ((m) << 6))
+#define IPC_MBX_DSET_REG(m)		(IPC_BASE + ((m) << 6) + 0x04)
+#define IPC_MBX_DCLEAR_REG(m)		(IPC_BASE + ((m) << 6) + 0x08)
+#define IPC_MBX_DSTATUS_REG(m)		(IPC_BASE + ((m) << 6) + 0x0C)
+#define IPC_MBX_MODE_REG(m)		(IPC_BASE + ((m) << 6) + 0x10)
+#define IPC_MBX_IMASK_REG(m)		(IPC_BASE + ((m) << 6) + 0x14)
+#define IPC_MBX_ICLR_REG(m)		(IPC_BASE + ((m) << 6) + 0x18)
+#define IPC_MBX_SEND_REG(m)		(IPC_BASE + ((m) << 6) + 0x1C)
+#define IPC_MBX_DATA_REG(m, d)		(IPC_BASE + ((m) << 6) + 0x20 + \
+					 ((d) * 4))
+#define IPC_CPU_IMST_REG(m)		(IPC_BASE + ((m) << 3))
+#define IPC_LOCK_REG			(IPC_BASE + 0xA00)
+#define IPC_ACK_BIT_SHIFT		(1 << 7)
+#define IPC_UNLOCK_VALUE		(0x1ACCE551)
+
+/*********************************************************
+ * bit[31:24]: 0 ~ AP
+ * bit[23:16]: 0x1 ~ A15, 0x2 ~ A7
+ * bit[15:8]:  0 ~ ON, 1 ~ OFF
+ * bit[7:0]:   0x3, cpu power mode
+ *********************************************************/
+#define IPC_CMD_TYPE(src_obj, cluster_obj, is_off, mode) \
+	((src_obj << 24) | (((cluster_obj) + 1) << 16) | (is_off << 8) | (mode))
+
+/*********************************************************
+ * bit[15:8]: 0 ~ no idle, 1 ~ idle
+ * bit[7:0]:  cpux
+ *********************************************************/
+
+#define IPC_CMD_PARA(is_idle, cpu) \
+	((is_idle << 8) | (cpu))
+
+#define IPC_STATE_IDLE			0x10
+
+enum src_id {
+	SRC_IDLE = 0,
+	SRC_A15 = 1 << 0,
+	SRC_A7 = 1 << 1,
+	SRC_IOM3 = 1 << 2,
+	SRC_LPM3 = 1 << 3
+};
+
+/* LPM3's mailboxes are 13~17 */
+enum lpm3_mbox_id {
+	LPM3_MBX0 = 13,
+	LPM3_MBX1,
+	LPM3_MBX2,
+	LPM3_MBX3,
+	LPM3_MBX4,
+};
+
+static void cpu_relax(void)
+{
+	volatile int i;
+
+	for (i = 0; i < 10; i++)
+		nop();
+}
+
+static inline void
+hisi_ipc_clear_ack(enum src_id source, enum lpm3_mbox_id mbox)
+{
+	unsigned int int_status = 0;
+
+	do {
+		int_status = mmio_read_32(IPC_MBX_MODE_REG(mbox));
+		int_status &= 0xF0;
+		cpu_relax();
+	} while (int_status != IPC_ACK_BIT_SHIFT);
+
+	mmio_write_32(IPC_MBX_ICLR_REG(mbox), source);
+}
+
+static void
+hisi_ipc_send_cmd_with_ack(enum src_id source, enum lpm3_mbox_id mbox,
+			   unsigned int cmdtype, unsigned int cmdpara)
+{
+	unsigned int regval;
+	unsigned int mask;
+	unsigned int state;
+
+	mmio_write_32(IPC_LOCK_REG, IPC_UNLOCK_VALUE);
+	/* wait for idle and occupy */
+	do {
+		state = mmio_read_32(IPC_MBX_MODE_REG(mbox));
+		if (state == IPC_STATE_IDLE) {
+			mmio_write_32(IPC_MBX_SOURCE_REG(mbox), source);
+			regval = mmio_read_32(IPC_MBX_SOURCE_REG(mbox));
+			if (regval == source)
+				break;
+		}
+		cpu_relax();
+
+	} while (1);
+
+	/* auto answer */
+	mmio_write_32(IPC_MBX_MODE_REG(mbox), 0x1);
+
+	mask = (~((int)source | SRC_LPM3) & 0x3F);
+	/* mask the other cpus */
+	mmio_write_32(IPC_MBX_IMASK_REG(mbox), mask);
+	/* set data */
+	mmio_write_32(IPC_MBX_DATA_REG(mbox, 0), cmdtype);
+	mmio_write_32(IPC_MBX_DATA_REG(mbox, 1), cmdpara);
+	/* send cmd */
+	mmio_write_32(IPC_MBX_SEND_REG(mbox), source);
+	/* wait ack and clear */
+	hisi_ipc_clear_ack(source, mbox);
+
+	/* release mailbox */
+	mmio_write_32(IPC_MBX_SOURCE_REG(mbox), source);
+}
+
+void hisi_ipc_pm_on_off(unsigned int core, unsigned int cluster,
+			enum pm_mode mode)
+{
+	unsigned int cmdtype = 0;
+	unsigned int cmdpara = 0;
+	enum src_id source = SRC_IDLE;
+	enum lpm3_mbox_id mailbox = (enum lpm3_mbox_id)(LPM3_MBX0 + core);
+
+	cmdtype = IPC_CMD_TYPE(0, cluster, mode, 0x3);
+	cmdpara = IPC_CMD_PARA(0, core);
+	source = cluster ? SRC_A7 : SRC_A15;
+	hisi_ipc_send_cmd_with_ack(source, mailbox, cmdtype, cmdpara);
+}
+
+void hisi_ipc_pm_suspend(unsigned int core, unsigned int cluster,
+			 unsigned int affinity_level)
+{
+	unsigned int cmdtype = 0;
+	unsigned int cmdpara = 0;
+	enum src_id source = SRC_IDLE;
+	enum lpm3_mbox_id mailbox = (enum lpm3_mbox_id)(LPM3_MBX0 + core);
+
+	if (affinity_level == 0x3)
+		cmdtype = IPC_CMD_TYPE(0, -1, 0x1, 0x3 + affinity_level);
+	else
+		cmdtype = IPC_CMD_TYPE(0, cluster, 0x1, 0x3 + affinity_level);
+
+	cmdpara = IPC_CMD_PARA(1, core);
+	source = cluster ? SRC_A7 : SRC_A15;
+	hisi_ipc_send_cmd_with_ack(source, mailbox, cmdtype, cmdpara);
+}
+
+void hisi_ipc_psci_system_off(unsigned int core, unsigned int cluster)
+{
+	unsigned int cmdtype = 0;
+	unsigned int cmdpara = 0;
+	enum src_id source = SRC_IDLE;
+	enum lpm3_mbox_id mailbox = (enum lpm3_mbox_id)(LPM3_MBX0 + core);
+
+	cmdtype = IPC_CMD_TYPE(0, (0x10 - 1), 0x1, 0x0);
+	cmdpara = IPC_CMD_PARA(0, 0);
+	source = cluster ? SRC_A7 : SRC_A15;
+	hisi_ipc_send_cmd_with_ack(source, mailbox, cmdtype, cmdpara);
+}
+
+void hisi_ipc_psci_system_reset(unsigned int core, unsigned int cluster,
+				unsigned int cmd_id)
+{
+	unsigned int cmdtype = 0;
+	unsigned int cmdpara = 0;
+	enum src_id source = SRC_IDLE;
+	enum lpm3_mbox_id mailbox = (enum lpm3_mbox_id)(LPM3_MBX0 + core);
+
+	cmdtype = IPC_CMD_TYPE(0, (0x10 - 1), 0x0, 0x0);
+	cmdpara = cmd_id;
+	source = cluster ? SRC_A7 : SRC_A15;
+	hisi_ipc_send_cmd_with_ack(source, mailbox, cmdtype, cmdpara);
+}
+
+int hisi_ipc_init(void)
+{
+	int ret = 0;
+	enum lpm3_mbox_id  i = LPM3_MBX0;
+
+	mmio_write_32(IPC_LOCK_REG, IPC_UNLOCK_VALUE);
+	for (i = LPM3_MBX0; i <= LPM3_MBX4; i++) {
+		mmio_write_32(IPC_MBX_MODE_REG(i), 1);
+		mmio_write_32(IPC_MBX_IMASK_REG(i),
+			      ((int)SRC_IOM3 | (int)SRC_A15 | (int)SRC_A7));
+		mmio_write_32(IPC_MBX_ICLR_REG(i), SRC_A7);
+	}
+
+	return ret;
+}
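The bit-layout comments above make the command words easy to verify by hand. Here is a worked expansion (not code from this patch), assuming the "off" mode value is 1 as in the HiKey hisi_ipc.h added earlier in this patch.

/* Illustrative only: powering off core 2 of cluster 1 (the A7 cluster per
 * the comment above) via hisi_ipc_pm_on_off(2, 1, off) encodes as:
 *
 *   IPC_CMD_TYPE(0, 1, 1, 0x3) = (0 << 24) | ((1 + 1) << 16) | (1 << 8) | 0x3
 *                              = 0x00020103
 *   IPC_CMD_PARA(0, 2)         = (0 << 8) | 2 = 0x00000002
 *
 * and the command is sent on mailbox LPM3_MBX0 + 2 = 15 with source SRC_A7.
 */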
diff --git a/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.c b/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.c
new file mode 100644
index 0000000..9fdc3e7
--- /dev/null
+++ b/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <../hikey960_def.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <hisi_ipc.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+#include "hisi_pwrc.h"
+
+
+/* resource lock api */
+#define RES0_LOCK_BASE		(SOC_PCTRL_RESOURCE0_LOCK_ADDR(PCTRL_BASE))
+#define RES1_LOCK_BASE		(SOC_PCTRL_RESOURCE1_LOCK_ADDR(PCTRL_BASE))
+#define RES2_LOCK_BASE		(SOC_PCTRL_RESOURCE2_LOCK_ADDR(PCTRL_BASE))
+
+#define LOCK_BIT			(0x1 << 28)
+#define LOCK_ID_MASK			(0x7 << 29)
+#define CPUIDLE_LOCK_ID(core)		(0x6 - (core))
+#define LOCK_UNLOCK_OFFSET		0x4
+#define LOCK_STAT_OFFSET		0x8
+
+#define CLUSTER0_CPUS_ONLINE_MASK	(0xF << 16)
+#define	CLUSTER1_CPUS_ONLINE_MASK	(0xF << 20)
+
+/* cpu hotplug flag api */
+#define SCTRL_BASE			(SOC_ACPU_SCTRL_BASE_ADDR)
+#define REG_SCBAKDATA3_OFFSET		(SOC_SCTRL_SCBAKDATA3_ADDR(SCTRL_BASE))
+#define REG_SCBAKDATA8_OFFSET		(SOC_SCTRL_SCBAKDATA8_ADDR(SCTRL_BASE))
+#define REG_SCBAKDATA9_OFFSET		(SOC_SCTRL_SCBAKDATA9_ADDR(SCTRL_BASE))
+
+#define CPUIDLE_FLAG_REG(cluster) \
+			(((cluster) == 0) ? REG_SCBAKDATA8_OFFSET : \
+			 REG_SCBAKDATA9_OFFSET)
+#define CLUSTER_IDLE_BIT				BIT(8)
+#define CLUSTER_IDLE_MASK		(CLUSTER_IDLE_BIT | 0x0F)
+
+#define AP_SUSPEND_FLAG			(1 << 16)
+
+#define CLUSTER_PWDN_IDLE		(0<<28)
+#define CLUSTER_PWDN_HOTPLUG		(1<<28)
+#define CLUSTER_PWDN_SR			(2<<28)
+
+#define CLUSTER0_PDC_OFFSET			0x260
+#define CLUSTER1_PDC_OFFSET			0x300
+
+#define PDC_EN_OFFSET				0x0
+#define PDC_COREPWRINTEN_OFFSET		0x4
+#define PDC_COREPWRINTSTAT_OFFSET	0x8
+#define PDC_COREGICMASK_OFFSET		0xc
+#define PDC_COREPOWERUP_OFFSET		0x10
+#define PDC_COREPOWERDN_OFFSET		0x14
+#define PDC_COREPOWERSTAT_OFFSET	0x18
+
+#define PDC_COREPWRSTAT_MASK   (0XFFFF)
+
+enum pdc_gic_mask {
+	PDC_MASK_GIC_WAKE_IRQ,
+	PDC_UNMASK_GIC_WAKE_IRQ
+};
+
+enum pdc_finish_int_mask {
+	PDC_DISABLE_FINISH_INT,
+	PDC_ENABLE_FINISH_INT
+};
+
+static void hisi_resource_lock(unsigned int lockid, unsigned int offset)
+{
+	unsigned int lock_id = (lockid << 29);
+	unsigned int lock_val =  lock_id | LOCK_BIT;
+	unsigned int lock_state;
+
+	do {
+		mmio_write_32(offset, lock_val);
+		lock_state = mmio_read_32(LOCK_STAT_OFFSET + (uintptr_t)offset);
+	} while ((lock_state & LOCK_ID_MASK) != lock_id);
+}
+
+static void hisi_resource_unlock(unsigned int lockid, unsigned int offset)
+{
+	unsigned int lock_val = (lockid << 29) | LOCK_BIT;
+
+	mmio_write_32((LOCK_UNLOCK_OFFSET + (uintptr_t)offset), lock_val);
+}
+
+
+static void hisi_cpuhotplug_lock(unsigned int cluster, unsigned int core)
+{
+	unsigned int lock_id;
+
+	lock_id = (cluster << 2) + core;
+
+	hisi_resource_lock(lock_id, RES2_LOCK_BASE);
+}
+
+static void hisi_cpuhotplug_unlock(unsigned int cluster, unsigned int core)
+{
+	unsigned int lock_id;
+
+	lock_id = (cluster << 2) + core;
+
+	hisi_resource_unlock(lock_id, RES2_LOCK_BASE);
+}
+
+/* get the resource lock */
+void hisi_cpuidle_lock(unsigned int cluster, unsigned int core)
+{
+	unsigned int offset = (cluster == 0 ? RES0_LOCK_BASE : RES1_LOCK_BASE);
+
+	hisi_resource_lock(CPUIDLE_LOCK_ID(core), offset);
+}
+
+/* release the resource lock */
+void hisi_cpuidle_unlock(unsigned int cluster, unsigned int core)
+{
+	unsigned int offset = (cluster == 0 ? RES0_LOCK_BASE : RES1_LOCK_BASE);
+
+	hisi_resource_unlock(CPUIDLE_LOCK_ID(core), offset);
+}
+
+unsigned int hisi_get_cpuidle_flag(unsigned int cluster)
+{
+	unsigned int val;
+
+	val = mmio_read_32(CPUIDLE_FLAG_REG(cluster));
+	val &= 0xF;
+
+	return val;
+}
+
+void hisi_set_cpuidle_flag(unsigned int cluster, unsigned int core)
+{
+	mmio_setbits_32(CPUIDLE_FLAG_REG(cluster), BIT(core));
+}
+
+void hisi_clear_cpuidle_flag(unsigned int cluster, unsigned int core)
+{
+	mmio_clrbits_32(CPUIDLE_FLAG_REG(cluster), BIT(core));
+
+}
+
+int hisi_test_ap_suspend_flag(unsigned int cluster)
+{
+	unsigned int val;
+
+	val = mmio_read_32(CPUIDLE_FLAG_REG(cluster));
+	val &= AP_SUSPEND_FLAG;
+	return !!val;
+}
+
+void hisi_set_cluster_pwdn_flag(unsigned int cluster,
+				unsigned int core, unsigned int value)
+{
+	unsigned int val;
+
+	hisi_cpuhotplug_lock(cluster, core);
+
+	val = mmio_read_32(REG_SCBAKDATA3_OFFSET);
+	val = (value << (cluster << 1)) | (val & 0xFFFFFFF);
+	mmio_write_32(REG_SCBAKDATA3_OFFSET, val);
+
+	hisi_cpuhotplug_unlock(cluster, core);
+}
+
+unsigned int hisi_get_cpu_boot_flag(unsigned int cluster, unsigned int core)
+{
+	unsigned int val;
+
+	hisi_cpuhotplug_lock(cluster, core);
+	val = mmio_read_32(REG_SCBAKDATA3_OFFSET);
+	val = val >> (16 + (cluster << 2));
+	val &= 0xF;
+	hisi_cpuhotplug_unlock(cluster, core);
+
+	return val;
+}
+
+unsigned int hisi_test_cpu_down(unsigned int cluster, unsigned int core)
+{
+	unsigned int val;
+
+	hisi_cpuhotplug_lock(cluster, core);
+	val = mmio_read_32(REG_SCBAKDATA3_OFFSET);
+	val = val >> (16 + (cluster << 2));
+	val &= 0xF;
+	hisi_cpuhotplug_unlock(cluster, core);
+
+	if (val)
+		return 0;
+	else
+		return 1;
+}
+
+void hisi_set_cpu_boot_flag(unsigned int cluster, unsigned int core)
+{
+	unsigned int flag = BIT((cluster<<2) + core + 16);
+
+	hisi_cpuhotplug_lock(cluster, core);
+
+	mmio_setbits_32(REG_SCBAKDATA3_OFFSET, flag);
+
+	hisi_cpuhotplug_unlock(cluster, core);
+}
+
+void hisi_clear_cpu_boot_flag(unsigned int cluster, unsigned int core)
+{
+	unsigned int flag = BIT((cluster<<2) + core + 16);
+
+	hisi_cpuhotplug_lock(cluster, core);
+
+	mmio_clrbits_32(REG_SCBAKDATA3_OFFSET, flag);
+
+	hisi_cpuhotplug_unlock(cluster, core);
+}
+
+int cluster_is_powered_on(unsigned int cluster)
+{
+	unsigned int val = mmio_read_32(REG_SCBAKDATA3_OFFSET);
+	int ret;
+
+	if (cluster == 0)
+		ret = val & CLUSTER0_CPUS_ONLINE_MASK;
+	else
+		ret = val & CLUSTER1_CPUS_ONLINE_MASK;
+
+	return !!ret;
+}
+
+static void *hisi_get_pdc_addr(unsigned int cluster)
+{
+	void *pdc_base_addr;
+	uintptr_t addr;
+
+	if (cluster == 0)
+		addr = SOC_CRGPERIPH_A53_PDCEN_ADDR(CRG_BASE);
+	else
+		addr = SOC_CRGPERIPH_MAIA_PDCEN_ADDR(CRG_BASE);
+	pdc_base_addr = (void *)addr;
+
+	return pdc_base_addr;
+}
+
+static unsigned int hisi_get_pdc_stat(unsigned int cluster)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+	unsigned int val;
+
+	val = mmio_read_32((uintptr_t)pdc_base_addr + PDC_COREPOWERSTAT_OFFSET);
+
+	return val;
+}
+
+int hisi_test_pwrdn_allcores(unsigned int cluster, unsigned int core)
+{
+	unsigned int mask = 0xf << (core * 4);
+	unsigned int pdc_stat = hisi_get_pdc_stat(cluster);
+	unsigned int boot_flag = hisi_get_cpu_boot_flag(cluster, core);
+	unsigned int cpuidle_flag = hisi_get_cpuidle_flag(cluster);
+
+	mask = (PDC_COREPWRSTAT_MASK & (~mask));
+	pdc_stat &= mask;
+
+	if ((boot_flag ^ cpuidle_flag) || pdc_stat)
+		return 0;
+	else
+		return 1;
+}
+
+void hisi_disable_pdc(unsigned int cluster)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	mmio_write_32((uintptr_t)pdc_base_addr, 0x0);
+}
+
+void hisi_enable_pdc(unsigned int cluster)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	mmio_write_32((uintptr_t)pdc_base_addr, 0x1);
+}
+
+static inline void hisi_pdc_set_intmask(void *pdc_base_addr,
+					unsigned int core,
+					enum pdc_finish_int_mask intmask)
+{
+	unsigned int val;
+
+	val = mmio_read_32((uintptr_t)pdc_base_addr + PDC_COREPWRINTEN_OFFSET);
+	if (intmask == PDC_ENABLE_FINISH_INT)
+		val |= BIT(core);
+	else
+		val &= ~BIT(core);
+
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPWRINTEN_OFFSET, val);
+}
+
+static inline void hisi_pdc_set_gicmask(void *pdc_base_addr,
+					unsigned int core,
+					enum pdc_gic_mask gicmask)
+{
+	unsigned int val;
+
+	val = mmio_read_32((uintptr_t)pdc_base_addr + PDC_COREGICMASK_OFFSET);
+	if (gicmask == PDC_MASK_GIC_WAKE_IRQ)
+		val |= BIT(core);
+	else
+		val &= ~BIT(core);
+
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREGICMASK_OFFSET, val);
+}
+
+void hisi_pdc_mask_cluster_wakeirq(unsigned int cluster)
+{
+	int i;
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	for (i = 0; i < 4; i++)
+		hisi_pdc_set_gicmask(pdc_base_addr, i, PDC_MASK_GIC_WAKE_IRQ);
+}
+
+static void hisi_pdc_powerup_core(unsigned int cluster, unsigned int core,
+				  enum pdc_gic_mask gicmask,
+				  enum pdc_finish_int_mask intmask)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPOWERUP_OFFSET,
+		      BIT(core));
+}
+
+static void hisi_pdc_powerdn_core(unsigned int cluster, unsigned int core,
+				  enum pdc_gic_mask gicmask,
+				  enum pdc_finish_int_mask intmask)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPOWERDN_OFFSET,
+		      BIT(core));
+}
+
+void hisi_powerup_core(unsigned int cluster, unsigned int core)
+{
+	hisi_pdc_powerup_core(cluster, core, PDC_MASK_GIC_WAKE_IRQ,
+			      PDC_DISABLE_FINISH_INT);
+}
+
+void hisi_powerdn_core(unsigned int cluster, unsigned int core)
+{
+	hisi_pdc_powerdn_core(cluster, core, PDC_MASK_GIC_WAKE_IRQ,
+			      PDC_DISABLE_FINISH_INT);
+}
+
+void hisi_powerup_cluster(unsigned int cluster, unsigned int core)
+{
+	hisi_ipc_pm_on_off(core, cluster, PM_ON);
+}
+
+void hisi_powerdn_cluster(unsigned int cluster, unsigned int core)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	hisi_set_cluster_pwdn_flag(cluster, core, CLUSTER_PWDN_HOTPLUG);
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPWRINTEN_OFFSET,
+		      (0x10001 << core));
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPOWERDN_OFFSET,
+		      BIT(core));
+}
+
+void hisi_enter_core_idle(unsigned int cluster, unsigned int core)
+{
+	hisi_pdc_powerdn_core(cluster, core, PDC_UNMASK_GIC_WAKE_IRQ,
+			      PDC_DISABLE_FINISH_INT);
+}
+
+void hisi_enter_cluster_idle(unsigned int cluster, unsigned int core)
+{
+	void *pdc_base_addr = hisi_get_pdc_addr(cluster);
+
+	hisi_set_cluster_pwdn_flag(cluster, core, CLUSTER_PWDN_IDLE);
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPWRINTEN_OFFSET,
+		      (0x10001 << core));
+	mmio_write_32((uintptr_t)pdc_base_addr + PDC_COREPOWERDN_OFFSET,
+		      BIT(core));
+}
+
+void hisi_enter_ap_suspend(unsigned int cluster, unsigned int core)
+{
+	hisi_ipc_pm_suspend(core, cluster, 0x3);
+}
diff --git a/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.h b/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.h
new file mode 100644
index 0000000..a4d887f
--- /dev/null
+++ b/plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_PWRC_H__
+#define __HISI_PWRC_H__
+
+#include <hi3660.h>
+#include <hi3660_crg.h>
+
+#define PCTRL_BASE					(PCTRL_REG_BASE)
+#define CRG_BASE					(CRG_REG_BASE)
+
+#define SOC_CRGPERIPH_A53_PDCEN_ADDR(base)		((base) + (0x260))
+#define SOC_CRGPERIPH_MAIA_PDCEN_ADDR(base)		((base) + (0x300))
+
+#define SOC_PCTRL_RESOURCE0_LOCK_ADDR(base)		((base) + (0x400))
+#define SOC_PCTRL_RESOURCE0_UNLOCK_ADDR(base)		((base) + (0x404))
+#define SOC_PCTRL_RESOURCE0_LOCK_ST_ADDR(base)		((base) + (0x408))
+#define SOC_PCTRL_RESOURCE1_LOCK_ADDR(base)		((base) + (0x40C))
+#define SOC_PCTRL_RESOURCE1_UNLOCK_ADDR(base)		((base) + (0x410))
+#define SOC_PCTRL_RESOURCE1_LOCK_ST_ADDR(base)		((base) + (0x414))
+#define SOC_PCTRL_RESOURCE2_LOCK_ADDR(base)		((base) + (0x418))
+
+#define SOC_SCTRL_SCBAKDATA3_ADDR(base)			((base) + (0x418))
+#define SOC_SCTRL_SCBAKDATA8_ADDR(base)			((base) + (0x42C))
+#define SOC_SCTRL_SCBAKDATA9_ADDR(base)			((base) + (0x430))
+
+#define SOC_ACPU_SCTRL_BASE_ADDR			(0xFFF0A000)
+
+void hisi_cpuidle_lock(unsigned int cluster, unsigned int core);
+void hisi_cpuidle_unlock(unsigned int cluster, unsigned int core);
+void hisi_set_cpuidle_flag(unsigned int cluster, unsigned int core);
+void hisi_clear_cpuidle_flag(unsigned int cluster, unsigned int core);
+void hisi_set_cpu_boot_flag(unsigned int cluster, unsigned int core);
+void hisi_clear_cpu_boot_flag(unsigned int cluster, unsigned int core);
+int cluster_is_powered_on(unsigned int cluster);
+void hisi_enter_core_idle(unsigned int cluster, unsigned int core);
+void hisi_enter_cluster_idle(unsigned int cluster, unsigned int core);
+int hisi_test_ap_suspend_flag(unsigned int cluster);
+void hisi_enter_ap_suspend(unsigned int cluster, unsigned int core);
+
+
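+/*
+ * Illustrative only - one plausible call sequence for a core entering idle
+ * (the real callers are the platform PSCI handlers, which are not part of
+ * this header):
+ *
+ *   hisi_cpuidle_lock(cluster, core);
+ *   hisi_set_cpuidle_flag(cluster, core);
+ *   if (hisi_test_pwrdn_allcores(cluster, core))
+ *           hisi_enter_cluster_idle(cluster, core);
+ *   else
+ *           hisi_enter_core_idle(cluster, core);
+ *   hisi_cpuidle_unlock(cluster, core);
+ */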
+/* pdc api */
+void hisi_pdc_mask_cluster_wakeirq(unsigned int cluster);
+int hisi_test_pwrdn_allcores(unsigned int cluster, unsigned int core);
+void hisi_disable_pdc(unsigned int cluster);
+void hisi_enable_pdc(unsigned int cluster);
+void hisi_powerup_core(unsigned int cluster, unsigned int core);
+void hisi_powerdn_core(unsigned int cluster, unsigned int core);
+void hisi_powerup_cluster(unsigned int cluster, unsigned int core);
+void hisi_powerdn_cluster(unsigned int cluster, unsigned int core);
+unsigned int hisi_test_cpu_down(unsigned int cluster, unsigned int core);
+
+#endif /* __HISI_PWRC_H__ */
diff --git a/plat/hisilicon/hikey960/hikey960_bl1_setup.c b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
new file mode 100644
index 0000000..6dfada7
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <dw_ufs.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <gicv2.h>
+#include <hi3660.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <tbbr/tbbr_img_desc.h>
+#include <ufs.h>
+
+#include "../../bl1/bl1_private.h"
+#include "hikey960_def.h"
+#include "hikey960_private.h"
+
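+/*
+ * Boot mode is held in bit 0 of SCTRL_BAK_DATA0_REG and is read in
+ * bl1_plat_get_next_image_id() below.
+ */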
+enum {
+	BOOT_MODE_RECOVERY = 0,
+	BOOT_MODE_NORMAL,
+	BOOT_MODE_MASK = 1,
+};
+
+/*
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted RAM
+ */
+extern unsigned long __COHERENT_RAM_START__;
+extern unsigned long __COHERENT_RAM_END__;
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL1_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL1_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+/* Data structure which holds the extents of the trusted RAM for BL1 */
+static meminfo_t bl1_tzram_layout;
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+const unsigned int g0_interrupt_array[] = {
+	IRQ_SEC_PHY_TIMER,
+	IRQ_SEC_SGI_0
+};
+
+const gicv2_driver_data_t hikey960_gic_data = {
+	.gicd_base = GICD_REG_BASE,
+	.gicc_base = GICC_REG_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+};
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+#if LOAD_IMAGE_V2
+/*******************************************************************************
+ * Function that takes a memory layout into which BL2 has been loaded and
+ * populates a new memory layout for BL2 that ensures that BL1's data sections
+ * resident in secure RAM are not visible to BL2.
+ ******************************************************************************/
+void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
+			     meminfo_t *bl2_mem_layout)
+{
+
+	assert(bl1_mem_layout != NULL);
+	assert(bl2_mem_layout != NULL);
+
+	/*
+	 * Cannot remove BL1 RW data from the scope of memory visible to BL2,
+	 * as is done on Arm platforms, because the two regions overlap on
+	 * hikey960.
+	 */
+	bl2_mem_layout->total_base = BL2_BASE;
+	bl2_mem_layout->total_size = NS_BL1U_LIMIT - BL2_BASE;
+
+	flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
+}
+#endif /* LOAD_IMAGE_V2 */
+
+/*
+ * Perform any BL1 specific platform actions.
+ */
+void bl1_early_platform_setup(void)
+{
+	unsigned int id, uart_base;
+
+	generic_delay_timer_init();
+	hikey960_read_boardid(&id);
+	if (id == 5300)
+		uart_base = PL011_UART5_BASE;
+	else
+		uart_base = PL011_UART6_BASE;
+	/* Initialize the console to provide early debug support */
+	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = BL1_RW_BASE;
+	bl1_tzram_layout.total_size = BL1_RW_SIZE;
+
+#if !LOAD_IMAGE_V2
+	/* Calculate how much RAM BL1 is using and how much remains free */
+	bl1_tzram_layout.free_base = BL1_RW_BASE;
+	bl1_tzram_layout.free_size = BL1_RW_SIZE;
+	reserve_mem(&bl1_tzram_layout.free_base,
+		    &bl1_tzram_layout.free_size,
+		    BL1_RAM_BASE,
+		    BL1_RAM_LIMIT - BL1_RAM_BASE); /* bl1_size */
+#endif /* LOAD_IMAGE_V2 */
+
+	INFO("BL1: 0x%lx - 0x%lx [size = %lu]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
+	     BL1_RAM_LIMIT - BL1_RAM_BASE); /* bl1_size */
+}
+
+/*
+ * Perform the very early platform specific architecture setup here. At the
+ * moment this only does basic initialization. Later architectural setup
+ * (bl1_arch_setup()) does not do anything platform specific.
+ */
+void bl1_plat_arch_setup(void)
+{
+	hikey960_init_mmu_el3(bl1_tzram_layout.total_base,
+			      bl1_tzram_layout.total_size,
+			      BL1_RO_BASE,
+			      BL1_RO_LIMIT,
+			      BL1_COHERENT_RAM_BASE,
+			      BL1_COHERENT_RAM_LIMIT);
+}
+
+static void hikey960_clk_init(void)
+{
+	/* change ldi0 sel to ppll2 */
+	mmio_write_32(0xfff350b4, 0xf0002000);
+	/* ldi0 20' */
+	mmio_write_32(0xfff350bc, 0xfc004c00);
+}
+
+static void hikey960_pmu_init(void)
+{
+	/* clear np_xo_abb_dig_START bit in PMIC_CLK_TOP_CTRL7 register */
+	mmio_clrbits_32(PMU_SSI0_CLK_TOP_CTRL7_REG, NP_XO_ABB_DIG);
+}
+
+static void hikey960_enable_ppll3(void)
+{
+	/* enable ppll3 */
+	mmio_write_32(PMC_PPLL3_CTRL0_REG, 0x4904305);
+	mmio_write_32(PMC_PPLL3_CTRL1_REG, 0x2300000);
+	mmio_write_32(PMC_PPLL3_CTRL1_REG, 0x6300000);
+}
+
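+/*
+ * Clear the NoC power-idle request for the domains selected by 'value'.
+ * The upper 16 bits of PMC_NOC_POWER_IDLEREQ_REG are written as a
+ * write-enable mask for the lower 16 request bits (a common HiSilicon
+ * register convention, assumed here), after which the ACK and IDLE status
+ * registers are polled until the domains report non-idle.
+ */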
+static void bus_idle_clear(unsigned int value)
+{
+	unsigned int pmc_value, value1, value2;
+	int timeout = 100;
+
+	pmc_value = value << 16;
+	pmc_value &= ~value;
+	mmio_write_32(PMC_NOC_POWER_IDLEREQ_REG, pmc_value);
+
+	for (;;) {
+		value1 = (unsigned int)mmio_read_32(PMC_NOC_POWER_IDLEACK_REG);
+		value2 = (unsigned int)mmio_read_32(PMC_NOC_POWER_IDLE_REG);
+		if (((value1 & value) == 0) && ((value2 & value) == 0))
+			break;
+		udelay(1);
+		timeout--;
+		if (timeout <= 0) {
+			WARN("%s timeout\n", __func__);
+			break;
+		}
+	}
+}
+
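+/*
+ * The set_*_power_up() helpers below follow broadly the same vendor sequence:
+ * select clock dividers, switch on the MTCMOS power switch, pulse the clocks,
+ * lift the isolation, release the resets, re-enable the clocks and finally
+ * clear the NoC idle request via bus_idle_clear().
+ */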
+static void set_vivobus_power_up(void)
+{
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV20_REG, 0x00020002);
+	mmio_write_32(CRG_PEREN0_REG, 0x00001000);
+}
+
+static void set_dss_power_up(void)
+{
+	/* set edc0 133MHz = 1600MHz / 12 */
+	mmio_write_32(CRG_CLKDIV5_REG, 0x003f000b);
+	/* set ldi0 ppl0 */
+	mmio_write_32(CRG_CLKDIV3_REG, 0xf0001000);
+	/* set ldi0 133MHz, 1600MHz / 12 */
+	mmio_write_32(CRG_CLKDIV5_REG, 0xfc002c00);
+	/* mtcmos on */
+	mmio_write_32(CRG_PERPWREN_REG, 0x00000020);
+	udelay(100);
+	/* DISP CRG */
+	mmio_write_32(CRG_PERRSTDIS4_REG, 0x00000010);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x01400140);
+	mmio_write_32(CRG_PEREN0_REG, 0x00002000);
+	mmio_write_32(CRG_PEREN3_REG, 0x0003b000);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(CRG_PERDIS3_REG, 0x0003b000);
+	mmio_write_32(CRG_PERDIS0_REG, 0x00002000);
+	mmio_write_32(CRG_CLKDIV18_REG, 0x01400000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(CRG_ISODIS_REG, 0x00000040);
+	/* unreset */
+	mmio_write_32(CRG_PERRSTDIS4_REG, 0x00000006);
+	mmio_write_32(CRG_PERRSTDIS3_REG, 0x00000c00);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x01400140);
+	mmio_write_32(CRG_PEREN0_REG, 0x00002000);
+	mmio_write_32(CRG_PEREN3_REG, 0x0003b000);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_DSS);
+	/* set edc0 400MHz for 2K 1600MHz / 4 */
+	mmio_write_32(CRG_CLKDIV5_REG, 0x003f0003);
+	/* set ldi 266MHz, 1600MHz / 6 */
+	mmio_write_32(CRG_CLKDIV5_REG, 0xfc001400);
+}
+
+static void set_vcodec_power_up(void)
+{
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV20_REG, 0x00040004);
+	mmio_write_32(CRG_PEREN0_REG, 0x00000060);
+	mmio_write_32(CRG_PEREN2_REG, 0x10000000);
+	/* unreset */
+	mmio_write_32(CRG_PERRSTDIS0_REG, 0x00000018);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_VCODEC);
+}
+
+static void set_vdec_power_up(void)
+{
+	/* mtcmos on */
+	mmio_write_32(CRG_PERPWREN_REG, 0x00000004);
+	udelay(100);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x80008000);
+	mmio_write_32(CRG_PEREN2_REG, 0x20080000);
+	mmio_write_32(CRG_PEREN3_REG, 0x00000800);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(CRG_PERDIS3_REG, 0x00000800);
+	mmio_write_32(CRG_PERDIS2_REG, 0x20080000);
+	mmio_write_32(CRG_CLKDIV18_REG, 0x80000000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(CRG_ISODIS_REG, 0x00000004);
+	/* unreset */
+	mmio_write_32(CRG_PERRSTDIS3_REG, 0x00000200);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x80008000);
+	mmio_write_32(CRG_PEREN2_REG, 0x20080000);
+	mmio_write_32(CRG_PEREN3_REG, 0x00000800);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_VDEC);
+}
+
+static void set_venc_power_up(void)
+{
+	/* set venc ppll3 */
+	mmio_write_32(CRG_CLKDIV8_REG, 0x18001000);
+	/* set venc 258MHz, 1290MHz / 5 */
+	mmio_write_32(CRG_CLKDIV8_REG, 0x07c00100);
+	/* mtcmos on */
+	mmio_write_32(CRG_PERPWREN_REG, 0x00000002);
+	udelay(100);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV19_REG, 0x00010001);
+	mmio_write_32(CRG_PEREN2_REG, 0x40000100);
+	mmio_write_32(CRG_PEREN3_REG, 0x00000400);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(CRG_PERDIS3_REG, 0x00000400);
+	mmio_write_32(CRG_PERDIS2_REG, 0x40000100);
+	mmio_write_32(CRG_CLKDIV19_REG, 0x00010000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(CRG_ISODIS_REG, 0x00000002);
+	/* unreset */
+	mmio_write_32(CRG_PERRSTDIS3_REG, 0x00000100);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV19_REG, 0x00010001);
+	mmio_write_32(CRG_PEREN2_REG, 0x40000100);
+	mmio_write_32(CRG_PEREN3_REG, 0x00000400);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_VENC);
+	/* set venc 645MHz, 1290MHz / 2 */
+	mmio_write_32(CRG_CLKDIV8_REG, 0x07c00040);
+}
+
+static void set_isp_power_up(void)
+{
+	/* mtcmos on */
+	mmio_write_32(CRG_PERPWREN_REG, 0x00000001);
+	udelay(100);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x70007000);
+	mmio_write_32(CRG_CLKDIV20_REG, 0x00100010);
+	mmio_write_32(CRG_PEREN5_REG, 0x01000010);
+	mmio_write_32(CRG_PEREN3_REG, 0x0bf00000);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(CRG_PERDIS5_REG, 0x01000010);
+	mmio_write_32(CRG_PERDIS3_REG, 0x0bf00000);
+	mmio_write_32(CRG_CLKDIV18_REG, 0x70000000);
+	mmio_write_32(CRG_CLKDIV20_REG, 0x00100000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(CRG_ISODIS_REG, 0x00000001);
+	/* unreset */
+	mmio_write_32(CRG_ISP_SEC_RSTDIS_REG, 0x0000002f);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV18_REG, 0x70007000);
+	mmio_write_32(CRG_CLKDIV20_REG, 0x00100010);
+	mmio_write_32(CRG_PEREN5_REG, 0x01000010);
+	mmio_write_32(CRG_PEREN3_REG, 0x0bf00000);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_ISP);
+	/* csi clk enable */
+	mmio_write_32(CRG_PEREN3_REG, 0x00700000);
+}
+
+static void set_ivp_power_up(void)
+{
+	/* set ivp ppll0 */
+	mmio_write_32(CRG_CLKDIV0_REG, 0xc0000000);
+	/* set ivp 267MHz, 1600MHz / 6 */
+	mmio_write_32(CRG_CLKDIV0_REG, 0x3c001400);
+	/* mtcmos on */
+	mmio_write_32(CRG_PERPWREN_REG, 0x00200000);
+	udelay(100);
+	/* IVP CRG unreset */
+	mmio_write_32(CRG_IVP_SEC_RSTDIS_REG, 0x00000001);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV20_REG, 0x02000200);
+	mmio_write_32(CRG_PEREN4_REG, 0x000000a8);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(CRG_PERDIS4_REG, 0x000000a8);
+	mmio_write_32(CRG_CLKDIV20_REG, 0x02000000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(CRG_ISODIS_REG, 0x01000000);
+	/* unreset */
+	mmio_write_32(CRG_IVP_SEC_RSTDIS_REG, 0x00000002);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV20_REG, 0x02000200);
+	mmio_write_32(CRG_PEREN4_REG, 0x000000a8);
+	/* bus idle clear */
+	bus_idle_clear(PMC_NOC_POWER_IDLEREQ_IVP);
+	/* set ivp 533MHz, 1600MHz / 3 */
+	mmio_write_32(CRG_CLKDIV0_REG, 0x3c000800);
+}
+
+static void set_audio_power_up(void)
+{
+	unsigned int ret;
+	int timeout = 100;
+	/* mtcmos on */
+	mmio_write_32(SCTRL_SCPWREN_REG, 0x00000001);
+	udelay(100);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV19_REG, 0x80108010);
+	mmio_write_32(SCTRL_SCCLKDIV2_REG, 0x00010001);
+	mmio_write_32(SCTRL_SCPEREN0_REG, 0x0c000000);
+	mmio_write_32(CRG_PEREN0_REG, 0x04000000);
+	mmio_write_32(CRG_PEREN5_REG, 0x00000080);
+	mmio_write_32(SCTRL_SCPEREN1_REG, 0x0000000f);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(SCTRL_SCPERDIS1_REG, 0x0000000f);
+	mmio_write_32(SCTRL_SCPERDIS0_REG, 0x0c000000);
+	mmio_write_32(CRG_PERDIS5_REG, 0x00000080);
+	mmio_write_32(CRG_PERDIS0_REG, 0x04000000);
+	mmio_write_32(SCTRL_SCCLKDIV2_REG, 0x00010000);
+	mmio_write_32(CRG_CLKDIV19_REG, 0x80100000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(SCTRL_SCISODIS_REG, 0x00000001);
+	udelay(1);
+	/* unreset */
+	mmio_write_32(SCTRL_PERRSTDIS1_SEC_REG, 0x00000001);
+	mmio_write_32(SCTRL_SCPERRSTDIS0_REG, 0x00000780);
+	/* clk enable */
+	mmio_write_32(CRG_CLKDIV19_REG, 0x80108010);
+	mmio_write_32(SCTRL_SCCLKDIV2_REG, 0x00010001);
+	mmio_write_32(SCTRL_SCPEREN0_REG, 0x0c000000);
+	mmio_write_32(CRG_PEREN0_REG, 0x04000000);
+	mmio_write_32(CRG_PEREN5_REG, 0x00000080);
+	mmio_write_32(SCTRL_SCPEREN1_REG, 0x0000000f);
+	/* bus idle clear */
+	mmio_write_32(SCTRL_SCPERCTRL7_REG, 0x00040000);
+	for (;;) {
+		ret = mmio_read_32(SCTRL_SCPERSTAT6_REG);
+		if (((ret & (1 << 5)) == 0) && ((ret & (1 << 8)) == 0))
+			break;
+		udelay(1);
+		timeout--;
+		if (timeout <= 0) {
+			WARN("%s timeout\n", __func__);
+			break;
+		}
+	}
+	mmio_write_32(ASP_CFG_MMBUF_CTRL_REG, 0x00ff0000);
+}
+
+static void set_pcie_power_up(void)
+{
+	/* mtcmos on */
+	mmio_write_32(SCTRL_SCPWREN_REG, 0x00000010);
+	udelay(100);
+	/* clk enable */
+	mmio_write_32(SCTRL_SCCLKDIV6_REG, 0x08000800);
+	mmio_write_32(SCTRL_SCPEREN2_REG, 0x00104000);
+	mmio_write_32(CRG_PEREN7_REG, 0x000003a0);
+	udelay(1);
+	/* clk disable */
+	mmio_write_32(SCTRL_SCPERDIS2_REG, 0x00104000);
+	mmio_write_32(CRG_PERDIS7_REG, 0x000003a0);
+	mmio_write_32(SCTRL_SCCLKDIV6_REG, 0x08000000);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(SCTRL_SCISODIS_REG, 0x00000030);
+	/* unreset */
+	mmio_write_32(CRG_PERRSTDIS3_REG, 0x8c000000);
+	/* clk enable */
+	mmio_write_32(SCTRL_SCCLKDIV6_REG, 0x08000800);
+	mmio_write_32(SCTRL_SCPEREN2_REG, 0x00104000);
+	mmio_write_32(CRG_PEREN7_REG, 0x000003a0);
+}
+
+static void ispfunc_enable(void)
+{
+	/* enable ispfunc; otherwise powering up isp_srt causes an exception */
+	mmio_write_32(0xfff35000, 0x00000008);
+	mmio_write_32(0xfff35460, 0xc004ffff);
+	mmio_write_32(0xfff35030, 0x02000000);
+	mdelay(10);
+}
+
+static void isps_control_clock(int flag)
+{
+	unsigned int ret;
+
+	/* flag: 0 -- disable clock, 1 -- enable clock */
+	if (flag) {
+		ret = mmio_read_32(0xe8420364);
+		ret |= 1;
+		mmio_write_32(0xe8420364, ret);
+	} else {
+		ret = mmio_read_32(0xe8420364);
+		ret &= ~1;
+		mmio_write_32(0xe8420364, ret);
+	}
+}
+
+static void set_isp_srt_power_up(void)
+{
+	unsigned int ret;
+
+	ispfunc_enable();
+	/* reset */
+	mmio_write_32(0xe8420374, 0x00000001);
+	mmio_write_32(0xe8420350, 0x00000000);
+	mmio_write_32(0xe8420358, 0x00000000);
+	/* mtcmos on */
+	mmio_write_32(0xfff35150, 0x00400000);
+	udelay(100);
+	/* clk enable */
+	isps_control_clock(1);
+	udelay(1);
+	isps_control_clock(0);
+	udelay(1);
+	/* iso disable */
+	mmio_write_32(0xfff35148, 0x08000000);
+	/* unreset */
+	ret = mmio_read_32(0xe8420374);
+	ret &= ~0x1;
+	mmio_write_32(0xe8420374, ret);
+	/* clk enable */
+	isps_control_clock(1);
+	/* enable clock gating for accessing csi registers */
+	mmio_write_32(0xe8420010, ~0);
+}
+
+static void hikey960_regulator_enable(void)
+{
+	set_vivobus_power_up();
+	hikey960_enable_ppll3();
+	set_dss_power_up();
+	set_vcodec_power_up();
+	set_vdec_power_up();
+	set_venc_power_up();
+	set_isp_power_up();
+	set_ivp_power_up();
+	set_audio_power_up();
+	set_pcie_power_up();
+	set_isp_srt_power_up();
+}
+
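+/*
+ * Reset the UFS controller and its M-PHY: gate the reference clock, switch
+ * the refclk source, cycle the peripheral reset, power on the MTCMOS and
+ * drop the isolation, program the PHY config-clock divider, and finally
+ * toggle the UFS device reset line before releasing the controller reset.
+ */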
+static void hikey960_ufs_reset(void)
+{
+	unsigned int data, mask;
+
+	mmio_write_32(CRG_PERDIS7_REG, 1 << 14);
+	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);
+	do {
+		data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
+	} while (data & BIT_SYSCTRL_REF_CLOCK_EN);
+	/* use abb clk */
+	mmio_clrbits_32(UFS_SYS_UFS_SYSCTRL_REG, BIT_UFS_REFCLK_SRC_SE1);
+	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_REFCLK_ISO_EN);
+	mmio_write_32(PCTRL_PERI_CTRL3_REG, (1 << 0) | (1 << 16));
+	mdelay(1);
+	mmio_write_32(CRG_PEREN7_REG, 1 << 14);
+	mmio_setbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);
+
+	mmio_write_32(CRG_PERRSTEN3_REG, PERI_UFS_BIT);
+	do {
+		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
+	} while ((data & PERI_UFS_BIT) == 0);
+	mmio_setbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_MTCMOS_EN);
+	mdelay(1);
+	mmio_setbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_PWR_READY);
+	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
+		      MASK_UFS_DEVICE_RESET);
+	/* clear SC_DIV_UFS_PERIBUS */
+	mask = SC_DIV_UFS_PERIBUS << 16;
+	mmio_write_32(CRG_CLKDIV17_REG, mask);
+	/* set SC_DIV_UFSPHY_CFG(3) */
+	mask = SC_DIV_UFSPHY_CFG_MASK << 16;
+	data = SC_DIV_UFSPHY_CFG(3);
+	mmio_write_32(CRG_CLKDIV16_REG, mask | data);
+	data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
+	data &= ~MASK_SYSCTRL_CFG_CLOCK_FREQ;
+	data |= 0x39;
+	mmio_write_32(UFS_SYS_PHY_CLK_CTRL_REG, data);
+	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, MASK_SYSCTRL_REF_CLOCK_SEL);
+	mmio_setbits_32(UFS_SYS_CLOCK_GATE_BYPASS_REG,
+			MASK_UFS_CLK_GATE_BYPASS);
+	mmio_setbits_32(UFS_SYS_UFS_SYSCTRL_REG, MASK_UFS_SYSCTRL_BYPASS);
+
+	mmio_setbits_32(UFS_SYS_PSW_CLK_CTRL_REG, BIT_SYSCTRL_PSW_CLK_EN);
+	mmio_clrbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_ISO_CTRL);
+	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_PHY_ISO_CTRL);
+	mmio_clrbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_LP_ISOL_EN);
+	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_ARST_UFS_BIT);
+	mmio_setbits_32(UFS_SYS_RESET_CTRL_EN_REG, BIT_SYSCTRL_LP_RESET_N);
+	mdelay(1);
+	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
+		      MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET);
+	mdelay(20);
+	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
+		      0x03300330);
+
+	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_UFS_BIT);
+	do {
+		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
+	} while (data & PERI_UFS_BIT);
+}
+
+static void hikey960_ufs_init(void)
+{
+	dw_ufs_params_t ufs_params;
+
+	memset(&ufs_params, 0, sizeof(ufs_params));
+	ufs_params.reg_base = UFS_REG_BASE;
+	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
+	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
+
+	if ((ufs_params.flags & UFS_FLAGS_SKIPINIT) == 0)
+		hikey960_ufs_reset();
+	dw_ufs_init(&ufs_params);
+}
+
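+/*
+ * Program the TrustZone controller region-enable registers. The bit patterns
+ * are opaque vendor-provided values; their per-region meaning is not
+ * documented here.
+ */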
+static void hikey960_tzc_init(void)
+{
+	mmio_write_32(TZC_EN0_REG, 0x7fbff066);
+	mmio_write_32(TZC_EN1_REG, 0xfffff5fc);
+	mmio_write_32(TZC_EN2_REG, 0x0007005c);
+	mmio_write_32(TZC_EN3_REG, 0x37030700);
+	mmio_write_32(TZC_EN4_REG, 0xf63fefae);
+	mmio_write_32(TZC_EN5_REG, 0x000410fd);
+	mmio_write_32(TZC_EN6_REG, 0x0063ff68);
+	mmio_write_32(TZC_EN7_REG, 0x030000f3);
+	mmio_write_32(TZC_EN8_REG, 0x00000007);
+}
+
+static void hikey960_peri_init(void)
+{
+	/* unreset */
+	mmio_setbits_32(CRG_PERRSTDIS4_REG, 1);
+}
+
+static void hikey960_pinmux_init(void)
+{
+	unsigned int id;
+
+	hikey960_read_boardid(&id);
+	if (id == 5301) {
+		/* hikey960 hardware v2 */
+		/* GPIO150: LED */
+		mmio_write_32(IOMG_FIX_006_REG, 0);
+		/* GPIO151: LED */
+		mmio_write_32(IOMG_FIX_007_REG, 0);
+		/* GPIO189: LED */
+		mmio_write_32(IOMG_AO_011_REG, 0);
+		/* GPIO190: LED */
+		mmio_write_32(IOMG_AO_012_REG, 0);
+		/* GPIO46 */
+		mmio_write_32(IOMG_044_REG, 0);
+		/* GPIO202 */
+		mmio_write_32(IOMG_AO_023_REG, 0);
+		/* GPIO206 */
+		mmio_write_32(IOMG_AO_026_REG, 0);
+		/* GPIO219 - PD pullup */
+		mmio_write_32(IOMG_AO_039_REG, 0);
+		mmio_write_32(IOCG_AO_043_REG, 1 << 0);
+	}
+	/* GPIO005 - PMU SSI, 10mA */
+	mmio_write_32(IOCG_006_REG, 2 << 4);
+}
+
+/*
+ * Function which will perform any remaining platform-specific setup that can
+ * occur after the MMU and data cache have been enabled.
+ */
+void bl1_platform_setup(void)
+{
+	hikey960_clk_init();
+	hikey960_pmu_init();
+	hikey960_regulator_enable();
+	hikey960_tzc_init();
+	hikey960_peri_init();
+	hikey960_ufs_init();
+	hikey960_pinmux_init();
+	hikey960_io_setup();
+}
+
+/*
+ * The following function checks whether a firmware update (recovery boot) is
+ * needed, based on the boot mode flag held in SCTRL_BAK_DATA0_REG.
+ */
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	unsigned int mode, ret;
+
+	mode = mmio_read_32(SCTRL_BAK_DATA0_REG);
+	switch (mode & BOOT_MODE_MASK) {
+	case BOOT_MODE_RECOVERY:
+		ret = NS_BL1U_IMAGE_ID;
+		break;
+	case BOOT_MODE_NORMAL:
+		ret = BL2_IMAGE_ID;
+		break;
+	default:
+		WARN("Invalid boot mode found: %d\n", mode);
+		panic();
+	}
+	return ret;
+}
+
+image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
+{
+	unsigned int index = 0;
+
+	while (bl1_tbbr_image_descs[index].image_id != INVALID_IMAGE_ID) {
+		if (bl1_tbbr_image_descs[index].image_id == image_id)
+			return &bl1_tbbr_image_descs[index];
+		index++;
+	}
+
+	return NULL;
+}
+
+void bl1_plat_set_ep_info(unsigned int image_id,
+		entry_point_info_t *ep_info)
+{
+	unsigned int data = 0;
+	uintptr_t tmp = HIKEY960_NS_TMP_OFFSET;
+
+	if (image_id == BL2_IMAGE_ID)
+		return;
+	/* Copy NS BL1U from 0x1AC1_8000 to 0x1AC9_8000 */
+	memcpy((void *)tmp, (void *)HIKEY960_NS_IMAGE_OFFSET,
+		NS_BL1U_SIZE);
+	memcpy((void *)NS_BL1U_BASE, (void *)tmp, NS_BL1U_SIZE);
+	inv_dcache_range(NS_BL1U_BASE, NS_BL1U_SIZE);
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	gicv2_driver_init(&hikey960_gic_data);
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+	/* CNTFRQ is read-only in EL1 */
+	write_cntfrq_el0(plat_get_syscnt_freq2());
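+	/* Enable FP/SIMD access at EL1 (CPACR_EL1.FPEN == 0b11) for the next image */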
+	data = read_cpacr_el1();
+	do {
+		data |= 3 << 20;
+		write_cpacr_el1(data);
+		data = read_cpacr_el1();
+	} while ((data & (3 << 20)) != (3 << 20));
+	INFO("cpacr_el1:0x%x\n", data);
+
+	ep_info->args.arg0 = 0xffff & read_mpidr();
+	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
+}
diff --git a/plat/hisilicon/hikey960/hikey960_bl2_mem_params_desc.c b/plat/hisilicon/hikey960/hikey960_bl2_mem_params_desc.c
new file mode 100644
index 0000000..b59f897
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_bl2_mem_params_desc.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+
+/*******************************************************************************
+ * The following descriptor provides BL image/entry-point information that is
+ * used by BL2 to load the images; a subset of this information is also passed
+ * on to the next BL image. The image loading sequence is managed by populating
+ * the images in the required loading order. The image execution sequence is
+ * managed by populating `next_handoff_image_id` with the id of the next
+ * executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+	/* Fill SCP_BL2 related information if it exists */
+    {
+	    .image_id = SCP_BL2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = SCP_BL2_BASE,
+	    .image_info.image_max_size = SCP_BL2_SIZE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+	/* Fill EL3 payload related information (BL31 is EL3 payload)*/
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = EL3_PAYLOAD_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t,
+		    IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+#else /* EL3_PAYLOAD_BASE */
+
+	/* Fill BL31 related information */
+    {
+	    .image_id = BL31_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t,
+		    SECURE | EXECUTABLE | EP_FIRST_EXE),
+	    .ep_info.pc = BL31_BASE,
+	    .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+		    DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+	    .ep_info.args.arg1 = HIKEY960_BL31_PLAT_PARAM_VAL,
+#endif
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+	    .image_info.image_base = BL31_BASE,
+	    .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+	    .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+    },
+
+# ifdef BL32_BASE
+	/* Fill BL32 related information */
+    {
+	    .image_id = BL32_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+	    .ep_info.pc = BL32_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = BL33_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 1 related information.
+	 * A typical use for extra1 image is with OP-TEE where it is the pager image.
+	 */
+    {
+	    .image_id = BL32_EXTRA1_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+	    .image_info.image_base = BL32_BASE,
+	    .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+
+	/*
+	 * Fill BL32 external 2 related information.
+	 * A typical use for extra2 image is with OP-TEE where it is the paged image.
+	 */
+    {
+	    .image_id = BL32_EXTRA2_IMAGE_ID,
+
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+	    .image_info.image_base = HIKEY960_OPTEE_PAGEABLE_LOAD_BASE,
+	    .image_info.image_max_size = HIKEY960_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    },
+# endif /* BL32_BASE */
+
+	/* Fill BL33 related information */
+    {
+	    .image_id = BL33_IMAGE_ID,
+	    SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+		    VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+	    .ep_info.pc = PRELOADED_BL33_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+	    .ep_info.pc = NS_BL1U_BASE,
+
+	    SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+		    VERSION_2, image_info_t, 0),
+	    .image_info.image_base = NS_BL1U_BASE,
+	    .image_info.image_max_size = 0x200000 /* 2MB */,
+# endif /* PRELOADED_BL33_BASE */
+
+	    .next_handoff_image_id = INVALID_IMAGE_ID,
+    }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/hisilicon/hikey960/hikey960_bl2_setup.c b/plat/hisilicon/hikey960/hikey960_bl2_setup.c
new file mode 100644
index 0000000..b50ed87
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_bl2_setup.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <hi3660.h>
+#include <mmio.h>
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#include <optee_utils.h>
+#endif
+#endif
+#include <platform_def.h>
+#include <string.h>
+#include <ufs.h>
+
+#include "hikey960_def.h"
+#include "hikey960_private.h"
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL2_RO_BASE (unsigned long)(&__RO_START__)
+#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+#if !LOAD_IMAGE_V2
+
+/*******************************************************************************
+ * This structure represents the superset of information that is passed to
+ * BL31, e.g. while passing control to it from BL2, bl31_params
+ * and other platform specific params
+ ******************************************************************************/
+typedef struct bl2_to_bl31_params_mem {
+	bl31_params_t		bl31_params;
+	image_info_t		bl31_image_info;
+	image_info_t		bl32_image_info;
+	image_info_t		bl33_image_info;
+	entry_point_info_t	bl33_ep_info;
+	entry_point_info_t	bl32_ep_info;
+	entry_point_info_t	bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	return &bl2_tzram_layout;
+}
+
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+	bl31_params_t *bl2_to_bl31_params = NULL;
+
+	/*
+	 * Initialise the memory for all the arguments that need to
+	 * be passed to BL3-1.
+	 */
+	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
+
+	/* Assign memory for TF related information */
+	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+	/* Fill BL3-1 related information */
+	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	/* Fill BL3-2 related information if it exists */
+#ifdef BL32_BASE
+	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+		VERSION_1, 0);
+	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+#endif
+
+	/* Fill BL3-3 related information */
+	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+		PARAM_EP, VERSION_1, 0);
+
+	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
+	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	return bl2_to_bl31_params;
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading SCP_BL2 (if used),
+ * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
+ ******************************************************************************/
+void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
+{
+	hikey960_init_ufs();
+	hikey960_io_setup();
+
+	*scp_bl2_meminfo = bl2_tzram_layout;
+}
+#endif /* LOAD_IMAGE_V2 */
+
+extern int load_lpm3(void);
+
+/*******************************************************************************
+ * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
+ * Return 0 on success, -1 otherwise.
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#else
+int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
+#endif
+{
+	int i;
+	int *buf;
+
+	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);
+
+	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
+
+	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
+	     scp_bl2_image_info->image_base,
+	     scp_bl2_image_info->image_size);
+
+	buf = (int *)scp_bl2_image_info->image_base;
+
+	INFO("BL2: SCP_BL2 HEAD:\n");
+	for (i = 0; i < 64; i += 4)
+		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
+			buf[i], buf[i+1], buf[i+2], buf[i+3]);
+
+	buf = (int *)(scp_bl2_image_info->image_base +
+		      scp_bl2_image_info->image_size - 256);
+
+	INFO("BL2: SCP_BL2 TAIL:\n");
+	for (i = 0; i < 64; i += 4)
+		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
+			buf[i], buf[i+1], buf[i+2], buf[i+3]);
+
+	INFO("BL2: SCP_BL2 transferred to SCP\n");
+
+	load_lpm3();
+	(void)buf;
+
+	return 0;
+}
+
+void hikey960_init_ufs(void)
+{
+	ufs_params_t ufs_params;
+
+	memset(&ufs_params, 0, sizeof(ufs_params_t));
+	ufs_params.reg_base = UFS_REG_BASE;
+	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
+	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
+	ufs_params.flags = UFS_FLAGS_SKIPINIT;
+	ufs_init(NULL, &ufs_params);
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+uint32_t hikey960_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL3-2 image.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+#ifndef AARCH32
+uint32_t hikey960_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#else
+uint32_t hikey960_get_spsr_for_bl33_entry(void)
+{
+	unsigned int hyp_status, mode, spsr;
+
+	hyp_status = GET_VIRT_EXT(read_id_pfr1());
+
+	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
+			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+#endif /* AARCH32 */
+
+#if LOAD_IMAGE_V2
+int hikey960_bl2_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+#ifdef SPD_opteed
+	bl_mem_params_node_t *pager_mem_params = NULL;
+	bl_mem_params_node_t *paged_mem_params = NULL;
+#endif
+	assert(bl_mem_params);
+
+	switch (image_id) {
+#ifdef AARCH64
+	case BL32_IMAGE_ID:
+#ifdef SPD_opteed
+		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+		assert(pager_mem_params);
+
+		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+		assert(paged_mem_params);
+
+		err = parse_optee_header(&bl_mem_params->ep_info,
+				&pager_mem_params->image_info,
+				&paged_mem_params->image_info);
+		if (err != 0) {
+			WARN("OPTEE header parse error.\n");
+		}
+#endif
+		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
+		break;
+#endif
+
+	case BL33_IMAGE_ID:
+		/* BL33 expects to receive the primary CPU MPID (through r0) */
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
+		break;
+
+#ifdef SCP_BL2_BASE
+	case SCP_BL2_IMAGE_ID:
+		/* The subsequent handling of SCP_BL2 is platform specific */
+		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
+		if (err) {
+			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
+		}
+		break;
+#endif
+	}
+
+	return err;
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	return hikey960_bl2_handle_post_image_load(image_id);
+}
+
+#else /* LOAD_IMAGE_V2 */
+
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+	bl31_params_mem.bl31_ep_info.args.arg1 = HIKEY960_BL31_PLAT_PARAM_VAL;
+#endif
+
+	return &bl31_params_mem.bl31_ep_info;
+}
+
+void bl2_plat_set_bl31_ep_info(image_info_t *image,
+			       entry_point_info_t *bl31_ep_info)
+{
+	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				       DISABLE_ALL_EXCEPTIONS);
+}
+
+/*******************************************************************************
+ * Before calling this function BL32 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL32 and set SPSR and security state.
+ * On hikey960 we only set the security state of the entrypoint.
+ ******************************************************************************/
+#ifdef BL32_BASE
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+					entry_point_info_t *bl32_ep_info)
+{
+	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 */
+	bl32_ep_info->spsr = 0;
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL32
+ ******************************************************************************/
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+	/*
+	 * Populate the extents of memory available for loading BL32.
+	 */
+	bl32_meminfo->total_base = BL32_BASE;
+	bl32_meminfo->free_base = BL32_BASE;
+	bl32_meminfo->total_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+	bl32_meminfo->free_size =
+			(TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
+}
+#endif /* BL32_BASE */
+
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+			       entry_point_info_t *bl33_ep_info)
+{
+	unsigned long el_status;
+	unsigned int mode;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	if (el_status)
+		mode = MODE_EL2;
+	else
+		mode = MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
+				       DISABLE_ALL_EXCEPTIONS);
+	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+}
+
+void bl2_plat_flush_bl31_params(void)
+{
+	flush_dcache_range((unsigned long)&bl31_params_mem,
+			   sizeof(bl2_to_bl31_params_mem_t));
+}
+
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+	bl33_meminfo->total_base = DDR_BASE;
+	bl33_meminfo->total_size = DDR_SIZE;
+	bl33_meminfo->free_base = DDR_BASE;
+	bl33_meminfo->free_size = DDR_SIZE;
+}
+#endif /* LOAD_IMAGE_V2 */
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	unsigned int id, uart_base;
+
+	generic_delay_timer_init();
+	hikey960_read_boardid(&id);
+	if (id == 5300)
+		uart_base = PL011_UART5_BASE;
+	else
+		uart_base = PL011_UART6_BASE;
+
+	/* Initialize the console to provide early debug support */
+	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Setup the BL2 memory layout */
+	bl2_tzram_layout = *mem_layout;
+}
+
+void bl2_plat_arch_setup(void)
+{
+	hikey960_init_mmu_el1(bl2_tzram_layout.total_base,
+			      bl2_tzram_layout.total_size,
+			      BL2_RO_BASE,
+			      BL2_RO_LIMIT,
+			      BL2_COHERENT_RAM_BASE,
+			      BL2_COHERENT_RAM_LIMIT);
+}
+
+void bl2_platform_setup(void)
+{
+	/* disable WDT0 */
+	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
+		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
+		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
+		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
+	}
+}
diff --git a/plat/hisilicon/hikey960/hikey960_bl31_setup.c b/plat/hisilicon/hikey960/hikey960_bl31_setup.c
new file mode 100644
index 0000000..f685f9c
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_bl31_setup.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <gicv2.h>
+#include <hi3660.h>
+#include <hisi_ipc.h>
+#include <platform_def.h>
+
+#include "hikey960_def.h"
+#include "hikey960_private.h"
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE	(unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT	(unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL31_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
+#define BL31_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)
+
+static entry_point_info_t bl32_ep_info;
+static entry_point_info_t bl33_ep_info;
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+const unsigned int g0_interrupt_array[] = {
+	IRQ_SEC_PHY_TIMER,
+	IRQ_SEC_SGI_0
+};
+
+const gicv2_driver_data_t hikey960_gic_data = {
+	.gicd_base = GICD_REG_BASE,
+	.gicc_base = GICC_REG_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+};
+
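+/* CCI-400 slave interfaces of the two CPU clusters */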
+static const int cci_map[] = {
+	CCI400_SL_IFACE3_CLUSTER_IX,
+	CCI400_SL_IFACE4_CLUSTER_IX
+};
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	return NULL;
+}
+
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+			       void *plat_params_from_bl2)
+#else
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+		void *plat_params_from_bl2)
+#endif
+{
+	unsigned int id, uart_base;
+
+	generic_delay_timer_init();
+	hikey960_read_boardid(&id);
+	if (id == 5300)
+		uart_base = PL011_UART5_BASE;
+	else
+		uart_base = PL011_UART6_BASE;
+
+	/* Initialize the console to provide early debug support */
+	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Initialize CCI driver */
+	cci_init(CCI400_REG_BASE, cci_map, ARRAY_SIZE(cci_map));
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+#if LOAD_IMAGE_V2
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+	assert(params_from_bl2 != NULL);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy the BL33 and BL32 (if present) entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params) {
+		if (bl_params->image_id == BL32_IMAGE_ID)
+			bl32_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_ep_info.pc == 0)
+		panic();
+
+#else /* LOAD_IMAGE_V2 */
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	assert(from_bl2 != NULL);
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+
+	/*
+	 * Copy BL3-2 and BL3-3 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	bl32_ep_info = *from_bl2->bl32_ep_info;
+	bl33_ep_info = *from_bl2->bl33_ep_info;
+#endif /* LOAD_IMAGE_V2 */
+}
+
+void bl31_plat_arch_setup(void)
+{
+	hikey960_init_mmu_el3(BL31_BASE,
+			BL31_LIMIT - BL31_BASE,
+			BL31_RO_BASE,
+			BL31_RO_LIMIT,
+			BL31_COHERENT_RAM_BASE,
+			BL31_COHERENT_RAM_LIMIT);
+}
+
+void bl31_platform_setup(void)
+{
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	gicv2_driver_init(&hikey960_gic_data);
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+
+	hisi_ipc_init();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+}
diff --git a/plat/hisilicon/hikey960/hikey960_boardid.c b/plat/hisilicon/hikey960/hikey960_boardid.c
new file mode 100644
index 0000000..90faa9d
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_boardid.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <hi3660.h>
+#include <mmio.h>
+
+#include "hikey960_private.h"
+
+#define ADC_ADCIN0				0
+#define ADC_ADCIN1				1
+#define ADC_ADCIN2				2
+
+#define HKADC_DATA_GRADE0			0
+#define HKADC_DATA_GRADE1			100
+#define HKADC_DATA_GRADE2			300
+#define HKADC_DATA_GRADE3			500
+#define HKADC_DATA_GRADE4			700
+#define HKADC_DATA_GRADE5			900
+#define HKADC_DATA_GRADE6			1100
+#define HKADC_DATA_GRADE7			1300
+#define HKADC_DATA_GRADE8			1500
+#define HKADC_DATA_GRADE9			1700
+#define HKADC_DATA_GRADE10			1800
+
+#define BOARDID_VALUE0				0
+#define BOARDID_VALUE1				1
+#define BOARDID_VALUE2				2
+#define BOARDID_VALUE3				3
+#define BOARDID_VALUE4				4
+#define BOARDID_VALUE5				5
+#define BOARDID_VALUE6				6
+#define BOARDID_VALUE7				7
+#define BOARDID_VALUE8				8
+#define BOARDID_VALUE9				9
+#define BOARDID_UNKNOWN				0xF
+
+#define BOARDID3_BASE				5
+
+
+static void init_adc(void)
+{
+	/* reset hkadc */
+	mmio_write_32(CRG_PERRSTEN2_REG, PERRSTEN2_HKADCSSI);
+	/* wait a few clock cycles */
+	udelay(2);
+	mmio_write_32(CRG_PERRSTDIS2_REG, PERRSTEN2_HKADCSSI);
+	udelay(2);
+	/* cycle the hkadc clock: disable, then re-enable */
+	mmio_write_32(CRG_PERDIS2_REG, PEREN2_HKADCSSI);
+	udelay(2);
+	mmio_write_32(CRG_PEREN2_REG, PEREN2_HKADCSSI);
+	udelay(2);
+}
+
+static int get_adc(unsigned int channel, unsigned int *value)
+{
+	unsigned int	data, value1, value0;
+
+	if (channel > HKADC_CHANNEL_MAX) {
+		WARN("invalid channel:%d\n", channel);
+		return -EFAULT;
+	}
+	/* configure the read/write operation for external HKADC */
+	mmio_write_32(HKADC_WR01_DATA_REG, HKADC_WR01_VALUE | channel);
+	mmio_write_32(HKADC_WR23_DATA_REG, HKADC_WR23_VALUE);
+	mmio_write_32(HKADC_WR45_DATA_REG, HKADC_WR45_VALUE);
+	/* configure the number of HKADC register accesses */
+	mmio_write_32(HKADC_WR_NUM_REG, HKADC_WR_NUM_VALUE);
+	/* configure the delay between register accesses */
+	mmio_write_32(HKADC_DELAY01_REG, HKADC_CHANNEL0_DELAY01_VALUE);
+	mmio_write_32(HKADC_DELAY23_REG, HKADC_DELAY23_VALUE);
+
+	/* start HKADC */
+	mmio_write_32(HKADC_DSP_START_REG, 1);
+	do {
+		data = mmio_read_32(HKADC_DSP_START_REG);
+	} while (data & 1);
+
+	/* convert AD result */
+	value1 = mmio_read_32(HKADC_DSP_RD2_DATA_REG) & 0xffff;
+	value0 = mmio_read_32(HKADC_DSP_RD3_DATA_REG) & 0xffff;
+
+	data = ((value1 << 4) & HKADC_VALUE_HIGH) |
+	       ((value0 >> 4) & HKADC_VALUE_LOW);
+	*value = data;
+	return 0;
+}
+
+static int get_value(unsigned int channel, unsigned int *value)
+{
+	int ret;
+
+	ret = get_adc(channel, value);
+	if (ret)
+		return ret;
+
+	/* convert ADC value to micro-volt */
+	ret = ((*value & HKADC_VALID_VALUE) * HKADC_VREF_1V8) / HKADC_ACCURACY;
+	*value = ret;
+	return 0;
+}
+
+static int adcin_data_remap(unsigned int adcin_value)
+{
+	int	ret;
+
+	if (adcin_value < HKADC_DATA_GRADE1)
+		ret = BOARDID_VALUE0;
+	else if (adcin_value < HKADC_DATA_GRADE2)
+		ret = BOARDID_VALUE1;
+	else if (adcin_value < HKADC_DATA_GRADE3)
+		ret = BOARDID_VALUE2;
+	else if (adcin_value < HKADC_DATA_GRADE4)
+		ret = BOARDID_VALUE3;
+	else if (adcin_value < HKADC_DATA_GRADE5)
+		ret = BOARDID_VALUE4;
+	else if (adcin_value < HKADC_DATA_GRADE6)
+		ret = BOARDID_VALUE5;
+	else if (adcin_value < HKADC_DATA_GRADE7)
+		ret = BOARDID_VALUE6;
+	else if (adcin_value < HKADC_DATA_GRADE8)
+		ret = BOARDID_VALUE7;
+	else if (adcin_value < HKADC_DATA_GRADE9)
+		ret = BOARDID_VALUE8;
+	else if (adcin_value < HKADC_DATA_GRADE10)
+		ret = BOARDID_VALUE9;
+	else
+		ret = BOARDID_UNKNOWN;
+	return ret;
+}
+
+int hikey960_read_boardid(unsigned int *id)
+{
+	unsigned int	adcin0, adcin1, adcin2;
+	unsigned int	adcin0_remap, adcin1_remap, adcin2_remap;
+
+	assert(id != NULL);
+
+	init_adc();
+
+	/* read ADC channel0 data */
+	get_value(ADC_ADCIN0, &adcin0);
+	adcin0_remap = adcin_data_remap(adcin0);
+	INFO("[BDID]adcin0:%d adcin0_remap:%d\n", adcin0, adcin0_remap);
+	if (adcin0_remap == BOARDID_UNKNOWN)
+		return -EINVAL;
+	/* read ADC channel1 data */
+	get_value(ADC_ADCIN1, &adcin1);
+	adcin1_remap = adcin_data_remap(adcin1);
+	INFO("[BDID]adcin1:%d adcin1_remap:%d\n", adcin1, adcin1_remap);
+	if (adcin1_remap == BOARDID_UNKNOWN)
+		return -EINVAL;
+	/* read ADC channel2 data */
+	get_value(ADC_ADCIN2, &adcin2);
+	adcin2_remap = adcin_data_remap(adcin2);
+	INFO("[BDID]adcin2:%d adcin2_remap:%d\n", adcin2, adcin2_remap);
+	if (adcin2_remap == BOARDID_UNKNOWN)
+		return -EINVAL;
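+	/*
+	 * Example: remapped values {adcin2, adcin1, adcin0} = {3, 0, 0} give
+	 * board id 5300, and {3, 0, 1} give 5301 (treated elsewhere in this
+	 * port as the v2 board).
+	 */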
+	*id = BOARDID3_BASE * 1000 + (adcin2_remap * 100) +
+		(adcin1_remap * 10) + adcin0_remap;
+	INFO("[BDID]boardid: %d\n", *id);
+	return 0;
+}
diff --git a/plat/hisilicon/hikey960/hikey960_def.h b/plat/hisilicon/hikey960/hikey960_def.h
new file mode 100644
index 0000000..fc46d71
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_def.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HIKEY960_DEF_H__
+#define __HIKEY960_DEF_H__
+
+#include <common_def.h>
+#include <tbbr_img_def.h>
+
+#define DDR_BASE			0x0
+#define DDR_SIZE			0xC0000000
+
+#define DEVICE_BASE			0xE0000000
+#define DEVICE_SIZE			0x20000000
+
+/* Memory location options for TSP */
+#define HIKEY960_SRAM_ID	0
+#define HIKEY960_DRAM_ID	1
+
+/*
+ * DDR for OP-TEE (32MB from 0x3E000000-0x3FFFFFFF) is divided in several
+ * regions:
+ *   - Secure DDR (default is the top 16MB) used by OP-TEE
+ *   - Non-secure DDR used by OP-TEE (shared memory and padding) (4MB)
+ *   - Secure DDR (4MB aligned on 4MB) for OP-TEE's "Secure Data Path" feature
+ *   - Non-secure DDR (8MB) reserved for OP-TEE's future use
+ */
+#define DDR_SEC_SIZE			0x01000000
+#define DDR_SEC_BASE			0x3F000000
+
+#define DDR_SDP_SIZE			0x00400000
+#define DDR_SDP_BASE			(DDR_SEC_BASE - 0x400000 /* align */ - \
+					DDR_SDP_SIZE)
+
+/*
+ * PL011 related constants
+ */
+#define PL011_UART5_BASE		0xFDF05000
+#define PL011_UART6_BASE		0xFFF32000
+#define PL011_BAUDRATE			115200
+#define PL011_UART_CLK_IN_HZ		19200000
+
+#define UFS_BASE			0
+/* FIP partition */
+#define HIKEY960_FIP_BASE		(UFS_BASE + 0x1400000)
+#define HIKEY960_FIP_MAX_SIZE		(12 << 20)
+
+#define HIKEY960_UFS_DESC_BASE		0x20000000
+#define HIKEY960_UFS_DESC_SIZE		0x00200000	/* 2MB */
+#define HIKEY960_UFS_DATA_BASE		0x10000000
+#define HIKEY960_UFS_DATA_SIZE		0x0A000000	/* 160MB */
+
+#endif /* __HIKEY960_DEF_H__ */
diff --git a/plat/hisilicon/hikey960/hikey960_image_load.c b/plat/hisilicon/hikey960/hikey960_image_load.c
new file mode 100644
index 0000000..8e91adb
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_image_load.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <desc_image_load.h>
+#include <platform.h>
+
+#include "hikey960_private.h"
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	/* Required before loading scp_bl2 */
+	hikey960_init_ufs();
+	hikey960_io_setup();
+
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/hisilicon/hikey960/hikey960_io_storage.c b/plat/hisilicon/hikey960/hikey960_io_storage.c
new file mode 100644
index 0000000..1a1d846
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_io_storage.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <firmware_image_package.h>
+#include <io_block.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <semihosting.h>	/* For FOPEN_MODE_... */
+#include <string.h>
+#include <ufs.h>
+
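+/*
+ * Each policy entry names the IO device an image is loaded from, the spec
+ * (block range or FIP UUID) that identifies the image on that device, and a
+ * callback that verifies the image is accessible. See the policies[] table
+ * below.
+ */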
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+static const io_dev_connector_t *ufs_dev_con, *fip_dev_con;
+static uintptr_t ufs_dev_handle, fip_dev_handle;
+
+static int check_ufs(const uintptr_t spec);
+static int check_fip(const uintptr_t spec);
+size_t ufs_read_lun3_blks(int lba, uintptr_t buf, size_t size);
+size_t ufs_write_lun3_blks(int lba, const uintptr_t buf, size_t size);
+
+static const io_block_spec_t ufs_fip_spec = {
+	.offset		= HIKEY960_FIP_BASE,
+	.length		= HIKEY960_FIP_MAX_SIZE,
+};
+
+static const io_block_spec_t ufs_data_spec = {
+	.offset		= 0,
+	.length		= 256 << 20,
+};
+
+static const io_block_dev_spec_t ufs_dev_spec = {
+	/* Used as a temporary buffer by the block driver. */
+	.buffer		= {
+		.offset	= HIKEY960_UFS_DATA_BASE,
+		.length	= HIKEY960_UFS_DATA_SIZE,
+	},
+	.ops		= {
+		.read	= ufs_read_lun3_blks,
+		.write	= ufs_write_lun3_blks,
+	},
+	.block_size	= UFS_BLOCK_SIZE,
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl32_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl32_extra1_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+};
+
+static const io_uuid_spec_t bl32_extra2_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&ufs_dev_handle,
+		(uintptr_t)&ufs_fip_spec,
+		check_ufs
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		check_fip
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&scp_bl2_uuid_spec,
+		check_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		check_fip
+	},
+	[BL32_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_uuid_spec,
+		check_fip
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra1_uuid_spec,
+		check_fip
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra2_uuid_spec,
+		check_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		check_fip
+	},
+	[BL2U_IMAGE_ID] = {
+		&ufs_dev_handle,
+		(uintptr_t)&ufs_data_spec,
+		check_ufs
+	}
+};
+
+static int check_ufs(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_handle;
+
+	result = io_dev_init(ufs_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(ufs_dev_handle, spec, &local_handle);
+		if (result == 0)
+			io_close(local_handle);
+	}
+	return result;
+}
+
+static int check_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+void hikey960_io_setup(void)
+{
+	int result;
+
+	result = register_io_dev_block(&ufs_dev_con);
+	assert(result == 0);
+
+	result = register_io_dev_fip(&fip_dev_con);
+	assert(result == 0);
+
+	result = io_dev_open(ufs_dev_con, (uintptr_t)&ufs_dev_spec,
+			     &ufs_dev_handle);
+	assert(result == 0);
+
+	result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
+	assert(result == 0);
+
+	/* Asserts are compiled out in release builds; silence the unused result. */
+	(void)result;
+}
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce platform load policy.
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	int result;
+	const struct plat_io_policy *policy;
+
+	assert(image_id < ARRAY_SIZE(policies));
+
+	policy = &policies[image_id];
+	result = policy->check(policy->image_spec);
+	assert(result == 0);
+
+	*image_spec = policy->image_spec;
+	*dev_handle = *(policy->dev_handle);
+
+	return result;
+}
+
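+/*
+ * Block read/write callbacks plugged into ufs_dev_spec above; on this
+ * platform the boot images are expected to live on UFS LUN 3.
+ */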
+size_t ufs_read_lun3_blks(int lba, uintptr_t buf, size_t size)
+{
+	return ufs_read_blocks(3, lba, buf, size);
+}
+
+size_t ufs_write_lun3_blks(int lba, const uintptr_t buf, size_t size)
+{
+	return ufs_write_blocks(3, lba, buf, size);
+}
diff --git a/plat/hisilicon/hikey960/hikey960_mcu_load.c b/plat/hisilicon/hikey960/hikey960_mcu_load.c
new file mode 100644
index 0000000..7bf9a3d
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_mcu_load.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <hi3660.h>
+#include <mmio.h>
+#include <string.h>
+
+#define ADDR_CONVERT(addr)		((addr) < 0x40000 ?	\
+					 (addr) + 0xFFF30000 :	\
+					 (addr) + 0x40000000)
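+/*
+ * Illustrative reading (assumption, not documented in this patch): image
+ * offsets below 0x40000 are mapped into a window at 0xFFF30000 (presumably
+ * LPM3-local RAM), while larger offsets land in the DDR window at 0x40000000.
+ */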
+
+static void fw_data_init(void)
+{
+	unsigned long data_head_addr;
+	unsigned int *data_addr;
+
+	data_head_addr = mmio_read_32((uintptr_t) HISI_DATA_HEAD_BASE) + 0x14;
+	data_addr = (unsigned int *) ADDR_CONVERT(data_head_addr);
+
+	memcpy((void *)HISI_DATA0_BASE,
+	       (const void *)(unsigned long)ADDR_CONVERT(data_addr[0]),
+	       HISI_DATA0_SIZE);
+	memcpy((void *)HISI_DATA1_BASE,
+	       (const void *)(unsigned long)ADDR_CONVERT(data_addr[1]),
+	       HISI_DATA1_SIZE);
+}
+
+int load_lpm3(void)
+{
+	INFO("start fw loading\n");
+
+	fw_data_init();
+
+	flush_dcache_range((uintptr_t)HISI_RESERVED_MEM_BASE,
+			   HISI_RESERVED_MEM_SIZE);
+
+	sev();
+	sev();
+
+	INFO("fw load success\n");
+
+	return 0;
+}
diff --git a/plat/hisilicon/hikey960/hikey960_pm.c b/plat/hisilicon/hikey960/hikey960_pm.c
new file mode 100644
index 0000000..348cdba
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_pm.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <gicv2.h>
+#include <hi3660.h>
+#include <hi3660_crg.h>
+#include <mmio.h>
+#include <psci.h>
+#include "drivers/pwrc/hisi_pwrc.h"
+
+#include "hikey960_def.h"
+#include "hikey960_private.h"
+
+#define CORE_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) \
+	((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+#define DMAC_GLB_REG_SEC	0x694
+#define AXI_CONF_BASE		0x820
+
+static uintptr_t hikey960_sec_entrypoint;
+
+static void hikey960_pwr_domain_standby(plat_local_state_t cpu_state)
+{
+	unsigned long scr;
+	unsigned int val = 0;
+
+	assert(cpu_state == PLAT_MAX_RET_STATE);
+
+	scr = read_scr_el3();
+
+	/* Enable Physical IRQ and FIQ to wake the CPU*/
+	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
+
+	set_retention_ticks(val);
+	wfi();
+	clr_retention_ticks(val);
+
+	/*
+	 * Restore SCR_EL3 to its original value. Synchronisation of
+	 * scr_el3 is handled by the eret in el3_exit, which saves some
+	 * execution cycles.
+	 */
+	write_scr_el3(scr);
+}
+
+static int hikey960_pwr_domain_on(u_register_t mpidr)
+{
+	unsigned int core = mpidr & MPIDR_CPU_MASK;
+	unsigned int cluster =
+		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+	int cluster_stat = cluster_is_powered_on(cluster);
+
+	hisi_set_cpu_boot_flag(cluster, core);
+
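+	/*
+	 * Program the boot entry point for the target core; the CRG RVBAR
+	 * register appears to take the address shifted right by two bits.
+	 */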
+	mmio_write_32(CRG_REG_BASE + CRG_RVBAR(cluster, core),
+		      hikey960_sec_entrypoint >> 2);
+
+	if (cluster_stat)
+		hisi_powerup_core(cluster, core);
+	else
+		hisi_powerup_cluster(cluster, core);
+
+	return PSCI_E_SUCCESS;
+}
+
+static void
+hikey960_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+void hikey960_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+	unsigned int core = mpidr & MPIDR_CPU_MASK;
+	unsigned int cluster =
+		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	clr_ex();
+	isb();
+	dsbsy();
+
+	gicv2_cpuif_disable();
+
+	hisi_clear_cpu_boot_flag(cluster, core);
+	hisi_powerdn_core(cluster, core);
+
+	/* power down the cluster as well if all of its cores are down */
+	if (hisi_test_cpu_down(cluster, core)) {
+
+		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+
+		isb();
+		dsbsy();
+
+		hisi_powerdn_cluster(cluster, core);
+	}
+}
+
+static void __dead2 hikey960_system_reset(void)
+{
+	mmio_write_32(SCTRL_SCPEREN1_REG,
+		      SCPEREN1_WAIT_DDR_SELFREFRESH_DONE_BYPASS);
+	mmio_write_32(SCTRL_SCSYSSTAT_REG, 0xdeadbeef);
+	panic();
+}
+
+int hikey960_validate_power_state(unsigned int power_state,
+			       psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It is only possible to enter standby at power level 0.
+		 * Reject any other power level.
+		 */
+		if (pwr_lvl != MPIDR_AFFLVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+					PLAT_MAX_RET_STATE;
+	} else {
+		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_MAX_OFF_STATE;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+static int hikey960_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non secure entrypoint lies within the non
+	 * secure DRAM.
+	 */
+	if ((entrypoint > DDR_BASE) && (entrypoint < (DDR_BASE + DDR_SIZE)))
+		return PSCI_E_SUCCESS;
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+static void hikey960_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	u_register_t mpidr = read_mpidr_el1();
+	unsigned int core = mpidr & MPIDR_CPU_MASK;
+	unsigned int cluster =
+		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	if (CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		clr_ex();
+		isb();
+		dsbsy();
+
+		gicv2_cpuif_disable();
+
+		hisi_cpuidle_lock(cluster, core);
+		hisi_set_cpuidle_flag(cluster, core);
+		hisi_cpuidle_unlock(cluster, core);
+
+		mmio_write_32(CRG_REG_BASE + CRG_RVBAR(cluster, core),
+		      hikey960_sec_entrypoint >> 2);
+
+		hisi_enter_core_idle(cluster, core);
+	}
+
+	/* Perform cluster-level operations if the cluster is to be powered down */
+	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		hisi_cpuidle_lock(cluster, core);
+		hisi_disable_pdc(cluster);
+
+		/* proceed only if all cores in the cluster are powered down */
+		if (hisi_test_pwrdn_allcores(cluster, core)) {
+
+			cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
+
+			isb();
+			dsbsy();
+
+			/* mask the pdc wakeup irq, then
+			 * enable pdc to power down the core
+			 */
+			hisi_pdc_mask_cluster_wakeirq(cluster);
+			hisi_enable_pdc(cluster);
+
+			hisi_cpuidle_unlock(cluster, core);
+
+			/* check the SR flag bit to determine whether
+			 * to send CLUSTER_IDLE_IPC or AP_SR_IPC
+			 */
+			if (hisi_test_ap_suspend_flag(cluster))
+				hisi_enter_ap_suspend(cluster, core);
+			else
+				hisi_enter_cluster_idle(cluster, core);
+		} else {
+			/* enable pdc */
+			hisi_enable_pdc(cluster);
+			hisi_cpuidle_unlock(cluster, core);
+		}
+	}
+}
+
+static void hikey960_sr_dma_reinit(void)
+{
+	unsigned int ctr = 0;
+
+	mmio_write_32(DMAC_BASE + DMAC_GLB_REG_SEC, 0x3);
+
+	/* set channels 1-15 to non-secure */
+	for (ctr = 1; ctr <= 15; ctr++)
+		mmio_write_32(DMAC_BASE + AXI_CONF_BASE + ctr * (0x40),
+			      (1 << 6) | (1 << 18));
+}
+
+static void
+hikey960_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+	unsigned int core = mpidr & MPIDR_CPU_MASK;
+	unsigned int cluster =
+		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	/* Nothing to be done when waking up from CPU-level retention */
+	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	hisi_cpuidle_lock(cluster, core);
+	hisi_clear_cpuidle_flag(cluster, core);
+	hisi_cpuidle_unlock(cluster, core);
+
+	if (hisi_test_ap_suspend_flag(cluster)) {
+		hikey960_sr_dma_reinit();
+		gicv2_cpuif_enable();
+		console_init(PL011_UART6_BASE, PL011_UART_CLK_IN_HZ,
+			     PL011_BAUDRATE);
+	}
+
+	hikey960_pwr_domain_on_finish(target_state);
+}
+
+static void hikey960_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	int i;
+
+	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+static const plat_psci_ops_t hikey960_psci_ops = {
+	.cpu_standby			= hikey960_pwr_domain_standby,
+	.pwr_domain_on			= hikey960_pwr_domain_on,
+	.pwr_domain_on_finish		= hikey960_pwr_domain_on_finish,
+	.pwr_domain_off			= hikey960_pwr_domain_off,
+	.pwr_domain_suspend		= hikey960_pwr_domain_suspend,
+	.pwr_domain_suspend_finish	= hikey960_pwr_domain_suspend_finish,
+	.system_off			= NULL,
+	.system_reset			= hikey960_system_reset,
+	.validate_power_state		= hikey960_validate_power_state,
+	.validate_ns_entrypoint		= hikey960_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= hikey960_get_sys_suspend_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	hikey960_sec_entrypoint = sec_entrypoint;
+
+	INFO("%s: sec_entrypoint=0x%lx\n", __func__,
+	     (unsigned long)hikey960_sec_entrypoint);
+
+	/*
+	 * Initialize PSCI ops struct
+	 */
+	*psci_ops = &hikey960_psci_ops;
+	return 0;
+}
diff --git a/plat/hisilicon/hikey960/hikey960_private.h b/plat/hisilicon/hikey960/hikey960_private.h
new file mode 100644
index 0000000..e3c9d21
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_private.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HIKEY960_PRIVATE_H__
+#define __HIKEY960_PRIVATE_H__
+
+#include <bl_common.h>
+
+/*
+ * Function and variable prototypes
+ */
+void hikey960_init_mmu_el1(unsigned long total_base,
+			unsigned long total_size,
+			unsigned long ro_start,
+			unsigned long ro_limit,
+			unsigned long coh_start,
+			unsigned long coh_limit);
+void hikey960_init_mmu_el3(unsigned long total_base,
+			unsigned long total_size,
+			unsigned long ro_start,
+			unsigned long ro_limit,
+			unsigned long coh_start,
+			unsigned long coh_limit);
+void hikey960_init_ufs(void);
+void hikey960_io_setup(void);
+int hikey960_read_boardid(unsigned int *id);
+void set_retention_ticks(unsigned int val);
+void clr_retention_ticks(unsigned int val);
+void clr_ex(void);
+void nop(void);
+
+#endif /* __HIKEY960_PRIVATE_H__ */
diff --git a/plat/hisilicon/hikey960/hikey960_topology.c b/plat/hisilicon/hikey960/hikey960_topology.c
new file mode 100644
index 0000000..3363724
--- /dev/null
+++ b/plat/hisilicon/hikey960/hikey960_topology.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/*
+ * The HiKey960 power domain tree descriptor. The cluster power domains
+ * are arranged so that when the PSCI generic code creates the power
+ * domain tree, the indices of the CPU power domain nodes it allocates
+ * match the linear indices returned by plat_core_pos_by_mpidr().
+ */
+const unsigned char hikey960_power_domain_tree_desc[] = {
+	/* Number of root nodes */
+	1,
+	/* Number of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* Number of children for the first cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+	/* Number of children for the second cluster node */
+	PLATFORM_CORE_COUNT_PER_CLUSTER,
+};
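+/*
+ * With PLATFORM_CLUSTER_COUNT = 2 and PLATFORM_CORE_COUNT_PER_CLUSTER = 4,
+ * this describes one system node, two cluster nodes and eight CPU nodes.
+ */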
+
+/*******************************************************************************
+ * This function returns the HiKey960 topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return hikey960_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+		return -1;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return -1;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the two clusters present on the platform.
+	 */
+	if (cpu_id >= PLATFORM_CORE_COUNT_PER_CLUSTER)
+		return -1;
+
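+	/*
+	 * Example (illustrative): cluster 1, core 2 maps to linear index
+	 * 2 + (1 * 4) = 6; the multiplier matches
+	 * PLATFORM_CORE_COUNT_PER_CLUSTER.
+	 */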
+	return (cpu_id + (cluster_id * 4));
+}
diff --git a/plat/hisilicon/hikey960/include/hi3660.h b/plat/hisilicon/hikey960/include/hi3660.h
new file mode 100644
index 0000000..83d1b36
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/hi3660.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __HI3660_H__
+#define __HI3660_H__
+
+#include <hi3660_crg.h>
+#include <hi3660_hkadc.h>
+#include <hi3660_mem_map.h>
+
+#define ASP_CFG_REG_BASE		0xE804E000
+
+#define ASP_CFG_MMBUF_CTRL_REG		(ASP_CFG_REG_BASE + 0x148)
+
+#define LP_RAM_BASE			0xFFF50000
+
+#define SCTRL_REG_BASE			0xFFF0A000
+
+#define SCTRL_CONTROL_REG		(SCTRL_REG_BASE + 0x000)
+#define SCTRL_CONTROL_SYS_MODE(x)	(((x) & 0xf) << 3)
+#define SCTRL_CONTROL_SYS_MODE_NORMAL	((1 << 2) << 3)
+#define SCTRL_CONTROL_SYS_MODE_SLOW	((1 << 1) << 3)
+#define SCTRL_CONTROL_SYS_MODE_MASK	(0xf << 3)
+#define SCTRL_CONTROL_MODE_CTRL_NORMAL	(1 << 2)
+#define SCTRL_CONTROL_MODE_CTRL_SLOW	(1 << 1)
+#define SCTRL_CONTROL_MODE_CTRL_MASK	0x7
+
+#define SCTRL_SCSYSSTAT_REG		(SCTRL_REG_BASE + 0x004)
+
+#define SCTRL_DEEPSLEEPED_REG		(SCTRL_REG_BASE + 0x008)
+#define SCTRL_EFUSE_USB_MASK		(1 << 30)
+#define SCTRL_EFUSE_USB_PLL		(1 << 30)
+#define SCTRL_EFUSE_USB_ABB		(0 << 30)
+#define SCTRL_EFUSE_UFS_MASK		(3 << 6)
+#define SCTRL_EFUSE_UFS_PLL		(1 << 6)
+#define SCTRL_EFUSE_UFS_ABB		(0 << 6)
+
+#define SCTRL_SCISOEN_REG		(SCTRL_REG_BASE + 0x040)
+#define SCTRL_SCISODIS_REG		(SCTRL_REG_BASE + 0x044)
+#define SCISO_MMBUFISO			(1 << 3)
+
+#define SCTRL_SCPWREN_REG		(SCTRL_REG_BASE + 0x060)
+#define SCPWREN_MMBUFPWREN		(1 << 3)
+
+#define SCTRL_PLL_CTRL0_REG		(SCTRL_REG_BASE + 0x100)
+#define SCTRL_PLL0_POSTDIV2(x)		(((x) & 0x7) << 23)
+#define SCTRL_PLL0_POSTDIV1(x)		(((x) & 0x7) << 20)
+#define SCTRL_PLL0_FBDIV(x)		(((x) & 0xfff) << 8)
+#define SCTRL_PLL0_REFDIV(x)		(((x) & 0x3f) << 2)
+#define SCTRL_PLL0_EN			(1 << 0)
+
+#define SCTRL_PLL_CTRL1_REG		(SCTRL_REG_BASE + 0x104)
+#define SCTRL_PLL0_CLK_NO_GATE		(1 << 26)
+#define SCTRL_PLL0_CFG_VLD		(1 << 25)
+#define SCTRL_PLL0_FRACDIV(x)		((x) & 0xFFFFFF)
+
+#define SCTRL_PLL_STAT_REG		(SCTRL_REG_BASE + 0x10C)
+#define SCTRL_PLL0_STAT			(1 << 0)
+
+#define SCTRL_SCPEREN0_REG		(SCTRL_REG_BASE + 0x160)
+#define SCTRL_SCPERDIS0_REG		(SCTRL_REG_BASE + 0x164)
+#define SCTRL_SCPERSTAT0_REG		(SCTRL_REG_BASE + 0x168)
+
+#define SCTRL_SCPEREN1_REG		(SCTRL_REG_BASE + 0x170)
+#define SCTRL_SCPERDIS1_REG		(SCTRL_REG_BASE + 0x174)
+#define SCPEREN1_WAIT_DDR_SELFREFRESH_DONE_BYPASS	(1 << 31)
+#define SCPEREN_GT_PCLK_MMBUFCFG	(1 << 25)
+#define SCPEREN_GT_PCLK_MMBUF		(1 << 23)
+#define SCPEREN_GT_ACLK_MMBUF		(1 << 22)
+#define SCPEREN_GT_CLK_NOC_AOBUS2MMBUF	(1 << 6)
+
+#define SCTRL_SCPEREN2_REG		(SCTRL_REG_BASE + 0x190)
+#define SCTRL_SCPERDIS2_REG		(SCTRL_REG_BASE + 0x194)
+#define SCTRL_SCPERSTAT2_REG		(SCTRL_REG_BASE + 0x198)
+#define SCTRL_SCPERRSTEN0_REG		(SCTRL_REG_BASE + 0x200)
+#define SCTRL_SCPERRSTDIS0_REG		(SCTRL_REG_BASE + 0x204)
+#define SCTRL_SCPERRSTSTAT0_REG		(SCTRL_REG_BASE + 0x208)
+#define SCTRL_SCPERRSTEN1_REG		(SCTRL_REG_BASE + 0x20C)
+#define SCTRL_SCPERRSTDIS1_REG		(SCTRL_REG_BASE + 0x210)
+#define SCTRL_SCPERRSTSTAT1_REG		(SCTRL_REG_BASE + 0x214)
+#define IP_RST_MMBUFCFG			(1 << 12)
+#define IP_RST_MMBUF			(1 << 11)
+
+#define SCTRL_SCPERRSTEN2_REG		(SCTRL_REG_BASE + 0x218)
+#define SCTRL_SCPERRSTDIS2_REG		(SCTRL_REG_BASE + 0x21C)
+#define SCTRL_SCPERRSTSTAT2_REG		(SCTRL_REG_BASE + 0x220)
+
+#define SCTRL_SCCLKDIV2_REG		(SCTRL_REG_BASE + 0x258)
+#define SEL_CLK_MMBUF_MASK		(0x3 << 8)
+#define SEL_CLK_MMBUF_PLL0		(0x3 << 8)
+#define SCCLKDIV2_GT_PCLK_MMBUF		(1 << 7)
+
+#define SCTRL_SCCLKDIV4_REG		(SCTRL_REG_BASE + 0x260)
+#define GT_MMBUF_SYS			(1 << 13)
+#define GT_MMBUF_FLL			(1 << 12)
+#define GT_PLL_CLK_MMBUF		(1 << 11)
+
+#define SCTRL_SCCLKDIV6_REG		(SCTRL_REG_BASE + 0x268)
+
+#define SCTRL_SCPERCTRL7_REG		(SCTRL_REG_BASE + 0x31C)
+#define SCTRL_SCPERSTAT6_REG		(SCTRL_REG_BASE + 0x378)
+
+#define SCTRL_SCINNERSTAT_REG		(SCTRL_REG_BASE + 0x3A0)
+#define EMMC_UFS_SEL			(1 << 15)
+
+#define SCTRL_BAK_DATA0_REG		(SCTRL_REG_BASE + 0x40C)
+#define SCTRL_BAK_DATA4_REG		(SCTRL_REG_BASE + 0x41C)
+
+#define SCTRL_LPMCU_CLKEN_REG		(SCTRL_REG_BASE + 0x480)
+#define SCTRL_LPMCU_CLKDIS_REG		(SCTRL_REG_BASE + 0x484)
+#define SCTRL_LPMCU_RSTEN_REG		(SCTRL_REG_BASE + 0x500)
+#define SCTRL_LPMCU_RSTDIS_REG		(SCTRL_REG_BASE + 0x504)
+#define DDRC_SOFT_BIT			(1 << 6)
+#define DDRC_CLK_BIT			(1 << 5)
+
+#define SCTRL_SCPEREN0_SEC_REG		(SCTRL_REG_BASE + 0x900)
+#define SCTRL_SCPERDIS0_SEC_REG		(SCTRL_REG_BASE + 0x904)
+#define MMBUF_SEC_CTRL_MASK		(0xfff << 20)
+#define MMBUF_SEC_CTRL(x)		(((x) & 0xfff) << 20)
+
+#define SCTRL_PERRSTEN1_SEC_REG		(SCTRL_REG_BASE + 0xA50)
+#define SCTRL_PERRSTDIS1_SEC_REG	(SCTRL_REG_BASE + 0xA54)
+#define SCTRL_PERRSTSTAT1_SEC_REG	(SCTRL_REG_BASE + 0xA58)
+#define RST_ASP_SUBSYS_BIT		(1 << 0)
+
+#define SCTRL_PERRSTEN2_SEC_REG		(SCTRL_REG_BASE + 0xB50)
+#define SCTRL_PERRSTDIS2_SEC_REG	(SCTRL_REG_BASE + 0xB54)
+#define SCTRL_PERRSTSTAT2_SEC_REG	(SCTRL_REG_BASE + 0xB58)
+
+#define SCTRL_HISEECLKDIV_REG		(SCTRL_REG_BASE + 0xC28)
+#define SC_SEL_HISEE_PLL_MASK		(1 << 4)
+#define SC_SEL_HISEE_PLL0		(1 << 4)
+#define SC_SEL_HISEE_PLL2		(0 << 4)
+#define SC_DIV_HISEE_PLL_MASK		(7 << 16)
+#define SC_DIV_HISEE_PLL(x)		((x) & 0x7)
+
+#define SCTRL_SCSOCID0_REG		(SCTRL_REG_BASE + 0xE00)
+
+#define PMC_REG_BASE			0xFFF31000
+#define PMC_PPLL1_CTRL0_REG		(PMC_REG_BASE + 0x038)
+#define PMC_PPLL1_CTRL1_REG		(PMC_REG_BASE + 0x03C)
+#define PMC_PPLL2_CTRL0_REG		(PMC_REG_BASE + 0x040)
+#define PMC_PPLL2_CTRL1_REG		(PMC_REG_BASE + 0x044)
+#define PMC_PPLL3_CTRL0_REG		(PMC_REG_BASE + 0x048)
+#define PMC_PPLL3_CTRL1_REG		(PMC_REG_BASE + 0x04C)
+#define PPLLx_LOCK			(1 << 26)
+#define PPLLx_WITHOUT_CLK_GATE		(1 << 26)
+#define PPLLx_CFG_VLD			(1 << 25)
+#define PPLLx_INT_MOD			(1 << 24)
+#define PPLLx_POSTDIV2_MASK		(0x7 << 23)
+#define PPLLx_POSTDIV2(x)		(((x) & 0x7) << 23)
+#define PPLLx_POSTDIV1_MASK		(0x7 << 20)
+#define PPLLx_POSTDIV1(x)		(((x) & 0x7) << 20)
+#define PPLLx_FRACDIV_MASK		(0x00FFFFFF)
+#define PPLLx_FRACDIV(x)		((x) & 0x00FFFFFF)
+#define PPLLx_FBDIV_MASK		(0xfff << 8)
+#define PPLLx_FBDIV(x)			(((x) & 0xfff) << 8)
+#define PPLLx_REFDIV_MASK		(0x3f << 2)
+#define PPLLx_REFDIV(x)			(((x) & 0x3f) << 2)
+#define PPLLx_BP			(1 << 1)
+#define PPLLx_EN			(1 << 0)
+
+#define PMC_DDRLP_CTRL_REG		(PMC_REG_BASE + 0x30C)
+#define DDRC_CSYSREQ_CFG(x)		((x) & 0xF)
+
+#define PMC_NOC_POWER_IDLEREQ_REG	(PMC_REG_BASE + 0x380)
+#define PMC_NOC_POWER_IDLEREQ_IVP	(1 << 14)
+#define PMC_NOC_POWER_IDLEREQ_DSS	(1 << 13)
+#define PMC_NOC_POWER_IDLEREQ_VENC	(1 << 11)
+#define PMC_NOC_POWER_IDLEREQ_VDEC	(1 << 10)
+#define PMC_NOC_POWER_IDLEREQ_ISP	(1 << 5)
+#define PMC_NOC_POWER_IDLEREQ_VCODEC	(1 << 4)
+#define DDRPHY_BYPASS_MODE		(1 << 0)
+
+#define PMC_NOC_POWER_IDLEACK_REG	(PMC_REG_BASE + 0x384)
+#define PMC_NOC_POWER_IDLE_REG		(PMC_REG_BASE + 0x388)
+
+#define PMU_SSI0_REG_BASE		0xFFF34000
+
+#define PMU_SSI0_LDO8_CTRL0_REG		(PMU_SSI0_REG_BASE + (0x68 << 2))
+#define LDO8_CTRL0_EN_1_8V		0x02
+
+#define PMU_SSI0_CLK_TOP_CTRL7_REG	(PMU_SSI0_REG_BASE + (0x10C << 2))
+#define NP_XO_ABB_DIG			(1 << 1)
+
+#define LP_CONFIG_REG_BASE		0xFFF3F000
+
+#define DMAC_BASE			0xFDF30000
+
+#define CCI400_REG_BASE			0xE8100000
+#define CCI400_SL_IFACE3_CLUSTER_IX	0
+#define CCI400_SL_IFACE4_CLUSTER_IX	1
+
+#define GICD_REG_BASE			0xE82B1000
+#define GICC_REG_BASE			0xE82B2000
+/*
+ * GIC400 interrupt handling related constants
+ */
+#define IRQ_SEC_PHY_TIMER		29
+#define IRQ_SEC_SGI_0			8
+#define IRQ_SEC_SGI_1			9
+#define IRQ_SEC_SGI_2			10
+#define IRQ_SEC_SGI_3			11
+#define IRQ_SEC_SGI_4			12
+#define IRQ_SEC_SGI_5			13
+#define IRQ_SEC_SGI_6			14
+#define IRQ_SEC_SGI_7			15
+#define IRQ_SEC_SGI_8			16
+
+#define IPC_REG_BASE			0xE896A000
+#define IPC_BASE			(IPC_REG_BASE)
+
+#define IOMG_REG_BASE			0xE896C000
+
+/* GPIO46: HUB 3.3V enable. active low */
+#define IOMG_044_REG			(IOMG_REG_BASE + 0x0B0)
+#define IOMG_UART5_RX_REG		(IOMG_REG_BASE + 0x0BC)
+#define IOMG_UART5_TX_REG		(IOMG_REG_BASE + 0x0C0)
+
+#define IOCG_REG_BASE			0xE896C800
+
+/* GPIO005: PMIC SSI. (2 << 4) */
+#define IOCG_006_REG			(IOCG_REG_BASE + 0x018)
+
+#define TIMER9_REG_BASE			0xE8A00000
+
+#define WDT0_REG_BASE			0xE8A06000
+#define WDT1_REG_BASE			0xE8A07000
+#define WDT_CONTROL_OFFSET		0x008
+#define WDT_LOCK_OFFSET			0xC00
+
+#define WDT_UNLOCK			0x1ACCE551
+#define WDT_LOCKED			1
+
+#define PCTRL_REG_BASE			0xE8A09000
+#define PCTRL_PERI_CTRL3_REG		(PCTRL_REG_BASE + 0x010)
+#define PCTRL_PERI_CTRL24_REG		(PCTRL_REG_BASE + 0x064)
+
+#define TZC_REG_BASE			0xE8A21000
+#define TZC_STAT0_REG			(TZC_REG_BASE + 0x800)
+#define TZC_EN0_REG			(TZC_REG_BASE + 0x804)
+#define TZC_DIS0_REG			(TZC_REG_BASE + 0x808)
+#define TZC_STAT1_REG			(TZC_REG_BASE + 0x80C)
+#define TZC_EN1_REG			(TZC_REG_BASE + 0x810)
+#define TZC_DIS1_REG			(TZC_REG_BASE + 0x814)
+#define TZC_STAT2_REG			(TZC_REG_BASE + 0x818)
+#define TZC_EN2_REG			(TZC_REG_BASE + 0x81C)
+#define TZC_DIS2_REG			(TZC_REG_BASE + 0x820)
+#define TZC_STAT3_REG			(TZC_REG_BASE + 0x824)
+#define TZC_EN3_REG			(TZC_REG_BASE + 0x828)
+#define TZC_DIS3_REG			(TZC_REG_BASE + 0x82C)
+#define TZC_STAT4_REG			(TZC_REG_BASE + 0x830)
+#define TZC_EN4_REG			(TZC_REG_BASE + 0x834)
+#define TZC_DIS4_REG			(TZC_REG_BASE + 0x838)
+#define TZC_STAT5_REG			(TZC_REG_BASE + 0x83C)
+#define TZC_EN5_REG			(TZC_REG_BASE + 0x840)
+#define TZC_DIS5_REG			(TZC_REG_BASE + 0x844)
+#define TZC_STAT6_REG			(TZC_REG_BASE + 0x848)
+#define TZC_EN6_REG			(TZC_REG_BASE + 0x84C)
+#define TZC_DIS6_REG			(TZC_REG_BASE + 0x850)
+#define TZC_STAT7_REG			(TZC_REG_BASE + 0x854)
+#define TZC_EN7_REG			(TZC_REG_BASE + 0x858)
+#define TZC_DIS7_REG			(TZC_REG_BASE + 0x85C)
+#define TZC_STAT8_REG			(TZC_REG_BASE + 0x860)
+#define TZC_EN8_REG			(TZC_REG_BASE + 0x864)
+#define TZC_DIS8_REG			(TZC_REG_BASE + 0x868)
+
+#define MMBUF_BASE			0xEA800000
+
+#define ACPU_DMCPACK0_BASE		0xEA900000
+
+#define ACPU_DMCPACK1_BASE		0xEA920000
+
+#define ACPU_DMCPACK2_BASE		0xEA940000
+
+#define ACPU_DMCPACK3_BASE		0xEA960000
+
+#define UART5_REG_BASE			0xFDF05000
+
+#define USB3OTG_REG_BASE		0xFF100000
+
+#define UFS_REG_BASE			0xFF3B0000
+
+#define UFS_SYS_REG_BASE		0xFF3B1000
+
+#define UFS_SYS_PSW_POWER_CTRL_REG	(UFS_SYS_REG_BASE + 0x004)
+#define UFS_SYS_PHY_ISO_EN_REG		(UFS_SYS_REG_BASE + 0x008)
+#define UFS_SYS_HC_LP_CTRL_REG		(UFS_SYS_REG_BASE + 0x00C)
+#define UFS_SYS_PHY_CLK_CTRL_REG	(UFS_SYS_REG_BASE + 0x010)
+#define UFS_SYS_PSW_CLK_CTRL_REG	(UFS_SYS_REG_BASE + 0x014)
+#define UFS_SYS_CLOCK_GATE_BYPASS_REG	(UFS_SYS_REG_BASE + 0x018)
+#define UFS_SYS_RESET_CTRL_EN_REG	(UFS_SYS_REG_BASE + 0x01C)
+#define UFS_SYS_MONITOR_HH_REG		(UFS_SYS_REG_BASE + 0x03C)
+#define UFS_SYS_UFS_SYSCTRL_REG		(UFS_SYS_REG_BASE + 0x05C)
+#define UFS_SYS_UFS_DEVICE_RESET_CTRL_REG	(UFS_SYS_REG_BASE + 0x060)
+#define UFS_SYS_UFS_APB_ADDR_MASK_REG	(UFS_SYS_REG_BASE + 0x064)
+
+#define BIT_UFS_PSW_ISO_CTRL			(1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN			(1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN			(1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL			(1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN			(1 << 16)
+#define BIT_SYSCTRL_PWR_READY			(1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN		(1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL		(3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ		(0xFF)
+#define BIT_SYSCTRL_PSW_CLK_EN			(1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS		(0x3F)
+#define BIT_SYSCTRL_LP_RESET_N			(1 << 0)
+#define BIT_UFS_REFCLK_SRC_SE1			(1 << 0)
+#define MASK_UFS_SYSCTRL_BYPASS			(0x3F << 16)
+#define MASK_UFS_DEVICE_RESET			(1 << 16)
+#define BIT_UFS_DEVICE_RESET			(1 << 0)
+
+#define IOMG_FIX_REG_BASE		0xFF3B6000
+
+/* GPIO150: LED */
+#define IOMG_FIX_006_REG		(IOMG_FIX_REG_BASE + 0x018)
+/* GPIO151: LED */
+#define IOMG_FIX_007_REG		(IOMG_FIX_REG_BASE + 0x01C)
+
+#define IOMG_AO_REG_BASE		0xFFF11000
+
+/* GPIO189: LED */
+#define IOMG_AO_011_REG			(IOMG_AO_REG_BASE + 0x02C)
+/* GPIO190: LED */
+#define IOMG_AO_012_REG			(IOMG_AO_REG_BASE + 0x030)
+/* GPIO202: type C enable. active low */
+#define IOMG_AO_023_REG			(IOMG_AO_REG_BASE + 0x05C)
+/* GPIO206: USB switch. active low */
+#define IOMG_AO_026_REG			(IOMG_AO_REG_BASE + 0x068)
+/* GPIO219: PD interrupt. pull up */
+#define IOMG_AO_039_REG			(IOMG_AO_REG_BASE + 0x09C)
+
+#define IOCG_AO_REG_BASE		0xFFF1187C
+/* GPIO219: PD interrupt. pull up */
+#define IOCG_AO_043_REG			(IOCG_AO_REG_BASE + 0x030)
+
+#endif  /* __HI3660_H__ */
diff --git a/plat/hisilicon/hikey960/include/hi3660_crg.h b/plat/hisilicon/hikey960/include/hi3660_crg.h
new file mode 100644
index 0000000..db1df9e
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/hi3660_crg.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __HI3660_CRG_H__
+#define __HI3660_CRG_H__
+
+#define CRG_REG_BASE			0xFFF35000
+
+#define CRG_PEREN0_REG			(CRG_REG_BASE + 0x000)
+#define CRG_PERDIS0_REG			(CRG_REG_BASE + 0x004)
+#define CRG_PERSTAT0_REG		(CRG_REG_BASE + 0x008)
+#define PEREN0_GT_CLK_AOMM		(1 << 31)
+
+#define CRG_PEREN1_REG			(CRG_REG_BASE + 0x010)
+#define CRG_PERDIS1_REG			(CRG_REG_BASE + 0x014)
+#define CRG_PERSTAT1_REG		(CRG_REG_BASE + 0x018)
+#define CRG_PEREN2_REG			(CRG_REG_BASE + 0x020)
+#define CRG_PERDIS2_REG			(CRG_REG_BASE + 0x024)
+#define CRG_PERSTAT2_REG		(CRG_REG_BASE + 0x028)
+#define PEREN2_HKADCSSI			(1 << 24)
+
+#define CRG_PEREN3_REG			(CRG_REG_BASE + 0x030)
+#define CRG_PERDIS3_REG			(CRG_REG_BASE + 0x034)
+
+#define CRG_PEREN4_REG			(CRG_REG_BASE + 0x040)
+#define CRG_PERDIS4_REG			(CRG_REG_BASE + 0x044)
+#define CRG_PERCLKEN4_REG		(CRG_REG_BASE + 0x048)
+#define CRG_PERSTAT4_REG		(CRG_REG_BASE + 0x04C)
+#define GT_ACLK_USB3OTG			(1 << 1)
+#define GT_CLK_USB3OTG_REF		(1 << 0)
+
+#define CRG_PEREN5_REG			(CRG_REG_BASE + 0x050)
+#define CRG_PERDIS5_REG			(CRG_REG_BASE + 0x054)
+#define CRG_PERSTAT5_REG		(CRG_REG_BASE + 0x058)
+#define CRG_PERRSTEN0_REG		(CRG_REG_BASE + 0x060)
+#define CRG_PERRSTDIS0_REG		(CRG_REG_BASE + 0x064)
+#define CRG_PERRSTSTAT0_REG		(CRG_REG_BASE + 0x068)
+#define CRG_PERRSTEN1_REG		(CRG_REG_BASE + 0x06C)
+#define CRG_PERRSTDIS1_REG		(CRG_REG_BASE + 0x070)
+#define CRG_PERRSTSTAT1_REG		(CRG_REG_BASE + 0x074)
+#define CRG_PERRSTEN2_REG		(CRG_REG_BASE + 0x078)
+#define CRG_PERRSTDIS2_REG		(CRG_REG_BASE + 0x07C)
+#define CRG_PERRSTSTAT2_REG		(CRG_REG_BASE + 0x080)
+#define PERRSTEN2_HKADCSSI		(1 << 24)
+
+#define CRG_PERRSTEN3_REG		(CRG_REG_BASE + 0x084)
+#define CRG_PERRSTDIS3_REG		(CRG_REG_BASE + 0x088)
+#define CRG_PERRSTSTAT3_REG		(CRG_REG_BASE + 0x08C)
+#define CRG_PERRSTEN4_REG		(CRG_REG_BASE + 0x090)
+#define CRG_PERRSTDIS4_REG		(CRG_REG_BASE + 0x094)
+#define CRG_PERRSTSTAT4_REG		(CRG_REG_BASE + 0x098)
+#define IP_RST_USB3OTG_MUX		(1 << 8)
+#define IP_RST_USB3OTG_AHBIF		(1 << 7)
+#define IP_RST_USB3OTG_32K		(1 << 6)
+#define IP_RST_USB3OTG			(1 << 5)
+#define IP_RST_USB3OTGPHY_POR		(1 << 3)
+
+#define CRG_PERRSTEN5_REG		(CRG_REG_BASE + 0x09C)
+#define CRG_PERRSTDIS5_REG		(CRG_REG_BASE + 0x0A0)
+#define CRG_PERRSTSTAT5_REG		(CRG_REG_BASE + 0x0A4)
+
+/* bit fields in CRG_PERI */
+#define PERI_PCLK_PCTRL_BIT		(1 << 31)
+#define PERI_TIMER12_BIT		(1 << 25)
+#define PERI_TIMER11_BIT		(1 << 24)
+#define PERI_TIMER10_BIT		(1 << 23)
+#define PERI_TIMER9_BIT			(1 << 22)
+#define PERI_UART5_BIT			(1 << 15)
+#define PERI_UFS_BIT			(1 << 12)
+#define PERI_ARST_UFS_BIT		(1 << 7)
+#define PERI_PPLL2_EN_CPU		(1 << 3)
+#define PERI_PWM_BIT			(1 << 0)
+#define PERI_DDRC_BIT			(1 << 0)
+#define PERI_DDRC_D_BIT			(1 << 4)
+#define PERI_DDRC_C_BIT			(1 << 3)
+#define PERI_DDRC_B_BIT			(1 << 2)
+#define PERI_DDRC_A_BIT			(1 << 1)
+#define PERI_DDRC_DMUX_BIT		(1 << 0)
+
+#define CRG_CLKDIV0_REG			(CRG_REG_BASE + 0x0A0)
+#define SC_DIV_LPMCU_MASK		((0x1F << 5) << 16)
+#define SC_DIV_LPMCU(x)			(((x) & 0x1F) << 5)
+
+#define CRG_CLKDIV1_REG			(CRG_REG_BASE + 0x0B0)
+#define SEL_LPMCU_PLL_MASK		((1 << 1) << 16)
+#define SEL_SYSBUS_MASK			((1 << 0) << 16)
+#define SEL_LPMCU_PLL1			(1 << 1)
+#define SEL_LPMCU_PLL0			(0 << 1)
+#define SEL_SYSBUS_PLL0			(1 << 0)
+#define SEL_SYSBUS_PLL1			(0 << 0)
+
+#define CRG_CLKDIV3_REG			(CRG_REG_BASE + 0x0B4)
+#define CRG_CLKDIV5_REG			(CRG_REG_BASE + 0x0BC)
+#define CRG_CLKDIV8_REG			(CRG_REG_BASE + 0x0C8)
+
+#define CRG_CLKDIV12_REG		(CRG_REG_BASE + 0x0D8)
+#define SC_DIV_A53HPM_MASK		(0x7 << 13)
+#define SC_DIV_A53HPM(x)		(((x) & 0x7) << 13)
+
+#define CRG_CLKDIV16_REG		(CRG_REG_BASE + 0x0E8)
+#define DDRC_CLK_SW_REQ_CFG_MASK	(0x3 << 12)
+#define DDRC_CLK_SW_REQ_CFG(x)		(((x) & 0x3) << 12)
+#define SC_DIV_UFSPHY_CFG_MASK		(0x3 << 9)
+#define SC_DIV_UFSPHY_CFG(x)		(((x) & 0x3) << 9)
+#define DDRCPLL_SW			(1 << 8)
+
+#define CRG_CLKDIV17_REG		(CRG_REG_BASE + 0x0EC)
+#define SC_DIV_UFS_PERIBUS		(1 << 14)
+
+#define CRG_CLKDIV18_REG		(CRG_REG_BASE + 0x0F0)
+#define CRG_CLKDIV19_REG		(CRG_REG_BASE + 0x0F4)
+#define CRG_CLKDIV20_REG		(CRG_REG_BASE + 0x0F8)
+#define CLKDIV20_GT_CLK_AOMM		(1 << 3)
+
+#define CRG_CLKDIV22_REG		(CRG_REG_BASE + 0x100)
+#define SEL_PLL_320M_MASK		(1 << 16)
+#define SEL_PLL2_320M			(1 << 0)
+#define SEL_PLL0_320M			(0 << 0)
+
+#define CRG_CLKDIV23_REG		(CRG_REG_BASE + 0x104)
+#define PERI_DDRC_SW_BIT		(1 << 13)
+#define DIV_CLK_DDRSYS_MASK		(0x3 << 10)
+#define DIV_CLK_DDRSYS(x)		(((x) & 0x3) << 10)
+#define GET_DIV_CLK_DDRSYS(x)		(((x) & DIV_CLK_DDRSYS_MASK) >> 10)
+#define DIV_CLK_DDRCFG_MASK		(0x6 << 5)
+#define DIV_CLK_DDRCFG(x)		(((x) & 0x6) << 5)
+#define GET_DIV_CLK_DDRCFG(x)		(((x) & DIV_CLK_DDRCFG_MASK) >> 5)
+#define DIV_CLK_DDRC_MASK		0x1F
+#define DIV_CLK_DDRC(x)			((x) & DIV_CLK_DDRC_MASK)
+#define GET_DIV_CLK_DDRC(x)		((x) & DIV_CLK_DDRC_MASK)
+
+#define CRG_CLKDIV25_REG		(CRG_REG_BASE + 0x10C)
+#define DIV_SYSBUS_PLL_MASK		(0xF << 16)
+#define DIV_SYSBUS_PLL(x)		((x) & 0xF)
+
+#define CRG_PERI_CTRL2_REG		(CRG_REG_BASE + 0x128)
+#define PERI_TIME_STAMP_CLK_MASK	(0x7 << 28)
+#define PERI_TIME_STAMP_CLK_DIV(x)	(((x) & 0x7) << 22)
+
+#define CRG_ISODIS_REG			(CRG_REG_BASE + 0x148)
+#define CRG_PERPWREN_REG		(CRG_REG_BASE + 0x150)
+
+#define CRG_PEREN7_REG			(CRG_REG_BASE + 0x420)
+#define CRG_PERDIS7_REG			(CRG_REG_BASE + 0x424)
+#define CRG_PERSTAT7_REG		(CRG_REG_BASE + 0x428)
+#define GT_CLK_UFSPHY_CFG		(1 << 14)
+
+#define CRG_PEREN8_REG			(CRG_REG_BASE + 0x430)
+#define CRG_PERDIS8_REG			(CRG_REG_BASE + 0x434)
+#define CRG_PERSTAT8_REG		(CRG_REG_BASE + 0x438)
+#define PERI_DMC_D_BIT			(1 << 22)
+#define PERI_DMC_C_BIT			(1 << 21)
+#define PERI_DMC_B_BIT			(1 << 20)
+#define PERI_DMC_A_BIT			(1 << 19)
+#define PERI_DMC_BIT			(1 << 18)
+
+#define CRG_PEREN11_REG			(CRG_REG_BASE + 0x460)
+#define PPLL1_GATE_CPU			(1 << 18)
+
+#define CRG_PERSTAT11_REG		(CRG_REG_BASE + 0x46C)
+#define PPLL3_EN_STAT			(1 << 21)
+#define PPLL2_EN_STAT			(1 << 20)
+#define PPLL1_EN_STAT			(1 << 19)
+
+#define CRG_IVP_SEC_RSTDIS_REG		(CRG_REG_BASE + 0xC04)
+#define CRG_ISP_SEC_RSTDIS_REG		(CRG_REG_BASE + 0xC84)
+
+#define CRG_RVBAR(c, n)			(0xE00 + (0x10 * c) + (0x4 * n))
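+/* Example (illustrative): CRG_RVBAR(1, 2) = 0xE00 + 0x10 + 0x8 = 0xE18. */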
+#define CRG_GENERAL_SEC_RSTEN_REG	(CRG_REG_BASE + 0xE20)
+#define CRG_GENERAL_SEC_RSTDIS_REG	(CRG_REG_BASE + 0xE24)
+#define IP_RST_GPIO0_SEC		(1 << 2)
+
+#define CRG_GENERAL_SEC_CLKDIV0_REG	(CRG_REG_BASE + 0xE90)
+#define SC_DIV_AO_HISE_MASK		3
+#define SC_DIV_AO_HISE(x)		((x) & 0x3)
+
+#endif	/* __HI3660_CRG_H__ */
diff --git a/plat/hisilicon/hikey960/include/hi3660_hkadc.h b/plat/hisilicon/hikey960/include/hi3660_hkadc.h
new file mode 100644
index 0000000..6e67114
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/hi3660_hkadc.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __HI3660_HKADC_H__
+#define __HI3660_HKADC_H__
+
+#define HKADC_SSI_REG_BASE			0xE82B8000
+
+#define HKADC_DSP_START_REG			(HKADC_SSI_REG_BASE + 0x000)
+#define HKADC_WR_NUM_REG			(HKADC_SSI_REG_BASE + 0x008)
+#define HKADC_DSP_START_CLR_REG			(HKADC_SSI_REG_BASE + 0x01C)
+#define HKADC_WR01_DATA_REG			(HKADC_SSI_REG_BASE + 0x020)
+
+#define WR1_WRITE_MODE				(1 << 31)
+#define WR1_READ_MODE				(0 << 31)
+#define WR1_ADDR(x)				(((x) & 0x7F) << 24)
+#define WR1_DATA(x)				(((x) & 0xFF) << 16)
+#define WR0_WRITE_MODE				(1 << 15)
+#define WR0_READ_MODE				(0 << 15)
+#define WR0_ADDR(x)				(((x) & 0x7F) << 8)
+#define WR0_DATA(x)				((x) & 0xFF)
+
+#define HKADC_WR23_DATA_REG			(HKADC_SSI_REG_BASE + 0x024)
+#define HKADC_WR45_DATA_REG			(HKADC_SSI_REG_BASE + 0x028)
+#define HKADC_DELAY01_REG			(HKADC_SSI_REG_BASE + 0x030)
+#define HKADC_DELAY23_REG			(HKADC_SSI_REG_BASE + 0x034)
+#define HKADC_DELAY45_REG			(HKADC_SSI_REG_BASE + 0x038)
+#define HKADC_DSP_RD2_DATA_REG			(HKADC_SSI_REG_BASE + 0x048)
+#define HKADC_DSP_RD3_DATA_REG			(HKADC_SSI_REG_BASE + 0x04C)
+
+/* HKADC Internal Registers */
+#define HKADC_CTRL_ADDR				0x00
+#define HKADC_START_ADDR			0x01
+#define HKADC_DATA1_ADDR			0x03   /* high 8 bits */
+#define HKADC_DATA0_ADDR			0x04   /* low 8 bits */
+#define HKADC_MODE_CFG				0x0A
+
+#define HKADC_VALUE_HIGH			0x0FF0
+#define HKADC_VALUE_LOW				0x000F
+#define HKADC_VALID_VALUE			0x0FFF
+
+#define HKADC_CHANNEL_MAX			15
+#define HKADC_VREF_1V8				1800
+#define HKADC_ACCURACY				0x0FFF
+
+#define HKADC_WR01_VALUE			((HKADC_START_ADDR << 24) | \
+						 (0x1 << 16))
+#define HKADC_WR23_VALUE			((0x1 << 31) |		\
+						 (HKADC_DATA0_ADDR << 24) | \
+						 (1 << 15) |		\
+						 (HKADC_DATA1_ADDR << 8))
+#define HKADC_WR45_VALUE			(0x80)
+#define HKADC_CHANNEL0_DELAY01_VALUE		((0x0700 << 16) | 0xFFFF)
+#define HKADC_DELAY01_VALUE			((0x0700 << 16) | 0x0200)
+#define HKADC_DELAY23_VALUE			((0x00C8 << 16) | 0x00C8)
+#define START_DELAY_TIMEOUT			2000
+#define HKADC_WR_NUM_VALUE			4
+
+#endif /* __HI3660_HKADC_H__ */
diff --git a/plat/hisilicon/hikey960/include/hi3660_mem_map.h b/plat/hisilicon/hikey960/include/hi3660_mem_map.h
new file mode 100644
index 0000000..db3efaf
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/hi3660_mem_map.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI3660_MEM_MAP__
+#define __HI3660_MEM_MAP__
+
+#define HISI_DATA_HEAD_BASE		(0x89C44400)
+
+#define HISI_RESERVED_MEM_BASE		(0x89C80000)
+#define HISI_RESERVED_MEM_SIZE		(0x00040000)
+
+#define HISI_DATA0_BASE			(0x89C96180)
+#define HISI_DATA0_SIZE			(0x000003A0)
+#define HISI_DATA1_BASE			(0x89C93480)
+#define HISI_DATA1_SIZE			(0x00002D00)
+
+#endif /* __HI3660_MEM_MAP__ */
diff --git a/plat/hisilicon/hikey960/include/hisi_ipc.h b/plat/hisilicon/hikey960/include/hisi_ipc.h
new file mode 100644
index 0000000..9dda1a5
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/hisi_ipc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HISI_IPC_H__
+#define __HISI_IPC_H__
+
+enum pm_mode {
+	PM_ON = 0,
+	PM_OFF,
+};
+
+void hisi_ipc_pm_on_off(unsigned int core, unsigned int cluster,
+			enum pm_mode mode);
+void hisi_ipc_pm_suspend(unsigned int core, unsigned int cluster,
+			 unsigned int affinity_level);
+void hisi_ipc_psci_system_off(unsigned int core, unsigned int cluster);
+void hisi_ipc_psci_system_reset(unsigned int core, unsigned int cluster,
+				unsigned int cmd_id);
+int hisi_ipc_init(void);
+
+#endif /* __HISI_IPC_H__ */
diff --git a/plat/hisilicon/hikey960/include/plat_macros.S b/plat/hisilicon/hikey960/include/plat_macros.S
new file mode 100644
index 0000000..5137f9e
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/plat_macros.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <cci.h>
+#include <gic_v2.h>
+#include <hi3660.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"    \
+                " Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+/* ---------------------------------------------
+ * The macro below prints out the relevant GIC
+ * and CCI registers whenever an unhandled
+ * exception is taken in BL31.
+ * ---------------------------------------------
+ */
+.macro plat_crash_print_regs
+	mov_imm	x16, GICD_REG_BASE
+	mov_imm	x17, GICC_REG_BASE
+
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+2:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	1f
+	bl	asm_print_hex
+	adr	x4, spacer
+	bl	asm_print_str
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+	adr	x4, newline
+	bl	asm_print_str
+	b	2b
+1:
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (CCI400_REG_BASE + SLAVE_IFACE_OFFSET(	\
+	                CCI400_SL_IFACE3_CLUSTER_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (CCI400_REG_BASE + SLAVE_IFACE_OFFSET(	\
+	                CCI400_SL_IFACE4_CLUSTER_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/hisilicon/hikey960/include/platform_def.h b/plat/hisilicon/hikey960/include/platform_def.h
new file mode 100644
index 0000000..cb76090
--- /dev/null
+++ b/plat/hisilicon/hikey960/include/platform_def.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include "../hikey960_def.h"
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define HIKEY960_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+/*
+ * Generic platform constants
+ */
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE		0x800
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_CACHE_LINE_SIZE	64
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CORE_COUNT_PER_CLUSTER	4
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT * \
+					 PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CORE_COUNT + \
+					 PLATFORM_CLUSTER_COUNT + 1)
+
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+
+#define MAX_IO_DEVICES			3
+#define MAX_IO_HANDLES			4
+/* UFS RPMB and UFS User Data */
+#define MAX_IO_BLOCK_DEVICES		2
+
+
+/*
+ * Platform memory map related constants
+ */
+
+/*
+ * BL1 specific defines.
+ */
+#define BL1_RO_BASE			(0x1AC00000)
+#define BL1_RO_LIMIT			(BL1_RO_BASE + 0x10000)
+#define BL1_RW_BASE			(BL1_RO_LIMIT)		/* 1AC1_0000 */
+#define BL1_RW_SIZE			(0x00188000)
+#define BL1_RW_LIMIT			(0x1B000000)
+
+/*
+ * BL2 specific defines.
+ */
+#define BL2_BASE			(BL1_RW_BASE + 0x8000)	/* 1AC1_8000 */
+#define BL2_LIMIT			(BL2_BASE + 0x40000)	/* 1AC5_8000 */
+
+/*
+ * BL31 specific defines.
+ */
+#define BL31_BASE			(BL2_LIMIT)		/* 1AC5_8000 */
+#define BL31_LIMIT			(BL31_BASE + 0x40000)	/* 1AC9_8000 */
+
+/*
+ * BL3-2 specific defines.
+ */
+
+/*
+ * The TSP currently executes from TZC secured area of DRAM.
+ */
+#define BL32_DRAM_BASE                  DDR_SEC_BASE
+#define BL32_DRAM_LIMIT                 (DDR_SEC_BASE + DDR_SEC_SIZE)
+
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+/* Load pageable part of OP-TEE at end of allocated DRAM space for BL32 */
+#define HIKEY960_OPTEE_PAGEABLE_LOAD_SIZE	0x400000 /* 4MB */
+#define HIKEY960_OPTEE_PAGEABLE_LOAD_BASE	(BL32_DRAM_LIMIT -	\
+						 HIKEY960_OPTEE_PAGEABLE_LOAD_SIZE)	/* 0x3FC0_0000 */
+#endif
+#endif
+
+#if (HIKEY960_TSP_RAM_LOCATION_ID == HIKEY960_DRAM_ID)
+#define TSP_SEC_MEM_BASE		BL32_DRAM_BASE
+#define TSP_SEC_MEM_SIZE		(BL32_DRAM_LIMIT - BL32_DRAM_BASE)
+#define BL32_BASE			BL32_DRAM_BASE
+#define BL32_LIMIT			BL32_DRAM_LIMIT
+#elif (HIKEY960_TSP_RAM_LOCATION_ID == HIKEY960_SRAM_ID)
+#error "SRAM storage of TSP payload is currently unsupported"
+#else
+#error "Currently unsupported HIKEY960_TSP_LOCATION_ID value"
+#endif
+
+/* BL32 is mandatory in AArch32 */
+#ifndef AARCH32
+#ifdef SPD_none
+#undef BL32_BASE
+#endif /* SPD_none */
+#endif
+
+#define NS_BL1U_BASE			(BL31_LIMIT)		/* 1AC9_8000 */
+#define NS_BL1U_SIZE			(0x00100000)
+#define NS_BL1U_LIMIT			(NS_BL1U_BASE + NS_BL1U_SIZE)
+
+#define HIKEY960_NS_IMAGE_OFFSET	(0x1AC18000)	/* offset in l-loader */
+#define HIKEY960_NS_TMP_OFFSET		(0x1AE00000)
+
+#define SCP_BL2_BASE			(0x89C80000)
+#define SCP_BL2_SIZE			(0x00040000)
+
+/*
+ * Platform specific page table and MMU setup constants
+ */
+#define ADDR_SPACE_SIZE			(1ull << 32)
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || defined(IMAGE_BL32)
+#define MAX_XLAT_TABLES			3
+#endif
+
+#ifdef IMAGE_BL2
+#if LOAD_IMAGE_V2
+#ifdef SPD_opteed
+#define MAX_XLAT_TABLES			4
+#else
+#define MAX_XLAT_TABLES			3
+#endif
+#else
+#define MAX_XLAT_TABLES			3
+#endif
+#endif
+
+#define MAX_MMAP_REGIONS		16
+
+/*
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same
+ * cache line at any cache level. They could belong to different cpus/clusters
+ * and get written while being protected by different locks, causing corruption
+ * of a valid mailbox address.
+ */
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
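+/* 1 << 6 = 64 bytes, matching PLATFORM_CACHE_LINE_SIZE above. */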
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/hisilicon/hikey960/platform.mk b/plat/hisilicon/hikey960/platform.mk
new file mode 100644
index 0000000..da7bb82
--- /dev/null
+++ b/plat/hisilicon/hikey960/platform.mk
@@ -0,0 +1,102 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Enable version 2 of image loading
+LOAD_IMAGE_V2	:=	1
+
+# On HiKey960, the TSP can execute from the TZC-secured area in DRAM.
+HIKEY960_TSP_RAM_LOCATION	:=	dram
+ifeq (${HIKEY960_TSP_RAM_LOCATION}, dram)
+  HIKEY960_TSP_RAM_LOCATION_ID = HIKEY960_DRAM_ID
+else ifeq (${HIKEY960_TSP_RAM_LOCATION}, sram)
+  HIKEY960_TSP_RAM_LOCATION_ID := HIKEY960_SRAM_ID
+else
+  $(error "Currently unsupported HIKEY960_TSP_RAM_LOCATION value")
+endif
+
+CRASH_CONSOLE_BASE		:=	PL011_UART6_BASE
+COLD_BOOT_SINGLE_CPU		:=	1
+PROGRAMMABLE_RESET_ADDRESS	:=	1
+
+# Process flags
+$(eval $(call add_define,HIKEY960_TSP_RAM_LOCATION_ID))
+$(eval $(call add_define,CRASH_CONSOLE_BASE))
+
+# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
+# in the FIP if the platform requires.
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
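+# Illustrative example (not mandated by this makefile): with OP-TEE as the SPD,
+# BL32_EXTRA1 and BL32_EXTRA2 would typically point at the pager and pageable
+# images passed on the make command line.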
+
+ENABLE_PLAT_COMPAT	:=	0
+
+USE_COHERENT_MEM	:=	1
+
+PLAT_INCLUDES		:=	-Iinclude/common/tbbr			\
+				-Iplat/hisilicon/hikey960/include
+
+PLAT_BL_COMMON_SOURCES	:=	drivers/arm/pl011/pl011_console.S	\
+				drivers/delay_timer/delay_timer.c	\
+				drivers/delay_timer/generic_delay_timer.c \
+				lib/aarch64/xlat_tables.c		\
+				plat/hisilicon/hikey960/aarch64/hikey960_common.c \
+				plat/hisilicon/hikey960/hikey960_boardid.c
+
+HIKEY960_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				plat/common/plat_gicv2.c
+
+BL1_SOURCES		+=	bl1/tbbr/tbbr_img_desc.c		\
+				drivers/io/io_block.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_storage.c			\
+				drivers/synopsys/ufs/dw_ufs.c		\
+				drivers/ufs/ufs.c 			\
+				lib/cpus/aarch64/cortex_a53.S		\
+				plat/hisilicon/hikey960/aarch64/hikey960_helpers.S \
+				plat/hisilicon/hikey960/hikey960_bl1_setup.c 	\
+				plat/hisilicon/hikey960/hikey960_io_storage.c \
+				${HIKEY960_GIC_SOURCES}
+
+BL2_SOURCES		+=	drivers/io/io_block.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_storage.c			\
+				drivers/ufs/ufs.c			\
+				plat/hisilicon/hikey960/hikey960_bl2_setup.c \
+				plat/hisilicon/hikey960/hikey960_io_storage.c \
+				plat/hisilicon/hikey960/hikey960_mcu_load.c
+
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES		+=	plat/hisilicon/hikey960/hikey960_bl2_mem_params_desc.c \
+				plat/hisilicon/hikey960/hikey960_image_load.c \
+				common/desc_image_load.c
+
+ifeq (${SPD},opteed)
+BL2_SOURCES		+=	lib/optee/optee_utils.c
+endif
+endif
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c			\
+				lib/cpus/aarch64/cortex_a53.S           \
+				lib/cpus/aarch64/cortex_a72.S		\
+				lib/cpus/aarch64/cortex_a73.S		\
+				plat/common/aarch64/plat_psci_common.c  \
+				plat/hisilicon/hikey960/aarch64/hikey960_helpers.S \
+				plat/hisilicon/hikey960/hikey960_bl31_setup.c \
+				plat/hisilicon/hikey960/hikey960_pm.c	\
+				plat/hisilicon/hikey960/hikey960_topology.c \
+				plat/hisilicon/hikey960/drivers/pwrc/hisi_pwrc.c \
+				plat/hisilicon/hikey960/drivers/ipc/hisi_ipc.c \
+				${HIKEY960_GIC_SOURCES}
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_836870		:=	1
+ERRATA_A53_843419		:=	1
+ERRATA_A53_855873		:=	1
diff --git a/plat/hisilicon/poplar/aarch64/platform_common.c b/plat/hisilicon/poplar/aarch64/platform_common.c
new file mode 100644
index 0000000..a7dac4f
--- /dev/null
+++ b/plat/hisilicon/poplar/aarch64/platform_common.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <platform.h>
+#include <xlat_tables.h>
+#include "hi3798cv200.h"
+#include "platform_def.h"
+
+#define MAP_DDR		MAP_REGION_FLAT(DDR_BASE,			\
+					DDR_SIZE,			\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_DEVICE	MAP_REGION_FLAT(DEVICE_BASE,			\
+					DEVICE_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+static const mmap_region_t poplar_mmap[] = {
+	MAP_DDR,
+	MAP_DEVICE,
+	{0}
+};
+
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void plat_configure_mmu_el##_el(unsigned long total_base,	\
+				  unsigned long total_size,		\
+				  unsigned long ro_start,		\
+				  unsigned long ro_limit,		\
+				  unsigned long coh_start,		\
+				  unsigned long coh_limit)		\
+	{								\
+		mmap_add_region(total_base, total_base,			\
+				total_size,				\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(poplar_mmap);					\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el##_el(0);					\
+	}
+
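+/* Instantiate plat_configure_mmu_el3() and plat_configure_mmu_el1(). */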
+DEFINE_CONFIGURE_MMU_EL(3)
+DEFINE_CONFIGURE_MMU_EL(1)
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return SYS_COUNTER_FREQ_IN_TICKS;
+}
diff --git a/plat/hisilicon/poplar/bl1_plat_setup.c b/plat/hisilicon/poplar/bl1_plat_setup.c
new file mode 100644
index 0000000..c65e29e
--- /dev/null
+++ b/plat/hisilicon/poplar/bl1_plat_setup.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <pl061_gpio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <string.h>
+#include <tbbr_img_def.h>
+#include "../../bl1/bl1_private.h"
+#include "hi3798cv200.h"
+#include "plat_private.h"
+
+/* Symbols from the linker script for the coherent section */
+extern unsigned long __COHERENT_RAM_START__;
+extern unsigned long __COHERENT_RAM_END__;
+
+#define BL1_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
+#define BL1_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)
+
+/* Data structure which holds the extents of the trusted RAM for BL1 */
+static meminfo_t bl1_tzram_layout;
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+void bl1_early_platform_setup(void)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = BL_MEM_BASE;
+	bl1_tzram_layout.total_size = BL_MEM_SIZE;
+
+	/* Calculate how much RAM BL1 is using and how much remains free */
+	bl1_tzram_layout.free_base = BL_MEM_BASE;
+	bl1_tzram_layout.free_size = BL_MEM_SIZE;
+
+	reserve_mem(&bl1_tzram_layout.free_base,
+		    &bl1_tzram_layout.free_size,
+		    BL1_RAM_BASE,
+		    BL1_RAM_LIMIT - BL1_RAM_BASE);
+
+	INFO("BL1: 0x%lx - 0x%lx [size = %zu]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
+	     BL1_RAM_LIMIT - BL1_RAM_BASE);
+}
+
+void bl1_plat_arch_setup(void)
+{
+	plat_configure_mmu_el3(bl1_tzram_layout.total_base,
+			       bl1_tzram_layout.total_size,
+			       BL_MEM_BASE, /* l-loader and BL1 ROM */
+			       BL1_RO_LIMIT,
+			       BL1_COHERENT_RAM_BASE,
+			       BL1_COHERENT_RAM_LIMIT);
+}
+
+void bl1_platform_setup(void)
+{
+	int i;
+
+	generic_delay_timer_init();
+
+	pl061_gpio_init();
+	for (i = 0; i < GPIO_MAX; i++)
+		pl061_gpio_register(GPIO_BASE(i), i);
+
+	plat_io_setup();
+}
+
+unsigned int bl1_plat_get_next_image_id(void)
+{
+	return BL2_IMAGE_ID;
+}
diff --git a/plat/hisilicon/poplar/bl2_plat_setup.c b/plat/hisilicon/poplar/bl2_plat_setup.c
new file mode 100644
index 0000000..1741475
--- /dev/null
+++ b/plat/hisilicon/poplar/bl2_plat_setup.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <partition/partition.h>
+#include <platform.h>
+#include <string.h>
+#include "hi3798cv200.h"
+#include "plat_private.h"
+
+/* Memory ranges for code and read-only data sections */
+#define BL2_RO_BASE	(unsigned long)(&__RO_START__)
+#define BL2_RO_LIMIT	(unsigned long)(&__RO_END__)
+
+/* Memory ranges for coherent memory section */
+#define BL2_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
+#define BL2_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)
+
+typedef struct bl2_to_bl31_params_mem {
+	bl31_params_t		bl31_params;
+	image_info_t		bl31_image_info;
+	image_info_t		bl33_image_info;
+	entry_point_info_t	bl33_ep_info;
+	entry_point_info_t	bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	return &bl2_tzram_layout;
+}
+
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+	bl31_params_t *bl2_to_bl31_params = NULL;
+
+	/*
+	 * Initialise the memory for all the arguments that need to
+	 * be passed to BL3-1
+	 */
+	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
+
+	/* Assign memory for TF related information */
+	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+	/* Fill BL3-1 related information */
+	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info,
+		       PARAM_IMAGE_BINARY, VERSION_1, 0);
+
+	/* Fill BL3-3 related information */
+	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+		       PARAM_EP, VERSION_1, 0);
+
+	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
+	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info,
+		       PARAM_IMAGE_BINARY, VERSION_1, 0);
+
+	return bl2_to_bl31_params;
+}
+
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+	return &bl31_params_mem.bl31_ep_info;
+}
+
+void bl2_plat_set_bl31_ep_info(image_info_t *image,
+			       entry_point_info_t *bl31_ep_info)
+{
+	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				     DISABLE_ALL_EXCEPTIONS);
+}
+
+static uint32_t hisi_get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+			       entry_point_info_t *bl33_ep_info)
+{
+	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+	bl33_ep_info->spsr = hisi_get_spsr_for_bl33_entry();
+	bl33_ep_info->args.arg2 = image->image_size;
+}
+
+void bl2_plat_flush_bl31_params(void)
+{
+	flush_dcache_range((unsigned long)&bl31_params_mem,
+			   sizeof(bl2_to_bl31_params_mem_t));
+}
+
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+	bl33_meminfo->total_base = DDR_BASE;
+	bl33_meminfo->total_size = DDR_SIZE;
+	bl33_meminfo->free_base  = DDR_BASE;
+	bl33_meminfo->free_size  = DDR_SIZE;
+}
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Enable arch timer */
+	generic_delay_timer_init();
+
+	bl2_tzram_layout = *mem_layout;
+}
+
+void bl2_plat_arch_setup(void)
+{
+	plat_configure_mmu_el1(bl2_tzram_layout.total_base,
+			       bl2_tzram_layout.total_size,
+			       BL2_RO_BASE,
+			       BL2_RO_LIMIT,
+			       BL2_COHERENT_RAM_BASE,
+			       BL2_COHERENT_RAM_LIMIT);
+}
+
+void bl2_platform_setup(void)
+{
+	plat_io_setup();
+}
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return PLAT_ARM_NS_IMAGE_OFFSET;
+}
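
Working through hisi_get_spsr_for_bl33_entry() above for this board: the Poplar cores are Cortex-A53s, which implement EL2, so the EL2 field of ID_AA64PFR0_EL1 reads back non-zero, mode resolves to MODE_EL2, and BL33 (U-Boot here) is entered at EL2 with all exceptions masked; MODE_EL1 would only be chosen on a core without EL2 support.
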
diff --git a/plat/hisilicon/poplar/bl31_plat_setup.c b/plat/hisilicon/poplar/bl31_plat_setup.c
new file mode 100644
index 0000000..b9a0e18
--- /dev/null
+++ b/plat/hisilicon/poplar/bl31_plat_setup.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <cortex_a53.h>
+#include <debug.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <stddef.h>
+#include <string.h>
+#include "hi3798cv200.h"
+#include "plat_private.h"
+#include "platform_def.h"
+
+/* Memory ranges for code and RO data sections */
+#define BL31_RO_BASE	(unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT	(unsigned long)(&__RO_END__)
+
+/* Memory ranges for coherent memory section */
+#define BL31_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
+#define BL31_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)
+
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	return &bl33_image_ep_info;
+}
+
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+			       void *plat_params_from_bl2)
+{
+	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
+
+	/* Init console for crash report */
+	plat_crash_console_init();
+
+	bl33_image_ep_info = *from_bl2->bl33_ep_info;
+}
+
+void bl31_platform_setup(void)
+{
+	/* Init arch timer */
+	generic_delay_timer_init();
+
+	/* Init GIC distributor and CPU interface */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	/* do nothing */
+}
+
+void bl31_plat_arch_setup(void)
+{
+	plat_configure_mmu_el3(BL31_RO_BASE,
+			       (BL31_COHERENT_RAM_LIMIT - BL31_RO_BASE),
+			       BL31_RO_BASE,
+			       BL31_RO_LIMIT,
+			       BL31_COHERENT_RAM_BASE,
+			       BL31_COHERENT_RAM_LIMIT);
+
+	INFO("Boot BL33 from 0x%lx for %lu Bytes\n",
+	     bl33_image_ep_info.pc, bl33_image_ep_info.args.arg2);
+}
diff --git a/plat/hisilicon/poplar/include/hi3798cv200.h b/plat/hisilicon/poplar/include/hi3798cv200.h
new file mode 100644
index 0000000..6318b9c
--- /dev/null
+++ b/plat/hisilicon/poplar/include/hi3798cv200.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __HI3798cv200_H__
+#define __HI3798cv200_H__
+
+/* PL011 */
+#define PL011_UART0_BASE		(0xF8B00000)
+#define PL011_BAUDRATE			(115200)
+#define PL011_UART0_CLK_IN_HZ		(75000000)
+
+/* Sys Counter */
+#define SYS_COUNTER_FREQ_IN_TICKS	(24000000)
+#define SYS_COUNTER_FREQ_IN_MHZ		(24)
+
+/* Timer */
+#define SEC_TIMER0_BASE			(0xF8008000)
+#define TIMER00_LOAD			(SEC_TIMER0_BASE + 0x000)
+#define TIMER00_VALUE			(SEC_TIMER0_BASE + 0x004)
+#define TIMER00_CONTROL			(SEC_TIMER0_BASE + 0x008)
+#define TIMER00_BGLOAD			(SEC_TIMER0_BASE + 0x018)
+
+#define SEC_TIMER2_BASE			(0xF8009000)
+#define TIMER20_LOAD			(SEC_TIMER2_BASE + 0x000)
+#define TIMER20_VALUE			(SEC_TIMER2_BASE + 0x004)
+#define TIMER20_CONTROL			(SEC_TIMER2_BASE + 0x008)
+#define TIMER20_BGLOAD			(SEC_TIMER2_BASE + 0x018)
+
+/* GPIO */
+#define	GPIO_MAX			(12)
+#define	GPIO_BASE(x)			((x) != 5 ?			\
+					0xf820000 + (x) * 0x1000 : 0xf8004000)
+
+/* SCTL */
+#define REG_BASE_SCTL			(0xF8000000)
+#define REG_SC_GEN12			(0x00B0)
+
+/* CRG */
+#define REG_BASE_CRG			(0xF8A22000)
+#define REG_CPU_LP			(0x48)
+#define REG_CPU_RST			(0x50)
+#define REG_PERI_CRG39			(0x9C)
+#define REG_PERI_CRG40			(0xA0)
+
+/* MCI */
+#define REG_BASE_MCI			(0xF9830000)
+#define MCI_CDETECT			(0x50)
+#define MCI_VERID			(0x6C)
+#define MCI_VERID_VALUE			(0x5342250A)
+#define MCI_VERID_VALUE2		(0x5342270A)
+
+/* EMMC */
+#define REG_EMMC_PERI_CRG		REG_PERI_CRG40
+#define REG_SDCARD_PERI_CRG		REG_PERI_CRG39
+#define EMMC_CLK_MASK			(0x7 << 8)
+#define EMMC_SRST_REQ			(0x1 << 4)
+#define EMMC_CKEN			(0x1 << 1)
+#define EMMC_BUS_CKEN			(0x1 << 0)
+#define EMMC_CLK_100M			(0 << 8)
+#define EMMC_CLK_50M			(1 << 8)
+#define EMMC_CLK_25M			(2 << 8)
+
+#define EMMC_DESC_SIZE			(0xF0000)
+#define EMMC_INIT_PARAMS(base)				\
+	{	.bus_width = EMMC_BUS_WIDTH_8,		\
+		.clk_rate = 25 * 1000 * 1000,		\
+		.desc_base = (base) - EMMC_DESC_SIZE,	\
+		.desc_size = EMMC_DESC_SIZE,		\
+		.flags =  EMMC_FLAG_CMD23,		\
+		.reg_base = REG_BASE_MCI,		\
+	}
+
+/* GIC-400 */
+#define GICD_BASE			(0xF1001000)
+#define GICC_BASE			(0xF1002000)
+#define GICR_BASE			(0xF1000000)
+
+/* FIQ platform-related defines */
+#define HISI_IRQ_SEC_SGI_0		8
+#define HISI_IRQ_SEC_SGI_1		9
+#define HISI_IRQ_SEC_SGI_2		10
+#define HISI_IRQ_SEC_SGI_3		11
+#define HISI_IRQ_SEC_SGI_4		12
+#define HISI_IRQ_SEC_SGI_5		13
+#define HISI_IRQ_SEC_SGI_6		14
+#define HISI_IRQ_SEC_SGI_7		15
+#define HISI_IRQ_SEC_PPI_0		29
+#define HISI_IRQ_SEC_TIMER0		60
+#define HISI_IRQ_SEC_TIMER1		50
+#define HISI_IRQ_SEC_TIMER2		52
+#define HISI_IRQ_SEC_TIMER3		88
+#define HISI_IRQ_SEC_AXI		110
+
+/* Watchdog */
+#define HISI_WDG0_BASE			(0xF8A2C000)
+
+#endif	/* __HI3798cv200_H__ */
diff --git a/plat/hisilicon/poplar/include/plat_macros.S b/plat/hisilicon/poplar/include/plat_macros.S
new file mode 100644
index 0000000..82d10c1
--- /dev/null
+++ b/plat/hisilicon/poplar/include/plat_macros.S
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+.section .rodata.gic_reg_name, "aS"
+	.macro plat_crash_print_regs
+	nop
+	.endm
diff --git a/plat/hisilicon/poplar/include/plat_private.h b/plat/hisilicon/poplar/include/plat_private.h
new file mode 100644
index 0000000..e2272cc
--- /dev/null
+++ b/plat/hisilicon/poplar/include/plat_private.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_PRIVATE_H__
+#define __PLAT_PRIVATE_H__
+
+#include <bl_common.h>
+#include "hi3798cv200.h"
+
+void plat_configure_mmu_el3(unsigned long total_base,
+			    unsigned long total_size,
+			    unsigned long ro_start,
+			    unsigned long ro_limit,
+			    unsigned long coh_start,
+			    unsigned long coh_limit);
+
+void plat_configure_mmu_el1(unsigned long total_base,
+			    unsigned long total_size,
+			    unsigned long ro_start,
+			    unsigned long ro_limit,
+			    unsigned long coh_start,
+			    unsigned long coh_limit);
+
+void plat_delay_timer_init(void);
+void plat_io_setup(void);
+
+#endif /* __PLAT_PRIVATE_H__ */
diff --git a/plat/hisilicon/poplar/include/platform_def.h b/plat/hisilicon/poplar/include/platform_def.h
new file mode 100644
index 0000000..b7afe82
--- /dev/null
+++ b/plat/hisilicon/poplar/include/platform_def.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include <tbbr/tbbr_img_def.h>
+#include "hi3798cv200.h"
+#include "poplar_layout.h"		/* BL memory region sizes, etc */
+
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+#define PLAT_ARM_CRASH_UART_BASE	PL011_UART0_BASE
+#define PLAT_ARM_CRASH_UART_CLK_IN_HZ	PL011_UART0_CLK_IN_HZ
+#define ARM_CONSOLE_BAUDRATE		PL011_BAUDRATE
+
+/* Generic platform constants */
+#define PLATFORM_STACK_SIZE		(0x800)
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+#define BOOT_EMMC_NAME			"l-loader.bin"
+
+#define PLATFORM_CACHE_LINE_SIZE	(64)
+#define PLATFORM_CLUSTER_COUNT		(1)
+#define PLATFORM_CORE_COUNT		(4)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	(4)
+
+/* IO framework user */
+#define MAX_IO_DEVICES			(4)
+#define MAX_IO_HANDLES			(4)
+#define MAX_IO_BLOCK_DEVICES		(2)
+
+/* Memory map related constants */
+#define DDR_BASE			(0x00000000)
+#define DDR_SIZE			(0x40000000)
+
+#define DEVICE_BASE			(0xF0000000)
+#define DEVICE_SIZE			(0x0F000000)
+
+#define TEE_SEC_MEM_BASE		(0x70000000)
+#define TEE_SEC_MEM_SIZE		(0x10000000)
+
+#define BL_MEM_BASE			(BL1_RO_BASE)
+#define BL_MEM_LIMIT			(BL31_LIMIT)
+#define BL_MEM_SIZE			(BL_MEM_LIMIT - BL_MEM_BASE)
+
+#define PLAT_ARM_NS_IMAGE_OFFSET	0x37000000
+
+/* Page table and MMU setup constants */
+#define ADDR_SPACE_SIZE			(1ull << 32)
+#define MAX_XLAT_TABLES			(4)
+#define MAX_MMAP_REGIONS		(16)
+
+#define CACHE_WRITEBACK_SHIFT		(6)
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
+
+/* Power states */
+#define PLAT_MAX_PWR_LVL		(MPIDR_AFFLVL1)
+#define PLAT_MAX_OFF_STATE		2
+#define PLAT_MAX_RET_STATE		1
+
+/* Interrupt controller */
+#define PLAT_ARM_GICD_BASE	GICD_BASE
+#define PLAT_ARM_GICC_BASE	GICC_BASE
+
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_TIMER0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_TIMER1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_TIMER2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_TIMER3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(HISI_IRQ_SEC_AXI, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL)
+
+#define PLAT_ARM_G0_IRQ_PROPS(grp)
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/hisilicon/poplar/include/poplar_layout.h b/plat/hisilicon/poplar/include/poplar_layout.h
new file mode 100644
index 0000000..192bcb9
--- /dev/null
+++ b/plat/hisilicon/poplar/include/poplar_layout.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __POPLAR_LAYOUT_H
+#define __POPLAR_LAYOUT_H
+
+/*
+ * Boot memory layout definitions for the HiSilicon Poplar board
+ */
+
+/*
+ * When Poplar is powered on, boot ROM loads the initial content of
+ * boot media into low memory, verifies it, and begins executing it
+ * in 32-bit mode.  The image loaded is "l-loader.bin", which contains
+ * a small amount of code along with an embedded ARM Trusted Firmware
+ * BL1 image.  The main purpose of "l-loader" is to prepare the
+ * processor to execute the BL1 image in 64-bit mode, and to trigger
+ * that execution.
+ *
+ * Also embedded in "l-loader.bin" is a FIP image that contains
+ * other ARM Trusted Firmware images:  BL2; BL31; and for BL33,
+ * U-Boot.  When BL1 executes, it unpacks the BL2 image from the FIP
+ * image into a region of memory set aside to hold it.  Similarly,
+ * BL2 unpacks BL31 into memory reserved for it, and unpacks U-Boot
+ * into high memory.
+ *
+ * Because the BL1 code is embedded in "l-loader", its base address
+ * in memory is derived from the base address of the "l-loader"
+ * text section, together with an offset.  Memory space for BL2 is
+ * reserved immediately following BL1, and memory space is reserved
+ * for BL31 after that.  ARM Trusted Firmware requires each of these
+ * memory regions to be aligned on page boundaries, so the size of
+ * each region is a multiple of a page size (ending in 000).  Note
+ * that ARM Trusted Firmware requires the read-only and read-write
+ * regions of memory used for BL1 to be defined separately.
+ *
+ *    ---------------------
+ *    |  (unused memory)  |
+ *    +-------------------+	- - - - -
+ *    |  (l-loader text)  |               \
+ *    +-------------------+                \
+ *    |  BL1 (read-only)  | \               \
+ *    |- - - - - - - - - -| |               |
+ *    |  BL1 (read-write) | |               |
+ *    +-------------------+  >  BL Memory   |
+ *    |  Reserved for BL2 | |                > "l-loader.bin" image
+ *    +-------------------+ |               |
+ *    | Reserved for BL31 | /               |
+ *    +-------------------+                 |
+ *           . . .                          /
+ *    +-------------------+                /
+ *    |        FIP        |               /
+ *    +-------------------+	- - - - -
+ *           . . .
+ *    |  (unused memory)  |
+ *           . . .
+ *    +-------------------+
+ *    |Reserved for U-Boot|
+ *    +-------------------+
+ *           . . .
+ *    |  (unused memory)  |
+ *    ---------------------
+ *
+ * The size of each of these regions is defined below.  The base
+ * address of the "l-loader" TEXT section and the offset of the BL1
+ * image within that serve as anchors for defining the positions of
+ * all other regions.  The FIP is placed in a section of its own.
+ *
+ * A "BASE" is the memory address of the start of a region; a "LIMIT"
+ * marks its end.  A "SIZE" is the size of a region (in bytes).  An
+ * "OFFSET" is an offset to the start of a region relative to the
+ * base of the "l-loader" TEXT section (also a multiple of page size).
+ */
+#define LLOADER_TEXT_BASE		0x00001000	/* page aligned */
+#define BL1_OFFSET			0x0000D000	/* page multiple */
+#define FIP_BASE			0x00040000
+
+#define BL1_RO_SIZE			0x00008000	/* page multiple */
+#define BL1_RW_SIZE			0x00008000	/* page multiple */
+#define BL1_SIZE			(BL1_RO_SIZE + BL1_RW_SIZE)
+#define BL2_SIZE			0x0000c000	/* page multiple */
+#define BL31_SIZE			0x00014000
+#define FIP_SIZE			0x00068000
+
+     /* BL1_OFFSET */			/* (Defined above) */
+#define BL1_BASE			(LLOADER_TEXT_BASE + BL1_OFFSET)
+#define BL1_LIMIT			(BL1_BASE + BL1_SIZE)
+
+#define BL1_RO_OFFSET			(BL1_OFFSET)
+#define BL1_RO_BASE			(LLOADER_TEXT_BASE + BL1_RO_OFFSET)
+#define BL1_RO_LIMIT			(BL1_RO_BASE + BL1_RO_SIZE)
+
+#define BL1_RW_OFFSET			(BL1_RO_OFFSET + BL1_RO_SIZE)
+#define BL1_RW_BASE			(LLOADER_TEXT_BASE + BL1_RW_OFFSET)
+#define BL1_RW_LIMIT			(BL1_RW_BASE + BL1_RW_SIZE)
+
+#define BL2_OFFSET			(BL1_OFFSET + BL1_SIZE)
+#define BL2_BASE			(LLOADER_TEXT_BASE + BL2_OFFSET)
+#define BL2_LIMIT			(BL2_BASE + BL2_SIZE)
+
+#define BL31_OFFSET			(BL2_OFFSET + BL2_SIZE)
+#define BL31_BASE			(LLOADER_TEXT_BASE + BL31_OFFSET)
+#define BL31_LIMIT			(BL31_BASE + BL31_SIZE)
+
+#endif /* !__POPLAR_LAYOUT_H */
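
Plugging the sizes above into the OFFSET/BASE/LIMIT definitions gives the following concrete map (a worked example derived from this header, not additional definitions):

	BL1_RO_BASE = 0x0000E000   BL1_RO_LIMIT = 0x00016000
	BL1_RW_BASE = 0x00016000   BL1_RW_LIMIT = 0x0001E000
	BL2_BASE    = 0x0001E000   BL2_LIMIT    = 0x0002A000
	BL31_BASE   = 0x0002A000   BL31_LIMIT   = 0x0003E000

so the BL regions sit back-to-back after the l-loader text and end below FIP_BASE (0x00040000).
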
diff --git a/plat/hisilicon/poplar/plat_pm.c b/plat/hisilicon/poplar/plat_pm.c
new file mode 100644
index 0000000..3e43d4d
--- /dev/null
+++ b/plat/hisilicon/poplar/plat_pm.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <psci.h>
+#include "hi3798cv200.h"
+#include "plat_private.h"
+#include "platform_def.h"
+
+#define REG_PERI_CPU_RVBARADDR		0xF8A80034
+#define REG_PERI_CPU_AARCH_MODE		0xF8A80030
+
+#define REG_CPU_LP_CPU_SW_BEGIN		10
+#define CPU_REG_COREPO_SRST		12
+#define CPU_REG_CORE_SRST		8
+
+static void poplar_cpu_standby(plat_local_state_t cpu_state)
+{
+	dsb();
+	wfi();
+}
+
+static int poplar_pwr_domain_on(u_register_t mpidr)
+{
+	unsigned int cpu = plat_core_pos_by_mpidr(mpidr);
+	unsigned int regval, regval_bak;
+
+	/* Select 400MHz before starting the slave cores */
+	regval_bak = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP));
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x206);
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x606);
+
+	/* Clear the slave cpu arm_por_srst_req reset */
+	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
+	regval &= ~(1 << (cpu + CPU_REG_COREPO_SRST));
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);
+
+	/* Clear the slave cpu reset */
+	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
+	regval &= ~(1 << (cpu + CPU_REG_CORE_SRST));
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);
+
+	/* Restore cpu frequency */
+	regval = regval_bak & (~(1 << REG_CPU_LP_CPU_SW_BEGIN));
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval);
+	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval_bak);
+
+	return PSCI_E_SUCCESS;
+}
+
+static void poplar_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+static void poplar_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+static void poplar_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
+					PLAT_MAX_OFF_STATE);
+
+	/* Enable the gic cpu interface */
+	plat_arm_gic_pcpu_init();
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_arm_gic_cpuif_enable();
+}
+
+static void poplar_pwr_domain_suspend_finish(
+		const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+static void __dead2 poplar_system_off(void)
+{
+	ERROR("Poplar System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 poplar_system_reset(void)
+{
+	mmio_write_32((uintptr_t)(HISI_WDG0_BASE + 0xc00), 0x1ACCE551);
+	mmio_write_32((uintptr_t)(HISI_WDG0_BASE + 0x0),   0x00000100);
+	mmio_write_32((uintptr_t)(HISI_WDG0_BASE + 0x8),   0x00000003);
+
+	wfi();
+	ERROR("Poplar System Reset: operation not handled.\n");
+	panic();
+}
+
+static int32_t poplar_validate_power_state(unsigned int power_state,
+					   psci_power_state_t *req_state)
+{
+	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
+
+	int pstate = psci_get_pstate_type(power_state);
+
+	assert(req_state);
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY)
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
+	else
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_OFF_STATE;
+
+	/* We expect the 'state id' to be zero */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+static int poplar_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non-secure entrypoint lies within the
+	 * non-secure DRAM.
+	 */
+	if ((entrypoint >= DDR_BASE) && (entrypoint < (DDR_BASE + DDR_SIZE)))
+		return PSCI_E_SUCCESS;
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+static void poplar_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	int i;
+
+	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+static const plat_psci_ops_t poplar_plat_psci_ops = {
+	.cpu_standby			= poplar_cpu_standby,
+	.pwr_domain_on			= poplar_pwr_domain_on,
+	.pwr_domain_off			= poplar_pwr_domain_off,
+	.pwr_domain_suspend		= poplar_pwr_domain_suspend,
+	.pwr_domain_on_finish		= poplar_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish	= poplar_pwr_domain_suspend_finish,
+	.system_off			= poplar_system_off,
+	.system_reset			= poplar_system_reset,
+	.validate_power_state		= poplar_validate_power_state,
+	.validate_ns_entrypoint		= poplar_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= poplar_get_sys_suspend_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &poplar_plat_psci_ops;
+
+	mmio_write_32((uintptr_t)REG_PERI_CPU_AARCH_MODE, 0xF);
+	mmio_write_32((uintptr_t)REG_PERI_CPU_RVBARADDR, sec_entrypoint);
+	return 0;
+}
diff --git a/plat/hisilicon/poplar/plat_storage.c b/plat/hisilicon/poplar/plat_storage.c
new file mode 100644
index 0000000..623a61b
--- /dev/null
+++ b/plat/hisilicon/poplar/plat_storage.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <io_block.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_storage.h>
+#include <mmio.h>
+#include <partition/partition.h>
+#include <semihosting.h>
+#include <string.h>
+#include <tbbr_img_def.h>
+#include <utils.h>
+#include "platform_def.h"
+
+static const io_dev_connector_t *mmap_dev_con;
+static const io_dev_connector_t *fip_dev_con;
+
+static uintptr_t mmap_dev_handle;
+static uintptr_t fip_dev_handle;
+
+static int open_mmap(const uintptr_t spec);
+static int open_fip(const uintptr_t spec);
+
+static const io_block_spec_t loader_fip_spec = {
+	.offset		= FIP_BASE,
+	.length		= FIP_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+struct plat_io_policy {
+	uintptr_t	*dev_handle;
+	uintptr_t	image_spec;
+	int		(*check)(const uintptr_t spec);
+};
+
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&mmap_dev_handle,
+		(uintptr_t)&loader_fip_spec,
+		open_mmap
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		open_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		open_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		open_fip
+	},
+};
+
+static int open_mmap(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	result = io_dev_init(mmap_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(mmap_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+static int open_fip(const uintptr_t spec)
+{
+	uintptr_t local_image_handle;
+	int result;
+
+	result = io_dev_init(fip_dev_handle, (uintptr_t) FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			io_close(local_image_handle);
+		} else {
+			VERBOSE("error opening fip\n");
+		}
+	} else {
+		VERBOSE("error initializing fip\n");
+	}
+
+	return result;
+}
+
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	const struct plat_io_policy *policy;
+	int result;
+
+	assert(image_id < ARRAY_SIZE(policies));
+
+	policy = &policies[image_id];
+	result = policy->check(policy->image_spec);
+	assert(result == 0);
+
+	*image_spec = policy->image_spec;
+	*dev_handle = *(policy->dev_handle);
+
+	return result;
+}
+
+void plat_io_setup(void)
+{
+	int result;
+
+	result = register_io_dev_memmap(&mmap_dev_con);
+	assert(result == 0);
+
+	result = register_io_dev_fip(&fip_dev_con);
+	assert(result == 0);
+
+	result = io_dev_open(fip_dev_con, (uintptr_t)&loader_fip_spec,
+				&fip_dev_handle);
+	assert(result == 0);
+
+	result = io_dev_open(mmap_dev_con, (uintptr_t)NULL, &mmap_dev_handle);
+	assert(result == 0);
+
+	(void) result;
+}
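
The policies[] table above is what drives image loading: the generic loader looks up an image ID, lets the check hook probe the backing device, and then reads the image through the io_storage layer. A minimal caller-side sketch of that flow (the destination address and length below are illustrative, taken from poplar_layout.h rather than from a real image descriptor):

	uintptr_t dev_handle, image_spec, image_handle;
	size_t bytes_read;

	if ((plat_get_image_source(BL2_IMAGE_ID, &dev_handle, &image_spec) == 0) &&
	    (io_open(dev_handle, image_spec, &image_handle) == 0)) {
		/* In the real loader the destination comes from the image descriptor */
		io_read(image_handle, BL2_BASE, BL2_SIZE, &bytes_read);
		io_close(image_handle);
	}
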
diff --git a/plat/hisilicon/poplar/plat_topology.c b/plat/hisilicon/poplar/plat_topology.c
new file mode 100644
index 0000000..3dd818e
--- /dev/null
+++ b/plat/hisilicon/poplar/plat_topology.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <plat_arm.h>
+#include <psci.h>
+#include "platform_def.h"
+
+const unsigned char hisi_power_domain_tree_desc[] = {
+	PLATFORM_CLUSTER_COUNT,
+	PLATFORM_CORE_COUNT,
+};
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return hisi_power_domain_tree_desc;
+}
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		return -1;
+
+	if ((mpidr & MPIDR_CPU_MASK) >= PLATFORM_CORE_COUNT)
+		return -1;
+
+	return plat_arm_calc_core_pos(mpidr);
+}
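
With PLATFORM_CLUSTER_COUNT = 1 and PLATFORM_CORE_COUNT = 4, the descriptor above tells the PSCI framework there is a single cluster of four cores. Working the checks in plat_core_pos_by_mpidr() through as an example: MPIDRs 0x0 to 0x3 pass both tests and map via plat_arm_calc_core_pos() to core positions 0 to 3, while any MPIDR with a non-zero cluster field (e.g. 0x100) or a CPU field of 4 or more is rejected with -1.
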
diff --git a/plat/hisilicon/poplar/platform.mk b/plat/hisilicon/poplar/platform.mk
new file mode 100644
index 0000000..fc75ff3
--- /dev/null
+++ b/plat/hisilicon/poplar/platform.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+NEED_BL33			:= yes
+
+COLD_BOOT_SINGLE_CPU		:= 1
+PROGRAMMABLE_RESET_ADDRESS	:= 1
+CTX_INCLUDE_FPREGS		:= 1
+ENABLE_PLAT_COMPAT		:= 0
+ERRATA_A53_855873		:= 1
+ERRATA_A53_835769		:= 1
+ERRATA_A53_843419		:= 1
+
+ARM_GIC_ARCH			:= 2
+$(eval $(call add_define,ARM_GIC_ARCH))
+
+PLAT_PL061_MAX_GPIOS 		:= 104
+$(eval $(call add_define,PLAT_PL061_MAX_GPIOS))
+
+PLAT_INCLUDES	:=	-Iplat/hisilicon/poplar/include		\
+			-Iinclude/plat/arm/common/		\
+			-Iplat/hisilicon/poplar			\
+			-Iinclude/common/tbbr			\
+			-Iinclude/drivers/io
+
+PLAT_BL_COMMON_SOURCES	:=						\
+		lib/aarch64/xlat_tables.c				\
+		drivers/delay_timer/generic_delay_timer.c		\
+		drivers/arm/gic/common/gic_common.c			\
+		drivers/arm/gic/v2/gicv2_helpers.c			\
+		drivers/delay_timer/delay_timer.c			\
+		drivers/arm/pl011/pl011_console.S			\
+		drivers/arm/gic/v2/gicv2_main.c				\
+		plat/arm/common/aarch64/arm_helpers.S			\
+		plat/arm/common/arm_gicv2.c				\
+		plat/common/plat_gicv2.c				\
+		plat/hisilicon/poplar/aarch64/platform_common.c
+
+BL1_SOURCES	+=							\
+		lib/cpus/aarch64/cortex_a53.S				\
+		drivers/arm/pl061/pl061_gpio.c				\
+		drivers/io/io_storage.c					\
+		drivers/io/io_block.c					\
+		drivers/gpio/gpio.c					\
+		drivers/io/io_fip.c					\
+		drivers/io/io_memmap.c					\
+		plat/hisilicon/poplar/bl1_plat_setup.c			\
+		plat/hisilicon/poplar/plat_storage.c
+
+
+BL2_SOURCES	+=      						\
+		drivers/arm/pl061/pl061_gpio.c				\
+		drivers/io/io_storage.c					\
+		drivers/io/io_block.c					\
+		drivers/io/io_fip.c					\
+		drivers/gpio/gpio.c					\
+		drivers/io/io_memmap.c					\
+		plat/hisilicon/poplar/bl2_plat_setup.c			\
+		plat/hisilicon/poplar/plat_storage.c
+
+
+BL31_SOURCES	+=							\
+		lib/cpus/aarch64/aem_generic.S				\
+		lib/cpus/aarch64/cortex_a53.S				\
+		plat/common/aarch64/plat_psci_common.c			\
+		plat/hisilicon/poplar/bl31_plat_setup.c			\
+		plat/hisilicon/poplar/plat_topology.c			\
+		plat/hisilicon/poplar/plat_pm.c
+
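
With NEED_BL33 set, a BL33 payload must be supplied at build time; a typical invocation for this platform would look something like the line below (toolchain prefix and U-Boot path are illustrative, not part of the patch):

	make CROSS_COMPILE=aarch64-linux-gnu- PLAT=poplar BL33=/path/to/u-boot.bin all fip
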
diff --git a/plat/juno/aarch64/bl1_plat_helpers.S b/plat/juno/aarch64/bl1_plat_helpers.S
deleted file mode 100644
index 3054eab..0000000
--- a/plat/juno/aarch64/bl1_plat_helpers.S
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include "../juno_def.h"
-
-	.globl	platform_is_primary_cpu
-	.globl	platform_get_entrypoint
-	.globl	platform_cold_boot_init
-	.globl	plat_secondary_cold_boot_setup
-
-	/* -----------------------------------------------------
-	 * unsigned int platform_is_primary_cpu (unsigned int mpid);
-	 *
-	 * Given the mpidr say whether this cpu is the primary
-	 * cpu (applicable ony after a cold boot)
-	 * -----------------------------------------------------
-	 */
-func platform_is_primary_cpu
-	mov	x9, x30
-	bl	platform_get_core_pos
-	ldr	x1, =SCP_BOOT_CFG_ADDR
-	ldr	x1, [x1]
-	ubfx	x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_MASK
-	cmp	x0, x1
-	cset	x0, eq
-	ret	x9
-
-	/* -----------------------------------------------------
-	 * void plat_secondary_cold_boot_setup (void);
-	 *
-	 * This function performs any platform specific actions
-	 * needed for a secondary cpu after a cold reset e.g
-	 * mark the cpu's presence, mechanism to place it in a
-	 * holding pen etc.
-	 * -----------------------------------------------------
-	 */
-func plat_secondary_cold_boot_setup
-	/* Juno todo: Implement secondary CPU cold boot setup on Juno */
-cb_panic:
-	b	cb_panic
-
-
-	/* -----------------------------------------------------
-	 * void platform_get_entrypoint (unsigned int mpid);
-	 *
-	 * Main job of this routine is to distinguish between
-	 * a cold and warm boot.
-	 * On a cold boot the secondaries first wait for the
-	 * platform to be initialized after which they are
-	 * hotplugged in. The primary proceeds to perform the
-	 * platform initialization.
-	 * On a warm boot, each cpu jumps to the address in its
-	 * mailbox.
-	 *
-	 * TODO: Not a good idea to save lr in a temp reg
-	 * -----------------------------------------------------
-	 */
-func platform_get_entrypoint
-	mov	x9, x30 // lr
-	bl	platform_get_core_pos
-	ldr	x1, =TRUSTED_MAILBOXES_BASE
-	lsl	x0, x0, #TRUSTED_MAILBOX_SHIFT
-	ldr	x0, [x1, x0]
-	ret	x9
-
-
-	/* -----------------------------------------------------
-	 * void platform_cold_boot_init (bl1_main function);
-	 *
-	 * Routine called only by the primary cpu after a cold
-	 * boot to perform early platform initialization
-	 * -----------------------------------------------------
-	 */
-func platform_cold_boot_init
-	mov	x20, x0
-
-	/* ---------------------------------------------
-	 * Give ourselves a small coherent stack to
-	 * ease the pain of initializing the MMU and
-	 * CCI in assembler
-	 * ---------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_coherent_stack
-
-	/* ---------------------------------------------
-	 * Architectural init. can be generic e.g.
-	 * enabling stack alignment and platform spec-
-	 * ific e.g. MMU & page table setup as per the
-	 * platform memory map. Perform the latter here
-	 * and the former in bl1_main.
-	 * ---------------------------------------------
-	 */
-	bl	bl1_early_platform_setup
-	bl	bl1_plat_arch_setup
-
-	/* ---------------------------------------------
-	 * Give ourselves a stack allocated in Normal
-	 * -IS-WBWA memory
-	 * ---------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
-
-	/* ---------------------------------------------
-	 * Jump to the main function. Returning from it
-	 * is a terminal error.
-	 * ---------------------------------------------
-	 */
-	blr	x20
-
-cb_init_panic:
-	b	cb_init_panic
diff --git a/plat/juno/aarch64/juno_common.c b/plat/juno/aarch64/juno_common.c
deleted file mode 100644
index 27d4c8f..0000000
--- a/plat/juno/aarch64/juno_common.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <debug.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <xlat_tables.h>
-#include "../juno_def.h"
-
-#define MAP_MHU_SECURE	MAP_REGION_FLAT(MHU_SECURE_BASE,		\
-					MHU_SECURE_SIZE,		\
-					(MHU_PAYLOAD_CACHED ?		\
-					 MT_MEMORY : MT_DEVICE)		\
-					| MT_RW | MT_SECURE)
-
-#define MAP_FLASH	MAP_REGION_FLAT(FLASH_BASE,			\
-					FLASH_SIZE,			\
-					MT_MEMORY | MT_RO | MT_SECURE)
-
-#define MAP_IOFPGA	MAP_REGION_FLAT(IOFPGA_BASE,			\
-					IOFPGA_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
-					DEVICE0_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_DEVICE1	MAP_REGION_FLAT(DEVICE1_BASE,			\
-					DEVICE1_SIZE,			\
-					MT_DEVICE | MT_RW | MT_SECURE)
-
-#define MAP_NS_DRAM	MAP_REGION_FLAT(DRAM_NS_BASE,			\
-					DRAM_NS_SIZE,			\
-					MT_MEMORY | MT_RW | MT_NS)
-
-#define MAP_TSP_MEM	MAP_REGION_FLAT(TSP_SEC_MEM_BASE, 		\
-					TSP_SEC_MEM_SIZE,		\
-					MT_MEMORY | MT_RW | MT_SECURE)
-
-/*
- * Table of regions for different BL stages to map using the MMU.
- * This doesn't include Trusted RAM as the 'mem_layout' argument passed to
- * configure_mmu_elx() will give the available subset of that,
- */
-#if IMAGE_BL1
-static const mmap_region_t juno_mmap[] = {
-	MAP_MHU_SECURE,
-	MAP_FLASH,
-	MAP_IOFPGA,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	{0}
-};
-#endif
-#if IMAGE_BL2
-static const mmap_region_t juno_mmap[] = {
-	MAP_MHU_SECURE,
-	MAP_FLASH,
-	MAP_IOFPGA,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	MAP_NS_DRAM,
-	MAP_TSP_MEM,
-	{0}
-};
-#endif
-#if IMAGE_BL31
-static const mmap_region_t juno_mmap[] = {
-	MAP_MHU_SECURE,
-	MAP_IOFPGA,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	MAP_TSP_MEM,
-	{0}
-};
-#endif
-#if IMAGE_BL32
-static const mmap_region_t juno_mmap[] = {
-	MAP_IOFPGA,
-	MAP_DEVICE0,
-	MAP_DEVICE1,
-	{0}
-};
-#endif
-
-/* Array of secure interrupts to be configured by the gic driver */
-const unsigned int irq_sec_array[] = {
-	IRQ_MHU,
-	IRQ_GPU_SMMU_0,
-	IRQ_GPU_SMMU_1,
-	IRQ_ETR_SMMU,
-	IRQ_TZC400,
-	IRQ_TZ_WDOG,
-	IRQ_SEC_PHY_TIMER,
-	IRQ_SEC_SGI_0,
-	IRQ_SEC_SGI_1,
-	IRQ_SEC_SGI_2,
-	IRQ_SEC_SGI_3,
-	IRQ_SEC_SGI_4,
-	IRQ_SEC_SGI_5,
-	IRQ_SEC_SGI_6,
-	IRQ_SEC_SGI_7
-};
-
-const unsigned int num_sec_irqs = sizeof(irq_sec_array) /
-	sizeof(irq_sec_array[0]);
-
-/*******************************************************************************
- * Macro generating the code for the function setting up the pagetables as per
- * the platform memory map & initialize the mmu, for the given exception level
- ******************************************************************************/
-#if USE_COHERENT_MEM
-#define DEFINE_CONFIGURE_MMU_EL(_el)				\
-	void configure_mmu_el##_el(unsigned long total_base,	\
-				  unsigned long total_size,	\
-				  unsigned long ro_start,	\
-				  unsigned long ro_limit,	\
-				  unsigned long coh_start,	\
-				  unsigned long coh_limit)	\
-	{							\
-	       mmap_add_region(total_base, total_base,		\
-			       total_size,			\
-			       MT_MEMORY | MT_RW | MT_SECURE);	\
-	       mmap_add_region(ro_start, ro_start,		\
-			       ro_limit - ro_start,		\
-			       MT_MEMORY | MT_RO | MT_SECURE);	\
-	       mmap_add_region(coh_start, coh_start,		\
-			       coh_limit - coh_start,		\
-			       MT_DEVICE | MT_RW | MT_SECURE);	\
-	       mmap_add(juno_mmap);				\
-	       init_xlat_tables();				\
-								\
-	       enable_mmu_el##_el(0);				\
-	}
-#else
-#define DEFINE_CONFIGURE_MMU_EL(_el)				\
-	void configure_mmu_el##_el(unsigned long total_base,	\
-				  unsigned long total_size,	\
-				  unsigned long ro_start,	\
-				  unsigned long ro_limit)	\
-	{							\
-	       mmap_add_region(total_base, total_base,		\
-			       total_size,			\
-			       MT_MEMORY | MT_RW | MT_SECURE);	\
-	       mmap_add_region(ro_start, ro_start,		\
-			       ro_limit - ro_start,		\
-			       MT_MEMORY | MT_RO | MT_SECURE);	\
-	       mmap_add(juno_mmap);				\
-	       init_xlat_tables();				\
-								\
-	       enable_mmu_el##_el(0);				\
-	}
-#endif
-/* Define EL1 and EL3 variants of the function initialising the MMU */
-DEFINE_CONFIGURE_MMU_EL(1)
-DEFINE_CONFIGURE_MMU_EL(3)
-
-
-unsigned long plat_get_ns_image_entrypoint(void)
-{
-	return NS_IMAGE_OFFSET;
-}
-
-uint64_t plat_get_syscnt_freq(void)
-{
-	uint64_t counter_base_frequency;
-
-	/* Read the frequency from Frequency modes table */
-	counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
-
-	/* The first entry of the frequency modes table must not be 0 */
-	if (counter_base_frequency == 0)
-		panic();
-
-	return counter_base_frequency;
-}
-
-void plat_gic_init(void)
-{
-	arm_gic_init(GICC_BASE, GICD_BASE, 0, irq_sec_array, num_sec_irqs);
-}
diff --git a/plat/juno/aarch64/plat_helpers.S b/plat/juno/aarch64/plat_helpers.S
deleted file mode 100644
index 37966a3..0000000
--- a/plat/juno/aarch64/plat_helpers.S
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <bl_common.h>
-#include <cortex_a57.h>
-#include <cpu_macros.S>
-#include <platform_def.h>
-#include "../juno_def.h"
-
-	.globl	plat_crash_console_init
-	.globl	plat_crash_console_putc
-	.globl	plat_report_exception
-	.globl	plat_reset_handler
-	.globl	platform_get_core_pos
-	.globl	platform_mem_init
-
-	/* Define a crash console for the plaform */
-#define JUNO_CRASH_CONSOLE_BASE		PL011_UART3_BASE
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_init(void)
-	 * Function to initialize the crash console
-	 * without a C Runtime to print crash report.
-	 * Clobber list : x0, x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_init
-	mov_imm	x0, JUNO_CRASH_CONSOLE_BASE
-	mov_imm	x1, PL011_UART3_CLK_IN_HZ
-	mov_imm	x2, PL011_BAUDRATE
-	b	console_core_init
-
-	/* ---------------------------------------------
-	 * int plat_crash_console_putc(int c)
-	 * Function to print a character on the crash
-	 * console without a C Runtime.
-	 * Clobber list : x1, x2
-	 * ---------------------------------------------
-	 */
-func plat_crash_console_putc
-	mov_imm	x1, JUNO_CRASH_CONSOLE_BASE
-	b	console_core_putc
-
-	/* ---------------------------------------------
-	 * void plat_report_exception(unsigned int type)
-	 * Function to report an unhandled exception
-	 * with platform-specific means.
-	 * On Juno platform, it updates the LEDs
-	 * to indicate where we are
-	 * ---------------------------------------------
-	 */
-func plat_report_exception
-	mrs	x1, CurrentEl
-	lsr	x1, x1, #MODE_EL_SHIFT
-	lsl	x1, x1, #SYS_LED_EL_SHIFT
-	lsl	x0, x0, #SYS_LED_EC_SHIFT
-	mov	x2, #(SECURE << SYS_LED_SS_SHIFT)
-	orr	x0, x0, x2
-	orr	x0, x0, x1
-	mov	x1, #VE_SYSREGS_BASE
-	add	x1, x1, #V2M_SYS_LED
-	str	w0, [x1]
-	ret
-
-	/*
-	 * Return 0 to 3 for the A53s and 4 or 5 for the A57s
-	 */
-func platform_get_core_pos
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
-	eor	x0, x0, #(1 << MPIDR_AFFINITY_BITS)  // swap A53/A57 order
-	add	x0, x1, x0, LSR #6
-	ret
-
-
-	/* -----------------------------------------------------
-	 * void platform_mem_init(void);
-	 *
-	 * We don't need to carry out any memory initialization
-	 * on Juno. The Secure RAM is accessible straight away.
-	 * -----------------------------------------------------
-	 */
-func platform_mem_init
-	ret
-
-	/* -----------------------------------------------------
-	 * void plat_reset_handler(void);
-	 *
-	 * Before adding code in this function, refer to the
-	 * guidelines in docs/firmware-design.md to determine
-	 * whether the code should reside within the
-	 * FIRST_RESET_HANDLER_CALL block or not.
-	 *
-	 * Implement workaround for defect id 831273 by enabling
-	 * an event stream every 65536 cycles and set the L2 RAM
-	 * latencies for Cortex-A57. This code is included only
-	 * when FIRST_RESET_HANDLER_CALL is defined since it
-	 * should be executed only during BL1.
-	 * -----------------------------------------------------
-	 */
-func plat_reset_handler
-#ifdef FIRST_RESET_HANDLER_CALL
-	/* Read the MIDR_EL1 */
-	mrs	x0, midr_el1
-	ubfx	x1, x0, MIDR_PN_SHIFT, #12
-	cmp     w1, #((CORTEX_A57_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
-	b.ne    1f
-
-	/* Change the L2 Data and Tag Ram latency to 3 cycles */
-	mov	x0, #(L2_DATA_RAM_LATENCY_3_CYCLES |    \
-			(L2_TAG_RAM_LATENCY_3_CYCLES << \
-			 L2CTLR_TAG_RAM_LATENCY_SHIFT))
-	msr     L2CTLR_EL1, x0
-
-1:
-	/* ---------------------------------------------
-	 * Enable the event stream every 65536 cycles
-	 * ---------------------------------------------
-	 */
-	mov     x0, #(0xf << EVNTI_SHIFT)
-	orr     x0, x0, #EVNTEN_BIT
-	msr     CNTKCTL_EL1, x0
-	isb
-#endif /* FIRST_RESET_HANDLER_CALL */
-	ret
diff --git a/plat/juno/bl1_plat_setup.c b/plat/juno/bl1_plat_setup.c
deleted file mode 100644
index 23e8592..0000000
--- a/plat/juno/bl1_plat_setup.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <cci400.h>
-#include <console.h>
-#include <debug.h>
-#include <mmio.h>
-#include <platform.h>
-#include <platform_def.h>
-#include "../../bl1/bl1_private.h"
-#include "juno_def.h"
-#include "juno_private.h"
-
-#if USE_COHERENT_MEM
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL1_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL1_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/* Data structure which holds the extents of the trusted RAM for BL1 */
-static meminfo_t bl1_tzram_layout;
-
-meminfo_t *bl1_plat_sec_mem_layout(void)
-{
-	return &bl1_tzram_layout;
-}
-
-/*******************************************************************************
- * Perform any BL1 specific platform actions.
- ******************************************************************************/
-void bl1_early_platform_setup(void)
-{
-	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
-
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART2_BASE, PL011_UART2_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/*
-	 * Enable CCI-400 for this cluster. No need for locks as no other cpu is
-	 * active at the moment
-	 */
-	cci_init(CCI400_BASE,
-		 CCI400_SL_IFACE3_CLUSTER_IX,
-		 CCI400_SL_IFACE4_CLUSTER_IX);
-	cci_enable_cluster_coherency(read_mpidr());
-
-	/* Allow BL1 to see the whole Trusted RAM */
-	bl1_tzram_layout.total_base = TZRAM_BASE;
-	bl1_tzram_layout.total_size = TZRAM_SIZE;
-
-	/* Calculate how much RAM BL1 is using and how much remains free */
-	bl1_tzram_layout.free_base = TZRAM_BASE;
-	bl1_tzram_layout.free_size = TZRAM_SIZE;
-	reserve_mem(&bl1_tzram_layout.free_base,
-		    &bl1_tzram_layout.free_size,
-		    BL1_RAM_BASE,
-		    bl1_size);
-
-	INFO("BL1: 0x%lx - 0x%lx [size = %u]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
-	     bl1_size);
-}
-
-
-/*
- * Address of slave 'n' security setting in the NIC-400 address region
- * control
- * TODO: Ideally this macro should be moved in a "nic-400.h" header file but
- * it would be the only thing in there so it's not worth it at the moment.
- */
-#define NIC400_ADDR_CTRL_SECURITY_REG(n)	(0x8 + (n) * 4)
-
-static void init_nic400(void)
-{
-	/*
-	 * NIC-400 Access Control Initialization
-	 *
-	 * Define access privileges by setting each corresponding bit to:
-	 *   0 = Secure access only
-	 *   1 = Non-secure access allowed
-	 */
-
-	/*
-	 * Allow non-secure access to some SOC regions, excluding UART1, which
-	 * remains secure.
-	 * Note: This is the NIC-400 device on the SOC
-	 */
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_USB_EHCI), ~0);
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_TLX_MASTER), ~0);
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_USB_OHCI), ~0);
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_PL354_SMC), ~0);
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_APB4_BRIDGE), ~0);
-	mmio_write_32(SOC_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(SOC_NIC400_BOOTSEC_BRIDGE),
-		      ~SOC_NIC400_BOOTSEC_BRIDGE_UART1);
-
-	/*
-	 * Allow non-secure access to some CSS regions.
-	 * Note: This is the NIC-400 device on the CSS
-	 */
-	mmio_write_32(CSS_NIC400_BASE +
-		      NIC400_ADDR_CTRL_SECURITY_REG(CSS_NIC400_SLAVE_BOOTSECURE),
-		      ~0);
-}
-
-
-#define PCIE_SECURE_REG		0x3000
-#define PCIE_SEC_ACCESS_MASK	((1 << 0) | (1 << 1)) /* REG and MEM access bits */
-
-static void init_pcie(void)
-{
-	/*
-	 * PCIE Root Complex Security settings to enable non-secure
-	 * access to config registers.
-	 */
-	mmio_write_32(PCIE_CONTROL_BASE + PCIE_SECURE_REG, PCIE_SEC_ACCESS_MASK);
-}
-
-
-/*******************************************************************************
- * Function which will perform any remaining platform-specific setup that can
- * occur after the MMU and data cache have been enabled.
- ******************************************************************************/
-void bl1_platform_setup(void)
-{
-	init_nic400();
-	init_pcie();
-
-	/* Initialise the IO layer and register platform IO devices */
-	io_setup();
-
-	/* Enable and initialize the System level generic timer */
-	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN);
-}
-
-
-/*******************************************************************************
- * Perform the very early platform specific architecture setup here. At the
- * moment this only does basic initialization. Later architectural setup
- * (bl1_arch_setup()) does not do anything platform specific.
- ******************************************************************************/
-void bl1_plat_arch_setup(void)
-{
-	configure_mmu_el3(bl1_tzram_layout.total_base,
-			  bl1_tzram_layout.total_size,
-			  TZROM_BASE,
-			  TZROM_BASE + TZROM_SIZE
-#if USE_COHERENT_MEM
-			  , BL1_COHERENT_RAM_BASE,
-			  BL1_COHERENT_RAM_LIMIT
-#endif
-			  );
-}
-
-/*******************************************************************************
- * Before calling this function BL2 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL2 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- ******************************************************************************/
-void bl1_plat_set_bl2_ep_info(image_info_t *bl2_image,
-			      entry_point_info_t *bl2_ep)
-{
-	SET_SECURITY_STATE(bl2_ep->h.attr, SECURE);
-	bl2_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-}
diff --git a/plat/juno/bl2_plat_setup.c b/plat/juno/bl2_plat_setup.c
deleted file mode 100644
index 8e7b2a0..0000000
--- a/plat/juno/bl2_plat_setup.c
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <console.h>
-#include <debug.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <string.h>
-#include "juno_def.h"
-#include "juno_private.h"
-#include "scp_bootloader.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 2 constants identify the extents of the code & RO data region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
- */
-#define BL2_RO_BASE (unsigned long)(&__RO_START__)
-#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/* Data structure which holds the extents of the trusted RAM for BL2 */
-static meminfo_t bl2_tzram_layout
-__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE)));
-
-/*******************************************************************************
- * Structure which holds the arguments which need to be passed to BL3-1
- ******************************************************************************/
-static bl2_to_bl31_params_mem_t bl31_params_mem;
-
-meminfo_t *bl2_plat_sec_mem_layout(void)
-{
-	return &bl2_tzram_layout;
-}
-
-/*******************************************************************************
- * This function assigns a pointer to the memory that the platform has kept
- * aside to pass platform specific and trusted firmware related information
- * to BL31. This memory is provided by the statically allocated
- * bl2_to_bl31_params_mem_t structure, which is a superset of all the
- * structures whose information is passed to BL31.
- * NOTE: This function should be called only once, before generating the
- * params for BL31.
- ******************************************************************************/
-bl31_params_t *bl2_plat_get_bl31_params(void)
-{
-	bl31_params_t *bl2_to_bl31_params;
-
-	/*
-	 * Initialise the memory for all the arguments that need to
-	 * be passed to BL3-1
-	 */
-	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));
-
-	/* Assign memory for TF related information */
-	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
-	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
-
-	/* Fill BL3-1 related information */
-	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-
-	/* Fill BL3-2 related information if it exists */
-#if BL32_BASE
-	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
-		VERSION_1, 0);
-	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-#endif
-
-	/* Fill BL3-3 related information */
-	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
-		PARAM_EP, VERSION_1, 0);
-
-	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
-	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
-
-	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
-	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
-		VERSION_1, 0);
-
-	return bl2_to_bl31_params;
-}
-
-/*******************************************************************************
- * This function returns a pointer to the shared memory that the platform
- * has set aside to hold the entry point information of BL3-1 for BL2.
- ******************************************************************************/
-struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
-{
-#if DEBUG
-	bl31_params_mem.bl31_ep_info.args.arg1 = JUNO_BL31_PLAT_PARAM_VAL;
-#endif
-
-	return &bl31_params_mem.bl31_ep_info;
-}
-
-/*******************************************************************************
- * BL1 has passed the extents of the trusted RAM that should be visible to BL2
- * in x0. This memory layout is sitting at the base of the free trusted RAM.
- * Copy it to a safe location before it is reclaimed by later BL2 functionality.
- ******************************************************************************/
-void bl2_early_platform_setup(meminfo_t *mem_layout)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART2_BASE, PL011_UART2_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/* Setup the BL2 memory layout */
-	bl2_tzram_layout = *mem_layout;
-
-	/* Initialise the IO layer and register platform IO devices */
-	io_setup();
-}
-
-/*******************************************************************************
- * Perform platform specific setup, i.e. initialize the IO layer, load BL3-0
- * image and initialise the memory location to use for passing arguments to
- * BL3-1.
- ******************************************************************************/
-void bl2_platform_setup(void)
-{
-	/* Initialize the secure environment */
-	plat_security_setup();
-}
-
-/* Flush the TF params and the TF plat params */
-void bl2_plat_flush_bl31_params(void)
-{
-	flush_dcache_range((unsigned long)&bl31_params_mem,
-			sizeof(bl2_to_bl31_params_mem_t));
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only initializes the MMU in a quick and dirty way.
- ******************************************************************************/
-void bl2_plat_arch_setup(void)
-{
-	configure_mmu_el1(bl2_tzram_layout.total_base,
-			  bl2_tzram_layout.total_size,
-			  BL2_RO_BASE,
-			  BL2_RO_LIMIT
-#if USE_COHERENT_MEM
-			  , BL2_COHERENT_RAM_BASE,
-			  BL2_COHERENT_RAM_LIMIT
-#endif
-			  );
-}
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-0, i.e. anywhere
- * in trusted RAM as long as it doesn't overwrite BL2.
- ******************************************************************************/
-void bl2_plat_get_bl30_meminfo(meminfo_t *bl30_meminfo)
-{
-	*bl30_meminfo = bl2_tzram_layout;
-}
-
-/*******************************************************************************
- * Transfer BL3-0 from Trusted RAM using the SCP Download protocol.
- * Return 0 on success, -1 otherwise.
- ******************************************************************************/
-int bl2_plat_handle_bl30(image_info_t *bl30_image_info)
-{
-	int ret;
-
-	ret = scp_bootloader_transfer((void *)bl30_image_info->image_base,
-		bl30_image_info->image_size);
-
-	if (ret == 0)
-		INFO("BL2: BL3-0 transferred to SCP\n\r");
-	else
-		ERROR("BL2: BL3-0 transfer failure\n\r");
-
-	return ret;
-}
-
-/*******************************************************************************
- * Before calling this function BL31 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL31 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- ******************************************************************************/
-void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
-			       entry_point_info_t *bl31_ep_info)
-{
-	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
-	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
-				       DISABLE_ALL_EXCEPTIONS);
-}
-
-
-/*******************************************************************************
- * Before calling this function BL32 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL32 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- ******************************************************************************/
-void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
-			       entry_point_info_t *bl32_ep_info)
-{
-	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
-	/*
-	* The Secure Payload Dispatcher service is responsible for
-	* setting the SPSR prior to entry into the BL32 image.
-	*/
-	bl32_ep_info->spsr = 0;
-}
-
-/*******************************************************************************
- * Before calling this function BL33 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL33 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- ******************************************************************************/
-void bl2_plat_set_bl33_ep_info(image_info_t *image,
-				       entry_point_info_t *bl33_ep_info)
-{
-	unsigned long el_status;
-	unsigned int mode;
-
-	/* Figure out what mode we enter the non-secure world in */
-	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
-	el_status &= ID_AA64PFR0_ELX_MASK;
-
-	if (el_status)
-		mode = MODE_EL2;
-	else
-		mode = MODE_EL1;
-
-	/*
-	 * TODO: Consider the possibility of specifying the SPSR in
-	 * the FIP ToC and allowing the platform to have a say as
-	 * well.
-	 */
-	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
-				       DISABLE_ALL_EXCEPTIONS);
-	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
-}
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-2
- ******************************************************************************/
-void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
-{
-	/*
-	 * Populate the extents of memory available for loading BL3-2.
-	 */
-	bl32_meminfo->total_base = BL32_BASE;
-	bl32_meminfo->free_base = BL32_BASE;
-	bl32_meminfo->total_size =
-		       (TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-	bl32_meminfo->free_size =
-		       (TSP_SEC_MEM_BASE + TSP_SEC_MEM_SIZE) - BL32_BASE;
-}
-
-
-/*******************************************************************************
- * Populate the extents of memory available for loading BL3-3
- ******************************************************************************/
-void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
-{
-	bl33_meminfo->total_base = DRAM_NS_BASE;
-	bl33_meminfo->total_size = DRAM_NS_SIZE;
-	bl33_meminfo->free_base = DRAM_NS_BASE;
-	bl33_meminfo->free_size = DRAM_NS_SIZE;
-}
diff --git a/plat/juno/bl31_plat_setup.c b/plat/juno/bl31_plat_setup.c
deleted file mode 100644
index ad8ea43..0000000
--- a/plat/juno/bl31_plat_setup.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arm_gic.h>
-#include <assert.h>
-#include <bl31.h>
-#include <bl_common.h>
-#include <cci400.h>
-#include <console.h>
-#include <mmio.h>
-#include <platform.h>
-#include <stddef.h>
-#include "juno_def.h"
-#include "juno_private.h"
-#include "mhu.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted RAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-extern unsigned long __BL31_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 3 constants identify the extents of the code, RO data region and the
- * limit of the BL3-1 image.  These addresses are used by the MMU setup code and
- * therefore they must be page-aligned.  It is the responsibility of the linker
- * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL31_RO_BASE (unsigned long)(&__RO_START__)
-#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
-#define BL31_END (unsigned long)(&__BL31_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/******************************************************************************
- * Placeholder variables for copying the arguments that have been passed to
- * BL3-1 from BL2.
- ******************************************************************************/
-static entry_point_info_t bl32_ep_info;
-static entry_point_info_t bl33_ep_info;
-
-/*******************************************************************************
- * Return a pointer to the 'entry_point_info' structure of the next image for
- * the security state specified. BL3-3 corresponds to the non-secure image type
- * while BL3-2 corresponds to the secure image type. A NULL pointer is returned
- * if the image does not exist.
- ******************************************************************************/
-entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
-{
-	entry_point_info_t *next_image_info;
-
-	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
-
-	/* None of the images on this platform can have 0x0 as the entrypoint */
-	if (next_image_info->pc)
-		return next_image_info;
-	else
-		return NULL;
-}
-
-/*******************************************************************************
- * Perform any BL3-1 specific platform actions. Here is an opportunity to copy
- * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
- * are lost (potentially). This needs to be done before the MMU is initialized
- * so that the memory layout can be used while creating page tables. Also, BL2
- * has flushed this information to memory, so we are guaranteed to pick up good
- * data.
- ******************************************************************************/
-void bl31_early_platform_setup(bl31_params_t *from_bl2,
-			       void *plat_params_from_bl2)
-{
-	/* Initialize the console to provide early debug support */
-	console_init(PL011_UART2_BASE, PL011_UART2_CLK_IN_HZ, PL011_BAUDRATE);
-
-	/*
-	 * Initialise the CCI-400 driver for BL31 so that it is accessible after
-	 * a warm boot. BL1 should have already enabled CCI coherency for this
-	 * cluster during cold boot.
-	 */
-	cci_init(CCI400_BASE,
-		 CCI400_SL_IFACE3_CLUSTER_IX,
-		 CCI400_SL_IFACE4_CLUSTER_IX);
-
-	/*
-	 * Check that the params passed from BL2 are not NULL.
-	 */
-	assert(from_bl2 != NULL);
-	assert(from_bl2->h.type == PARAM_BL31);
-	assert(from_bl2->h.version >= VERSION_1);
-	/*
-	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
-	 * to verify platform parameters from BL2 to BL3-1.
-	 * In release builds, it's not used.
-	 */
-	assert(((unsigned long long)plat_params_from_bl2) ==
-		JUNO_BL31_PLAT_PARAM_VAL);
-
-	/*
-	 * Copy BL3-2 and BL3-3 entry point information.
-	 * They are stored in Secure RAM, in BL2's address space.
-	 */
-	bl32_ep_info = *from_bl2->bl32_ep_info;
-	bl33_ep_info = *from_bl2->bl33_ep_info;
-}
-
-/*******************************************************************************
- * Initialize the MHU and the GIC.
- ******************************************************************************/
-void bl31_platform_setup(void)
-{
-	unsigned int reg_val;
-
-	mhu_secure_init();
-
-	/* Initialize the gic cpu and distributor interfaces */
-	plat_gic_init();
-	arm_gic_setup();
-
-	/* Enable and initialize the System level generic timer */
-	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN);
-
-	/* Allow access to the System counter timer module */
-	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
-	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
-	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
-	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
-
-	reg_val = (1 << CNTNSAR_NS_SHIFT(1));
-	mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
-
-	/* Topologies are best known to the platform. */
-	plat_setup_topology();
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only initializes the MMU in a quick and dirty way.
- ******************************************************************************/
-void bl31_plat_arch_setup()
-{
-	configure_mmu_el3(BL31_RO_BASE,
-			  (BL31_END - BL31_RO_BASE),
-			  BL31_RO_BASE,
-			  BL31_RO_LIMIT
-#if USE_COHERENT_MEM
-			  ,
-			  BL31_COHERENT_RAM_BASE,
-			  BL31_COHERENT_RAM_LIMIT
-#endif
-			  );
-}
diff --git a/plat/juno/include/plat_macros.S b/plat/juno/include/plat_macros.S
deleted file mode 100644
index a9d2466..0000000
--- a/plat/juno/include/plat_macros.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <cci400.h>
-#include <gic_v2.h>
-#include "platform_def.h"
-#include "../juno_def.h"
-
-.section .rodata.gic_reg_name, "aS"
-gicc_regs:
-	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
-gicd_pend_reg:
-	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
-newline:
-	.asciz "\n"
-spacer:
-	.asciz ":\t\t0x"
-
-
-	/* ---------------------------------------------
-	 * The below macro prints out relevant GIC
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x10, x16, sp
-	 * ---------------------------------------------
-	 */
-	.macro plat_print_gic_regs
-	mov_imm	x16, GICD_BASE
-	mov_imm	x17, GICC_BASE
-	/* Load the gicc reg list to x6 */
-	adr	x6, gicc_regs
-	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
-	ldr	w8, [x17, #GICC_HPPIR]
-	ldr	w9, [x17, #GICC_AHPPIR]
-	ldr	w10, [x17, #GICC_CTLR]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-
-	/* Print the GICD_ISPENDR regs */
-	add	x7, x16, #GICD_ISPENDR
-	adr	x4, gicd_pend_reg
-	bl	asm_print_str
-gicd_ispendr_loop:
-	sub	x4, x7, x16
-	cmp	x4, #0x280
-	b.eq	exit_print_gic_regs
-	bl	asm_print_hex
-
-	adr	x4, spacer
-	bl	asm_print_str
-
-	ldr	x4, [x7], #8
-	bl	asm_print_hex
-
-	adr	x4, newline
-	bl	asm_print_str
-	b	gicd_ispendr_loop
-exit_print_gic_regs:
-	.endm
-
-.section .rodata.cci_reg_name, "aS"
-cci_iface_regs:
-	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
-
-	/* ------------------------------------------------
-	 * The below macro prints out relevant interconnect
-	 * registers whenever an unhandled exception is
-	 * taken in BL3-1.
-	 * Clobbers: x0 - x9, sp
-	 * ------------------------------------------------
-	 */
-	.macro plat_print_interconnect_regs
-	adr	x6, cci_iface_regs
-	/* Store in x7 the base address of the first interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE3_OFFSET)
-	ldr	w8, [x7, #SNOOP_CTRL_REG]
-	/* Store in x7 the base address of the second interface */
-	mov_imm	x7, (CCI400_BASE + SLAVE_IFACE4_OFFSET)
-	ldr	w9, [x7, #SNOOP_CTRL_REG]
-	/* Store to the crash buf and print to console */
-	bl	str_in_crash_buf_print
-	.endm
diff --git a/plat/juno/include/platform_def.h b/plat/juno/include/platform_def.h
deleted file mode 100644
index 31c191c..0000000
--- a/plat/juno/include/platform_def.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PLATFORM_DEF_H__
-#define __PLATFORM_DEF_H__
-
-#include <arch.h>
-#include "../juno_def.h"
-
-/*******************************************************************************
- * Platform binary types for linking
- ******************************************************************************/
-#define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
-#define PLATFORM_LINKER_ARCH            aarch64
-
-/*******************************************************************************
- * Generic platform constants
- ******************************************************************************/
-
-/* Size of cacheable stacks */
-#if TRUSTED_BOARD_BOOT && (IMAGE_BL1 || IMAGE_BL2)
-#define PLATFORM_STACK_SIZE 0x1000
-#else
-#define PLATFORM_STACK_SIZE 0x800
-#endif
-
-#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
-
-/* Trusted Boot Firmware BL2 */
-#define BL2_IMAGE_NAME			"bl2.bin"
-
-/* EL3 Runtime Firmware BL3-1 */
-#define BL31_IMAGE_NAME			"bl31.bin"
-
-/* SCP Firmware BL3-0 */
-#define BL30_IMAGE_NAME			"bl30.bin"
-
-/* Secure Payload BL3-2 (Trusted OS) */
-#define BL32_IMAGE_NAME			"bl32.bin"
-
-/* Non-Trusted Firmware BL3-3 */
-#define BL33_IMAGE_NAME			"bl33.bin" /* e.g. UEFI */
-
-/* Firmware Image Package */
-#define FIP_IMAGE_NAME			"fip.bin"
-
-#if TRUSTED_BOARD_BOOT
-/* Certificates */
-# define BL2_CERT_NAME			"bl2.crt"
-# define TRUSTED_KEY_CERT_NAME		"trusted_key.crt"
-
-# define BL30_KEY_CERT_NAME		"bl30_key.crt"
-# define BL31_KEY_CERT_NAME		"bl31_key.crt"
-# define BL32_KEY_CERT_NAME		"bl32_key.crt"
-# define BL33_KEY_CERT_NAME		"bl33_key.crt"
-
-# define BL30_CERT_NAME			"bl30.crt"
-# define BL31_CERT_NAME			"bl31.crt"
-# define BL32_CERT_NAME			"bl32.crt"
-# define BL33_CERT_NAME			"bl33.crt"
-#endif /* TRUSTED_BOARD_BOOT */
-
-#define PLATFORM_CACHE_LINE_SIZE	64
-#define PLATFORM_CLUSTER_COUNT		2
-#define PLATFORM_CORE_COUNT             6
-#define PLATFORM_NUM_AFFS		(PLATFORM_CLUSTER_COUNT + \
-					 PLATFORM_CORE_COUNT)
-#define MAX_IO_DEVICES			3
-#define MAX_IO_HANDLES			4
-
-/*******************************************************************************
- * BL1 specific defines.
- * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 base
- * addresses.
- ******************************************************************************/
-#define BL1_RO_BASE			TZROM_BASE
-#define BL1_RO_LIMIT			(TZROM_BASE + TZROM_SIZE)
-
-/*
- * Put BL1 RW at the top of the Trusted SRAM. BL1_RW_BASE is calculated using
- * the current BL1 RW debug size plus a little space for growth.
- */
-#if TRUSTED_BOARD_BOOT
-#define BL1_RW_BASE			(TZRAM_BASE + TZRAM_SIZE - 0x8000)
-#else
-#define BL1_RW_BASE			(TZRAM_BASE + TZRAM_SIZE - 0x6000)
-#endif
-#define BL1_RW_LIMIT			(TZRAM_BASE + TZRAM_SIZE)
-
-/*******************************************************************************
- * BL2 specific defines.
- ******************************************************************************/
-/*
- * Put BL2 just below BL3-1. BL2_BASE is calculated using the current BL2 debug
- * size plus a little space for growth.
- */
-#if TRUSTED_BOARD_BOOT
-#define BL2_BASE			(BL31_BASE - 0x1D000)
-#else
-#define BL2_BASE			(BL31_BASE - 0xC000)
-#endif
-#define BL2_LIMIT			BL31_BASE
-
-/*******************************************************************************
- * Load address of BL3-0 in the Juno port
- * BL3-0 is loaded to the same place as BL3-1.  Once BL3-0 is transferred to the
- * SCP, it is discarded and BL3-1 is loaded over the top.
- ******************************************************************************/
-#define BL30_BASE			BL31_BASE
-
-/*******************************************************************************
- * BL3-1 specific defines.
- ******************************************************************************/
-/*
- * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
- * current BL3-1 debug size plus a little space for growth.
- */
-#define BL31_BASE			(TZRAM_BASE + TZRAM_SIZE - 0x1D000)
-#define BL31_PROGBITS_LIMIT		BL1_RW_BASE
-#define BL31_LIMIT			(TZRAM_BASE + TZRAM_SIZE)
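As a worked example of the resulting layout (illustrative arithmetic only, assuming the TZRAM_BASE/TZRAM_SIZE values from juno_def.h and a build without TRUSTED_BOARD_BOOT):

/*
 * Assumed arithmetic, not part of the original header:
 *   TZRAM end   = 0x04001000 + 0x0003F000 = 0x04040000
 *   BL31_BASE   = 0x04040000 - 0x1D000    = 0x04023000
 *   BL1_RW_BASE = 0x04040000 - 0x6000     = 0x0403A000
 *   BL2_BASE    = 0x04023000 - 0xC000     = 0x04017000
 * i.e. BL2 sits just below BL3-1, and BL3-1 progbits must stop at BL1 RW.
 */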
-
-/*******************************************************************************
- * BL3-2 specific defines.
- ******************************************************************************/
-
-/*
- * The TSP can execute either from Trusted SRAM or Trusted DRAM.
- */
-#define BL32_SRAM_BASE                  TZRAM_BASE
-#define BL32_SRAM_LIMIT                 BL31_BASE
-#define BL32_DRAM_BASE                  DRAM_SEC_BASE
-#define BL32_DRAM_LIMIT                 (DRAM_SEC_BASE + DRAM_SEC_SIZE)
-
-#if (PLAT_TSP_LOCATION_ID == PLAT_TRUSTED_SRAM_ID)
-# define TSP_SEC_MEM_BASE		TZRAM_BASE
-# define TSP_SEC_MEM_SIZE		TZRAM_SIZE
-# define BL32_BASE			BL32_SRAM_BASE
-# define BL32_LIMIT			BL32_SRAM_LIMIT
-//# define BL32_PROGBITS_LIMIT	BL2_BASE
-#elif (PLAT_TSP_LOCATION_ID == PLAT_DRAM_ID)
-# define TSP_SEC_MEM_BASE		DRAM_SEC_BASE
-# define TSP_SEC_MEM_SIZE		DRAM_SEC_SIZE
-# define BL32_BASE			BL32_DRAM_BASE
-# define BL32_LIMIT			BL32_DRAM_LIMIT
-#else
-# error "Unsupported PLAT_TSP_LOCATION_ID value"
-#endif
-
-/*******************************************************************************
- * Load address of BL3-3 in the Juno port
- ******************************************************************************/
-#define NS_IMAGE_OFFSET			0xE0000000
-
-/*******************************************************************************
- * Platform specific page table and MMU setup constants
- ******************************************************************************/
-#define ADDR_SPACE_SIZE			(1ull << 32)
-
-#if IMAGE_BL1 || IMAGE_BL31
-# define MAX_XLAT_TABLES		3
-#endif
-
-#if IMAGE_BL2 || IMAGE_BL32
-# define MAX_XLAT_TABLES		3
-#endif
-
-#define MAX_MMAP_REGIONS		16
-
-/*******************************************************************************
- * ID of the secure physical generic timer interrupt used by the TSP
- ******************************************************************************/
-#define TSP_IRQ_SEC_PHY_TIMER		IRQ_SEC_PHY_TIMER
-
-/*******************************************************************************
- * Declarations and constants to access the mailboxes safely. Each mailbox is
- * aligned on the biggest cache line size in the platform. This is known only
- * to the platform as it might have a combination of integrated and external
- * caches. Such alignment ensures that two mailboxes do not sit on the same
- * cache line at any cache level. They could belong to different cpus/clusters
- * and get written while being protected by different locks, causing corruption
- * of a valid mailbox address.
- ******************************************************************************/
-#define CACHE_WRITEBACK_SHIFT   6
-#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
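Since 1 << 6 is 64 bytes, the granule matches PLATFORM_CACHE_LINE_SIZE above. A hypothetical build-time check, assuming cassert.h were in scope, could make that coupling explicit:

CASSERT(CACHE_WRITEBACK_GRANULE == PLATFORM_CACHE_LINE_SIZE,
	assert_cache_writeback_granule_mismatch);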
-
-#if !USE_COHERENT_MEM
-/*******************************************************************************
- * Size of the per-cpu data in bytes that should be reserved in the generic
- * per-cpu data structure for the Juno port.
- ******************************************************************************/
-#define PLAT_PCPU_DATA_SIZE	2
-#endif
-
-#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/juno/include/platform_oid.h b/plat/juno/include/platform_oid.h
deleted file mode 100644
index 38aca12..0000000
--- a/plat/juno/include/platform_oid.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PLATFORM_OID_H_
-#define PLATFORM_OID_H_
-
-/*
- * This is the list of the different extensions containing relevant information
- * to establish the chain of trust.
- *
- * The OIDs shown here are just an example. Real OIDs should be obtained from
- * the ITU-T.
- */
-
-/* Non-volatile counter extensions */
-#define TZ_FW_NVCOUNTER_OID		"1.2.3.1"
-#define NTZ_FW_NVCOUNTER_OID		"1.2.3.2"
-
-/* BL2 extensions */
-#define BL2_HASH_OID			"1.2.3.3"
-
-/* Trusted Key extensions */
-#define TZ_WORLD_PK_OID			"1.2.3.4"
-#define NTZ_WORLD_PK_OID		"1.2.3.5"
-
-/* BL3-1 extensions */
-#define BL31_CONTENT_CERT_PK_OID	"1.2.3.6"
-#define BL31_HASH_OID			"1.2.3.7"
-
-/* BL3-0 extensions */
-#define BL30_CONTENT_CERT_PK_OID	"1.2.3.8"
-#define BL30_HASH_OID			"1.2.3.9"
-
-/* BL3-2 extensions */
-#define BL32_CONTENT_CERT_PK_OID	"1.2.3.10"
-#define BL32_HASH_OID			"1.2.3.11"
-
-/* BL3-3 extensions */
-#define BL33_CONTENT_CERT_PK_OID	"1.2.3.12"
-#define BL33_HASH_OID			"1.2.3.13"
-
-#endif /* PLATFORM_OID_H_ */
diff --git a/plat/juno/juno_def.h b/plat/juno/juno_def.h
deleted file mode 100644
index 8a85aec..0000000
--- a/plat/juno/juno_def.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __JUNO_DEF_H__
-#define __JUNO_DEF_H__
-
-/* Special value used to verify platform parameters from BL2 to BL3-1 */
-#define JUNO_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
-
-/*******************************************************************************
- * Juno memory map related constants
- ******************************************************************************/
-#define FLASH_BASE		0x08000000
-#define FLASH_SIZE		0x04000000
-
-/* Bypass offset from start of NOR flash */
-#define BL1_ROM_BYPASS_OFFSET	0x03EC0000
-
-#ifndef TZROM_BASE
-/* Use the bypass address */
-#define TZROM_BASE		(FLASH_BASE + BL1_ROM_BYPASS_OFFSET)
-#endif
-/* Actual ROM size on Juno is 64 KB, but TBB requires at least 80 KB in debug
- * mode. We can test TBB on Juno by bypassing the ROM and using 128 KB of flash */
-#if TRUSTED_BOARD_BOOT
-#define TZROM_SIZE		0x00020000
-#else
-#define TZROM_SIZE		0x00010000
-#endif
-
-#define TZRAM_BASE		0x04001000
-#define TZRAM_SIZE		0x0003F000
-
-#define PLAT_TRUSTED_SRAM_ID	0
-#define PLAT_DRAM_ID		1
-
-#define MHU_SECURE_BASE		0x04000000
-#define MHU_SECURE_SIZE		0x00001000
-
-#define MHU_PAYLOAD_CACHED	0
-
-#define TRUSTED_MAILBOXES_BASE	MHU_SECURE_BASE
-#define TRUSTED_MAILBOX_SHIFT	4
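A hypothetical usage sketch, with 'mpidr' assumed to hold the calling CPU's MPIDR: each CPU's mailbox would be addressed by scaling its linear id by the mailbox stride:

	/* Sketch only: 1 << TRUSTED_MAILBOX_SHIFT bytes per CPU mailbox */
	unsigned long mbox_addr = TRUSTED_MAILBOXES_BASE +
		(platform_get_core_pos(mpidr) << TRUSTED_MAILBOX_SHIFT);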
-
-#define EMMC_BASE		0x0c000000
-#define EMMC_SIZE		0x04000000
-
-#define PSRAM_BASE		0x14000000
-#define PSRAM_SIZE		0x02000000
-
-#define IOFPGA_BASE		0x1c000000
-#define IOFPGA_SIZE		0x03000000
-
-#define NSROM_BASE		0x1f000000
-#define NSROM_SIZE		0x00001000
-
-/* Following covers Columbus Peripherals excluding NSROM and NSRAM  */
-#define DEVICE0_BASE		0x20000000
-#define DEVICE0_SIZE		0x0e000000
-#define MHU_BASE		0x2b1f0000
-
-#define NSRAM_BASE		0x2e000000
-#define NSRAM_SIZE		0x00008000
-
-/* Following covers Juno Peripherals and PCIe expansion area */
-#define DEVICE1_BASE		0x40000000
-#define DEVICE1_SIZE		0x40000000
-#define PCIE_CONTROL_BASE	0x7ff20000
-
-#define DRAM_BASE		0x80000000
-#define DRAM_SIZE		0x80000000
-
-/*
- * DRAM at 0x8000_0000 is divided in two regions:
- *   - Secure DRAM (default is the top 16MB except for the last 2MB, which are
- *     used by the SCP for DDR retraining)
- *   - Non-Secure DRAM (remaining DRAM starting at DRAM_BASE)
- */
-
-#define DRAM_SCP_SIZE		0x00200000
-#define DRAM_SCP_BASE		(DRAM_BASE + DRAM_SIZE - DRAM_SCP_SIZE)
-
-#define DRAM_SEC_SIZE		0x00E00000
-#define DRAM_SEC_BASE		(DRAM_SCP_BASE - DRAM_SEC_SIZE)
-
-#define DRAM_NS_BASE		DRAM_BASE
-#define DRAM_NS_SIZE		(DRAM_SIZE - DRAM_SCP_SIZE - DRAM_SEC_SIZE)
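Worked out numerically (illustrative arithmetic on the constants above):

/*
 *   DRAM_SCP_BASE = 0x80000000 + 0x80000000 - 0x00200000 = 0xFFE00000
 *   DRAM_SEC_BASE = 0xFFE00000 - 0x00E00000              = 0xFF000000
 *   DRAM_NS_SIZE  = 0x80000000 - 0x00200000 - 0x00E00000 = 0x7F000000
 * giving NS DRAM [0x80000000, 0xFF000000), Secure DRAM [0xFF000000,
 * 0xFFE00000) and the SCP region [0xFFE00000, 0x100000000).
 */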
-
-/* Second region of DRAM */
-#define DRAM2_BASE		0x880000000
-#define DRAM2_SIZE		0x180000000
-
-/* Memory mapped Generic timer interfaces  */
-#define SYS_CNTCTL_BASE		0x2a430000
-#define SYS_CNTREAD_BASE	0x2a800000
-#define SYS_TIMCTL_BASE		0x2a810000
-
-/* V2M motherboard system registers & offsets */
-#define VE_SYSREGS_BASE		0x1c010000
-#define V2M_SYS_LED		0x8
-
-/*
- * V2M sysled bit definitions. The values written to this
- * register are defined in arch.h & runtime_svc.h. Only
- * used by the primary cpu to diagnose any cold boot issues.
- *
- * SYS_LED[0]   - Security state (S=0/NS=1)
- * SYS_LED[2:1] - Exception Level (EL3-EL0)
- * SYS_LED[7:3] - Exception Class (Sync/Async & origin)
- *
- */
-#define SYS_LED_SS_SHIFT		0x0
-#define SYS_LED_EL_SHIFT		0x1
-#define SYS_LED_EC_SHIFT		0x3
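A hypothetical composition sketch, with 'ss', 'el' and 'ec' assumed to hold the three fields described above:

	/* Sketch only: pack the diagnostic fields into the V2M SYS_LED register */
	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_LED,
		      (ss << SYS_LED_SS_SHIFT) |
		      (el << SYS_LED_EL_SHIFT) |
		      (ec << SYS_LED_EC_SHIFT));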
-
-/*******************************************************************************
- * GIC-400 & interrupt handling related constants
- ******************************************************************************/
-#define GICD_BASE			0x2c010000
-#define GICC_BASE			0x2c02f000
-#define GICH_BASE			0x2c04f000
-#define GICV_BASE			0x2c06f000
-
-#define IRQ_MHU			69
-#define IRQ_GPU_SMMU_0		71
-#define IRQ_GPU_SMMU_1		73
-#define IRQ_ETR_SMMU		75
-#define IRQ_TZC400		80
-#define IRQ_TZ_WDOG		86
-
-#define IRQ_SEC_PHY_TIMER		29
-#define IRQ_SEC_SGI_0			8
-#define IRQ_SEC_SGI_1			9
-#define IRQ_SEC_SGI_2			10
-#define IRQ_SEC_SGI_3			11
-#define IRQ_SEC_SGI_4			12
-#define IRQ_SEC_SGI_5			13
-#define IRQ_SEC_SGI_6			14
-#define IRQ_SEC_SGI_7			15
-
-/*******************************************************************************
- * PL011 related constants
- ******************************************************************************/
-/* FPGA UART0 */
-#define PL011_UART0_BASE		0x1c090000
-/* FPGA UART1 */
-#define PL011_UART1_BASE		0x1c0a0000
-/* SoC UART0 */
-#define PL011_UART2_BASE		0x7ff80000
-/* SoC UART1 */
-#define PL011_UART3_BASE		0x7ff70000
-
-#define PL011_BAUDRATE			115200
-
-#define PL011_UART0_CLK_IN_HZ		24000000
-#define PL011_UART1_CLK_IN_HZ		24000000
-#define PL011_UART2_CLK_IN_HZ		7273800
-#define PL011_UART3_CLK_IN_HZ		7273800
-
-/*******************************************************************************
- * NIC-400 related constants
- ******************************************************************************/
-
-/* CSS NIC-400 Global Programmers View (GPV) */
-#define CSS_NIC400_BASE		0x2a000000
-
-/* The slave_bootsecure controls access to GPU, DMC and CS. */
-#define CSS_NIC400_SLAVE_BOOTSECURE		8
-
-/* SoC NIC-400 Global Programmers View (GPV) */
-#define SOC_NIC400_BASE		0x7fd00000
-
-#define SOC_NIC400_USB_EHCI	0
-#define SOC_NIC400_TLX_MASTER	1
-#define SOC_NIC400_USB_OHCI	2
-#define SOC_NIC400_PL354_SMC	3
-/*
- * The apb4_bridge controls access to:
- *   - the PCIe configuration registers
- *   - the MMU units for USB, HDLCD and DMA
- */
-#define SOC_NIC400_APB4_BRIDGE	4
-/*
- * The bootsec_bridge controls access to a bunch of peripherals, e.g. the UARTs.
- */
-#define SOC_NIC400_BOOTSEC_BRIDGE		5
-#define SOC_NIC400_BOOTSEC_BRIDGE_UART1         (1 << 12)
-
-/*******************************************************************************
- * TZC-400 related constants
- ******************************************************************************/
-#define TZC400_BASE		0x2a4a0000
-
-#define TZC400_NSAID_CCI400	0  /* Note: Same as default NSAID!! */
-#define TZC400_NSAID_PCIE	1
-#define TZC400_NSAID_HDLCD0	2
-#define TZC400_NSAID_HDLCD1	3
-#define TZC400_NSAID_USB	4
-#define TZC400_NSAID_DMA330	5
-#define TZC400_NSAID_THINLINKS	6
-#define TZC400_NSAID_AP		9
-#define TZC400_NSAID_GPU	10
-#define TZC400_NSAID_SCP	11
-#define TZC400_NSAID_CORESIGHT	12
-
-/*******************************************************************************
- * CCI-400 related constants
- ******************************************************************************/
-#define CCI400_BASE			0x2c090000
-#define CCI400_SL_IFACE3_CLUSTER_IX	1
-#define CCI400_SL_IFACE4_CLUSTER_IX	0
-
-/*******************************************************************************
- * SCP <=> AP boot configuration
- ******************************************************************************/
-#define SCP_BOOT_CFG_ADDR	0x04000080
-#define PRIMARY_CPU_SHIFT	8
-#define PRIMARY_CPU_MASK	0xf
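A hypothetical read-out sketch: the primary CPU field would be recovered from the SCP boot configuration word roughly as follows:

	unsigned int primary_cpu =
		(mmio_read_32(SCP_BOOT_CFG_ADDR) >> PRIMARY_CPU_SHIFT) &
		PRIMARY_CPU_MASK;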
-
-#endif /* __JUNO_DEF_H__ */
diff --git a/plat/juno/juno_private.h b/plat/juno/juno_private.h
deleted file mode 100644
index 70439e8..0000000
--- a/plat/juno/juno_private.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __JUNO_PRIVATE_H__
-#define __JUNO_PRIVATE_H__
-
-#include <bakery_lock.h>
-#include <bl_common.h>
-#include <cpu_data.h>
-#include <platform_def.h>
-#include <stdint.h>
-
-/*******************************************************************************
- * Forward declarations
- ******************************************************************************/
-struct plat_pm_ops;
-struct meminfo;
-struct bl31_params;
-struct image_info;
-struct entry_point_info;
-
-/*******************************************************************************
- * This structure represents the superset of information that is passed to
- * BL3-1, e.g. while passing control to it from BL2: bl31_params plus
- * other platform specific params.
- ******************************************************************************/
-typedef struct bl2_to_bl31_params_mem {
-	struct bl31_params bl31_params;
-	struct image_info bl31_image_info;
-	struct image_info bl32_image_info;
-	struct image_info bl33_image_info;
-	struct entry_point_info bl33_ep_info;
-	struct entry_point_info bl32_ep_info;
-	struct entry_point_info bl31_ep_info;
-} bl2_to_bl31_params_mem_t;
-
-#if IMAGE_BL31
-#if USE_COHERENT_MEM
-/*
- * These are wrapper macros to the Coherent Memory Bakery Lock API.
- */
-#define juno_lock_init(_lock_arg)		bakery_lock_init(_lock_arg)
-#define juno_lock_get(_lock_arg)		bakery_lock_get(_lock_arg)
-#define juno_lock_release(_lock_arg)		bakery_lock_release(_lock_arg)
-
-#else
-
-/*******************************************************************************
- * Constants that specify how many bakeries this platform implements and bakery
- * ids.
- ******************************************************************************/
-#define JUNO_MAX_BAKERIES	1
-#define JUNO_MHU_BAKERY_ID	0
-
-/*******************************************************************************
- * Definition of structure which holds platform specific per-cpu data. Currently
- * it holds only the bakery lock information for each cpu. Constants to specify
- * how many bakeries this platform implements and bakery ids are specified in
- * juno_def.h
- ******************************************************************************/
-typedef struct juno_cpu_data {
-	bakery_info_t pcpu_bakery_info[JUNO_MAX_BAKERIES];
-} juno_cpu_data_t;
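The lock wrapper macros below assume that pcpu_bakery_info is the first member of this structure; a hypothetical build-time check in the same CASSERT style used at the end of this block could make that explicit:

CASSERT(__builtin_offsetof(juno_cpu_data_t, pcpu_bakery_info) == 0,
	juno_bakery_info_must_be_first_member);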
-
-/* Macro to define the offset of bakery_info_t in juno_cpu_data_t */
-#define JUNO_CPU_DATA_LOCK_OFFSET	__builtin_offsetof\
-					    (juno_cpu_data_t, pcpu_bakery_info)
-
-/*******************************************************************************
- * Helper macros for bakery lock api when using the above juno_cpu_data_t for
- * bakery lock data structures. It assumes that the bakery_info is at the
- * beginning of the platform specific per-cpu data.
- ******************************************************************************/
-#define juno_lock_init(_lock_arg)		/* No init required */
-#define juno_lock_get(_lock_arg)		bakery_lock_get(_lock_arg,	\
-						    CPU_DATA_PLAT_PCPU_OFFSET + \
-						    JUNO_CPU_DATA_LOCK_OFFSET)
-#define juno_lock_release(_lock_arg)		bakery_lock_release(_lock_arg,	\
-						    CPU_DATA_PLAT_PCPU_OFFSET + \
-						    JUNO_CPU_DATA_LOCK_OFFSET)
-
-/*
- * Ensure that the size of the Juno specific per-cpu data structure and the size
- * of the memory allocated in generic per-cpu data for the platform are the same.
- */
-CASSERT(PLAT_PCPU_DATA_SIZE == sizeof(juno_cpu_data_t),	\
-	juno_pcpu_data_size_mismatch);
-#endif /* USE_COHERENT_MEM */
-#else
-/*
- * Dummy wrapper macros for all other BL stages other than BL3-1
- */
-#define juno_lock_init(_lock_arg)
-#define juno_lock_get(_lock_arg)
-#define juno_lock_release(_lock_arg)
-
-#endif /* IMAGE_BL31 */
-
-/*******************************************************************************
- * Function and variable prototypes
- ******************************************************************************/
-void bl1_plat_arch_setup(void);
-void bl2_plat_arch_setup(void);
-void bl31_plat_arch_setup(void);
-int platform_setup_pm(const struct plat_pm_ops **plat_ops);
-unsigned int platform_get_core_pos(unsigned long mpidr);
-void configure_mmu_el1(unsigned long total_base,
-		       unsigned long total_size,
-		       unsigned long ro_start,
-		       unsigned long ro_limit
-#if USE_COHERENT_MEM
-		       , unsigned long coh_start,
-		       unsigned long coh_limit
-#endif
-		       );
-void configure_mmu_el3(unsigned long total_base,
-		       unsigned long total_size,
-		       unsigned long ro_start,
-		       unsigned long ro_limit
-#if USE_COHERENT_MEM
-		       , unsigned long coh_start,
-		       unsigned long coh_limit
-#endif
-		       );
-void plat_report_exception(unsigned long type);
-unsigned long plat_get_ns_image_entrypoint(void);
-unsigned long platform_get_stack(unsigned long mpidr);
-uint64_t plat_get_syscnt_freq(void);
-void plat_gic_init(void);
-
-/* Declarations for plat_topology.c */
-int plat_setup_topology(void);
-int plat_get_max_afflvl(void);
-unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr);
-unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr);
-
-/* Declarations for plat_io_storage.c */
-void io_setup(void);
-int plat_get_image_source(const char *image_name,
-			  uintptr_t *dev_handle,
-			  uintptr_t *image_spec);
-
-/* Declarations for security.c */
-void plat_security_setup(void);
-
-/*
- * Before calling this function BL2 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL2 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- */
-void bl1_plat_set_bl2_ep_info(struct image_info *image,
-			      struct entry_point_info *ep);
-
-/*
- * Before calling this function BL3-1 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL3-1 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- */
-void bl2_plat_set_bl31_ep_info(struct image_info *image,
-			       struct entry_point_info *ep);
-
-/*
- * Before calling this function BL3-2 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL3-2 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- */
-void bl2_plat_set_bl32_ep_info(struct image_info *image,
-			       struct entry_point_info *ep);
-
-/*
- * Before calling this function BL3-3 is loaded in memory and its entrypoint
- * is set by load_image. This is a placeholder for the platform to change
- * the entrypoint of BL3-3 and set SPSR and security state.
- * On Juno we only set the security state and the SPSR; the entrypoint is left unchanged.
- */
-void bl2_plat_set_bl33_ep_info(struct image_info *image,
-			       struct entry_point_info *ep);
-
-/* Gets the memory layout for BL3-2 */
-void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info);
-
-/* Gets the memory layout for BL3-3 */
-void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info);
-
-#endif /* __JUNO_PRIVATE_H__ */
diff --git a/plat/juno/juno_trusted_boot.c b/plat/juno/juno_trusted_boot.c
deleted file mode 100644
index e63d4b2..0000000
--- a/plat/juno/juno_trusted_boot.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include "juno_def.h"
-#include "juno_private.h"
-
-/*
- * Check the validity of the key
- *
- * 0 = success, otherwise = error
- */
-int plat_match_rotpk(const unsigned char *key_buf, unsigned int key_len)
-{
-	/* TODO: check against the ROT key stored in the platform */
-	return 0;
-}
diff --git a/plat/juno/mhu.c b/plat/juno/mhu.c
deleted file mode 100644
index c1c414c..0000000
--- a/plat/juno/mhu.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <bakery_lock.h>
-#include <mmio.h>
-#include "juno_def.h"
-#include "juno_private.h"
-#include "mhu.h"
-
-/* SCP MHU secure channel registers */
-#define SCP_INTR_S_STAT		0x200
-#define SCP_INTR_S_SET		0x208
-#define SCP_INTR_S_CLEAR	0x210
-
-/* CPU MHU secure channel registers */
-#define CPU_INTR_S_STAT		0x300
-#define CPU_INTR_S_SET		0x308
-#define CPU_INTR_S_CLEAR	0x310
-
-#if IMAGE_BL31
-#if USE_COHERENT_MEM
-static bakery_lock_t mhu_secure_lock __attribute__ ((section("tzfw_coherent_mem")));
-#define LOCK_ARG		&mhu_secure_lock
-#else
-#define LOCK_ARG		JUNO_MHU_BAKERY_ID
-#endif /* USE_COHERENT_MEM */
-#else
-#define LOCK_ARG	/* Locks required only for BL3-1 images */
-#endif /* IMAGE_BL31 */
-
-void mhu_secure_message_start(void)
-{
-	juno_lock_get(LOCK_ARG);
-
-	/* Make sure any previous command has finished */
-	while (mmio_read_32(MHU_BASE + CPU_INTR_S_STAT) != 0)
-		;
-}
-
-void mhu_secure_message_send(uint32_t command)
-{
-	/* Send command to SCP and wait for it to pick it up */
-	mmio_write_32(MHU_BASE + CPU_INTR_S_SET, command);
-	while (mmio_read_32(MHU_BASE + CPU_INTR_S_STAT) != 0)
-		;
-}
-
-uint32_t mhu_secure_message_wait(void)
-{
-	/* Wait for response from SCP */
-	uint32_t response;
-	while (!(response = mmio_read_32(MHU_BASE + SCP_INTR_S_STAT)))
-		;
-
-	return response;
-}
-
-void mhu_secure_message_end(void)
-{
-	/* Clear any response we got by writing all ones to the CLEAR register */
-	mmio_write_32(MHU_BASE + SCP_INTR_S_CLEAR, 0xffffffffu);
-
-	juno_lock_release(LOCK_ARG);
-}
-
-void mhu_secure_init(void)
-{
-	juno_lock_init(LOCK_ARG);
-
-	/*
-	 * Clear the CPU's INTR register to make sure we don't see a stale
-	 * or garbage value and think it's a message we've already sent.
-	 */
-	mmio_write_32(MHU_BASE + CPU_INTR_S_CLEAR, 0xffffffffu);
-}
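The four helpers above bracket every secure-channel transaction: start takes the bakery lock and waits for an idle channel, send raises the doorbell, wait polls for the SCP's reply, and end clears it and releases the lock. A minimal caller sketch, assuming only the declarations from the deleted mhu.h; the function name is illustrative and not part of the original sources:

    #include <stdint.h>
    #include "mhu.h"

    /* Illustrative only: one complete command/response round trip with the SCP. */
    static uint32_t example_mhu_round_trip(uint32_t command)
    {
            uint32_t response;

            mhu_secure_message_start();           /* take the lock, wait for an idle channel */
            mhu_secure_message_send(command);     /* write CPU_INTR_S_SET, wait for the SCP to take it */
            response = mhu_secure_message_wait(); /* poll SCP_INTR_S_STAT for the reply */
            mhu_secure_message_end();             /* clear SCP_INTR_S_CLEAR, release the lock */

            return response;
    }

The scp_bootloader.c and scpi.c files deleted further below wrap exactly this sequence with payload handling.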
diff --git a/plat/juno/mhu.h b/plat/juno/mhu.h
deleted file mode 100644
index 5149c82..0000000
--- a/plat/juno/mhu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __MHU_H__
-#define __MHU_H__
-
-#include <stdint.h>
-
-extern void mhu_secure_message_start(void);
-extern void mhu_secure_message_send(uint32_t command);
-extern uint32_t mhu_secure_message_wait(void);
-extern void mhu_secure_message_end(void);
-
-extern void mhu_secure_init(void);
-
-#endif	/* __MHU_H__ */
diff --git a/plat/juno/plat-tsp.ld.S b/plat/juno/plat-tsp.ld.S
deleted file mode 100644
index 16d6c17..0000000
--- a/plat/juno/plat-tsp.ld.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-    ASSERT(__BL32_END__ <= BL2_BASE, "BL3-2 image overlaps BL2 image.")
diff --git a/plat/juno/plat_io_storage.c b/plat/juno/plat_io_storage.c
deleted file mode 100644
index b31865e..0000000
--- a/plat/juno/plat_io_storage.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <io_driver.h>
-#include <io_fip.h>
-#include <io_memmap.h>
-#include <io_storage.h>
-#include <platform_def.h>
-#include <semihosting.h>	/* For FOPEN_MODE_... */
-#include <string.h>
-
-/* IO devices */
-static const io_dev_connector_t *fip_dev_con;
-static uintptr_t fip_dev_spec;
-static uintptr_t fip_dev_handle;
-static const io_dev_connector_t *memmap_dev_con;
-static uintptr_t memmap_dev_spec;
-static uintptr_t memmap_init_params;
-static uintptr_t memmap_dev_handle;
-
-static const io_block_spec_t fip_block_spec = {
-	.offset = FLASH_BASE,
-	.length = FLASH_SIZE
-};
-
-static const io_file_spec_t bl2_file_spec = {
-	.path = BL2_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_file_spec = {
-	.path = BL30_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_file_spec = {
-	.path = BL31_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_file_spec = {
-	.path = BL32_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_file_spec = {
-	.path = BL33_IMAGE_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-#if TRUSTED_BOARD_BOOT
-static const io_file_spec_t bl2_cert_file_spec = {
-	.path = BL2_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t trusted_key_cert_file_spec = {
-	.path = TRUSTED_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_key_cert_file_spec = {
-	.path = BL30_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_key_cert_file_spec = {
-	.path = BL31_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_key_cert_file_spec = {
-	.path = BL32_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_key_cert_file_spec = {
-	.path = BL33_KEY_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl30_cert_file_spec = {
-	.path = BL30_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl31_cert_file_spec = {
-	.path = BL31_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl32_cert_file_spec = {
-	.path = BL32_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-
-static const io_file_spec_t bl33_cert_file_spec = {
-	.path = BL33_CERT_NAME,
-	.mode = FOPEN_MODE_RB
-};
-#endif /* TRUSTED_BOARD_BOOT */
-
-static int open_fip(const uintptr_t spec);
-static int open_memmap(const uintptr_t spec);
-
-struct plat_io_policy {
-	const char *image_name;
-	uintptr_t *dev_handle;
-	uintptr_t image_spec;
-	int (*check)(const uintptr_t spec);
-};
-
-static const struct plat_io_policy policies[] = {
-	{
-		FIP_IMAGE_NAME,
-		&memmap_dev_handle,
-		(uintptr_t)&fip_block_spec,
-		open_memmap
-	}, {
-		BL2_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl2_file_spec,
-		open_fip
-	}, {
-		BL30_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_file_spec,
-		open_fip
-	}, {
-		BL31_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_file_spec,
-		open_fip
-	}, {
-		BL32_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_file_spec,
-		open_fip
-	}, {
-		BL33_IMAGE_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_file_spec,
-		open_fip
-	}, {
-#if TRUSTED_BOARD_BOOT
-		BL2_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl2_cert_file_spec,
-		open_fip
-	}, {
-		TRUSTED_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&trusted_key_cert_file_spec,
-		open_fip
-	}, {
-		BL30_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_key_cert_file_spec,
-		open_fip
-	}, {
-		BL31_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_key_cert_file_spec,
-		open_fip
-	}, {
-		BL32_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_key_cert_file_spec,
-		open_fip
-	}, {
-		BL33_KEY_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_key_cert_file_spec,
-		open_fip
-	}, {
-		BL30_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl30_cert_file_spec,
-		open_fip
-	}, {
-		BL31_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl31_cert_file_spec,
-		open_fip
-	}, {
-		BL32_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl32_cert_file_spec,
-		open_fip
-	}, {
-		BL33_CERT_NAME,
-		&fip_dev_handle,
-		(uintptr_t)&bl33_cert_file_spec,
-		open_fip
-	}, {
-#endif /* TRUSTED_BOARD_BOOT */
-		0, 0, 0
-	}
-};
-
-
-static int open_fip(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-
-	/* See if a Firmware Image Package is available */
-	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_NAME);
-	if (result == IO_SUCCESS) {
-		INFO("Using FIP\n");
-		/*TODO: Check image defined in spec is present in FIP. */
-	}
-	return result;
-}
-
-
-static int open_memmap(const uintptr_t spec)
-{
-	int result = IO_FAIL;
-	uintptr_t local_image_handle;
-
-	result = io_dev_init(memmap_dev_handle, memmap_init_params);
-	if (result == IO_SUCCESS) {
-		result = io_open(memmap_dev_handle, spec, &local_image_handle);
-		if (result == IO_SUCCESS) {
-			/* INFO("Using Memmap IO\n"); */
-			io_close(local_image_handle);
-		}
-	}
-	return result;
-}
-
-void io_setup(void)
-{
-	int io_result = IO_FAIL;
-
-	/* Register the IO devices on this platform */
-	io_result = register_io_dev_fip(&fip_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = register_io_dev_memmap(&memmap_dev_con);
-	assert(io_result == IO_SUCCESS);
-
-	/* Open connections to devices and cache the handles */
-	io_result = io_dev_open(fip_dev_con, fip_dev_spec, &fip_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	io_result = io_dev_open(memmap_dev_con, memmap_dev_spec,
-				&memmap_dev_handle);
-	assert(io_result == IO_SUCCESS);
-
-	/* Ignore improbable errors in release builds */
-	(void)io_result;
-}
-
-
-/* Return an IO device handle and specification which can be used to access
- * an image. Use this to enforce platform load policy */
-int plat_get_image_source(const char *image_name, uintptr_t *dev_handle,
-			  uintptr_t *image_spec)
-{
-	int result = IO_FAIL;
-	const struct plat_io_policy *policy;
-
-	if ((image_name != NULL) && (dev_handle != NULL) &&
-	    (image_spec != NULL)) {
-		policy = policies;
-		while (policy->image_name != NULL) {
-			if (strcmp(policy->image_name, image_name) == 0) {
-				result = policy->check(policy->image_spec);
-				if (result == IO_SUCCESS) {
-					*image_spec = policy->image_spec;
-					*dev_handle = *(policy->dev_handle);
-					break;
-				}
-			}
-			policy++;
-		}
-	} else {
-		result = IO_FAIL;
-	}
-	return result;
-}
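plat_get_image_source() walks the policy table, runs the image's check function and, on success, returns the cached device handle plus the image spec. A sketch of how a boot stage would consume that result, assuming the io_storage API and image name macros already included at the top of the deleted file; the helper name is illustrative only:

    /* Illustrative only: resolve BL3-1 through the platform policy and probe it. */
    static int example_probe_bl31(void)
    {
            uintptr_t dev_handle, image_spec, image_handle;
            int result;

            result = plat_get_image_source(BL31_IMAGE_NAME, &dev_handle, &image_spec);
            if (result != IO_SUCCESS)
                    return result;

            /* Open and immediately close the image to confirm it is reachable. */
            result = io_open(dev_handle, image_spec, &image_handle);
            if (result == IO_SUCCESS)
                    io_close(image_handle);

            return result;
    }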
diff --git a/plat/juno/plat_pm.c b/plat/juno/plat_pm.c
deleted file mode 100644
index 47338cf..0000000
--- a/plat/juno/plat_pm.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <arch_helpers.h>
-#include <arm_gic.h>
-#include <debug.h>
-#include <cci400.h>
-#include <errno.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <psci.h>
-#include "juno_def.h"
-#include "juno_private.h"
-#include "scpi.h"
-
-/*******************************************************************************
- * Private Juno function to program the mailbox for a cpu before it is released
- * from reset.
- ******************************************************************************/
-static void juno_program_mailbox(uint64_t mpidr, uint64_t address)
-{
-	uint64_t linear_id;
-	uint64_t mbox;
-
-	linear_id = platform_get_core_pos(mpidr);
-	mbox = TRUSTED_MAILBOXES_BASE +	(linear_id << TRUSTED_MAILBOX_SHIFT);
-	*((uint64_t *) mbox) = address;
-	flush_dcache_range(mbox, sizeof(mbox));
-}
-
-/*******************************************************************************
- * Private Juno function which is used to determine if any platform actions
- * should be performed for the specified affinity instance given its
- * state. Nothing needs to be done if the 'state' is not off or if this is not
- * the highest affinity level which will enter the 'state'.
- ******************************************************************************/
-static int32_t juno_do_plat_actions(uint32_t afflvl, uint32_t state)
-{
-	uint32_t max_phys_off_afflvl;
-
-	assert(afflvl <= MPIDR_AFFLVL1);
-
-	if (state != PSCI_STATE_OFF)
-		return -EAGAIN;
-
-	/*
-	 * Find the highest affinity level which will be suspended and postpone
-	 * all the platform specific actions until that level is hit.
-	 */
-	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
-	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
-	assert(psci_get_suspend_afflvl() >= max_phys_off_afflvl);
-	if (afflvl != max_phys_off_afflvl)
-		return -EAGAIN;
-
-	return 0;
-}
-
-/*******************************************************************************
- * Juno handler called to check the validity of the power state parameter.
- ******************************************************************************/
-int32_t juno_validate_power_state(unsigned int power_state)
-{
-	/* Sanity check the requested state */
-	if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
-		/*
-		 * It's possible to enter standby only on affinity level 0 i.e.
-		 * a cpu on the Juno. Ignore any other affinity level.
-		 */
-		if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
-			return PSCI_E_INVALID_PARAMS;
-	}
-
-	/*
-	 * We expect the 'state id' to be zero.
-	 */
-	if (psci_get_pstate_id(power_state))
-		return PSCI_E_INVALID_PARAMS;
-
-	return PSCI_E_SUCCESS;
-}
-
-
-/*******************************************************************************
- * Juno handler called when an affinity instance is about to be turned on. The
- * level and mpidr determine the affinity instance.
- ******************************************************************************/
-int32_t juno_affinst_on(uint64_t mpidr,
-			uint64_t sec_entrypoint,
-			uint32_t afflvl,
-			uint32_t state)
-{
-	/*
-	 * SCP takes care of powering up higher affinity levels so we
-	 * only need to care about level 0
-	 */
-	if (afflvl != MPIDR_AFFLVL0)
-		return PSCI_E_SUCCESS;
-
-	/*
-	 * Setup mailbox with address for CPU entrypoint when it next powers up
-	 */
-	juno_program_mailbox(mpidr, sec_entrypoint);
-
-	scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
-				 scpi_power_on);
-
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * Juno handler called when an affinity instance has just been powered on after
- * being turned off earlier. The level and mpidr determine the affinity
- * instance. The 'state' arg. allows the platform to decide whether the cluster
- * was turned off prior to wakeup and do what's necessary to set it up
- * correctly.
- ******************************************************************************/
-void juno_affinst_on_finish(uint32_t afflvl, uint32_t state)
-{
-	unsigned long mpidr;
-
-	/* Determine if any platform actions need to be executed. */
-	if (juno_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/* Get the mpidr for this cpu */
-	mpidr = read_mpidr_el1();
-
-	/*
-	 * Perform the common cluster specific operations i.e enable coherency
-	 * if this cluster was off.
-	 */
-	if (afflvl != MPIDR_AFFLVL0)
-		cci_enable_cluster_coherency(mpidr);
-
-
-	/* Enable the gic cpu interface */
-	arm_gic_cpuif_setup();
-
-	/* Juno todo: Is this setup only needed after a cold boot? */
-	arm_gic_pcpu_distif_setup();
-
-	/* Clear the mailbox for this cpu. */
-	juno_program_mailbox(mpidr, 0);
-}
-
-/*******************************************************************************
- * Common function called while turning a cpu off or suspending it. It is called
- * from juno_off() or juno_suspend() when these functions in turn are called for
- * the highest affinity level which will be powered down. It performs the
- * actions common to the OFF and SUSPEND calls.
- ******************************************************************************/
-static void juno_power_down_common(uint32_t afflvl)
-{
-	uint32_t cluster_state = scpi_power_on;
-
-	/* Prevent interrupts from spuriously waking up this cpu */
-	arm_gic_cpuif_deactivate();
-
-	/* Cluster is to be turned off, so disable coherency */
-	if (afflvl > MPIDR_AFFLVL0) {
-		cci_disable_cluster_coherency(read_mpidr_el1());
-		cluster_state = scpi_power_off;
-	}
-
-	/*
-	 * Ask the SCP to power down the appropriate components depending upon
-	 * their state.
-	 */
-	scpi_set_css_power_state(read_mpidr_el1(),
-				 scpi_power_off,
-				 cluster_state,
-				 scpi_power_on);
-}
-
-/*******************************************************************************
- * Handler called when an affinity instance is about to be turned off. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take
- * appropriate actions.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It is wise to flush a write to the
- * global to prevent unpredictable results.
- ******************************************************************************/
-static void juno_affinst_off(uint32_t afflvl, uint32_t state)
-{
-	/* Determine if any platform actions need to be executed */
-	if (juno_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	juno_power_down_common(afflvl);
-}
-
-/*******************************************************************************
- * Handler called when an affinity instance is about to be suspended. The
- * level and mpidr determine the affinity instance. The 'state' arg. allows the
- * platform to decide whether the cluster is being turned off and take apt
- * actions. The 'sec_entrypoint' determines the address in BL3-1 from where
- * execution should resume.
- *
- * CAUTION: There is no guarantee that caches will remain turned on across calls
- * to this function as each affinity level is dealt with. So do not write & read
- * global variables across calls. It will be wise to do flush a write to the
- * global to prevent unpredictable results.
- ******************************************************************************/
-static void juno_affinst_suspend(uint64_t sec_entrypoint,
-				    uint32_t afflvl,
-				    uint32_t state)
-{
-	/* Determine if any platform actions need to be executed */
-	if (juno_do_plat_actions(afflvl, state) == -EAGAIN)
-		return;
-
-	/*
-	 * Setup mailbox with address for CPU entrypoint when it next powers up.
-	 */
-	juno_program_mailbox(read_mpidr_el1(), sec_entrypoint);
-
-	juno_power_down_common(afflvl);
-}
-
-/*******************************************************************************
- * Juno handler called when an affinity instance has just been powered on after
- * having been suspended earlier. The level and mpidr determine the affinity
- * instance.
- * TODO: At the moment we reuse the on finisher and reinitialize the secure
- * context. Need to implement a separate suspend finisher.
- ******************************************************************************/
-static void juno_affinst_suspend_finish(uint32_t afflvl,
-					   uint32_t state)
-{
-	juno_affinst_on_finish(afflvl, state);
-}
-
-/*******************************************************************************
- * Juno handlers to shutdown/reboot the system
- ******************************************************************************/
-static void __dead2 juno_system_off(void)
-{
-	uint32_t response;
-
-	/* Send the power down request to the SCP */
-	response = scpi_sys_power_state(scpi_system_shutdown);
-
-	if (response != SCP_OK) {
-		ERROR("Juno System Off: SCP error %u.\n", response);
-		panic();
-	}
-	wfi();
-	ERROR("Juno System Off: operation not handled.\n");
-	panic();
-}
-
-static void __dead2 juno_system_reset(void)
-{
-	uint32_t response;
-
-	/* Send the system reset request to the SCP */
-	response = scpi_sys_power_state(scpi_system_reboot);
-
-	if (response != SCP_OK) {
-		ERROR("Juno System Reset: SCP error %u.\n", response);
-		panic();
-	}
-	wfi();
-	ERROR("Juno System Reset: operation not handled.\n");
-	panic();
-}
-
-/*******************************************************************************
- * Handler called when an affinity instance is about to enter standby.
- ******************************************************************************/
-void juno_affinst_standby(unsigned int power_state)
-{
-	unsigned int scr;
-
-	scr = read_scr_el3();
-	/* Enable PhysicalIRQ bit for NS world to wake the CPU */
-	write_scr_el3(scr | SCR_IRQ_BIT);
-	isb();
-	dsb();
-	wfi();
-
-	/*
-	 * Restore SCR to the original value. Synchronisation of scr_el3 is
-	 * done by the eret in el3_exit, which saves some execution cycles.
-	 */
-	write_scr_el3(scr);
-}
-
-/*******************************************************************************
- * Export the platform handlers to enable psci to invoke them
- ******************************************************************************/
-static const plat_pm_ops_t juno_ops = {
-	.affinst_on		= juno_affinst_on,
-	.affinst_on_finish	= juno_affinst_on_finish,
-	.affinst_off		= juno_affinst_off,
-	.affinst_standby	= juno_affinst_standby,
-	.affinst_suspend	= juno_affinst_suspend,
-	.affinst_suspend_finish	= juno_affinst_suspend_finish,
-	.system_off		= juno_system_off,
-	.system_reset		= juno_system_reset,
-	.validate_power_state	= juno_validate_power_state
-};
-
-/*******************************************************************************
- * Export the platform specific power ops.
- ******************************************************************************/
-int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops)
-{
-	*plat_ops = &juno_ops;
-	return 0;
-}
diff --git a/plat/juno/plat_security.c b/plat/juno/plat_security.c
deleted file mode 100644
index 64e493f..0000000
--- a/plat/juno/plat_security.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <debug.h>
-#include <tzc400.h>
-#include "juno_def.h"
-
-/*******************************************************************************
- * Initialize the TrustZone Controller. Configure Region 0 with Secure RW access
- * and allow Non-Secure masters full access
- ******************************************************************************/
-static void init_tzc400(void)
-{
-	tzc_init(TZC400_BASE);
-
-	/* Disable filters. */
-	tzc_disable_filters();
-
-	/* Region 1 set to cover Non-Secure DRAM at 0x8000_0000. Apply the
-	 * same configuration to all filters in the TZC. */
-	tzc_configure_region(REG_ATTR_FILTER_BIT_ALL, 1,
-			DRAM_NS_BASE, DRAM_NS_BASE + DRAM_NS_SIZE - 1,
-			TZC_REGION_S_NONE,
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CCI400)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_PCIE)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD0)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD1)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_USB)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_DMA330)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_THINLINKS)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_AP)		|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_GPU)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CORESIGHT));
-
-	/* Region 2 set to cover Secure DRAM */
-	tzc_configure_region(REG_ATTR_FILTER_BIT_ALL, 2,
-			DRAM_SEC_BASE, DRAM_SEC_BASE + DRAM_SEC_SIZE - 1,
-			TZC_REGION_S_RDWR,
-			0);
-
-	/* Region 3 set to cover DRAM used by SCP for DDR retraining */
-	tzc_configure_region(REG_ATTR_FILTER_BIT_ALL, 3,
-			DRAM_SCP_BASE, DRAM_SCP_BASE + DRAM_SCP_SIZE - 1,
-			TZC_REGION_S_NONE,
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_SCP));
-
-	/* Region 4 set to cover Non-Secure DRAM at 0x8_8000_0000 */
-	tzc_configure_region(REG_ATTR_FILTER_BIT_ALL, 4,
-			DRAM2_BASE, DRAM2_BASE + DRAM2_SIZE - 1,
-			TZC_REGION_S_NONE,
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CCI400)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_PCIE)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD0)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_HDLCD1)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_USB)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_DMA330)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_THINLINKS)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_AP)		|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_GPU)	|
-			TZC_REGION_ACCESS_RDWR(TZC400_NSAID_CORESIGHT));
-
-	/* Raise an exception if a NS device tries to access secure memory */
-	tzc_set_action(TZC_ACTION_ERR);
-
-	/* Enable filters. */
-	tzc_enable_filters();
-}
-
-/*******************************************************************************
- * Initialize the secure environment. At this moment only the TrustZone
- * Controller is initialized.
- ******************************************************************************/
-void plat_security_setup(void)
-{
-	/* Initialize the TrustZone Controller */
-	init_tzc400();
-}
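init_tzc400() programs one TZC-400 region per memory window and applies each one to every filter. If another non-secure window were ever needed it would follow the same pattern; EXAMPLE_NS_BASE and EXAMPLE_NS_SIZE below are hypothetical macros used only to illustrate the driver call, not part of the original platform:

    /* Illustrative only: an additional non-secure window granting AP access. */
    static void example_extra_ns_region(void)
    {
            tzc_configure_region(REG_ATTR_FILTER_BIT_ALL, 5,
                            EXAMPLE_NS_BASE, EXAMPLE_NS_BASE + EXAMPLE_NS_SIZE - 1,
                            TZC_REGION_S_NONE,
                            TZC_REGION_ACCESS_RDWR(TZC400_NSAID_AP));
    }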
diff --git a/plat/juno/plat_topology.c b/plat/juno/plat_topology.c
deleted file mode 100644
index 39d4dab..0000000
--- a/plat/juno/plat_topology.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <platform_def.h>
-#include <psci.h>
-
-unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
-{
-	/* Report 1 (absent) instance at levels higher than the cluster level */
-	if (aff_lvl > MPIDR_AFFLVL1)
-		return 1;
-
-	if (aff_lvl == MPIDR_AFFLVL1)
-		return 2; /* We have two clusters */
-
-	return mpidr & 0x100 ? 4 : 2; /* 4 cpus in cluster 1, 2 in cluster 0 */
-}
-
-unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
-{
-	return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
-}
-
-int plat_get_max_afflvl()
-{
-	return MPIDR_AFFLVL1;
-}
-
-int plat_setup_topology()
-{
-	/* Juno todo: Make topology configurable via SCC */
-	return 0;
-}
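The topology handlers above describe a two-cluster system: a 2-CPU cluster at affinity-1 ID 0 and a 4-CPU cluster at affinity-1 ID 1. A small sketch of what the PSCI framework would see when it queries them; the helper name is illustrative only:

    /* Illustrative only: total CPU count implied by plat_get_aff_count(). */
    static unsigned int example_cpu_count(void)
    {
            unsigned int cluster0 = plat_get_aff_count(MPIDR_AFFLVL0, 0x000); /* 2 CPUs */
            unsigned int cluster1 = plat_get_aff_count(MPIDR_AFFLVL0, 0x100); /* 4 CPUs */

            return cluster0 + cluster1; /* 6 CPUs on Juno */
    }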
diff --git a/plat/juno/platform.mk b/plat/juno/platform.mk
deleted file mode 100644
index 8beaecf..0000000
--- a/plat/juno/platform.mk
+++ /dev/null
@@ -1,111 +0,0 @@
-#
-# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# On Juno, the Secure Payload can be loaded either in Trusted SRAM (default) or
-# Secure DRAM allocated by the TrustZone Controller.
-
-PLAT_TSP_LOCATION	:=	tsram
-
-ifeq (${PLAT_TSP_LOCATION}, tsram)
-  PLAT_TSP_LOCATION_ID := PLAT_TRUSTED_SRAM_ID
-else ifeq (${PLAT_TSP_LOCATION}, dram)
-  PLAT_TSP_LOCATION_ID := PLAT_DRAM_ID
-else
-  $(error "Unsupported PLAT_TSP_LOCATION value")
-endif
-
-# Process flags
-$(eval $(call add_define,PLAT_TSP_LOCATION_ID))
-
-
-PLAT_INCLUDES		:=	-Iplat/juno/include/
-
-PLAT_BL_COMMON_SOURCES	:=	drivers/arm/pl011/pl011_console.S	\
-				drivers/io/io_fip.c			\
-				drivers/io/io_memmap.c			\
-				drivers/io/io_storage.c			\
-				lib/aarch64/xlat_tables.c		\
-				plat/common/aarch64/plat_common.c	\
-				plat/common/plat_gic.c			\
-				plat/juno/plat_io_storage.c
-
-BL1_SOURCES		+=	drivers/arm/cci400/cci400.c		\
-				lib/cpus/aarch64/cortex_a53.S		\
-				lib/cpus/aarch64/cortex_a57.S		\
-				plat/common/aarch64/platform_up_stack.S	\
-				plat/juno/bl1_plat_setup.c		\
-				plat/juno/aarch64/bl1_plat_helpers.S	\
-				plat/juno/aarch64/plat_helpers.S	\
-				plat/juno/aarch64/juno_common.c
-
-BL2_SOURCES		+=	drivers/arm/tzc400/tzc400.c		\
-				plat/common/aarch64/platform_up_stack.S	\
-				plat/juno/bl2_plat_setup.c		\
-				plat/juno/mhu.c				\
-				plat/juno/plat_security.c		\
-				plat/juno/aarch64/plat_helpers.S	\
-				plat/juno/aarch64/juno_common.c		\
-				plat/juno/scp_bootloader.c		\
-				plat/juno/scpi.c
-
-BL31_SOURCES		+=	drivers/arm/cci400/cci400.c		\
-				drivers/arm/gic/arm_gic.c		\
-				drivers/arm/gic/gic_v2.c		\
-				drivers/arm/gic/gic_v3.c		\
-				lib/cpus/aarch64/cortex_a53.S		\
-				lib/cpus/aarch64/cortex_a57.S		\
-				plat/common/aarch64/platform_mp_stack.S	\
-				plat/juno/bl31_plat_setup.c		\
-				plat/juno/mhu.c				\
-				plat/juno/aarch64/plat_helpers.S	\
-				plat/juno/aarch64/juno_common.c		\
-				plat/juno/plat_pm.c			\
-				plat/juno/plat_topology.c		\
-				plat/juno/scpi.c
-
-ifneq (${TRUSTED_BOARD_BOOT},0)
-  BL1_SOURCES		+=	plat/juno/juno_trusted_boot.c
-  BL2_SOURCES		+=	plat/juno/juno_trusted_boot.c
-endif
-
-ifneq (${RESET_TO_BL31},0)
-  $(error "Using BL3-1 as the reset vector is not supported on Juno. \
-  Please set RESET_TO_BL31 to 0.")
-endif
-
-NEED_BL30		:=	yes
-
-# Enable workarounds for selected Cortex-A57 errata.
-ERRATA_A57_806969	:=	1
-ERRATA_A57_813420	:=	1
-
-# Enable option to skip L1 data cache flush during the Cortex-A57 cluster
-# power down sequence
-SKIP_A57_L1_FLUSH_PWR_DWN	:=	 1
diff --git a/plat/juno/scp_bootloader.c b/plat/juno/scp_bootloader.c
deleted file mode 100644
index a6d25d4..0000000
--- a/plat/juno/scp_bootloader.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <platform.h>
-#include "juno_def.h"
-#include "mhu.h"
-#include "scp_bootloader.h"
-#include "scpi.h"
-
-/* Boot commands sent from AP -> SCP */
-#define BOOT_CMD_START	0x01
-#define BOOT_CMD_DATA	0x02
-
-typedef struct {
-	uint32_t image_size;
-} cmd_start_payload;
-
-typedef struct {
-	uint32_t sequence_num;
-	uint32_t offset;
-	uint32_t size;
-} cmd_data_payload;
-
-#define BOOT_DATA_MAX_SIZE  0x1000
-
-/* Boot commands sent from SCP -> AP */
-#define BOOT_CMD_ACK	0x03
-#define BOOT_CMD_NACK	0x04
-
-typedef struct {
-	uint32_t sequence_num;
-} cmd_ack_payload;
-
-/*
- * Unlike the runtime protocol, the boot protocol uses the same memory region
- * for both AP -> SCP and SCP -> AP transfers; define the address of this...
- */
-static void * const cmd_payload = (void *)(MHU_SECURE_BASE + 0x0080);
-
-static void *scp_boot_message_start(void)
-{
-	mhu_secure_message_start();
-
-	return cmd_payload;
-}
-
-static void scp_boot_message_send(unsigned command, size_t size)
-{
-	/* Make sure payload can be seen by SCP */
-	if (MHU_PAYLOAD_CACHED)
-		flush_dcache_range((unsigned long)cmd_payload, size);
-
-	/* Send command to SCP */
-	mhu_secure_message_send(command | (size << 8));
-}
-
-static uint32_t scp_boot_message_wait(size_t size)
-{
-	uint32_t response =  mhu_secure_message_wait();
-
-	/* Make sure we see the reply from the SCP and not any stale data */
-	if (MHU_PAYLOAD_CACHED)
-		inv_dcache_range((unsigned long)cmd_payload, size);
-
-	return response & 0xff;
-}
-
-static void scp_boot_message_end(void)
-{
-	mhu_secure_message_end();
-}
-
-static int transfer_block(uint32_t sequence_num, uint32_t offset, uint32_t size)
-{
-	cmd_data_payload *cmd_data = scp_boot_message_start();
-	cmd_data->sequence_num = sequence_num;
-	cmd_data->offset = offset;
-	cmd_data->size = size;
-
-	scp_boot_message_send(BOOT_CMD_DATA, sizeof(*cmd_data));
-
-	cmd_ack_payload *cmd_ack = cmd_payload;
-	int ok = scp_boot_message_wait(sizeof(*cmd_ack)) == BOOT_CMD_ACK
-		 && cmd_ack->sequence_num == sequence_num;
-
-	scp_boot_message_end();
-
-	return ok;
-}
-
-int scp_bootloader_transfer(void *image, unsigned int image_size)
-{
-	uintptr_t offset = (uintptr_t)image - MHU_SECURE_BASE;
-	uintptr_t end = offset + image_size;
-	uint32_t response;
-
-	mhu_secure_init();
-
-	/* Initiate communications with SCP */
-	do {
-		cmd_start_payload *cmd_start = scp_boot_message_start();
-		cmd_start->image_size = image_size;
-
-		scp_boot_message_send(BOOT_CMD_START, sizeof(*cmd_start));
-
-		response = scp_boot_message_wait(0);
-
-		scp_boot_message_end();
-	} while (response != BOOT_CMD_ACK);
-
-	/* Transfer image to SCP a block at a time */
-	uint32_t sequence_num = 1;
-	size_t size;
-	while ((size = end - offset) != 0) {
-		if (size > BOOT_DATA_MAX_SIZE)
-			size = BOOT_DATA_MAX_SIZE;
-		while (!transfer_block(sequence_num, offset, size))
-			; /* Retry forever */
-		offset += size;
-		sequence_num++;
-	}
-
-	/* Wait for SCP to signal it's ready */
-	return scpi_wait_ready();
-}
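The boot protocol packs the payload size into bits [15:8] of the MHU command word and keeps the command itself in the low byte, which is all scp_boot_message_wait() preserves of the reply. A sketch of that encoding, mirroring the shifts used above; the helper names are illustrative only:

    /* Illustrative only: boot-protocol command word layout (command | size << 8). */
    static inline uint32_t example_boot_cmd_pack(unsigned command, size_t size)
    {
            return (uint32_t)(command | (size << 8));
    }

    static inline unsigned example_boot_cmd_command(uint32_t word)
    {
            return word & 0xff; /* the byte scp_boot_message_wait() returns */
    }

Image data itself is streamed in BOOT_DATA_MAX_SIZE (4 KiB) chunks, each retried until the SCP acknowledges the matching sequence number.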
diff --git a/plat/juno/scp_bootloader.h b/plat/juno/scp_bootloader.h
deleted file mode 100644
index e872513..0000000
--- a/plat/juno/scp_bootloader.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SCP_BOOTLOADER_H__
-#define __SCP_BOOTLOADER_H__
-
-int scp_bootloader_transfer(void *image, unsigned int image_size);
-
-#endif
diff --git a/plat/juno/scpi.c b/plat/juno/scpi.c
deleted file mode 100644
index 950c00b..0000000
--- a/plat/juno/scpi.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch_helpers.h>
-#include <platform.h>
-#include "juno_def.h"
-#include "mhu.h"
-#include "scpi.h"
-
-#define MHU_SECURE_SCP_TO_AP_PAYLOAD	(MHU_SECURE_BASE+0x0080)
-#define MHU_SECURE_AP_TO_SCP_PAYLOAD	(MHU_SECURE_BASE+0x0280)
-
-#define SIZE_SHIFT	20	/* Bit position for size value in MHU header */
-#define SIZE_MASK	0x1ff	/* Mask to extract size value in MHU header */
-
-
-void *scpi_secure_message_start(void)
-{
-	mhu_secure_message_start();
-
-	/* Return address of payload area. */
-	return (void *)MHU_SECURE_AP_TO_SCP_PAYLOAD;
-}
-
-void scpi_secure_message_send(unsigned command, size_t size)
-{
-	/* Make sure payload can be seen by SCP */
-	if (MHU_PAYLOAD_CACHED)
-		flush_dcache_range(MHU_SECURE_AP_TO_SCP_PAYLOAD, size);
-
-	mhu_secure_message_send(command | (size << SIZE_SHIFT));
-}
-
-unsigned scpi_secure_message_receive(void **message_out, size_t *size_out)
-{
-	uint32_t response =  mhu_secure_message_wait();
-
-	/* Get size of payload */
-	size_t size = (response >> SIZE_SHIFT) & SIZE_MASK;
-
-	/* Clear size from response */
-	response &= ~(SIZE_MASK << SIZE_SHIFT);
-
-	/* Make sure we don't read stale data */
-	if (MHU_PAYLOAD_CACHED)
-		inv_dcache_range(MHU_SECURE_SCP_TO_AP_PAYLOAD, size);
-
-	if (size_out)
-		*size_out = size;
-
-	if (message_out)
-		*message_out = (void *)MHU_SECURE_SCP_TO_AP_PAYLOAD;
-
-	return response;
-}
-
-void scpi_secure_message_end(void)
-{
-	mhu_secure_message_end();
-}
-
-static void scpi_secure_send32(unsigned command, uint32_t message)
-{
-	*(__typeof__(message) *)scpi_secure_message_start() = message;
-	scpi_secure_message_send(command, sizeof(message));
-	scpi_secure_message_end();
-}
-
-int scpi_wait_ready(void)
-{
-	/* Get a message from the SCP */
-	scpi_secure_message_start();
-	size_t size;
-	unsigned command = scpi_secure_message_receive(NULL, &size);
-	scpi_secure_message_end();
-
-	/* We are expecting 'SCP Ready'; produce the correct error if it's not */
-	scpi_status_t response = SCP_OK;
-	if (command != SCPI_CMD_SCP_READY)
-		response = SCP_E_SUPPORT;
-	else if (size != 0)
-		response = SCP_E_SIZE;
-
-	/* Send our response back to SCP */
-	scpi_secure_send32(command, response);
-
-	return response == SCP_OK ? 0 : -1;
-}
-
-void scpi_set_css_power_state(unsigned mpidr, scpi_power_state_t cpu_state,
-		scpi_power_state_t cluster_state, scpi_power_state_t css_state)
-{
-	uint32_t state = mpidr & 0x0f;	/* CPU ID */
-	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
-	state |= cpu_state << 8;
-	state |= cluster_state << 12;
-	state |= css_state << 16;
-	scpi_secure_send32(SCPI_CMD_SET_CSS_POWER_STATE, state);
-}
-
-uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
-{
-	uint32_t *response;
-	size_t size;
-	uint8_t state = system_state & 0xff;
-
-	/* Send the command */
-	*(__typeof__(state) *)scpi_secure_message_start() = state;
-	scpi_secure_message_send(SCPI_CMD_SYS_POWER_STATE, sizeof(state));
-	scpi_secure_message_receive((void *)&response, &size);
-	scpi_secure_message_end();
-	return *response;
-}
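The runtime SCPI header differs from the boot protocol: the payload size lives at bits [28:20] (SIZE_SHIFT/SIZE_MASK), and scpi_set_css_power_state() packs the CPU ID, the cluster ID and the three requested power states into one word. A sketch of both layouts, mirroring the code above; the helper names are illustrative and the exact field widths beyond the shifts shown are assumptions:

    /* Illustrative only: runtime SCPI header, command in the low bits, size at [28:20]. */
    static inline uint32_t example_scpi_header(unsigned command, size_t size)
    {
            return (uint32_t)(command | (size << SIZE_SHIFT));
    }

    /* Illustrative only: CSS power state word with CPU ID at [3:0], cluster ID at
     * [7:4], and the CPU, cluster and CSS states at bit offsets 8, 12 and 16. */
    static inline uint32_t example_css_power_state(unsigned mpidr,
                                                   scpi_power_state_t cpu_state,
                                                   scpi_power_state_t cluster_state,
                                                   scpi_power_state_t css_state)
    {
            uint32_t state = mpidr & 0x0f;

            state |= (mpidr & 0xf00) >> 4;
            state |= cpu_state << 8;
            state |= cluster_state << 12;
            state |= css_state << 16;
            return state;
    }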
diff --git a/plat/juno/scpi.h b/plat/juno/scpi.h
deleted file mode 100644
index 8a5ef65..0000000
--- a/plat/juno/scpi.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SCPI_H__
-#define __SCPI_H__
-
-#include <stddef.h>
-#include <stdint.h>
-
-extern void *scpi_secure_message_start(void);
-extern void scpi_secure_message_send(unsigned command, size_t size);
-extern unsigned scpi_secure_message_receive(void **message_out, size_t *size_out);
-extern void scpi_secure_message_end(void);
-
-
-enum {
-	SCP_OK = 0,	/* Success */
-	SCP_E_PARAM,	/* Invalid parameter(s) */
-	SCP_E_ALIGN,	/* Invalid alignment */
-	SCP_E_SIZE,	/* Invalid size */
-	SCP_E_HANDLER,	/* Invalid handler or callback */
-	SCP_E_ACCESS,	/* Invalid access or permission denied */
-	SCP_E_RANGE,	/* Value out of range */
-	SCP_E_TIMEOUT,	/* Timeout has occurred */
-	SCP_E_NOMEM,	/* Invalid memory area or pointer */
-	SCP_E_PWRSTATE,	/* Invalid power state */
-	SCP_E_SUPPORT,	/* Feature not supported or disabled */
-};
-
-typedef uint32_t scpi_status_t;
-
-typedef enum {
-	SCPI_CMD_SCP_READY = 0x01,
-	SCPI_CMD_SET_CSS_POWER_STATE = 0x04,
-	SCPI_CMD_SYS_POWER_STATE = 0x08
-} scpi_command_t;
-
-typedef enum {
-	scpi_power_on = 0,
-	scpi_power_retention = 1,
-	scpi_power_off = 3,
-} scpi_power_state_t;
-
-typedef enum {
-	scpi_system_shutdown = 0,
-	scpi_system_reboot = 1,
-	scpi_system_reset = 2
-} scpi_system_state_t;
-
-extern int scpi_wait_ready(void);
-extern void scpi_set_css_power_state(unsigned mpidr, scpi_power_state_t cpu_state,
-		scpi_power_state_t cluster_state, scpi_power_state_t css_state);
-uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
-
-#endif	/* __SCPI_H__ */
diff --git a/plat/juno/tsp/tsp-juno.mk b/plat/juno/tsp/tsp-juno.mk
deleted file mode 100644
index 4d56ea2..0000000
--- a/plat/juno/tsp/tsp-juno.mk
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-# TSP source files specific to Juno platform
-BL32_SOURCES		+=	drivers/arm/gic/arm_gic.c		\
-				drivers/arm/gic/gic_v2.c		\
-				plat/common/aarch64/platform_mp_stack.S	\
-				plat/juno/aarch64/juno_common.c		\
-				plat/juno/aarch64/plat_helpers.S	\
-				plat/juno/tsp/tsp_plat_setup.c
diff --git a/plat/juno/tsp/tsp_plat_setup.c b/plat/juno/tsp/tsp_plat_setup.c
deleted file mode 100644
index 8293a13..0000000
--- a/plat/juno/tsp/tsp_plat_setup.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <bl_common.h>
-#include <console.h>
-#include <platform_tsp.h>
-#include "../juno_def.h"
-#include "../juno_private.h"
-
-/*******************************************************************************
- * Declarations of linker defined symbols which will help us find the layout
- * of trusted SRAM
- ******************************************************************************/
-extern unsigned long __RO_START__;
-extern unsigned long __RO_END__;
-extern unsigned long __BL32_END__;
-
-#if USE_COHERENT_MEM
-extern unsigned long __COHERENT_RAM_START__;
-extern unsigned long __COHERENT_RAM_END__;
-#endif
-
-/*
- * The next 3 constants identify the extents of the code, RO data region and the
- * limit of the BL3-2 image.  These addresses are used by the MMU setup code and
- * therefore they must be page-aligned.  It is the responsibility of the linker
- * script to ensure that __RO_START__, __RO_END__ & __BL32_END__ linker symbols
- * refer to page-aligned addresses.
- */
-#define BL32_RO_BASE (unsigned long)(&__RO_START__)
-#define BL32_RO_LIMIT (unsigned long)(&__RO_END__)
-#define BL32_END (unsigned long)(&__BL32_END__)
-
-#if USE_COHERENT_MEM
-/*
- * The next 2 constants identify the extents of the coherent memory region.
- * These addresses are used by the MMU setup code and therefore they must be
- * page-aligned.  It is the responsibility of the linker script to ensure that
- * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
- * page-aligned addresses.
- */
-#define BL32_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
-#define BL32_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
-#endif
-
-/*******************************************************************************
- * Initialize the UART
- ******************************************************************************/
-void tsp_early_platform_setup(void)
-{
-	/*
-	 * Initialize a different console than already in use to display
-	 * messages from TSP
-	 */
-	console_init(PL011_UART0_BASE, PL011_UART0_CLK_IN_HZ, PL011_BAUDRATE);
-}
-
-/*******************************************************************************
- * Perform platform specific setup placeholder
- ******************************************************************************/
-void tsp_platform_setup(void)
-{
-	plat_gic_init();
-}
-
-/*******************************************************************************
- * Perform the very early platform specific architectural setup here. At the
- * moment this only intializes the MMU
- ******************************************************************************/
-void tsp_plat_arch_setup(void)
-{
-	configure_mmu_el1(BL32_RO_BASE,
-			  (BL32_END - BL32_RO_BASE),
-			  BL32_RO_BASE,
-			  BL32_RO_LIMIT
-#if USE_COHERENT_MEM
-			  , BL32_COHERENT_RAM_BASE,
-			  BL32_COHERENT_RAM_LIMIT
-#endif
-			  );
-}
diff --git a/plat/mediatek/common/custom/oem_svc.c b/plat/mediatek/common/custom/oem_svc.c
new file mode 100644
index 0000000..08baed8
--- /dev/null
+++ b/plat/mediatek/common/custom/oem_svc.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <oem_svc.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stdint.h>
+#include <uuid.h>
+
+/* OEM Service UUID */
+DEFINE_SVC_UUID(oem_svc_uid,
+		0xb943add0, 0x069d, 0x11e4, 0x91, 0x91,
+		0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66);
+
+
+/* Setup OEM Services */
+static int32_t oem_svc_setup(void)
+{
+	/*
+	 * Invoke related module setup from here
+	 */
+
+	return 0;
+}
+
+/*******************************************************************************
+ * OEM top level handler for servicing SMCs.
+ ******************************************************************************/
+uint64_t oem_smc_handler(uint32_t smc_fid,
+			uint64_t x1,
+			uint64_t x2,
+			uint64_t x3,
+			uint64_t x4,
+			void *cookie,
+			void *handle,
+			uint64_t flags)
+{
+	uint64_t rc;
+
+	switch (smc_fid) {
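+	/*
+	 * Platform OEM function IDs are meant to be added as cases here;
+	 * anything unrecognised falls through to SMC_UNK below.
+	 */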
+	default:
+		rc = SMC_UNK;
+		WARN("Unimplemented OEM Call: 0x%x\n", smc_fid);
+	}
+
+	SMC_RET1(handle, rc);
+}
+
+/*
+ * Top-level OEM Service SMC handler. This handler dispatches
+ * incoming calls to the relevant OEM SMC handler.
+ */
+uint64_t oem_svc_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	/*
+	 * Dispatch OEM calls to OEM Common handler and return its return value
+	 */
+	if (is_oem_fid(smc_fid)) {
+		return oem_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+					handle, flags);
+	}
+
+	switch (smc_fid) {
+	case OEM_SVC_CALL_COUNT:
+		/*
+		 * Return the number of OEM Service Calls.
+		 */
+		SMC_RET1(handle, OEM_SVC_NUM_CALLS);
+
+	case OEM_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, oem_svc_uid);
+
+	case OEM_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, OEM_VERSION_MAJOR, OEM_VERSION_MINOR);
+
+	default:
+		WARN("Unimplemented OEM Service Call: 0x%x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
+
+/* Register OEM Service Calls as runtime service */
+DECLARE_RT_SVC(
+		oem_svc,
+		OEN_OEM_START,
+		OEN_OEM_END,
+		SMC_TYPE_FAST,
+		oem_svc_setup,
+		oem_svc_smc_handler
+);
diff --git a/plat/mediatek/common/custom/oem_svc.h b/plat/mediatek/common/custom/oem_svc.h
new file mode 100644
index 0000000..636bb87
--- /dev/null
+++ b/plat/mediatek/common/custom/oem_svc.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __OEM_SVC_H__
+#define __OEM_SVC_H__
+
+/*******************************************************************************
+ * Defines for runtime services func ids
+ ******************************************************************************/
+/*
+ * Number of OEM calls (above) implemented.
+ */
+#define OEM_SVC_NUM_CALLS		3
+
+/*******************************************************************************
+ * Defines for OEM Service queries
+ ******************************************************************************/
+/* 0x83000000 - 0x8300FEFF are OEM service calls */
+#define OEM_SVC_CALL_COUNT		0x8300ff00
+#define OEM_SVC_UID			0x8300ff01
+/* 0x8300ff02 is reserved */
+#define OEM_SVC_VERSION			0x8300ff03
+/* 0x8300ff04 - 0x8300FFFF is reserved for future expansion */
+
+/* OEM Service Calls version numbers */
+#define OEM_VERSION_MAJOR		0x0
+#define OEM_VERSION_MINOR		0x1
+
+/* The macros below are used to identify OEM calls from the SMC function ID */
+/* SMC32 ID range from 0x83000000 to 0x83000FFF */
+/* SMC64 ID range from 0xC3000000 to 0xC3000FFF */
+#define OEM_FID_MASK			0xf000u
+#define OEM_FID_VALUE			0u
+#define is_oem_fid(_fid) \
+	(((_fid) & OEM_FID_MASK) == OEM_FID_VALUE)
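+
+/*
+ * Worked example: is_oem_fid(0x83000001) is true because bits [15:12] of the
+ * ID are zero, while the query IDs 0x8300ff00 - 0x8300ff03 have those bits
+ * set and are therefore handled by the OEM_SVC_* switch in oem_svc.c instead.
+ */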
+
+#define OEM_SVC_E_SUCCESS		0
+#define OEM_SVC_E_NOT_SUPPORTED		-1
+#define OEM_SVC_E_INVALID_PARAMS	-2
+
+#endif /* __OEM_SVC_H__ */
diff --git a/plat/mediatek/common/drivers/uart/8250_console.S b/plat/mediatek/common/drivers/uart/8250_console.S
new file mode 100644
index 0000000..94a6c02
--- /dev/null
+++ b/plat/mediatek/common/drivers/uart/8250_console.S
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <asm_macros.S>
+#include <uart8250.h>
+
+	.globl	console_core_init
+	.globl	console_core_putc
+	.globl	console_core_getc
+	.globl	console_core_flush
+
+	/* -----------------------------------------------
+	 * int console_core_init(unsigned long base_addr,
+	 * unsigned int uart_clk, unsigned int baud_rate)
+	 * Function to initialize the console without a
+	 * C Runtime to print debug information. This
+	 * function will be accessed by console_init and
+	 * crash reporting.
+	 * In: x0 - console base address
+	 *     w1 - Uart clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success else 0 on error
+	 * Clobber list : x1, x2, x3
+	 * -----------------------------------------------
+	 */
+func console_core_init
+	/* Check the input base address */
+	cbz	x0, core_init_fail
+	/* Check baud rate and uart clock for sanity */
+	cbz	w1, core_init_fail
+	cbz	w2, core_init_fail
+
+	/* Disable interrupt */
+	str	wzr, [x0, #UART_IER]
+
+	/* Force DTR and RTS to high */
+	mov	w3, #(UART_MCR_DTR | UART_MCR_RTS)
+	str	w3, [x0, #UART_MCR]
+
+	/* Check high speed */
+	movz	w3, #:abs_g1:115200
+	movk	w3, #:abs_g0_nc:115200
+	cmp	w2, w3
+	b.hi	1f
+
+	/* Non high speed */
+	lsl	w2, w2, #4
+	mov	w3, wzr
+	b	2f
+
+	/* High speed */
+1:	lsl	w2, w2, #2
+	mov	w3, #2
+
+	/* Set high speed UART register */
+2:	str	w3, [x0, #UART_HIGHSPEED]
+
+	/* Calculate divisor */
+	udiv	w3, w1, w2	/* divisor = uartclk / (quot * baudrate) */
+	msub	w1, w3, w2, w1	/* remainder = uartclk % (quot * baudrate) */
+	lsr	w2, w2, #1
+	cmp	w1, w2
+	cinc	w3, w3, hs
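+	/*
+	 * Worked example (illustrative clock value): with uart_clk = 26 MHz
+	 * and baud_rate = 115200, w2 = 16 * 115200 = 1843200, so the rounded
+	 * divisor is 26000000 / 1843200 ~= 14 (remainder 195200 is below
+	 * 921600, so no increment).
+	 */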
+
+	/* Set line configuration, access divisor latches */
+	mov	w1, #(UART_LCR_DLAB | UART_LCR_WLS_8)
+	str	w1, [x0, #UART_LCR]
+
+	/* Set the divisor */
+	and	w1, w3, #0xff
+	str	w1, [x0, #UART_DLL]
+	lsr	w1, w3, #8
+	and	w1, w1, #0xff
+	str	w1, [x0, #UART_DLH]
+
+	/* Hide the divisor latches */
+	mov	w1, #UART_LCR_WLS_8
+	str	w1, [x0, #UART_LCR]
+
+	/* Enable FIFOs, and clear receive and transmit */
+	mov	w1, #(UART_FCR_FIFO_EN | UART_FCR_CLEAR_RCVR |	\
+			UART_FCR_CLEAR_XMIT)
+	str	w1, [x0, #UART_FCR]
+
+	mov	w0, #1
+	ret
+core_init_fail:
+	mov	w0, wzr
+	ret
+endfunc console_core_init
+
+	/* --------------------------------------------------------
+	 * int console_core_putc(int c, unsigned long base_addr)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * In : w0 - character to be printed
+	 *      x1 - console base address
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x2
+	 * --------------------------------------------------------
+	 */
+func console_core_putc
+	/* Check the input parameter */
+	cbz	x1, putc_error
+	/* Prepend '\r' to '\n' */
+	cmp	w0, #0xA
+	b.ne	2f
+
+	/* Check if the transmit FIFO is full */
+1:	ldr	w2, [x1, #UART_LSR]
+	and	w2, w2, #UART_LSR_THRE
+	cbz	w2, 1b
+	mov	w2, #0xD
+	str	w2, [x1, #UART_THR]
+
+	/* Check if the transmit FIFO is full */
+2:	ldr	w2, [x1, #UART_LSR]
+	and	w2, w2, #UART_LSR_THRE
+	cbz	w2, 2b
+	str	w0, [x1, #UART_THR]
+	ret
+putc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_putc
+
+	/* ---------------------------------------------
+	 * int console_core_getc(unsigned long base_addr)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 on error.
+	 * In : x0 - console base address
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_getc
+	cbz	x0, getc_error
+
+	/* Check if the receive FIFO is empty */
+1:	ldr	w1, [x0, #UART_LSR]
+	tbz	w1, #UART_LSR_DR, 1b
+	ldr	w0, [x0, #UART_RBR]
+	ret
+getc_error:
+	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+	/* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * In : x0 - console base address
+	 * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+	 * ---------------------------------------------
+	 */
+func console_core_flush
+	/* Placeholder */
+	mov	w0, #0
+	ret
+endfunc console_core_flush
diff --git a/plat/mediatek/common/drivers/uart/uart8250.h b/plat/mediatek/common/drivers/uart/uart8250.h
new file mode 100644
index 0000000..8204d3f
--- /dev/null
+++ b/plat/mediatek/common/drivers/uart/uart8250.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __UART8250_H__
+#define __UART8250_H__
+
+/* UART register */
+#define UART_RBR		0x00	/* Receive buffer register */
+#define UART_DLL		0x00	/* Divisor latch lsb */
+#define UART_THR		0x00	/* Transmit holding register */
+#define UART_DLH		0x04	/* Divisor latch msb */
+#define UART_IER		0x04	/* Interrupt enable register */
+#define UART_FCR		0x08	/* FIFO control register */
+#define UART_LCR		0x0c	/* Line control register */
+#define UART_MCR		0x10	/* Modem control register */
+#define UART_LSR		0x14	/* Line status register */
+#define UART_HIGHSPEED		0x24	/* High speed UART */
+
+/* FCR */
+#define UART_FCR_FIFO_EN	0x01	/* enable FIFO */
+#define UART_FCR_CLEAR_RCVR	0x02	/* clear the RCVR FIFO */
+#define UART_FCR_CLEAR_XMIT	0x04	/* clear the XMIT FIFO */
+
+/* LCR */
+#define UART_LCR_WLS_8		0x03	/* 8 bit character length */
+#define UART_LCR_DLAB		0x80	/* divisor latch access bit */
+
+/* MCR */
+#define UART_MCR_DTR		0x01
+#define UART_MCR_RTS		0x02
+
+/* LSR */
+#define UART_LSR_DR		0x01	/* Data ready */
+#define UART_LSR_THRE		0x20	/* Xmit holding register empty */
+
+#endif	/* __UART8250_H__ */
diff --git a/plat/mediatek/common/mtk_plat_common.c b/plat/mediatek/common/mtk_plat_common.c
new file mode 100644
index 0000000..6a13192
--- /dev/null
+++ b/plat/mediatek/common/mtk_plat_common.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mtk_plat_common.h>
+#include <mtk_sip_svc.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <xlat_tables.h>
+
+struct atf_arg_t gteearg;
+
+void clean_top_32b_of_param(uint32_t smc_fid,
+				uint64_t *px1,
+				uint64_t *px2,
+				uint64_t *px3,
+				uint64_t *px4)
+{
+	/* If the parameters come from an SMC32 call, clean their top 32 bits */
+	if (0 == (smc_fid & SMC_AARCH64_BIT)) {
+		*px1 = *px1 & SMC32_PARAM_MASK;
+		*px2 = *px2 & SMC32_PARAM_MASK;
+		*px3 = *px3 & SMC32_PARAM_MASK;
+		*px4 = *px4 & SMC32_PARAM_MASK;
+	}
+}
+
+#if MTK_SIP_KERNEL_BOOT_ENABLE
+static struct kernel_info k_info;
+
+static void save_kernel_info(uint64_t pc,
+			uint64_t r0,
+			uint64_t r1,
+			uint64_t k32_64)
+{
+	k_info.k32_64 = k32_64;
+	k_info.pc = pc;
+
+	if (LINUX_KERNEL_32 ==  k32_64) {
+		/* for 32 bits kernel */
+		k_info.r0 = 0;
+		/* machtype */
+		k_info.r1 = r0;
+		/* tags */
+		k_info.r2 = r1;
+	} else {
+		/* for 64 bits kernel */
+		k_info.r0 = r0;
+		k_info.r1 = r1;
+	}
+}
+
+uint64_t get_kernel_info_pc(void)
+{
+	return k_info.pc;
+}
+
+uint64_t get_kernel_info_r0(void)
+{
+	return k_info.r0;
+}
+
+uint64_t get_kernel_info_r1(void)
+{
+	return k_info.r1;
+}
+
+uint64_t get_kernel_info_r2(void)
+{
+	return k_info.r2;
+}
+
+void boot_to_kernel(uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
+{
+	static uint8_t kernel_boot_once_flag;
+	/* only support in booting flow */
+	if (0 == kernel_boot_once_flag) {
+		kernel_boot_once_flag = 1;
+
+		console_init(gteearg.atf_log_port,
+			UART_CLOCK, UART_BAUDRATE);
+		INFO("save kernel info\n");
+		save_kernel_info(x1, x2, x3, x4);
+		bl31_prepare_kernel_entry(x4);
+		INFO("el3_exit\n");
+		console_uninit();
+	}
+}
+#endif
+
+uint32_t plat_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+	unsigned int ee;
+	unsigned long daif;
+
+	INFO("Secondary bootloader is AArch32\n");
+	mode = MODE32_svc;
+	ee = 0;
+	/*
+	 * TODO: Choose async. exception bits if HYP mode is not
+	 * implemented according to the values of SCR.{AW, FW} bits
+	 */
+	daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+
+	spsr = SPSR_MODE32(mode, 0, ee, daif);
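+	/*
+	 * Resulting SPSR: AArch32 SVC mode, ARM (not Thumb) state,
+	 * little-endian, with abort, IRQ and FIQ masked at entry.
+	 */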
+	return spsr;
+}
diff --git a/plat/mediatek/common/mtk_plat_common.h b/plat/mediatek/common/mtk_plat_common.h
new file mode 100644
index 0000000..7513bc7
--- /dev/null
+++ b/plat/mediatek/common/mtk_plat_common.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __MTK_PLAT_COMMON_H__
+#define __MTK_PLAT_COMMON_H__
+#include <stdint.h>
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+#define DEVINFO_SIZE 4
+#define LINUX_KERNEL_32 0
+#define SMC32_PARAM_MASK		(0xFFFFFFFF)
+
+struct atf_arg_t {
+	unsigned int atf_magic;
+	unsigned int tee_support;
+	unsigned int tee_entry;
+	unsigned int tee_boot_arg_addr;
+	unsigned int hwuid[4];     /* HW Unique id for t-base used */
+	unsigned int HRID[2];      /* HW random id for t-base used */
+	unsigned int atf_log_port;
+	unsigned int atf_log_baudrate;
+	unsigned int atf_log_buf_start;
+	unsigned int atf_log_buf_size;
+	unsigned int atf_irq_num;
+	unsigned int devinfo[DEVINFO_SIZE];
+	unsigned int atf_aee_debug_buf_start;
+	unsigned int atf_aee_debug_buf_size;
+};
+
+struct kernel_info {
+	uint64_t pc;
+	uint64_t r0;
+	uint64_t r1;
+	uint64_t r2;
+	uint64_t k32_64;
+};
+
+struct mtk_bl_param_t {
+	uint64_t bootarg_loc;
+	uint64_t bootarg_size;
+	uint64_t bl33_start_addr;
+	uint64_t tee_info_addr;
+};
+
+/* Declarations for mtk_plat_common.c */
+uint32_t plat_get_spsr_for_bl32_entry(void);
+uint32_t plat_get_spsr_for_bl33_entry(void);
+void clean_top_32b_of_param(uint32_t smc_fid, uint64_t *x1,
+				uint64_t *x2,
+				uint64_t *x3,
+				uint64_t *x4);
+void bl31_prepare_kernel_entry(uint64_t k32_64);
+void enable_ns_access_to_cpuectlr(void);
+void boot_to_kernel(uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4);
+uint64_t get_kernel_info_pc(void);
+uint64_t get_kernel_info_r0(void);
+uint64_t get_kernel_info_r1(void);
+uint64_t get_kernel_info_r2(void);
+
+extern struct atf_arg_t gteearg;
+#endif
diff --git a/plat/mediatek/common/mtk_sip_svc.c b/plat/mediatek/common/mtk_sip_svc.c
new file mode 100644
index 0000000..beb2a69
--- /dev/null
+++ b/plat/mediatek/common/mtk_sip_svc.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mtk_plat_common.h>
+#include <mtk_sip_svc.h>
+#include <plat_sip_calls.h>
+#include <runtime_svc.h>
+#include <uuid.h>
+
+/* Mediatek SiP Service UUID */
+DEFINE_SVC_UUID(mtk_sip_svc_uid,
+		0xf7582ba4, 0x4262, 0x4d7d, 0x80, 0xe5,
+		0x8f, 0x95, 0x05, 0x00, 0x0f, 0x3d);
+
+#pragma weak mediatek_plat_sip_handler
+uint64_t mediatek_plat_sip_handler(uint32_t smc_fid,
+				uint64_t x1,
+				uint64_t x2,
+				uint64_t x3,
+				uint64_t x4,
+				void *cookie,
+				void *handle,
+				uint64_t flags)
+{
+	ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
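+
+/*
+ * Note: the handler above is only a weak default (see the #pragma weak); a
+ * SoC port is expected to provide its own mediatek_plat_sip_handler for its
+ * platform-specific SiP function IDs.
+ */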
+
+/*
+ * This function handles Mediatek-defined SiP calls.
+ */
+uint64_t mediatek_sip_handler(uint32_t smc_fid,
+			uint64_t x1,
+			uint64_t x2,
+			uint64_t x3,
+			uint64_t x4,
+			void *cookie,
+			void *handle,
+			uint64_t flags)
+{
+	uint32_t ns;
+
+	/* If the parameters were sent from an SMC32 call, clean their top 32 bits */
+	clean_top_32b_of_param(smc_fid, &x1, &x2, &x3, &x4);
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+	if (!ns) {
+		/* SiP SMC service call from the secure world */
+		;
+	} else {
+		/* SiP SMC service call from the normal world */
+		switch (smc_fid) {
+#if MTK_SIP_SET_AUTHORIZED_SECURE_REG_ENABLE
+		case MTK_SIP_SET_AUTHORIZED_SECURE_REG: {
+			/* only use ret here */
+			uint64_t ret;
+
+			ret = mt_sip_set_authorized_sreg((uint32_t)x1,
+				(uint32_t)x2);
+			SMC_RET1(handle, ret);
+		}
+#endif
+#if MTK_SIP_KERNEL_BOOT_ENABLE
+		case MTK_SIP_KERNEL_BOOT_AARCH32:
+			boot_to_kernel(x1, x2, x3, x4);
+			SMC_RET0(handle);
+#endif
+		}
+	}
+
+	return mediatek_plat_sip_handler(smc_fid, x1, x2, x3, x4,
+					cookie, handle, flags);
+
+}
+
+/*
+ * This function is responsible for handling all SiP calls from the NS world
+ */
+uint64_t sip_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	switch (smc_fid) {
+	case SIP_SVC_CALL_COUNT:
+		/* Return the number of Mediatek SiP Service Calls. */
+		SMC_RET1(handle,
+			 MTK_COMMON_SIP_NUM_CALLS + MTK_PLAT_SIP_NUM_CALLS);
+
+	case SIP_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, mtk_sip_svc_uid);
+
+	case SIP_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, MTK_SIP_SVC_VERSION_MAJOR,
+			MTK_SIP_SVC_VERSION_MINOR);
+
+	default:
+		return mediatek_sip_handler(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	}
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	mediatek_sip_svc,
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	NULL,
+	sip_smc_handler
+);
diff --git a/plat/mediatek/common/mtk_sip_svc.h b/plat/mediatek/common/mtk_sip_svc.h
new file mode 100644
index 0000000..1572ba7
--- /dev/null
+++ b/plat/mediatek/common/mtk_sip_svc.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_SIP_SVC_H__
+#define __PLAT_SIP_SVC_H__
+
+#include <stdint.h>
+
+/* SMC function IDs for SiP Service queries */
+#define SIP_SVC_CALL_COUNT		0x8200ff00
+#define SIP_SVC_UID			0x8200ff01
+/*					0x8200ff02 is reserved */
+#define SIP_SVC_VERSION			0x8200ff03
+
+/* Mediatek SiP Service Calls version numbers */
+#define MTK_SIP_SVC_VERSION_MAJOR	0x0
+#define MTK_SIP_SVC_VERSION_MINOR	0x1
+
+#define SMC_AARCH64_BIT		0x40000000
+
+/* Number of Mediatek SiP Calls implemented */
+#define MTK_COMMON_SIP_NUM_CALLS	4
+
+/* Mediatek SiP Service Calls function IDs */
+#define MTK_SIP_SET_AUTHORIZED_SECURE_REG	0x82000001
+
+/* For MTK SMC from Secure OS */
+/* 0x82000000 - 0x820000FF & 0xC2000000 - 0xC20000FF */
+#define MTK_SIP_KERNEL_BOOT_AARCH32		0x82000200
+#define MTK_SIP_KERNEL_BOOT_AARCH64		0xC2000200
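+/*
+ * The two kernel-boot IDs above differ only in SMC_AARCH64_BIT (bit 30),
+ * i.e. 0xC2000200 is the SMC64 variant of the same call as 0x82000200.
+ */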
+
+/* Mediatek SiP Calls error code */
+enum {
+	MTK_SIP_E_SUCCESS = 0,
+	MTK_SIP_E_INVALID_PARAM = -1,
+	MTK_SIP_E_NOT_SUPPORTED = -2,
+	MTK_SIP_E_INVALID_RANGE = -3,
+	MTK_SIP_E_PERMISSION_DENY = -4,
+	MTK_SIP_E_LOCK_FAIL = -5
+};
+
+/*
+ * This function should be implemented in the Mediatek SoC directory. It
+ * fulfils the MTK_SIP_SET_AUTHORIZED_SECURE_REG SiP call by checking sreg
+ * against the predefined secure register list; if a match is found, val is
+ * written to sreg.
+ *
+ * Return MTK_SIP_E_SUCCESS on success, and MTK_SIP_E_INVALID_PARAM on failure.
+ */
+uint64_t mt_sip_set_authorized_sreg(uint32_t sreg, uint32_t val);
+
+#endif /* __PLAT_SIP_SVC_H__ */
diff --git a/plat/mediatek/mt6795/aarch64/plat_helpers.S b/plat/mediatek/mt6795/aarch64/plat_helpers.S
new file mode 100644
index 0000000..8af4f62
--- /dev/null
+++ b/plat/mediatek/mt6795/aarch64/plat_helpers.S
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_report_exception
+	.globl	platform_is_primary_cpu
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	platform_mem_init
+
+
+	.macro crash_ram_log
+	/*
+	 * Check ptr_atf_crash_flag.
+	 * Exit if ptr_atf_crash_flag is NULL.
+	 */
+	adr	x2, ptr_atf_crash_flag
+	ldr	x2, [x2]
+	/* exit if ptr_atf_crash_flag equals NULL */
+	cbz x2, exit_putc
+
+	/*
+	 * set atf crash magic number
+	 */
+1:
+	adr	x2, ptr_atf_crash_flag
+	ldr	x2, [x2]
+	mov_imm x1, 0xdead1abf
+	/* p_atf_log_ctrl->atf_crash_flag = 0xdead1abf */
+	str	w1, [x2]
+	/* can't use w3 return addr, w4, start of buffer addr */
+	ldr	w2, [x2]
+	cmp	w2, w1
+	b.ne	1b
+
+	/*
+	 * get cpu id
+	 */
+	mrs	x1, mpidr_el1
+	/* refer to platform_get_core_pos */
+	and	x2, x1, #MPIDR_CPU_MASK
+	and	x1, x1, #MPIDR_CLUSTER_MASK
+	/* x1 = cpu id (cpu id = aff0 + aff1*4 ) */
+	add	x1, x2, x1, LSR #6
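+	/* Aff1 sits at MPIDR bit 8, so "Aff1 field >> 6" equals cluster_id * 4 */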
+
+	adr	x2, ptr_atf_except_write_pos_per_cpu
+	ldr	x2, [x2]
+	/*
+	 * plus (cpu_id * 8)-->
+	 * &p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id]
+	 * x2 = &p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id];
+	 */
+	add x2, x2, x1, LSL # 3
+	/* log write */
+	/* w1 = p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id] */
+	ldr	x1, [x2]
+	/* *x1 = w0 -->
+	 * *(p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id]) = c
+	 */
+	strb	w0, [x1]
+	/* w1++ */
+	add	x1, x1, #1
+	/* p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id] = w1 */
+	str	x1, [x2]
+exit_putc:
+	.endm
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform-specific actions
+	 * needed for a secondary cpu after a cold reset, e.g.
+	 * marking the cpu's presence or placing it in a
+	 * holding pen.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	/* Do not do cold boot for secondary CPU */
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+func platform_is_primary_cpu
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #PLAT_PRIMARY_CPU
+	cset	x0, eq
+	ret
+endfunc platform_is_primary_cpu
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0, x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, UART0_BASE
+	mov_imm	x1, UART_CLOCK
+	mov_imm	x2, UART_BAUDRATE
+	b	console_init
+	ret
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(void)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm x1, UART0_BASE
+	b	console_core_putc
+	ret
+endfunc plat_crash_console_putc
+
+	/* --------------------------------------------------------
+	 * void platform_mem_init (void);
+	 *
+	 * Any memory init, relocation to be done before the
+	 * platform boots. Called very early in the boot process.
+	 * --------------------------------------------------------
+	 */
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
diff --git a/plat/mediatek/mt6795/bl31.ld.S b/plat/mediatek/mt6795/bl31.ld.S
new file mode 100644
index 0000000..0f60a0c
--- /dev/null
+++ b/plat/mediatek/mt6795/bl31.ld.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(bl31_entrypoint)
+
+
+MEMORY {
+	RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_TZRAM_SIZE
+	RAM2 (rwx): ORIGIN = TZRAM2_BASE, LENGTH = TZRAM2_SIZE
+}
+
+
+SECTIONS
+{
+    . = BL31_BASE;
+
+    ASSERT(. == ALIGN(2048),
+           "vector base is not aligned on a 2K boundary.")
+
+    __RO_START__ = .;
+    vector . : {
+        *(.vectors)
+    } >RAM
+
+    ASSERT(. == ALIGN(4096),
+           "BL31_BASE address is not aligned on a page boundary.")
+
+    ro . : {
+        *bl31_entrypoint.o(.text*)
+        *(.text*)
+        *(.rodata*)
+
+        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+        . = ALIGN(8);
+        __RT_SVC_DESCS_START__ = .;
+        KEEP(*(rt_svc_descs))
+        __RT_SVC_DESCS_END__ = .;
+
+        /*
+         * Ensure 8-byte alignment for cpu_ops so that its fields are also
+         * aligned. Also ensure cpu_ops inclusion.
+         */
+        . = ALIGN(8);
+        __CPU_OPS_START__ = .;
+        KEEP(*(cpu_ops))
+        __CPU_OPS_END__ = .;
+
+        __RO_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked as read-only,
+         * executable.  No RW data from the next section must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __RO_END__ = .;
+    } >RAM
+
+    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+           "cpu_ops not defined for this platform.")
+
+    /*
+     * Define a linker symbol to mark start of the RW memory area for this
+     * image.
+     */
+    __RW_START__ = . ;
+
+    /*
+     * .data must be placed at a lower address than the stacks if the stack
+     * protector is enabled. Alternatively, the .data.stack_protector_canary
+     * section can be placed independently of the main .data section.
+     */
+    .data . : {
+        __DATA_START__ = .;
+        *(.data*)
+        __DATA_END__ = .;
+    } >RAM
+
+#ifdef BL31_PROGBITS_LIMIT
+    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL3-1 progbits has exceeded its limit.")
+#endif
+
+    stacks (NOLOAD) : {
+        __STACKS_START__ = .;
+        *(tzfw_normal_stacks)
+        __STACKS_END__ = .;
+    } >RAM
+
+    /*
+     * The .bss section gets initialised to 0 at runtime.
+     * Its base address should be 16-byte aligned for better performance of the
+     * zero-initialization code.
+     */
+    .bss (NOLOAD) : ALIGN(16) {
+        __BSS_START__ = .;
+        *(.bss*)
+        *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks,
+         * the remaining cache lines are allocated by the linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
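+        /*
+         * Illustrative example: with PLATFORM_CORE_COUNT == 8, the
+         * compiler-allocated per-CPU block above is replicated seven more
+         * times by this location-counter bump, reserving
+         * 8 * __PERCPU_BAKERY_LOCK_SIZE__ bytes in total.
+         */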
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
+        __BSS_END__ = .;
+        __RW_END__ = .;
+    } >RAM
+
+    ASSERT(. <= BL31_LIMIT, "BL3-1 image has exceeded its limit.")
+
+    /*
+     * The xlat_table section is for full, aligned page tables (4K).
+     * Removing them from .bss avoids forcing 4K alignment on
+     * the .bss section and eliminates the unnecessary zero init
+     */
+    xlat_table (NOLOAD) : {
+        *(xlat_table)
+    } >RAM2
+
+#if USE_COHERENT_MEM
+    /*
+     * The base address of the coherent memory section must be page-aligned (4K)
+     * to guarantee that the coherent data are stored on their own pages and
+     * are not mixed with normal data.  This is required to set up the correct
+     * memory attributes for the coherent data page tables.
+     */
+    coherent_ram (NOLOAD) : ALIGN(4096) {
+        __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
+        *(tzfw_coherent_mem)
+        __COHERENT_RAM_END_UNALIGNED__ = .;
+        /*
+         * Memory page(s) mapped to this section will be marked
+         * as device memory.  No other unexpected data must creep in.
+         * Ensure the rest of the current memory page is unused.
+         */
+        . = NEXT(4096);
+        __COHERENT_RAM_END__ = .;
+    } >RAM2
+#endif
+
+    /*
+     * Define a linker symbol to mark end of the RW memory area for this
+     * image.
+     */
+    __BL31_END__ = .;
+
+    __BSS_SIZE__ = SIZEOF(.bss);
+#if USE_COHERENT_MEM
+    __COHERENT_RAM_UNALIGNED_SIZE__ =
+        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+    ASSERT(. <= TZRAM2_LIMIT, "TZRAM2 image has exceeded its limit.")
+}
diff --git a/plat/mediatek/mt6795/bl31_plat_setup.c b/plat/mediatek/mt6795/bl31_plat_setup.c
new file mode 100644
index 0000000..803f1ed
--- /dev/null
+++ b/plat/mediatek/mt6795/bl31_plat_setup.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <common_def.h>
+#include <console.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <generic_delay_timer.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <mt_cpuxgpt.h>
+#include <mtk_plat_common.h>
+#include <mtk_sip_svc.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <string.h>
+#include <xlat_tables.h>
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+unsigned long __RO_START__;
+unsigned long __RO_END__;
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL3-1 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+static const int cci_map[] = {
+	PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX,
+	PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+static uint32_t cci_map_length = ARRAY_SIZE(cci_map);
+
+/* Table of regions to map using the MMU.  */
+static const mmap_region_t plat_mmap[] = {
+	/* for TF text, RO, RW */
+	MAP_REGION_FLAT(MTK_DEV_RNG0_BASE, MTK_DEV_RNG0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MTK_DEV_RNG1_BASE, MTK_DEV_RNG1_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(RAM_CONSOLE_BASE & ~(PAGE_SIZE_MASK), RAM_CONSOLE_SIZE,
+						MT_DEVICE | MT_RW | MT_NS),
+	{ 0 }
+
+};
+
+/*******************************************************************************
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialize the mmu, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void plat_configure_mmu_el ## _el(unsigned long total_base,	\
+				unsigned long total_size,	\
+				unsigned long ro_start,	\
+				unsigned long ro_limit,	\
+				unsigned long coh_start,	\
+				unsigned long coh_limit)	\
+	{								\
+		mmap_add_region(total_base, total_base,			\
+				total_size,				\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(plat_mmap);					\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el ## _el(0);				\
+	}
+
+/* Define EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(3)
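+/*
+ * The macro expands here into plat_configure_mmu_el3(), which is invoked from
+ * bl31_plat_arch_setup() below to map TZRAM, the read-only region and the
+ * coherent RAM region before enabling the EL3 MMU.
+ */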
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+void plat_cci_init(void)
+{
+	/* Initialize CCI driver */
+	cci_init(PLAT_MT_CCI_BASE, cci_map, cci_map_length);
+}
+
+void plat_cci_enable(void)
+{
+	/*
+	 * Enable CCI coherency for this cluster.
+	 * No need for locks as no other cpu is active at the moment.
+	 */
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
+
+void plat_cci_disable(void)
+{
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
+
+
+static void platform_setup_cpu(void)
+{
+	/* setup big cores */
+	mmio_write_32((uintptr_t)&mt6795_mcucfg->mp1_config_res,
+		MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK);
+	mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg, MP1_AINACTS);
+	mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_clkenm_div,
+		MP1_SW_CG_GEN);
+	mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp1_rst_ctl,
+		MP1_L2RSTDISABLE);
+
+	/* set big cores arm64 boot mode */
+	mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_cpucfg,
+		MP1_CPUCFG_64BIT);
+
+	/* set LITTLE cores arm64 boot mode */
+	mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp0_rv_addr[0].rv_addr_hw,
+		MP0_CPUCFG_64BIT);
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ?
+			&bl33_image_ep_info : &bl32_image_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 early platform setup. Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
+ * are lost (potentially). This needs to be done before the MMU is initialized
+ * so that the memory layout can be used while creating page tables.
+ * BL2 has flushed this information to memory, so we are guaranteed to pick up
+ * good data.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+						 void *plat_params_from_bl2)
+{
+	struct mtk_bl_param_t *pmtk_bl_param =
+	(struct mtk_bl_param_t *)from_bl2;
+	struct atf_arg_t *teearg;
+	unsigned long long normal_base;
+	unsigned long long atf_base;
+
+	assert(from_bl2 != NULL);
+	/*
+	 * The Mediatek preloader (i.e. BL2) runs in the 32-bit state, and the
+	 * high 32 bits of the 64-bit GP registers are UNKNOWN after the CPU
+	 * warm-resets from the 32-bit to the 64-bit state. Clear the high
+	 * 32 bits, which may otherwise hold random values.
+	 */
+	pmtk_bl_param =
+	(struct mtk_bl_param_t *)((uint64_t)pmtk_bl_param & 0x00000000ffffffff);
+	plat_params_from_bl2 =
+	(void *)((uint64_t)plat_params_from_bl2 & 0x00000000ffffffff);
+
+	teearg  = (struct atf_arg_t *)pmtk_bl_param->tee_info_addr;
+
+	console_init(teearg->atf_log_port, UART_CLOCK, UART_BAUDRATE);
+	memcpy((void *)&gteearg, (void *)teearg, sizeof(struct atf_arg_t));
+
+	normal_base = 0;
+	/*
+	 * At ATF boot time the timer behind cntpct_el0 is not yet
+	 * initialized, so it is not counting yet.
+	 */
+	atf_base = read_cntpct_el0();
+	sched_clock_init(normal_base, atf_base);
+
+	VERBOSE("bl31_setup\n");
+
+	/* Populate entry point information for BL3-2 and BL3-3 */
+	SET_PARAM_HEAD(&bl32_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	/*
+	 * Tell BL3-1 where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	/* BL33_START_ADDRESS */
+	bl33_image_ep_info.pc = pmtk_bl_param->bl33_start_addr;
+	bl33_image_ep_info.spsr = plat_get_spsr_for_bl33_entry();
+	bl33_image_ep_info.args.arg4 =  pmtk_bl_param->bootarg_loc;
+	bl33_image_ep_info.args.arg5 =  pmtk_bl_param->bootarg_size;
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+
+void bl31_platform_setup(void)
+{
+	platform_setup_cpu();
+
+	generic_delay_timer_init();
+
+	plat_mt_gic_driver_init();
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_mt_gic_init();
+
+	/* Topologies are best known to the platform. */
+	mt_setup_topology();
+}
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ * It also initializes the MTK proprietary log buffer control fields.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	/* Enable non-secure access to CCI-400 registers */
+	mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET, 0x1);
+
+	plat_cci_init();
+	plat_cci_enable();
+
+	if (gteearg.atf_log_buf_size != 0) {
+		INFO("mmap atf buffer : 0x%x, 0x%x\n\r",
+			gteearg.atf_log_buf_start,
+			gteearg.atf_log_buf_size);
+
+		mmap_add_region(
+			gteearg.atf_log_buf_start &
+			~(PAGE_SIZE_2MB_MASK),
+			gteearg.atf_log_buf_start &
+			~(PAGE_SIZE_2MB_MASK),
+			PAGE_SIZE_2MB,
+			MT_DEVICE | MT_RW | MT_NS);
+
+		INFO("mmap atf buffer (force 2MB aligned):0x%x, 0x%x\n",
+			(gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
+		PAGE_SIZE_2MB);
+	}
+	/*
+	 * add TZRAM_BASE to memory map
+	 * then set RO and COHERENT to different attribute
+	 */
+	plat_configure_mmu_el3(
+		(TZRAM_BASE & ~(PAGE_SIZE_MASK)),
+		(TZRAM_SIZE & ~(PAGE_SIZE_MASK)),
+		(BL31_RO_BASE & ~(PAGE_SIZE_MASK)),
+		BL31_RO_LIMIT,
+		BL_COHERENT_RAM_BASE,
+		BL_COHERENT_RAM_END);
+	/* Initialize for ATF log buffer */
+	if (gteearg.atf_log_buf_size != 0) {
+		gteearg.atf_aee_debug_buf_size = ATF_AEE_BUFFER_SIZE;
+		gteearg.atf_aee_debug_buf_start =
+			gteearg.atf_log_buf_start +
+			gteearg.atf_log_buf_size - ATF_AEE_BUFFER_SIZE;
+		INFO("ATF log service is registered (0x%x, aee:0x%x)\n",
+			gteearg.atf_log_buf_start,
+			gteearg.atf_aee_debug_buf_start);
+	} else {
+		gteearg.atf_aee_debug_buf_size = 0;
+		gteearg.atf_aee_debug_buf_start = 0;
+	}
+
+	/* Platform code before bl31_main */
+	/* compatible to the earlier chipset */
+
+	/* Show to ATF log buffer & UART */
+	INFO("BL3-1: %s\n", version_string);
+	INFO("BL3-1: %s\n", build_message);
+
+}
+#if 0
+/* MTK Define */
+#define ACTLR_CPUECTLR_BIT    (1 << 1)
+
+void enable_ns_access_to_cpuectlr(void)
+{
+	unsigned int next_actlr;
+
+
+	/* ACTLR_EL1 does not implement CPUECTLR */
+	next_actlr = read_actlr_el2();
+	next_actlr |= ACTLR_CPUECTLR_BIT;
+	write_actlr_el2(next_actlr);
+
+	next_actlr = read_actlr_el3();
+	next_actlr |= ACTLR_CPUECTLR_BIT;
+	write_actlr_el3(next_actlr);
+}
+#endif
+/*******************************************************************************
+ * This function prepares the boot arguments for 64-bit kernel entry
+ ******************************************************************************/
+static entry_point_info_t *bl31_plat_get_next_kernel64_ep_info(void)
+{
+	entry_point_info_t *next_image_info;
+	unsigned int mode;
+
+	mode = 0;
+
+	/* Kernel image is always non-secured */
+	next_image_info = &bl33_image_ep_info;
+
+	/* Figure out what mode we enter the non-secure world in */
+	if (EL_IMPLEMENTED(2)) {
+		INFO("Kernel_EL2\n");
+		mode = MODE_EL2;
+	} else {
+		INFO("Kernel_EL1\n");
+		mode = MODE_EL1;
+	}
+
+	INFO("Kernel is 64Bit\n");
+	next_image_info->spsr =
+		SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	next_image_info->pc = get_kernel_info_pc();
+	next_image_info->args.arg0 = get_kernel_info_r0();
+	next_image_info->args.arg1 = get_kernel_info_r1();
+
+	INFO("pc=0x%lx, r0=0x%lx, r1=0x%lx\n",
+				 next_image_info->pc,
+				 next_image_info->args.arg0,
+				 next_image_info->args.arg1);
+
+
+	SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * This function prepares the boot arguments for 32-bit kernel entry
+ ******************************************************************************/
+static entry_point_info_t *bl31_plat_get_next_kernel32_ep_info(void)
+{
+	entry_point_info_t *next_image_info;
+	unsigned int mode;
+
+	mode = 0;
+
+	/* Kernel image is always non-secured */
+	next_image_info = &bl33_image_ep_info;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = MODE32_hyp;
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+
+	INFO("Kernel is 32Bit\n");
+	next_image_info->spsr =
+		SPSR_MODE32(mode, SPSR_T_ARM, SPSR_E_LITTLE,
+		(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT));
+	next_image_info->pc = get_kernel_info_pc();
+	next_image_info->args.arg0 = get_kernel_info_r0();
+	next_image_info->args.arg1 = get_kernel_info_r1();
+	next_image_info->args.arg2 = get_kernel_info_r2();
+
+	INFO("pc=0x%lx, r0=0x%lx, r1=0x%lx, r2=0x%lx\n",
+				 next_image_info->pc,
+				 next_image_info->args.arg0,
+				 next_image_info->args.arg1,
+				 next_image_info->args.arg2);
+
+
+	SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * This function prepares the boot arguments for the kernel entrypoint
+ ******************************************************************************/
+void bl31_prepare_kernel_entry(uint64_t k32_64)
+{
+	entry_point_info_t *next_image_info;
+	uint32_t image_type;
+
+	/* Determine which image to execute next */
+	/* image_type = bl31_get_next_image_type(); */
+	image_type = NON_SECURE;
+
+	/* Program EL3 registers to enable entry into the next EL */
+	if (k32_64 == 0)
+		next_image_info = bl31_plat_get_next_kernel32_ep_info();
+	else
+		next_image_info = bl31_plat_get_next_kernel64_ep_info();
+
+	assert(next_image_info);
+	assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
+
+	INFO("BL3-1: Preparing for EL3 exit to %s world, Kernel\n",
+		(image_type == SECURE) ? "secure" : "normal");
+	INFO("BL3-1: Next image address = 0x%llx\n",
+		(unsigned long long) next_image_info->pc);
+	INFO("BL3-1: Next image spsr = 0x%x\n", next_image_info->spsr);
+	cm_init_context(read_mpidr_el1(), next_image_info);
+	cm_prepare_el3_exit(image_type);
+}
diff --git a/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.c b/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.c
new file mode 100644
index 0000000..b357972
--- /dev/null
+++ b/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mt_cpuxgpt.h>
+#include <platform.h>
+#include <stdint.h>
+#define CPUXGPT_BASE	0x10200000
+#define INDEX_BASE		(CPUXGPT_BASE+0x0674)
+#define CTL_BASE		(CPUXGPT_BASE+0x0670)
+
+uint64_t normal_time_base;
+uint64_t atf_time_base;
+
+void sched_clock_init(uint64_t normal_base, uint64_t atf_base)
+{
+	normal_time_base = normal_base;
+	atf_time_base = atf_base;
+}
+
+uint64_t sched_clock(void)
+{
+	uint64_t cval;
+
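+	/*
+	 * ticks * 1000 / SYS_COUNTER_FREQ_IN_MHZ == ticks * 1e9 / freq_in_Hz,
+	 * so cval is the time since sched_clock_init() in nanoseconds,
+	 * offset by normal_time_base.
+	 */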
+	cval = (((read_cntpct_el0() - atf_time_base)*1000)/
+		SYS_COUNTER_FREQ_IN_MHZ) + normal_time_base;
+	return cval;
+}
+
+/*
+ * Return 0 if the write attempts to clear the CPUXGPT enable bit, which is
+ * not allowed.
+ * Return 1 if reg_addr is not related to disabling that control bit.
+ */
+unsigned char check_cpuxgpt_write_permission(unsigned int reg_addr,
+	unsigned int reg_value)
+{
+	unsigned int idx;
+	unsigned int ctl_val;
+
+	if (reg_addr == CTL_BASE) {
+		idx = mmio_read_32(INDEX_BASE);
+
+		/* idx 0: CPUXGPT system control */
+		if (idx == 0) {
+			ctl_val = mmio_read_32(CTL_BASE);
+			if (ctl_val & 1) {
+				/*
+				 * if enable bit already set,
+				 * then bit 0 is not allow to set as 0
+				 */
+				if (!(reg_value & 1))
+					return 0;
+			}
+		}
+	}
+	return 1;
+}
+
diff --git a/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.h b/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.h
new file mode 100644
index 0000000..99b78a7
--- /dev/null
+++ b/plat/mediatek/mt6795/drivers/timer/mt_cpuxgpt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MT_CPUXGPT_H__
+#define __MT_CPUXGPT_H__
+
+/* REG */
+#define INDEX_CTL_REG       0x000
+#define INDEX_STA_REG       0x004
+#define INDEX_CNT_L_INIT    0x008
+#define INDEX_CNT_H_INIT    0x00C
+
+/* CTL_REG SET */
+#define EN_CPUXGPT          0x01
+#define EN_AHLT_DEBUG       0x02
+#define CLK_DIV1            (0x1 << 8)
+#define CLK_DIV2            (0x2 << 8)
+#define CLK_DIV4            (0x4 << 8)
+#define CLK_DIV_MASK        (~(0x7<<8))
+
+void generic_timer_backup(void);
+void sched_clock_init(uint64_t normal_base, uint64_t atf_base);
+uint64_t sched_clock(void);
+
+#endif /* __MT_CPUXGPT_H__ */
diff --git a/plat/mediatek/mt6795/include/mcucfg.h b/plat/mediatek/mt6795/include/mcucfg.h
new file mode 100644
index 0000000..eff8d34
--- /dev/null
+++ b/plat/mediatek/mt6795/include/mcucfg.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MCUCFG_H__
+#define __MCUCFG_H__
+
+#include <platform_def.h>
+#include <stdint.h>
+
+struct mt6795_mcucfg_regs {
+	uint32_t mp0_ca7l_cache_config;
+	struct {
+		uint32_t mem_delsel0;
+		uint32_t mem_delsel1;
+	} mp0_cpu[4];
+	uint32_t mp0_cache_mem_delsel0;
+	uint32_t mp0_cache_mem_delsel1;
+	uint32_t mp0_axi_config;
+	uint32_t mp0_misc_config[2];
+	struct {
+		uint32_t rv_addr_lw;
+		uint32_t rv_addr_hw;
+	} mp0_rv_addr[4];
+	uint32_t mp0_ca7l_cfg_dis;
+	uint32_t mp0_ca7l_clken_ctrl;
+	uint32_t mp0_ca7l_rst_ctrl;
+	uint32_t mp0_ca7l_misc_config;
+	uint32_t mp0_ca7l_dbg_pwr_ctrl;
+	uint32_t mp0_rw_rsvd0;
+	uint32_t mp0_rw_rsvd1;
+	uint32_t mp0_ro_rsvd;
+	uint32_t reserved0_0[100];
+	uint32_t mp1_cpucfg;
+	uint32_t mp1_miscdbg;
+	uint32_t reserved0_1[13];
+	uint32_t mp1_rst_ctl;
+	uint32_t mp1_clkenm_div;
+	uint32_t reserved0_2[7];
+	uint32_t mp1_config_res;
+	uint32_t reserved0_3[13];
+	struct {
+		uint32_t rv_addr_lw;
+		uint32_t rv_addr_hw;
+	} mp1_rv_addr[2];
+	uint32_t reserved0_4[84];
+	uint32_t mp0_rst_status;		/* 0x400 */
+	uint32_t mp0_dbg_ctrl;
+	uint32_t mp0_dbg_flag;
+	uint32_t mp0_ca7l_ir_mon;
+	struct {
+		uint32_t pc_lw;
+		uint32_t pc_hw;
+		uint32_t fp_arch32;
+		uint32_t sp_arch32;
+		uint32_t fp_arch64_lw;
+		uint32_t fp_arch64_hw;
+		uint32_t sp_arch64_lw;
+		uint32_t sp_arch64_hw;
+	} mp0_dbg_core[4];
+	uint32_t dfd_ctrl;
+	uint32_t dfd_cnt_l;
+	uint32_t dfd_cnt_h;
+	uint32_t misccfg_mp0_rw_rsvd;
+	uint32_t misccfg_sec_vio_status0;
+	uint32_t misccfg_sec_vio_status1;
+	uint32_t reserved1[22];
+	uint32_t misccfg_rw_rsvd;		/* 0x500 */
+	uint32_t mcusys_dbg_mon_sel_a;
+	uint32_t mcusys_dbg_mon;
+	uint32_t reserved2[61];
+	uint32_t mcusys_config_a;		/* 0x600 */
+	uint32_t mcusys_config1_a;
+	uint32_t mcusys_gic_peribase_a;
+	uint32_t reserved3;
+	uint32_t sec_range0_start;		/* 0x610 */
+	uint32_t sec_range0_end;
+	uint32_t sec_range_enable;
+	uint32_t reserved4;
+	uint32_t int_pol_ctl[8];		/* 0x620 */
+	uint32_t aclken_div;			/* 0x640 */
+	uint32_t pclken_div;
+	uint32_t l2c_sram_ctrl;
+	uint32_t armpll_jit_ctrl;
+	uint32_t cci_addrmap;			/* 0x650 */
+	uint32_t cci_config;
+	uint32_t cci_periphbase;
+	uint32_t cci_nevntcntovfl;
+	uint32_t cci_clk_ctrl;			/* 0x660 */
+	uint32_t cci_acel_s1_ctrl;
+	uint32_t bus_fabric_dcm_ctrl;
+	uint32_t reserved5;
+	uint32_t xgpt_ctl;			/* 0x670 */
+	uint32_t xgpt_idx;
+	uint32_t ptpod2_ctl0;
+	uint32_t ptpod2_ctl1;
+	uint32_t mcusys_revid;
+	uint32_t mcusys_rw_rsvd0;
+	uint32_t mcusys_rw_rsvd1;
+};
+
+static struct mt6795_mcucfg_regs *const mt6795_mcucfg = (void *)MCUCFG_BASE;
+
+/* cpu boot mode */
+#define	MP0_CPUCFG_64BIT_SHIFT	12
+#define	MP1_CPUCFG_64BIT_SHIFT	28
+#define	MP0_CPUCFG_64BIT	(U(0xf) << MP0_CPUCFG_64BIT_SHIFT)
+#define	MP1_CPUCFG_64BIT	(U(0xf) << MP1_CPUCFG_64BIT_SHIFT)
+
+/* scu related */
+enum {
+	MP0_ACINACTM_SHIFT = 4,
+	MP1_ACINACTM_SHIFT = 0,
+	MP0_ACINACTM = 1 << MP0_ACINACTM_SHIFT,
+	MP1_ACINACTM = 1 << MP1_ACINACTM_SHIFT
+};
+
+enum {
+	MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT = 0,
+	MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT = 4,
+	MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT = 8,
+	MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT = 12,
+	MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT = 16,
+
+	MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT
+};
+
+enum {
+	MP1_AINACTS_SHIFT = 4,
+	MP1_AINACTS = 1 << MP1_AINACTS_SHIFT
+};
+
+enum {
+	MP1_SW_CG_GEN_SHIFT = 12,
+	MP1_SW_CG_GEN = 1 << MP1_SW_CG_GEN_SHIFT
+};
+
+enum {
+	MP1_L2RSTDISABLE_SHIFT = 14,
+	MP1_L2RSTDISABLE = 1 << MP1_L2RSTDISABLE_SHIFT
+};
+
+#endif  /* __MCUCFG_H__ */
diff --git a/plat/mediatek/mt6795/include/plat_macros.S b/plat/mediatek/mt6795/include/plat_macros.S
new file mode 100644
index 0000000..48bf28f
--- /dev/null
+++ b/plat/mediatek/mt6795/include/plat_macros.S
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cci.h>
+#include <gic_v2.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
+		" Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+	/* ---------------------------------------------
+	 * The below macro prints out relevant GIC
+	 * registers whenever an unhandled exception is
+	 * taken in BL3-1.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	mov_imm x16, BASE_GICD_BASE
+	mov_imm x17, BASE_GICC_BASE
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+gicd_ispendr_loop:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	exit_print_gic_regs
+	bl	asm_print_hex
+
+	adr	x4, spacer
+	bl	asm_print_str
+
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+
+	adr	x4, newline
+	bl	asm_print_str
+	b	gicd_ispendr_loop
+exit_print_gic_regs:
+	.endm
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1", ""
+
+	/* ------------------------------------------------
+	 * The below macro prints out relevant interconnect
+	 * registers whenever an unhandled exception is
+	 * taken in BL3-1.
+	 * Clobbers: x0 - x9, sp
+	 * ------------------------------------------------
+	 */
+	.macro plat_print_interconnect_regs
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+	.endm
diff --git a/plat/mediatek/mt6795/include/plat_private.h b/plat/mediatek/mt6795/include/plat_private.h
new file mode 100644
index 0000000..9c6c1f0
--- /dev/null
+++ b/plat/mediatek/mt6795/include/plat_private.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_PRIVATE_H__
+#define __PLAT_PRIVATE_H__
+#include <stdint.h>
+#include <xlat_tables.h>
+
+void plat_configure_mmu_el3(unsigned long total_base,
+					unsigned long total_size,
+					unsigned long,
+					unsigned long,
+					unsigned long,
+					unsigned long);
+
+void plat_cci_init(void);
+void plat_cci_enable(void);
+void plat_cci_disable(void);
+
+/* Declarations for plat_mt_gic.c */
+void plat_mt_gic_init(void);
+
+/* Declarations for plat_topology.c */
+int mt_setup_topology(void);
+void plat_delay_timer_init(void);
+
+void plat_mt_gic_driver_init(void);
+void plat_mt_gic_init(void);
+void plat_mt_gic_cpuif_enable(void);
+void plat_mt_gic_cpuif_disable(void);
+void plat_mt_gic_pcpu_init(void);
+
+#endif /* __PLAT_PRIVATE_H__ */
diff --git a/plat/mediatek/mt6795/include/plat_sip_calls.h b/plat/mediatek/mt6795/include/plat_sip_calls.h
new file mode 100644
index 0000000..b328195
--- /dev/null
+++ b/plat/mediatek/mt6795/include/plat_sip_calls.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+/*******************************************************************************
+ * Plat SiP function constants
+ ******************************************************************************/
+#define MTK_PLAT_SIP_NUM_CALLS	0
+
+#endif /* __PLAT_SIP_CALLS_H__ */
diff --git a/plat/mediatek/mt6795/include/platform_def.h b/plat/mediatek/mt6795/include/platform_def.h
new file mode 100644
index 0000000..cb06fea
--- /dev/null
+++ b/plat/mediatek/mt6795/include/platform_def.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#define PLAT_PRIMARY_CPU  0x0
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define MT_BL31_PLAT_PARAM_VAL  0x0f1e2d3c4b5a6978ULL
+
+#define IO_PHYS             (0x10000000)
+#define INFRACFG_AO_BASE    (IO_PHYS + 0x1000)
+#define MCUCFG_BASE         (IO_PHYS + 0x200000)
+#define PERI_BASE           (IO_PHYS + 0x1000000)
+
+
+#define GPIO_BASE           (IO_PHYS + 0x370000)
+#define SPM_BASE            (IO_PHYS + 0x6000)
+#define RGU_BASE            (MCUCFG_BASE + 0x11000)
+#define PMIC_WRAP_BASE      (IO_PHYS + 0x10000)
+
+#define TRNG_base           (MCUCFG_BASE + 0x230000)
+#define MT_GIC_BASE         (0x10220000)
+#define MCU_SYS_SIZE        (0x700000)
+#define PLAT_MT_CCI_BASE    (IO_PHYS + 0x390000)
+
+/* Aggregate of all devices in the first GB */
+#define MTK_DEV_RNG0_BASE   IO_PHYS
+#define MTK_DEV_RNG0_SIZE   0x400000
+#define MTK_DEV_RNG1_BASE   (PERI_BASE)
+#define MTK_DEV_RNG1_SIZE   0x4000000
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define UART0_BASE (PERI_BASE + 0x2000)
+
+#define UART_BAUDRATE   (921600)
+#define UART_CLOCK (26000000)
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	13000000
+#define SYS_COUNTER_FREQ_IN_MHZ		(SYS_COUNTER_FREQ_IN_TICKS/1000000)
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base MTK_platform compatible GIC memory map */
+#define BASE_GICD_BASE      (MT_GIC_BASE+0x1000)
+#define BASE_GICC_BASE      (MT_GIC_BASE + 0x2000)
+#define BASE_GICR_BASE      (MT_GIC_BASE + 0x200000)
+#define BASE_GICH_BASE      (MT_GIC_BASE + 0x4000)
+#define BASE_GICV_BASE      (MT_GIC_BASE + 0x6000)
+
+#define INT_POL_CTL0        0x10200620
+#define GIC_PRIVATE_SIGNALS (32)
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX  4
+#define PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX  3
+
+/*******************************************************************************
+ * WDT Registers
+ ******************************************************************************/
+#define MTK_WDT_BASE                        (RGU_BASE)
+#define MTK_WDT_SIZE                        (0x1000)
+#define MTK_WDT_MODE                        (MTK_WDT_BASE+0x0000)
+#define MTK_WDT_LENGTH                      (MTK_WDT_BASE+0x0004)
+#define MTK_WDT_RESTART                     (MTK_WDT_BASE+0x0008)
+#define MTK_WDT_STATUS                      (MTK_WDT_BASE+0x000C)
+#define MTK_WDT_INTERVAL                    (MTK_WDT_BASE+0x0010)
+#define MTK_WDT_SWRST                       (MTK_WDT_BASE+0x0014)
+#define MTK_WDT_SWSYSRST                    (MTK_WDT_BASE+0x0018)
+#define MTK_WDT_NONRST_REG                  (MTK_WDT_BASE+0x0020)
+#define MTK_WDT_NONRST_REG2                 (MTK_WDT_BASE+0x0024)
+#define MTK_WDT_REQ_MODE                    (MTK_WDT_BASE+0x0030)
+#define MTK_WDT_REQ_IRQ_EN                  (MTK_WDT_BASE+0x0034)
+#define MTK_WDT_DEBUG_CTL                   (MTK_WDT_BASE+0x0040)
+
+/*WDT_STATUS*/
+#define MTK_WDT_STATUS_HWWDT_RST            (0x80000000)
+#define MTK_WDT_STATUS_SWWDT_RST            (0x40000000)
+#define MTK_WDT_STATUS_IRQWDT_RST           (0x20000000)
+#define MTK_WDT_STATUS_DEBUGWDT_RST         (0x00080000)
+#define MTK_WDT_STATUS_SPMWDT_RST           (0x0002)
+#define MTK_WDT_STATUS_SPM_THERMAL_RST      (0x0001)
+#define MTK_WDT_STATUS_THERMAL_DIRECT_RST   (1<<18)
+#define MTK_WDT_STATUS_SECURITY_RST         (1<<28)
+
+#define MTK_WDT_MODE_DUAL_MODE              0x0040
+#define MTK_WDT_MODE_IRQ                    0x0008
+#define MTK_WDT_MODE_KEY                    0x22000000
+#define MTK_WDT_MODE_EXTEN                  0x0004
+#define MTK_WDT_SWRST_KEY                   0x1209
+#define MTK_WDT_RESTART_KEY                 (0x1971)
+
+/* FIQ platform related define */
+#define MT_IRQ_SEC_SGI_0  8
+#define MT_IRQ_SEC_SGI_1  9
+#define MT_IRQ_SEC_SGI_2  10
+#define MT_IRQ_SEC_SGI_3  11
+#define MT_IRQ_SEC_SGI_4  12
+#define MT_IRQ_SEC_SGI_5  13
+#define MT_IRQ_SEC_SGI_6  14
+#define MT_IRQ_SEC_SGI_7  15
+
+#define FIQ_SMP_CALL_SGI  MT_IRQ_SEC_SGI_5
+
+#define PLAT_ARM_G0_IRQS	FIQ_SMP_CALL_SGI
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT    "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH      aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL1)
+#define PLATFORM_STACK_SIZE 0x440
+#elif defined(IMAGE_BL2)
+#define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL32)
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR    "Booting Trusted Firmware\n"
+#if ENABLE_PLAT_COMPAT
+#define PLATFORM_MAX_AFFLVL     MPIDR_AFFLVL2
+#else
+#define PLAT_MAX_PWR_LVL        2 /* MPIDR_AFFLVL2 */
+#endif
+
+#define PLATFORM_CACHE_LINE_SIZE      64
+#define PLATFORM_SYSTEM_COUNT         1
+#define PLATFORM_CLUSTER_COUNT        2
+#define PLATFORM_CLUSTER0_CORE_COUNT  4
+#define PLATFORM_CLUSTER1_CORE_COUNT  4
+#define PLATFORM_CORE_COUNT   (PLATFORM_CLUSTER1_CORE_COUNT + \
+					PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
+#define PLATFORM_NUM_AFFS   (PLATFORM_SYSTEM_COUNT +  \
+					PLATFORM_CLUSTER_COUNT + \
+					PLATFORM_CORE_COUNT)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* ATF Argument */
+#define ATF_ARG_SIZE      (0x800)
+
+/* TF text, ro, rw, internal SRAM, Size: release: 80KB, debug: 92KB */
+#define TZRAM_BASE        (0x110000)
+#if DEBUG
+#define TZRAM_SIZE        (0x1C400)
+#else
+#define TZRAM_SIZE        (0x1C400)
+#endif
+#define TZRAM2_BASE	   0x00100000
+#define TZRAM2_SIZE	   0xDC00
+#define TZRAM2_LIMIT		(TZRAM2_BASE + TZRAM2_SIZE)
+
+#define RAM_CONSOLE_BASE  0x0012D000
+#define RAM_CONSOLE_SIZE  0x00001000
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL3-1 debug size plus a
+ * little space for growth.
+ */
+#define BL31_BASE           (TZRAM_BASE + 0x1000)
+#define BL31_LIMIT          (TZRAM_BASE + TZRAM_SIZE)
+#define BSS1_STACK_LIMIT    (TZRAM_BASE + TZRAM_SIZE)
+#define BL31_TZRAM_SIZE     (TZRAM_SIZE - ATF_ARG_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE   (1ull << 32)
+#define MAX_XLAT_TABLES   7
+#define MAX_MMAP_REGIONS  16
+
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define CCI400_BASE                     0x10390000
+#define CCI400_SL_IFACE_CLUSTER0        4
+#define CCI400_SL_IFACE_CLUSTER1        3
+#define CCI400_SL_IFACE_INDEX(mpidr)  (mpidr & MPIDR_CLUSTER_MASK ? \
+					CCI400_SL_IFACE_CLUSTER1 :   \
+					CCI400_SL_IFACE_CLUSTER0)
+#define CCI_SEC_ACCESS_OFFSET           (0x8)
+
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT     6
+#define CACHE_WRITEBACK_GRANULE   (1 << CACHE_WRITEBACK_SHIFT)
+
+#define BL32_BASE                 (0x0)
+
+/*
+ * Load address of BL3-3 for this platform port
+ */
+#define LK_SIZE_LIMIT				(0x100000)
+#define PLAT_MTK_NS_IMAGE_OFFSET	(0x41E00000)
+/* 16KB */
+#define ATF_AEE_BUFFER_SIZE         (0x4000)
+#define PAGE_SIZE_2MB_MASK          (PAGE_SIZE_2MB - 1)
+#define IS_PAGE_2MB_ALIGNED(addr)   (((addr) & PAGE_SIZE_2MB_MASK) == 0)
+#define PAGE_SIZE_2MB               (1 << PAGE_SIZE_2MB_SHIFT)
+#define PAGE_SIZE_2MB_SHIFT         TWO_MB_SHIFT
+
+#endif /* __PLATFORM_DEF_H__ */
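The 2 MB helpers at the end of this header appear intended for checking the alignment of image addresses such as PLAT_MTK_NS_IMAGE_OFFSET. A hedged sketch of how they compose, assuming TWO_MB_SHIFT resolves to 21 (round_up_2mb is hypothetical, not part of the patch):

    /* Hypothetical helper: round an address up to the next 2 MB boundary
     * using PAGE_SIZE_2MB / PAGE_SIZE_2MB_MASK / IS_PAGE_2MB_ALIGNED. */
    static inline unsigned long round_up_2mb(unsigned long addr)
    {
        if (IS_PAGE_2MB_ALIGNED(addr))
            return addr;
        return (addr & ~(unsigned long)PAGE_SIZE_2MB_MASK) + PAGE_SIZE_2MB;
    }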
diff --git a/plat/mediatek/mt6795/include/power_tracer.h b/plat/mediatek/mt6795/include/power_tracer.h
new file mode 100644
index 0000000..5eb77e4
--- /dev/null
+++ b/plat/mediatek/mt6795/include/power_tracer.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __POWER_TRACER_H__
+#define __POWER_TRACER_H__
+
+#define CPU_UP		0
+#define CPU_DOWN	1
+#define CPU_SUSPEND	2
+#define CLUSTER_UP	3
+#define CLUSTER_DOWN	4
+#define CLUSTER_SUSPEND	5
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode);
+
+#endif
diff --git a/plat/mediatek/mt6795/include/scu.h b/plat/mediatek/mt6795/include/scu.h
new file mode 100644
index 0000000..3c78692
--- /dev/null
+++ b/plat/mediatek/mt6795/include/scu.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SCU_H__
+#define __SCU_H__
+
+void disable_scu(unsigned long mpidr);
+void enable_scu(unsigned long mpidr);
+
+#endif
diff --git a/plat/mediatek/mt6795/include/spm.h b/plat/mediatek/mt6795/include/spm.h
new file mode 100644
index 0000000..5e31276
--- /dev/null
+++ b/plat/mediatek/mt6795/include/spm.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#define SPM_POWERON_CONFIG_SET			(SPM_BASE + 0x000)
+#define SPM_POWER_ON_VAL0			(SPM_BASE + 0x010)
+#define SPM_POWER_ON_VAL1			(SPM_BASE + 0x014)
+#define SPM_CLK_SETTLE				(SPM_BASE + 0x100)
+#define SPM_CA7_CPU1_PWR_CON			(SPM_BASE + 0x218)
+#define SPM_CA7_CPU2_PWR_CON			(SPM_BASE + 0x21c)
+#define SPM_CA7_CPU3_PWR_CON			(SPM_BASE + 0x220)
+#define SPM_CA7_CPU1_L1_PDN			(SPM_BASE + 0x264)
+#define SPM_CA7_CPU2_L1_PDN			(SPM_BASE + 0x26c)
+#define SPM_CA7_CPU3_L1_PDN			(SPM_BASE + 0x274)
+#define SPM_MD32_SRAM_CON			(SPM_BASE + 0x2c8)
+#define SPM_PCM_CON0				(SPM_BASE + 0x310)
+#define SPM_PCM_CON1				(SPM_BASE + 0x314)
+#define SPM_PCM_IM_PTR				(SPM_BASE + 0x318)
+#define SPM_PCM_IM_LEN				(SPM_BASE + 0x31c)
+#define SPM_PCM_REG_DATA_INI			(SPM_BASE + 0x320)
+#define SPM_PCM_EVENT_VECTOR0			(SPM_BASE + 0x340)
+#define SPM_PCM_EVENT_VECTOR1			(SPM_BASE + 0x344)
+#define SPM_PCM_EVENT_VECTOR2			(SPM_BASE + 0x348)
+#define SPM_PCM_EVENT_VECTOR3			(SPM_BASE + 0x34c)
+#define SPM_PCM_MAS_PAUSE_MASK			(SPM_BASE + 0x354)
+#define SPM_PCM_PWR_IO_EN			(SPM_BASE + 0x358)
+#define SPM_PCM_TIMER_VAL			(SPM_BASE + 0x35c)
+#define SPM_PCM_TIMER_OUT			(SPM_BASE + 0x360)
+#define SPM_PCM_REG0_DATA			(SPM_BASE + 0x380)
+#define SPM_PCM_REG1_DATA			(SPM_BASE + 0x384)
+#define SPM_PCM_REG2_DATA			(SPM_BASE + 0x388)
+#define SPM_PCM_REG3_DATA			(SPM_BASE + 0x38c)
+#define SPM_PCM_REG4_DATA			(SPM_BASE + 0x390)
+#define SPM_PCM_REG5_DATA			(SPM_BASE + 0x394)
+#define SPM_PCM_REG6_DATA			(SPM_BASE + 0x398)
+#define SPM_PCM_REG7_DATA			(SPM_BASE + 0x39c)
+#define SPM_PCM_REG8_DATA			(SPM_BASE + 0x3a0)
+#define SPM_PCM_REG9_DATA			(SPM_BASE + 0x3a4)
+#define SPM_PCM_REG10_DATA			(SPM_BASE + 0x3a8)
+#define SPM_PCM_REG11_DATA			(SPM_BASE + 0x3ac)
+#define SPM_PCM_REG12_DATA			(SPM_BASE + 0x3b0)
+#define SPM_PCM_REG13_DATA			(SPM_BASE + 0x3b4)
+#define SPM_PCM_REG14_DATA			(SPM_BASE + 0x3b8)
+#define SPM_PCM_REG15_DATA			(SPM_BASE + 0x3bc)
+#define SPM_PCM_EVENT_REG_STA			(SPM_BASE + 0x3c0)
+#define SPM_PCM_FSM_STA				(SPM_BASE + 0x3c4)
+#define SPM_PCM_IM_HOST_RW_PTR			(SPM_BASE + 0x3c8)
+#define SPM_PCM_IM_HOST_RW_DAT			(SPM_BASE + 0x3cc)
+#define SPM_PCM_EVENT_VECTOR4			(SPM_BASE + 0x3d0)
+#define SPM_PCM_EVENT_VECTOR5			(SPM_BASE + 0x3d4)
+#define SPM_PCM_EVENT_VECTOR6			(SPM_BASE + 0x3d8)
+#define SPM_PCM_EVENT_VECTOR7			(SPM_BASE + 0x3dc)
+#define SPM_PCM_SW_INT_SET			(SPM_BASE + 0x3e0)
+#define SPM_PCM_SW_INT_CLEAR			(SPM_BASE + 0x3e4)
+#define SPM_CLK_CON				(SPM_BASE + 0x400)
+#define SPM_SLEEP_PTPOD2_CON			(SPM_BASE + 0x408)
+#define SPM_APMCU_PWRCTL			(SPM_BASE + 0x600)
+#define SPM_AP_DVFS_CON_SET			(SPM_BASE + 0x604)
+#define SPM_AP_STANBY_CON			(SPM_BASE + 0x608)
+#define SPM_PWR_STATUS				(SPM_BASE + 0x60c)
+#define SPM_PWR_STATUS_2ND			(SPM_BASE + 0x610)
+#define SPM_AP_BSI_REQ				(SPM_BASE + 0x614)
+#define SPM_SLEEP_TIMER_STA			(SPM_BASE + 0x720)
+#define SPM_SLEEP_WAKEUP_EVENT_MASK		(SPM_BASE + 0x810)
+#define SPM_SLEEP_CPU_WAKEUP_EVENT		(SPM_BASE + 0x814)
+#define SPM_SLEEP_MD32_WAKEUP_EVENT_MASK	(SPM_BASE + 0x818)
+#define SPM_PCM_WDT_TIMER_VAL			(SPM_BASE + 0x824)
+#define SPM_PCM_WDT_TIMER_OUT			(SPM_BASE + 0x828)
+#define SPM_PCM_MD32_MAILBOX			(SPM_BASE + 0x830)
+#define SPM_PCM_MD32_IRQ			(SPM_BASE + 0x834)
+#define SPM_SLEEP_ISR_MASK			(SPM_BASE + 0x900)
+#define SPM_SLEEP_ISR_STATUS			(SPM_BASE + 0x904)
+#define SPM_SLEEP_ISR_RAW_STA			(SPM_BASE + 0x910)
+#define SPM_SLEEP_MD32_ISR_RAW_STA		(SPM_BASE + 0x914)
+#define SPM_SLEEP_WAKEUP_MISC			(SPM_BASE + 0x918)
+#define SPM_SLEEP_BUS_PROTECT_RDY		(SPM_BASE + 0x91c)
+#define SPM_SLEEP_SUBSYS_IDLE_STA		(SPM_BASE + 0x920)
+#define SPM_PCM_RESERVE				(SPM_BASE + 0xb00)
+#define SPM_PCM_RESERVE2			(SPM_BASE + 0xb04)
+#define SPM_PCM_FLAGS				(SPM_BASE + 0xb08)
+#define SPM_PCM_SRC_REQ				(SPM_BASE + 0xb0c)
+#define SPM_PCM_DEBUG_CON			(SPM_BASE + 0xb20)
+#define SPM_CA7_CPU0_IRQ_MASK			(SPM_BASE + 0xb30)
+#define SPM_CA7_CPU1_IRQ_MASK			(SPM_BASE + 0xb34)
+#define SPM_CA7_CPU2_IRQ_MASK			(SPM_BASE + 0xb38)
+#define SPM_CA7_CPU3_IRQ_MASK			(SPM_BASE + 0xb3c)
+#define SPM_CA15_CPU0_IRQ_MASK			(SPM_BASE + 0xb40)
+#define SPM_CA15_CPU1_IRQ_MASK			(SPM_BASE + 0xb44)
+#define SPM_CA15_CPU2_IRQ_MASK			(SPM_BASE + 0xb48)
+#define SPM_CA15_CPU3_IRQ_MASK			(SPM_BASE + 0xb4c)
+#define SPM_PCM_PASR_DPD_0			(SPM_BASE + 0xb60)
+#define SPM_PCM_PASR_DPD_1			(SPM_BASE + 0xb64)
+#define SPM_PCM_PASR_DPD_2			(SPM_BASE + 0xb68)
+#define SPM_PCM_PASR_DPD_3			(SPM_BASE + 0xb6c)
+#define SPM_SLEEP_CA7_WFI0_EN			(SPM_BASE + 0xf00)
+#define SPM_SLEEP_CA7_WFI1_EN			(SPM_BASE + 0xf04)
+#define SPM_SLEEP_CA7_WFI2_EN			(SPM_BASE + 0xf08)
+#define SPM_SLEEP_CA7_WFI3_EN			(SPM_BASE + 0xf0c)
+#define SPM_SLEEP_CA15_WFI0_EN			(SPM_BASE + 0xf10)
+#define SPM_SLEEP_CA15_WFI1_EN			(SPM_BASE + 0xf14)
+#define SPM_SLEEP_CA15_WFI2_EN			(SPM_BASE + 0xf18)
+#define SPM_SLEEP_CA15_WFI3_EN			(SPM_BASE + 0xf1c)
+
+#define SPM_PROJECT_CODE	0xb16
+
+#define SPM_REGWR_EN		(1U << 0)
+#define SPM_REGWR_CFG_KEY	(SPM_PROJECT_CODE << 16)
+
+#define SPM_CPU_PDN_DIS		(1U << 0)
+#define SPM_INFRA_PDN_DIS	(1U << 1)
+#define SPM_DDRPHY_PDN_DIS	(1U << 2)
+#define SPM_DUALVCORE_PDN_DIS	(1U << 3)
+#define SPM_PASR_DIS		(1U << 4)
+#define SPM_DPD_DIS		(1U << 5)
+#define SPM_SODI_DIS		(1U << 6)
+#define SPM_MEMPLL_RESET	(1U << 7)
+#define SPM_MAINPLL_PDN_DIS	(1U << 8)
+#define SPM_CPU_DVS_DIS		(1U << 9)
+#define SPM_CPU_DORMANT		(1U << 10)
+#define SPM_EXT_VSEL_GPIO103	(1U << 11)
+#define SPM_DDR_HIGH_SPEED	(1U << 12)
+#define SPM_OPT			(1U << 13)
+
+#define POWER_ON_VAL1_DEF	0x01011820
+#define PCM_FSM_STA_DEF		0x48490
+#define PCM_END_FSM_STA_DEF	0x08490
+#define PCM_END_FSM_STA_MASK	0x3fff0
+#define PCM_HANDSHAKE_SEND1	0xbeefbeef
+
+#define PCM_WDT_TIMEOUT		(30 * 32768)
+#define PCM_TIMER_MAX		(0xffffffff - PCM_WDT_TIMEOUT)
+
+#define CON0_PCM_KICK		(1U << 0)
+#define CON0_IM_KICK		(1U << 1)
+#define CON0_IM_SLEEP_DVS	(1U << 3)
+#define CON0_PCM_SW_RESET	(1U << 15)
+#define CON0_CFG_KEY		(SPM_PROJECT_CODE << 16)
+
+#define CON1_IM_SLAVE		(1U << 0)
+#define CON1_MIF_APBEN		(1U << 3)
+#define CON1_PCM_TIMER_EN	(1U << 5)
+#define CON1_IM_NONRP_EN	(1U << 6)
+#define CON1_PCM_WDT_EN		(1U << 8)
+#define CON1_PCM_WDT_WAKE_MODE	(1U << 9)
+#define CON1_SPM_SRAM_SLP_B	(1U << 10)
+#define CON1_SPM_SRAM_ISO_B	(1U << 11)
+#define CON1_EVENT_LOCK_EN	(1U << 12)
+#define CON1_CFG_KEY		(SPM_PROJECT_CODE << 16)
+
+#define PCM_PWRIO_EN_R0		(1U << 0)
+#define PCM_PWRIO_EN_R7		(1U << 7)
+#define PCM_RF_SYNC_R0		(1U << 16)
+#define PCM_RF_SYNC_R2		(1U << 18)
+#define PCM_RF_SYNC_R6		(1U << 22)
+#define PCM_RF_SYNC_R7		(1U << 23)
+
+#define CC_SYSCLK0_EN_0		(1U << 0)
+#define CC_SYSCLK0_EN_1		(1U << 1)
+#define CC_SYSCLK1_EN_0		(1U << 2)
+#define CC_SYSCLK1_EN_1		(1U << 3)
+#define CC_SYSSETTLE_SEL	(1U << 4)
+#define CC_LOCK_INFRA_DCM	(1U << 5)
+#define CC_SRCLKENA_MASK_0	(1U << 6)
+#define CC_CXO32K_RM_EN_MD1	(1U << 9)
+#define CC_CXO32K_RM_EN_MD2	(1U << 10)
+#define CC_CLKSQ1_SEL		(1U << 12)
+#define CC_DISABLE_DORM_PWR	(1U << 14)
+#define CC_MD32_DCM_EN		(1U << 18)
+
+#define WFI_OP_AND		1
+#define WFI_OP_OR		0
+
+#define WAKE_MISC_PCM_TIMER	(1U << 19)
+#define WAKE_MISC_CPU_WAKE	(1U << 20)
+
+/* define WAKE_SRC_XXX */
+#define WAKE_SRC_SPM_MERGE	(1 << 0)
+#define WAKE_SRC_KP		(1 << 2)
+#define WAKE_SRC_WDT		(1 << 3)
+#define WAKE_SRC_GPT		(1 << 4)
+#define WAKE_SRC_EINT		(1 << 6)
+#define WAKE_SRC_LOW_BAT	(1 << 9)
+#define WAKE_SRC_MD32		(1 << 10)
+#define WAKE_SRC_USB_CD		(1 << 14)
+#define WAKE_SRC_USB_PDN	(1 << 15)
+#define WAKE_SRC_AFE		(1 << 20)
+#define WAKE_SRC_THERM		(1 << 21)
+#define WAKE_SRC_SYSPWREQ	(1 << 24)
+#define WAKE_SRC_SEJ		(1 << 27)
+#define WAKE_SRC_ALL_MD32	(1 << 28)
+#define WAKE_SRC_CPU_IRQ	(1 << 29)
+
+#endif /* __SPM_H__ */
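The *_CFG_KEY definitions encode the SPM convention that a control-register write only takes effect when the project code is presented in the upper halfword. A hedged sketch of how boot code would unlock SPM register writes with these macros (the function is illustrative, not part of this patch):

    #include <mmio.h>
    #include <spm.h>

    /* Sketch: present the project-code key together with the write-enable
     * bit to allow subsequent SPM register writes. */
    static void sketch_spm_enable_regwr(void)
    {
        mmio_write_32(SPM_POWERON_CONFIG_SET,
                      SPM_REGWR_CFG_KEY | SPM_REGWR_EN);
    }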
diff --git a/plat/mediatek/mt6795/plat_delay_timer.c b/plat/mediatek/mt6795/plat_delay_timer.c
new file mode 100644
index 0000000..9df2867
--- /dev/null
+++ b/plat/mediatek/mt6795/plat_delay_timer.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <delay_timer.h>
+#include <platform_def.h>
+
+static uint32_t plat_get_timer_value(void)
+{
+	/*
+	 * Generic delay timer implementation expects the timer to be a down
+	 * counter. We apply the bitwise NOT operator to the tick values returned
+	 * by read_cntpct_el0() to simulate the down counter.
+	 */
+	return (uint32_t)(~read_cntpct_el0());
+}
+
+static const timer_ops_t plat_timer_ops = {
+	.get_timer_value	= plat_get_timer_value,
+	.clk_mult		= 1,
+	.clk_div		= SYS_COUNTER_FREQ_IN_MHZ,
+};
+
+void plat_delay_timer_init(void)
+{
+	timer_init(&plat_timer_ops);
+}
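Since plat_get_timer_value() returns the bitwise NOT of cntpct, the generic layer sees a down counter running at SYS_COUNTER_FREQ_IN_MHZ ticks per microsecond, and delays then go through the usual delay_timer API. A short usage sketch (the function name is illustrative):

    #include <delay_timer.h>
    #include <plat_private.h>

    /* Sketch: register the platform timer ops, then delay via the generic
     * delay_timer helpers. */
    void sketch_wait_100us(void)
    {
        plat_delay_timer_init();
        udelay(100);
    }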
diff --git a/plat/mediatek/mt6795/plat_mt_gic.c b/plat/mediatek/mt6795/plat_mt_gic.c
new file mode 100644
index 0000000..47a23df
--- /dev/null
+++ b/plat/mediatek/mt6795/plat_mt_gic.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <gicv2.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+const unsigned int g0_interrupt_array[] = {
+	PLAT_ARM_G0_IRQS
+};
+
+gicv2_driver_data_t arm_gic_data = {
+	.gicd_base = BASE_GICD_BASE,
+	.gicc_base = BASE_GICC_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+};
+
+void plat_mt_gic_driver_init(void)
+{
+	gicv2_driver_init(&arm_gic_data);
+}
+
+void plat_mt_gic_init(void)
+{
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+void plat_mt_gic_cpuif_enable(void)
+{
+	gicv2_cpuif_enable();
+}
+
+void plat_mt_gic_cpuif_disable(void)
+{
+	gicv2_cpuif_disable();
+}
+
+void plat_mt_gic_pcpu_init(void)
+{
+	gicv2_pcpu_distif_init();
+}
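These are thin wrappers over the common GICv2 driver; the expected boot-time order on the primary CPU is driver init followed by distributor and CPU-interface init. A sketch (illustrative function name):

    #include <plat_private.h>

    /* Sketch: primary-CPU boot path for the wrappers above. */
    void sketch_gic_boot_init(void)
    {
        plat_mt_gic_driver_init();  /* registers arm_gic_data */
        plat_mt_gic_init();         /* distributor + local CPU interface */
    }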
diff --git a/plat/mediatek/mt6795/plat_pm.c b/plat/mediatek/mt6795/plat_pm.c
new file mode 100644
index 0000000..bd47bd8
--- /dev/null
+++ b/plat/mediatek/mt6795/plat_pm.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <power_tracer.h>
+#include <psci.h>
+#include <scu.h>
+
+struct core_context {
+	unsigned long timer_data[8];
+	unsigned int count;
+	unsigned int rst;
+	unsigned int abt;
+	unsigned int brk;
+};
+
+struct cluster_context {
+	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
+};
+
+/*
+ * Top level structure to hold the complete context of a multi cluster system
+ */
+struct system_context {
+	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
+};
+
+/*
+ * Top level structure which encapsulates the context of the entire system
+ */
+static struct system_context dormant_data[1];
+
+static inline struct cluster_context *system_cluster(
+						struct system_context *system,
+						uint32_t clusterid)
+{
+	return &system->cluster[clusterid];
+}
+
+static inline struct core_context *cluster_core(struct cluster_context *cluster,
+						uint32_t cpuid)
+{
+	return &cluster->core[cpuid];
+}
+
+static struct cluster_context *get_cluster_data(unsigned long mpidr)
+{
+	uint32_t clusterid;
+
+	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	return system_cluster(dormant_data, clusterid);
+}
+
+static struct core_context *get_core_data(unsigned long mpidr)
+{
+	struct cluster_context *cluster;
+	uint32_t cpuid;
+
+	cluster = get_cluster_data(mpidr);
+	cpuid = mpidr & MPIDR_CPU_MASK;
+
+	return cluster_core(cluster, cpuid);
+}
+
+static void mt_save_generic_timer(unsigned long *container)
+{
+	uint64_t ctl;
+	uint64_t val;
+
+	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
+			 "mrs	%x1, cntp_cval_el0\n\t"
+			 "stp	%x0, %x1, [%2, #0]"
+			 : "=&r" (ctl), "=&r" (val)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
+			 "mrs	%x1, cntp_ctl_el0\n\t"
+			 "stp	%x0, %x1, [%2, #16]"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
+			 "mrs	%x1, cntv_ctl_el0\n\t"
+			 "stp	%x0, %x1, [%2, #32]"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+}
+
+static void mt_restore_generic_timer(unsigned long *container)
+{
+	uint64_t ctl;
+	uint64_t val;
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
+			 "msr	cntkctl_el1, %x0\n\t"
+			 "msr	cntp_cval_el0, %x1"
+			 : "=&r" (ctl), "=&r" (val)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
+			 "msr	cntp_tval_el0, %x0\n\t"
+			 "msr	cntp_ctl_el0, %x1"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
+			 "msr	cntv_tval_el0, %x0\n\t"
+			 "msr	cntv_ctl_el0, %x1"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+}
+
+static void stop_generic_timer(void)
+{
+	/*
+	 * Disable the timer and mask the irq to prevent
+	 * spurious interrupts on this cpu interface. It
+	 * will bite us when we come back if we don't. It
+	 * will be replayed on the inbound cluster.
+	 */
+	uint64_t cntpctl = read_cntp_ctl_el0();
+
+	write_cntp_ctl_el0(clr_cntp_ctl_enable(cntpctl));
+}
+
+static void mt_cpu_save(unsigned long mpidr)
+{
+	struct core_context *core;
+
+	core = get_core_data(mpidr);
+	mt_save_generic_timer(core->timer_data);
+
+	/* disable timer irq; the upper layer should enable it again. */
+	stop_generic_timer();
+}
+
+static void mt_cpu_restore(unsigned long mpidr)
+{
+	struct core_context *core;
+
+	core = get_core_data(mpidr);
+	mt_restore_generic_timer(core->timer_data);
+}
+
+static void mt_platform_save_context(unsigned long mpidr)
+{
+	/* mcusys_save_context: */
+	mt_cpu_save(mpidr);
+}
+
+static void mt_platform_restore_context(unsigned long mpidr)
+{
+	/* mcusys_restore_context: */
+	mt_cpu_restore(mpidr);
+}
+
+/*******************************************************************************
+* Private function which is used to determine if any platform actions
+* should be performed for the specified affinity instance given its
+* state. Nothing needs to be done if the 'state' is not off or if this is not
+* the highest affinity level which will enter the 'state'.
+*******************************************************************************/
+static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
+{
+	unsigned int max_phys_off_afflvl;
+
+	assert(afflvl <= MPIDR_AFFLVL2);
+
+	if (state != PSCI_STATE_OFF)
+		return -EAGAIN;
+
+	/*
+	 * Find the highest affinity level which will be suspended and postpone
+	 * all the platform specific actions until that level is hit.
+	 */
+	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
+	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+	if (afflvl != max_phys_off_afflvl)
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to enter
+ * standby.
+ ******************************************************************************/
+static void plat_affinst_standby(unsigned int power_state)
+{
+	unsigned int target_afflvl;
+
+	/* Sanity check the requested state */
+	target_afflvl = psci_get_pstate_afflvl(power_state);
+
+	/*
+	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
+	 * on the MTK_platform. Ignore any other affinity level.
+	 */
+	if (target_afflvl == MPIDR_AFFLVL0) {
+		/*
+		 * Enter standby state. dsb is good practice before using wfi
+		 * to enter low power states.
+		 */
+		dsb();
+		wfi();
+	}
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * on. The level and mpidr determine the affinity instance.
+ ******************************************************************************/
+static int plat_affinst_on(unsigned long mpidr,
+		    unsigned long sec_entrypoint,
+		    unsigned int afflvl,
+		    unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned long cpu_id;
+	unsigned long cluster_id;
+	uintptr_t rv;
+
+	/*
+	 * It's possible to turn on only affinity level 0 i.e. a cpu
+	 * on the MTK_platform. Ignore any other affinity level.
+	 */
+	if (afflvl != MPIDR_AFFLVL0)
+		return rc;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, sec_entrypoint);
+	INFO("mt_on[%ld:%ld], entry %x\n",
+		cluster_id, cpu_id, mmio_read_32(rv));
+
+	return rc;
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * off. The level and mpidr determine the affinity instance. The 'state' arg.
+ * allows the platform to decide whether the cluster is being turned off and
+ * take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would be
+ * wise to flush any write to a global to prevent unpredictable results.
+ ******************************************************************************/
+static void plat_affinst_off(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_mt_gic_cpuif_disable();
+
+	trace_power_flow(mpidr, CPU_DOWN);
+
+	if (afflvl != MPIDR_AFFLVL0) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+
+		trace_power_flow(mpidr, CLUSTER_DOWN);
+	}
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be
+ * suspended. The level and mpidr determine the affinity instance. The 'state'
+ * arg. allows the platform to decide whether the cluster is being turned off
+ * and take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would be
+ * wise to flush any write to a global to prevent unpredictable results.
+ ******************************************************************************/
+static void plat_affinst_suspend(unsigned long sec_entrypoint,
+			  unsigned int afflvl,
+			  unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+	unsigned long cluster_id;
+	unsigned long cpu_id;
+	uintptr_t rv;
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, sec_entrypoint);
+
+	if (afflvl >= MPIDR_AFFLVL0)
+		mt_platform_save_context(mpidr);
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+		disable_scu(mpidr);
+
+		trace_power_flow(mpidr, CLUSTER_SUSPEND);
+	}
+
+	if (afflvl >= MPIDR_AFFLVL2) {
+		/* Prevent interrupts from spuriously waking up this cpu */
+		plat_mt_gic_cpuif_disable();
+	}
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after being turned off earlier. The level and mpidr determine the affinity
+ * instance. The 'state' arg. allows the platform to decide whether the cluster
+ * was turned off prior to wakeup and do what's necessary to set it up
+ * correctly.
+ ******************************************************************************/
+static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		enable_scu(mpidr);
+
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+		trace_power_flow(mpidr, CLUSTER_UP);
+	}
+
+	/* Enable the gic cpu interface */
+	plat_mt_gic_cpuif_enable();
+	plat_mt_gic_pcpu_init();
+	trace_power_flow(mpidr, CPU_UP);
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after having been suspended earlier. The level and mpidr determine the
+ * affinity instance.
+ ******************************************************************************/
+static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	if (afflvl >= MPIDR_AFFLVL2) {
+		/* Enable the gic cpu interface */
+		plat_mt_gic_init();
+		plat_mt_gic_cpuif_enable();
+	}
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		enable_scu(mpidr);
+
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+		trace_power_flow(mpidr, CLUSTER_UP);
+	}
+
+	if (afflvl >= MPIDR_AFFLVL0)
+		mt_platform_restore_context(mpidr);
+
+	plat_mt_gic_pcpu_init();
+}
+
+static unsigned int plat_get_sys_suspend_power_state(void)
+{
+	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
+	return psci_make_powerstate(0, 1, 2);
+}
+
+/*******************************************************************************
+ * MTK handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 plat_system_off(void)
+{
+	INFO("MTK System Off\n");
+	wfi();
+	ERROR("MTK System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 plat_system_reset(void)
+{
+	/* Write the System Configuration Control Register */
+	INFO("MTK System Reset\n");
+
+	mmio_clrbits_32(MTK_WDT_BASE,
+		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
+	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
+	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);
+
+	wfi();
+	ERROR("MTK System Reset: operation not handled.\n");
+	panic();
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const plat_pm_ops_t plat_plat_pm_ops = {
+	.affinst_standby		= plat_affinst_standby,
+	.affinst_on			= plat_affinst_on,
+	.affinst_off			= plat_affinst_off,
+	.affinst_suspend		= plat_affinst_suspend,
+	.affinst_on_finish		= plat_affinst_on_finish,
+	.affinst_suspend_finish		= plat_affinst_suspend_finish,
+	.system_off			= plat_system_off,
+	.system_reset			= plat_system_reset,
+	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops & initialize the mtk_platform power
+ * controller
+ ******************************************************************************/
+int platform_setup_pm(const plat_pm_ops_t **plat_ops)
+{
+	*plat_ops = &plat_plat_pm_ops;
+	return 0;
+}
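The inline assembly in mt_save_generic_timer() stores the timer state as 64-bit pairs at byte offsets 0, 16 and 32 of core->timer_data[]. For reference, a C equivalent that preserves that slot layout, assuming the usual read_*() system-register accessors from arch_helpers.h are available for these registers (sketch only, not part of the patch):

    #include <arch_helpers.h>

    /* Sketch: same registers and same timer_data[] layout as the inline
     * assembly in mt_save_generic_timer(). */
    static void sketch_save_generic_timer(unsigned long *container)
    {
        container[0] = read_cntkctl_el1();    /* byte offset 0  */
        container[1] = read_cntp_cval_el0();  /* byte offset 8  */
        container[2] = read_cntp_tval_el0();  /* byte offset 16 */
        container[3] = read_cntp_ctl_el0();   /* byte offset 24 */
        container[4] = read_cntv_tval_el0();  /* byte offset 32 */
        container[5] = read_cntv_ctl_el0();   /* byte offset 40 */
    }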
diff --git a/plat/mediatek/mt6795/plat_topology.c b/plat/mediatek/mt6795/plat_topology.c
new file mode 100644
index 0000000..0a0cf8d
--- /dev/null
+++ b/plat/mediatek/mt6795/plat_topology.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
+{
+	/* Report 1 instance at levels higher than the cluster level */
+	if (aff_lvl > MPIDR_AFFLVL1)
+		return PLATFORM_SYSTEM_COUNT;
+
+	if (aff_lvl == MPIDR_AFFLVL1)
+		return PLATFORM_CLUSTER_COUNT;
+
+	return mpidr & 0x100 ? PLATFORM_CLUSTER1_CORE_COUNT :
+			       PLATFORM_CLUSTER0_CORE_COUNT;
+}
+
+unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
+{
+	return aff_lvl <= MPIDR_AFFLVL2 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
+}
+
+int mt_setup_topology(void)
+{
+	/* [TODO] Make topology configurable via SCC */
+	return 0;
+}
diff --git a/plat/mediatek/mt6795/platform.mk b/plat/mediatek/mt6795/platform.mk
new file mode 100644
index 0000000..4ebc78e
--- /dev/null
+++ b/plat/mediatek/mt6795/platform.mk
@@ -0,0 +1,68 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+MTK_PLAT		:=	plat/mediatek
+MTK_PLAT_SOC		:=	${MTK_PLAT}/${PLAT}
+
+# Add OEM customized code
+OEMS				:= true
+MTK_SIP_KERNEL_BOOT_ENABLE := 1
+
+
+ifneq (${OEMS},none)
+  OEMS_INCLUDES		:= -I${MTK_PLAT}/common/custom/
+  OEMS_SOURCES		:=	${MTK_PLAT}/common/custom/oem_svc.c
+endif
+
+PLAT_INCLUDES		:=	-I${MTK_PLAT}/common/				\
+				-I${MTK_PLAT}/common/drivers/uart			\
+				-I${MTK_PLAT_SOC}/				\
+				-I${MTK_PLAT_SOC}/drivers/timer/			\
+				-I${MTK_PLAT_SOC}/include/					\
+				-Iinclude/plat/arm/common/					\
+				-Iinclude/common/tbbr/					\
+				${OEMS_INCLUDES}
+
+PLAT_BL_COMMON_SOURCES	:=	lib/aarch64/xlat_tables.c			\
+				plat/common/plat_gic.c
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c				\
+				drivers/delay_timer/generic_delay_timer.c	\
+				drivers/arm/gic/common/gic_common.c		\
+				drivers/arm/gic/v2/gicv2_main.c			\
+				drivers/arm/gic/v2/gicv2_helpers.c		\
+				plat/common/plat_gicv2.c			\
+				drivers/console/console.S			\
+				drivers/delay_timer/delay_timer.c		\
+				lib/cpus/aarch64/cortex_a53.S			\
+				${MTK_PLAT_SOC}/bl31_plat_setup.c		\
+				${MTK_PLAT_SOC}/plat_mt_gic.c			\
+				${MTK_PLAT}/common/mtk_sip_svc.c		\
+				${MTK_PLAT}/common/mtk_plat_common.c		\
+				${MTK_PLAT}/common/drivers/uart/8250_console.S		\
+				${MTK_PLAT_SOC}/aarch64/plat_helpers.S		\
+				${MTK_PLAT_SOC}/drivers/timer/mt_cpuxgpt.c	\
+				${MTK_PLAT_SOC}/plat_delay_timer.c		\
+				${MTK_PLAT_SOC}/plat_pm.c			\
+				${MTK_PLAT_SOC}/plat_topology.c			\
+				${MTK_PLAT_SOC}/power_tracer.c			\
+				${MTK_PLAT_SOC}/scu.c		\
+				${OEMS_SOURCES}
+
+# Flag used by the MTK_platform port to determine the version of ARM GIC
+# architecture to use for interrupt management in EL3.
+ARM_GIC_ARCH		:=	2
+$(eval $(call add_define,ARM_GIC_ARCH))
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_826319	:=	1
+ERRATA_A53_836870	:=	1
+
+# Indicate that the reset vector address can be programmed
+PROGRAMMABLE_RESET_ADDRESS	:=	1
+
+$(eval $(call add_define,MTK_SIP_KERNEL_BOOT_ENABLE))
+
diff --git a/plat/mediatek/mt6795/power_tracer.c b/plat/mediatek/mt6795/power_tracer.c
new file mode 100644
index 0000000..f5208d0
--- /dev/null
+++ b/plat/mediatek/mt6795/power_tracer.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <power_tracer.h>
+
+#define trace_log(...)  INFO("psci: " __VA_ARGS__)
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode)
+{
+	switch (mode) {
+	case CPU_UP:
+		trace_log("core %ld:%ld ON\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CPU_DOWN:
+		trace_log("core %ld:%ld OFF\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CPU_SUSPEND:
+		trace_log("core %ld:%ld SUSPEND\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CLUSTER_UP:
+		trace_log("cluster %ld ON\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	case CLUSTER_DOWN:
+		trace_log("cluster %ld OFF\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	case CLUSTER_SUSPEND:
+		trace_log("cluster %ld SUSPEND\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	default:
+		trace_log("unknown power mode\n");
+		break;
+	}
+}
diff --git a/plat/mediatek/mt6795/scu.c b/plat/mediatek/mt6795/scu.c
new file mode 100644
index 0000000..9add19e
--- /dev/null
+++ b/plat/mediatek/mt6795/scu.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <mcucfg.h>
+#include <mmio.h>
+
+void disable_scu(unsigned long mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg,
+			MP1_ACINACTM);
+	else
+		mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp0_axi_config,
+			MP0_ACINACTM);
+}
+
+void enable_scu(unsigned long mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg,
+			MP1_ACINACTM);
+	else
+		mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp0_axi_config,
+			MP0_ACINACTM);
+}
diff --git a/plat/mediatek/mt8173/aarch64/plat_helpers.S b/plat/mediatek/mt8173/aarch64/plat_helpers.S
new file mode 100644
index 0000000..3e0b4f4
--- /dev/null
+++ b/plat/mediatek/mt8173/aarch64/plat_helpers.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <mt8173_def.h>
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_report_exception
+	.globl	platform_is_primary_cpu
+	.globl  plat_my_core_pos
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset, e.g.
+	 * mark the cpu's presence, mechanism to place it in a
+	 * holding pen etc.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	/* MT8173 Oak does not do cold boot for secondary CPU */
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+func platform_is_primary_cpu
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #MT8173_PRIMARY_CPU
+	cset	x0, eq
+	ret
+endfunc platform_is_primary_cpu
+
+#if !ENABLE_PLAT_COMPAT
+	/* -----------------------------------------------------
+	 * unsigned int plat_my_core_pos(void);
+	 *
+	 * result: CorePos = CoreId + (ClusterId << 2)
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs     x0, mpidr_el1
+	and     x1, x0, #MPIDR_CPU_MASK
+	and     x0, x0, #MPIDR_CLUSTER_MASK
+	add     x0, x1, x0, LSR #6
+	ret
+endfunc plat_my_core_pos
+#endif
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0 - x4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, MT8173_UART0_BASE
+	mov_imm	x1, MT8173_UART_CLOCK
+	mov_imm	x2, MT8173_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(void)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm x1, MT8173_UART0_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
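plat_my_core_pos computes CorePos = CoreId + (ClusterId << 2): the cluster field sits at MPIDR[15:8], so shifting the masked value right by 6 leaves cluster_id * 4. The same arithmetic in C (sketch only, not part of the patch):

    #include <arch.h>

    /* Sketch: CorePos = CoreId + (ClusterId << 2), matching the assembly. */
    static unsigned int sketch_core_pos(unsigned long mpidr)
    {
        return (unsigned int)((mpidr & MPIDR_CPU_MASK) +
                              ((mpidr & MPIDR_CLUSTER_MASK) >> 6));
    }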
diff --git a/plat/mediatek/mt8173/aarch64/platform_common.c b/plat/mediatek/mt8173/aarch64/platform_common.c
new file mode 100644
index 0000000..996344d
--- /dev/null
+++ b/plat/mediatek/mt8173/aarch64/platform_common.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <debug.h>
+#include <mt8173_def.h>
+#include <platform_def.h>
+#include <utils.h>
+#include <xlat_tables.h>
+
+static const int cci_map[] = {
+	PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX,
+	PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+/* Table of regions to map using the MMU.  */
+const mmap_region_t plat_mmap[] = {
+	/* for TF text, RO, RW */
+	MAP_REGION_FLAT(TZRAM_BASE, TZRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MTK_DEV_RNG0_BASE, MTK_DEV_RNG0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MTK_DEV_RNG1_BASE, MTK_DEV_RNG1_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{ 0 }
+
+};
+
+/*******************************************************************************
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialize the mmu, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void plat_configure_mmu_el ## _el(unsigned long total_base,	\
+					  unsigned long total_size,	\
+					  unsigned long ro_start,	\
+					  unsigned long ro_limit,	\
+					  unsigned long coh_start,	\
+					  unsigned long coh_limit)	\
+	{								\
+		mmap_add_region(total_base, total_base,			\
+				total_size,				\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(plat_mmap);					\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el ## _el(0);				\
+	}
+
+/* Define EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(3)
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+void plat_cci_init(void)
+{
+	/* Initialize CCI driver */
+	cci_init(PLAT_MT_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+}
+
+void plat_cci_enable(void)
+{
+	/*
+	 * Enable CCI coherency for this cluster.
+	 * No need for locks as no other cpu is active at the moment.
+	 */
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
+
+void plat_cci_disable(void)
+{
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
diff --git a/plat/mediatek/mt8173/bl31_plat_setup.c b/plat/mediatek/mt8173/bl31_plat_setup.c
new file mode 100644
index 0000000..7b29307
--- /dev/null
+++ b/plat/mediatek/mt8173/bl31_plat_setup.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <bl_common.h>
+#include <common_def.h>
+#include <console.h>
+#include <debug.h>
+#include <generic_delay_timer.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <mtcmos.h>
+#include <plat_arm.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <spm.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+unsigned long __RO_START__;
+unsigned long __RO_END__;
+
+/*
+ * The next 3 constants identify the extents of the code, RO data region and the
+ * limit of the BL31 image.  These addresses are used by the MMU setup code and
+ * therefore they must be page-aligned.  It is the responsibility of the linker
+ * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
+ * refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+static entry_point_info_t bl32_ep_info;
+static entry_point_info_t bl33_ep_info;
+
+static void platform_setup_cpu(void)
+{
+	/* turn off all the little core's power except cpu 0 */
+	mtcmos_little_cpu_off();
+
+	/* setup big cores */
+	mmio_write_32((uintptr_t)&mt8173_mcucfg->mp1_config_res,
+		MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK |
+		MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK);
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_miscdbg, MP1_AINACTS);
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_clkenm_div,
+		MP1_SW_CG_GEN);
+	mmio_clrbits_32((uintptr_t)&mt8173_mcucfg->mp1_rst_ctl,
+		MP1_L2RSTDISABLE);
+
+	/* set big cores arm64 boot mode */
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_cpucfg,
+		MP1_CPUCFG_64BIT);
+
+	/* set LITTLE cores arm64 boot mode */
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp0_rv_addr[0].rv_addr_hw,
+		MP0_CPUCFG_64BIT);
+
+	/* enable dcm control */
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->bus_fabric_dcm_ctrl,
+		ADB400_GRP_DCM_EN | CCI400_GRP_DCM_EN | ADBCLK_GRP_DCM_EN |
+		EMICLK_GRP_DCM_EN | ACLK_GRP_DCM_EN | L2C_IDLE_DCM_EN |
+		INFRACLK_PSYS_DYNAMIC_CG_EN);
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->l2c_sram_ctrl,
+		L2C_SRAM_DCM_EN);
+	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->cci_clk_ctrl,
+		MCU_BUS_DCM_EN);
+}
+
+static void platform_setup_sram(void)
+{
+	/* protect BL31 memory from non-secure read/write access */
+	mmio_write_32(SRAMROM_SEC_ADDR, (uint32_t)(BL31_END + 0x3ff) & 0x3fc00);
+	mmio_write_32(SRAMROM_SEC_CTRL, 0x10000ff9);
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 early platform setup. Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
+ * are lost (potentially). This needs to be done before the MMU is initialized
+ * so that the memory layout can be used while creating page tables.
+ * BL2 has flushed this information to memory, so we are guaranteed to pick up
+ * good data.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+			       void *plat_params_from_bl2)
+{
+	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);
+
+	VERBOSE("bl31_setup\n");
+
+	assert(from_bl2 != NULL);
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+
+	bl32_ep_info = *from_bl2->bl32_ep_info;
+	bl33_ep_info = *from_bl2->bl33_ep_info;
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	platform_setup_cpu();
+	platform_setup_sram();
+
+	generic_delay_timer_init();
+
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+
+#if ENABLE_PLAT_COMPAT
+	/* Topologies are best known to the platform. */
+	mt_setup_topology();
+#endif
+
+	/* Initialize spm at boot time */
+	spm_boot_init();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the mmu in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	plat_cci_init();
+	plat_cci_enable();
+
+	plat_configure_mmu_el3(BL31_RO_BASE,
+			       BL_COHERENT_RAM_END - BL31_RO_BASE,
+			       BL31_RO_BASE,
+			       BL31_RO_LIMIT,
+			       BL_COHERENT_RAM_BASE,
+			       BL_COHERENT_RAM_END);
+}
+
diff --git a/plat/mediatek/mt8173/drivers/crypt/crypt.c b/plat/mediatek/mt8173/drivers/crypt/crypt.c
new file mode 100644
index 0000000..74d7702
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/crypt/crypt.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <mtk_sip_svc.h>
+
+#define crypt_read32(offset)	\
+	mmio_read_32((uintptr_t)(CRYPT_BASE+((offset) * 4)))
+
+#define crypt_write32(offset, value)    \
+	mmio_write_32((uintptr_t)(CRYPT_BASE + ((offset) * 4)), (uint32_t)value)
+
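Note that the crypt_read32()/crypt_write32() macros above take register indices in 32-bit words, not byte offsets; the index is multiplied by 4 before being added to CRYPT_BASE. A small standalone sketch of that address arithmetic, with a hypothetical base address (the real CRYPT_BASE comes from mt8173_def.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base address; the real CRYPT_BASE comes from mt8173_def.h. */
#define EXAMPLE_CRYPT_BASE	0x10210000u

/* Word-indexed register numbers, as in crypt.c. */
#define REG_TRIG	256
#define REG_STAT	512

/* Same arithmetic as crypt_read32()/crypt_write32(). */
static uint32_t crypt_reg_addr(uint32_t word_index)
{
	return EXAMPLE_CRYPT_BASE + word_index * 4;
}

int main(void)
{
	printf("REG_TRIG -> 0x%x\n", (unsigned)crypt_reg_addr(REG_TRIG));
	printf("REG_STAT -> 0x%x\n", (unsigned)crypt_reg_addr(REG_STAT));
	return 0;
}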
+#define GET_L32(x) ((uint32_t)(x & 0xffffffff))
+#define GET_H32(x) ((uint32_t)((x >> 32) & 0xffffffff))
+
+#define REG_INIT 0
+#define REG_MSC 4
+#define REG_TRIG 256
+#define REG_STAT 512
+#define REG_CLR 513
+#define REG_INT 514
+#define REG_P68 768
+#define REG_P69 769
+#define REG_P70 770
+#define REG_P71 771
+#define REG_P72 772
+#define REG_D20 820
+#define KEY_SIZE 160
+#define KEY_LEN 40
+
+/* Wait until crypt is completed */
+uint64_t crypt_wait(void)
+{
+	crypt_write32(REG_TRIG, 0);
+	while (crypt_read32(REG_STAT) == 0)
+		;
+	udelay(100);
+	crypt_write32(REG_CLR, crypt_read32(REG_STAT));
+	crypt_write32(REG_INT, 0);
+	return MTK_SIP_E_SUCCESS;
+}
+
+static uint32_t record[4];
+/* Copy encrypted key to crypt engine */
+uint64_t crypt_set_hdcp_key_ex(uint64_t x1, uint64_t x2, uint64_t x3)
+{
+	uint32_t i = (uint32_t)x1;
+	uint32_t j = 0;
+
+	if (i > KEY_LEN)
+		return MTK_SIP_E_INVALID_PARAM;
+
+	if (i < KEY_LEN) {
+		crypt_write32(REG_MSC, 0x80ff3800);
+		crypt_write32(REG_INIT, 0);
+		crypt_write32(REG_INIT, 0xF);
+		crypt_write32(REG_CLR, 1);
+		crypt_write32(REG_INT, 0);
+
+		crypt_write32(REG_P68, 0x70);
+		crypt_write32(REG_P69, 0x1C0);
+		crypt_write32(REG_P70, 0x30);
+		crypt_write32(REG_P71, 0x4);
+		crypt_wait();
+
+		crypt_write32(REG_D20 + 4 * i, GET_L32(x2));
+		crypt_write32(REG_D20 + 4 * i + 1, GET_H32(x2));
+		crypt_write32(REG_D20 + 4 * i + 2, GET_L32(x3));
+		crypt_write32(REG_D20 + 4 * i + 3, GET_H32(x3));
+
+		crypt_write32(REG_P69, 0);
+		crypt_write32(REG_P68, 0x20);
+		crypt_write32(REG_P71, 0x34 + 4 * i);
+		crypt_write32(REG_P72, 0x34 + 4 * i);
+		crypt_wait();
+
+		for (j = 0; j < 4; j++) {
+			crypt_write32(REG_P68, 0x71);
+			crypt_write32(REG_P69, 0x34 + 4 * i + j);
+			crypt_write32(REG_P70, record[j]);
+			crypt_wait();
+		}
+	}
+	/* Prepare data for next iteration */
+	record[0] = GET_L32(x2);
+	record[1] = GET_H32(x2);
+	record[2] = GET_L32(x3);
+	record[3] = GET_H32(x3);
+	return MTK_SIP_E_SUCCESS;
+}
+
+/* Set key to hdcp */
+uint64_t crypt_set_hdcp_key_num(uint32_t num)
+{
+	if (num > KEY_LEN)
+		return MTK_SIP_E_INVALID_PARAM;
+
+	crypt_write32(REG_P68, 0x6A);
+	crypt_write32(REG_P69, 0x34 + 4 * num);
+	crypt_wait();
+	return MTK_SIP_E_SUCCESS;
+}
+
+/* Clear key in crypt engine */
+uint64_t crypt_clear_hdcp_key(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < KEY_SIZE; i++)
+		crypt_write32(REG_D20 + i, 0);
+	return MTK_SIP_E_SUCCESS;
+}
diff --git a/plat/mediatek/mt8173/drivers/crypt/crypt.h b/plat/mediatek/mt8173/drivers/crypt/crypt.h
new file mode 100644
index 0000000..3bbc168
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/crypt/crypt.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __CRYPT_H__
+#define __CRYPT_H__
+
+#include <stdint.h>
+
+/* crypt function prototype */
+uint64_t crypt_set_hdcp_key_ex(uint64_t x1, uint64_t x2, uint64_t x3);
+uint64_t crypt_set_hdcp_key_num(uint32_t num);
+uint64_t crypt_clear_hdcp_key(void);
+
+#endif /* __CRYPT_H__ */
diff --git a/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.c b/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.c
new file mode 100644
index 0000000..25f2509
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <mtcmos.h>
+#include <spm.h>
+#include <spm_mcdi.h>
+
+enum {
+	SRAM_ISOINT_B	= 1U << 6,
+	SRAM_CKISO	= 1U << 5,
+	PWR_CLK_DIS	= 1U << 4,
+	PWR_ON_2ND	= 1U << 3,
+	PWR_ON		= 1U << 2,
+	PWR_ISO		= 1U << 1,
+	PWR_RST_B	= 1U << 0
+};
+
+enum {
+	L1_PDN_ACK	= 1U << 8,
+	L1_PDN		= 1U << 0
+};
+
+enum {
+	LITTLE_CPU3	= 1U << 12,
+	LITTLE_CPU2	= 1U << 11,
+	LITTLE_CPU1	= 1U << 10,
+};
+
+enum {
+	SRAM_PDN           = 0xf << 8,
+	DIS_SRAM_ACK       = 0x1 << 12,
+	AUD_SRAM_ACK       = 0xf << 12,
+};
+
+enum {
+	DIS_PWR_STA_MASK   = 0x1 << 3,
+	AUD_PWR_STA_MASK   = 0x1 << 24,
+};
+
+#define SPM_VDE_PWR_CON				0x0210
+#define SPM_MFG_PWR_CON				0x0214
+#define SPM_VEN_PWR_CON				0x0230
+#define SPM_ISP_PWR_CON				0x0238
+#define SPM_DIS_PWR_CON				0x023c
+#define SPM_VEN2_PWR_CON			0x0298
+#define SPM_AUDIO_PWR_CON			0x029c
+#define SPM_MFG_2D_PWR_CON			0x02c0
+#define SPM_MFG_ASYNC_PWR_CON			0x02c4
+#define SPM_USB_PWR_CON				0x02cc
+
+#define MTCMOS_CTRL_SUCCESS			0
+#define MTCMOS_CTRL_ERROR			-1
+
+#define MTCMOS_CTRL_EN				(0x1 << 18)
+
+#define VDE_PWR_ON				0
+#define VEN_PWR_ON				1
+#define ISP_PWR_ON				2
+#define DIS_PWR_ON				3
+#define VEN2_PWR_ON				4
+#define AUDIO_PWR_ON				5
+#define MFG_ASYNC_PWR_ON			6
+#define MFG_2D_PWR_ON				7
+#define MFG_PWR_ON				8
+#define USB_PWR_ON				9
+
+#define VDE_PWR_OFF				10
+#define VEN_PWR_OFF				11
+#define ISP_PWR_OFF				12
+#define DIS_PWR_OFF				13
+#define VEN2_PWR_OFF				14
+#define AUDIO_PWR_OFF				15
+#define MFG_ASYNC_PWR_OFF			16
+#define MFG_2D_PWR_OFF				17
+#define MFG_PWR_OFF				18
+#define USB_PWR_OFF				19
+
+#define VDE_PWR_CON_PWR_STA			7
+#define VEN_PWR_CON_PWR_STA			21
+#define ISP_PWR_CON_PWR_STA			5
+#define DIS_PWR_CON_PWR_STA			3
+#define VEN2_PWR_CON_PWR_STA			20
+#define AUDIO_PWR_CON_PWR_STA			24
+#define MFG_ASYNC_PWR_CON_PWR_STA		23
+#define MFG_2D_PWR_CON_PWR_STA			22
+#define MFG_PWR_CON_PWR_STA			4
+#define USB_PWR_CON_PWR_STA			25
+
+/*
+ * Time out if the ack is not signalled within the polling budget below
+ * (MTCMOS_ACK_POLLING_MAX_COUNT polls of MTCMOS_ACK_POLLING_INTERVAL us).
+ * According to the designer, one mtcmos operation should take around 10us.
+ */
+#define MTCMOS_ACK_POLLING_MAX_COUNT			10000
+#define MTCMOS_ACK_POLLING_INTERVAL			10
+
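With the two constants above, the ack polling loop in wait_mtcmos_ack() below accumulates at most MTCMOS_ACK_POLLING_MAX_COUNT delays of MTCMOS_ACK_POLLING_INTERVAL microseconds before giving up, i.e. roughly 100 ms of udelay() time, not counting register-read overhead. A quick worked check:

#include <stdio.h>

#define MTCMOS_ACK_POLLING_MAX_COUNT	10000
#define MTCMOS_ACK_POLLING_INTERVAL	10	/* microseconds per udelay() */

int main(void)
{
	/* Total udelay() budget before wait_mtcmos_ack() reports an error. */
	unsigned int budget_us =
		MTCMOS_ACK_POLLING_MAX_COUNT * MTCMOS_ACK_POLLING_INTERVAL;

	printf("polling budget: %u us (%u ms)\n", budget_us, budget_us / 1000);
	return 0;
}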
+static void mtcmos_ctrl_little_off(unsigned int linear_id)
+{
+	uint32_t reg_pwr_con;
+	uint32_t reg_l1_pdn;
+	uint32_t bit_cpu;
+
+	switch (linear_id) {
+	case 1:
+		reg_pwr_con = SPM_CA7_CPU1_PWR_CON;
+		reg_l1_pdn = SPM_CA7_CPU1_L1_PDN;
+		bit_cpu = LITTLE_CPU1;
+		break;
+	case 2:
+		reg_pwr_con = SPM_CA7_CPU2_PWR_CON;
+		reg_l1_pdn = SPM_CA7_CPU2_L1_PDN;
+		bit_cpu = LITTLE_CPU2;
+		break;
+	case 3:
+		reg_pwr_con = SPM_CA7_CPU3_PWR_CON;
+		reg_l1_pdn = SPM_CA7_CPU3_L1_PDN;
+		bit_cpu = LITTLE_CPU3;
+		break;
+	default:
+		/* should never get here */
+		return;
+	}
+
+	/* enable register control */
+	mmio_write_32(SPM_POWERON_CONFIG_SET,
+			(SPM_PROJECT_CODE << 16) | (1U << 0));
+
+	mmio_setbits_32(reg_pwr_con, PWR_ISO);
+	mmio_setbits_32(reg_pwr_con, SRAM_CKISO);
+	mmio_clrbits_32(reg_pwr_con, SRAM_ISOINT_B);
+	mmio_setbits_32(reg_l1_pdn, L1_PDN);
+
+	while (!(mmio_read_32(reg_l1_pdn) & L1_PDN_ACK))
+		continue;
+
+	mmio_clrbits_32(reg_pwr_con, PWR_RST_B);
+	mmio_setbits_32(reg_pwr_con, PWR_CLK_DIS);
+	mmio_clrbits_32(reg_pwr_con, PWR_ON);
+	mmio_clrbits_32(reg_pwr_con, PWR_ON_2ND);
+
+	while ((mmio_read_32(SPM_PWR_STATUS) & bit_cpu) ||
+	       (mmio_read_32(SPM_PWR_STATUS_2ND) & bit_cpu))
+		continue;
+}
+
+void mtcmos_little_cpu_off(void)
+{
+	/* turn off little cpu 1 - 3 */
+	mtcmos_ctrl_little_off(1);
+	mtcmos_ctrl_little_off(2);
+	mtcmos_ctrl_little_off(3);
+}
+
+uint32_t wait_mtcmos_ack(uint32_t on, uint32_t pwr_ctrl, uint32_t spm_pwr_sta)
+{
+	int i = 0;
+	uint32_t cmp, pwr_sta, pwr_sta_2nd;
+
+	while (1) {
+		cmp = mmio_read_32(SPM_PCM_PASR_DPD_3) & pwr_ctrl;
+		pwr_sta = (mmio_read_32(SPM_PWR_STATUS) >> spm_pwr_sta) & 1;
+		pwr_sta_2nd =
+			(mmio_read_32(SPM_PWR_STATUS_2ND) >> spm_pwr_sta) & 1;
+		if (cmp && (pwr_sta == on) && (pwr_sta_2nd == on)) {
+			mmio_write_32(SPM_PCM_RESERVE2, 0);
+			return MTCMOS_CTRL_SUCCESS;
+		}
+		udelay(MTCMOS_ACK_POLLING_INTERVAL);
+		i++;
+		if (i > MTCMOS_ACK_POLLING_MAX_COUNT) {
+			INFO("MTCMOS control failed(%d), SPM_PWR_STA(%d),\n"
+				"SPM_PCM_RESERVE=0x%x,SPM_PCM_RESERVE2=0x%x,\n"
+				"SPM_PWR_STATUS=0x%x,SPM_PWR_STATUS_2ND=0x%x\n"
+				"SPM_PCM_PASR_DPD_3 = 0x%x\n",
+				on, spm_pwr_sta, mmio_read_32(SPM_PCM_RESERVE),
+				mmio_read_32(SPM_PCM_RESERVE2),
+				mmio_read_32(SPM_PWR_STATUS),
+				mmio_read_32(SPM_PWR_STATUS_2ND),
+				mmio_read_32(SPM_PCM_PASR_DPD_3));
+			mmio_write_32(SPM_PCM_RESERVE2, 0);
+			return MTCMOS_CTRL_ERROR;
+		}
+	}
+}
+
+uint32_t mtcmos_non_cpu_ctrl(uint32_t on, uint32_t mtcmos_num)
+{
+	uint32_t ret = MTCMOS_CTRL_SUCCESS;
+	uint32_t power_on;
+	uint32_t power_off;
+	uint32_t power_ctrl;
+	uint32_t power_status;
+
+	spm_lock_get();
+	spm_mcdi_prepare_for_mtcmos();
+	mmio_setbits_32(SPM_PCM_RESERVE, MTCMOS_CTRL_EN);
+
+	switch (mtcmos_num) {
+	case SPM_VDE_PWR_CON:
+		power_on = VDE_PWR_ON;
+		power_off = VDE_PWR_OFF;
+		power_status = VDE_PWR_CON_PWR_STA;
+		break;
+	case SPM_MFG_PWR_CON:
+		power_on = MFG_PWR_ON;
+		power_off = MFG_PWR_OFF;
+		power_status = MFG_PWR_CON_PWR_STA;
+		break;
+	case SPM_VEN_PWR_CON:
+		power_on = VEN_PWR_ON;
+		power_off = VEN_PWR_OFF;
+		power_status = VEN_PWR_CON_PWR_STA;
+		break;
+	case SPM_ISP_PWR_CON:
+		power_on = ISP_PWR_ON;
+		power_off = ISP_PWR_OFF;
+		power_status = ISP_PWR_CON_PWR_STA;
+		break;
+	case SPM_DIS_PWR_CON:
+		power_on = DIS_PWR_ON;
+		power_off = DIS_PWR_OFF;
+		power_status = DIS_PWR_CON_PWR_STA;
+		break;
+	case SPM_VEN2_PWR_CON:
+		power_on = VEN2_PWR_ON;
+		power_off = VEN2_PWR_OFF;
+		power_status = VEN2_PWR_CON_PWR_STA;
+		break;
+	case SPM_AUDIO_PWR_CON:
+		power_on = AUDIO_PWR_ON;
+		power_off = AUDIO_PWR_OFF;
+		power_status = AUDIO_PWR_CON_PWR_STA;
+		break;
+	case SPM_MFG_2D_PWR_CON:
+		power_on = MFG_2D_PWR_ON;
+		power_off = MFG_2D_PWR_OFF;
+		power_status = MFG_2D_PWR_CON_PWR_STA;
+		break;
+	case SPM_MFG_ASYNC_PWR_CON:
+		power_on = MFG_ASYNC_PWR_ON;
+		power_off = MFG_ASYNC_PWR_OFF;
+		power_status = MFG_ASYNC_PWR_CON_PWR_STA;
+		break;
+	case SPM_USB_PWR_CON:
+		power_on = USB_PWR_ON;
+		power_off = USB_PWR_OFF;
+		power_status = USB_PWR_CON_PWR_STA;
+		break;
+	default:
+		ret = MTCMOS_CTRL_ERROR;
+		INFO("No mapping MTCMOS(%d), ret = %d\n", mtcmos_num, ret);
+		break;
+	}
+	if (ret == MTCMOS_CTRL_SUCCESS) {
+		power_ctrl = on ? (1 << power_on) : (1 << power_off);
+		mmio_setbits_32(SPM_PCM_RESERVE2, power_ctrl);
+		ret = wait_mtcmos_ack(on, power_ctrl, power_status);
+		VERBOSE("0x%x(%d), PWR_STATUS(0x%x), ret(%d)\n",
+			power_ctrl, on, mmio_read_32(SPM_PWR_STATUS), ret);
+	}
+
+	mmio_clrbits_32(SPM_PCM_RESERVE, MTCMOS_CTRL_EN);
+	spm_lock_release();
+
+	return ret;
+}
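For reference, a minimal sketch of how a caller might drive mtcmos_non_cpu_ctrl() to power a non-CPU domain on and back off. The SPM_DIS_PWR_CON selector value is copied from the switch statement above purely for the sketch; the calling context (for example a SiP service handler) is assumed, not part of this patch:

#include <stdint.h>
#include <mtcmos.h>

/* Register selector as defined in mtcmos.c; duplicated here for the sketch. */
#define SPM_DIS_PWR_CON		0x023c

/* Power the display (DIS) MTCMOS domain on, then back off. */
static int example_toggle_display_power(void)
{
	if (mtcmos_non_cpu_ctrl(1, SPM_DIS_PWR_CON) != 0)
		return -1;	/* power-on failed or timed out */

	if (mtcmos_non_cpu_ctrl(0, SPM_DIS_PWR_CON) != 0)
		return -1;	/* power-off failed or timed out */

	return 0;
}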
diff --git a/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.h b/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.h
new file mode 100644
index 0000000..6e14e3d
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/mtcmos/mtcmos.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __MTCMOS_H__
+#define __MTCMOS_H__
+
+/*
+ * This function turns off the power of all little cores except CPU 0. All
+ * cores in cluster 0 are powered on when the system powers up. The System
+ * Power Manager (SPM) does nothing if it finds the core's power is already
+ * on during the CPU_ON PSCI call.
+ */
+void mtcmos_little_cpu_off(void);
+uint32_t mtcmos_non_cpu_ctrl(uint32_t on, uint32_t mtcmos_num);
+
+#endif /* __MTCMOS_H__ */
diff --git a/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.c b/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.c
new file mode 100644
index 0000000..c64fdf7
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <pmic_wrap_init.h>
+
+/* pmic wrap module wait_idle and read polling interval (in microseconds) */
+enum {
+	WAIT_IDLE_POLLING_DELAY_US	= 1,
+	READ_POLLING_DELAY_US		= 2
+};
+
+static inline uint32_t wait_for_state_idle(uint32_t timeout_us,
+					   void *wacs_register,
+					   void *wacs_vldclr_register,
+					   uint32_t *read_reg)
+{
+	uint32_t reg_rdata;
+	uint32_t retry;
+
+	retry = (timeout_us + WAIT_IDLE_POLLING_DELAY_US) /
+		WAIT_IDLE_POLLING_DELAY_US;
+
+	do {
+		udelay(WAIT_IDLE_POLLING_DELAY_US);
+		reg_rdata = mmio_read_32((uintptr_t)wacs_register);
+		/* If the last read command timed out, clear the vldclr bit.
+		   Read state machine: FSM_REQ --> WFDLE --> WFVLDCLR;
+		   write state machine: FSM_REQ --> IDLE. */
+		switch (((reg_rdata >> RDATA_WACS_FSM_SHIFT) &
+			RDATA_WACS_FSM_MASK)) {
+		case WACS_FSM_WFVLDCLR:
+			mmio_write_32((uintptr_t)wacs_vldclr_register, 1);
+			ERROR("WACS_FSM = PMIC_WRAP_WACS_VLDCLR\n");
+			break;
+		case WACS_FSM_WFDLE:
+			ERROR("WACS_FSM = WACS_FSM_WFDLE\n");
+			break;
+		case WACS_FSM_REQ:
+			ERROR("WACS_FSM = WACS_FSM_REQ\n");
+			break;
+		case WACS_FSM_IDLE:
+			goto done;
+		default:
+			break;
+		}
+
+		retry--;
+	} while (retry);
+
+done:
+	if (!retry)	/* timeout */
+		return E_PWR_WAIT_IDLE_TIMEOUT;
+
+	if (read_reg)
+		*read_reg = reg_rdata;
+	return 0;
+}
+
+static inline uint32_t wait_for_state_ready(uint32_t timeout_us,
+					    void *wacs_register,
+					    uint32_t *read_reg)
+{
+	uint32_t reg_rdata;
+	uint32_t retry;
+
+	retry = (timeout_us + READ_POLLING_DELAY_US) / READ_POLLING_DELAY_US;
+
+	do {
+		udelay(READ_POLLING_DELAY_US);
+		reg_rdata = mmio_read_32((uintptr_t)wacs_register);
+
+		if (((reg_rdata >> RDATA_WACS_FSM_SHIFT) & RDATA_WACS_FSM_MASK)
+		    == WACS_FSM_WFVLDCLR)
+			break;
+
+		retry--;
+	} while (retry);
+
+	if (!retry) {	/* timeout */
+		ERROR("timeout when waiting for idle\n");
+		return E_PWR_WAIT_IDLE_TIMEOUT_READ;
+	}
+
+	if (read_reg)
+		*read_reg = reg_rdata;
+	return 0;
+}
+
+static int32_t pwrap_wacs2(uint32_t write,
+		    uint32_t adr,
+		    uint32_t wdata,
+		    uint32_t *rdata,
+		    uint32_t init_check)
+{
+	uint32_t reg_rdata = 0;
+	uint32_t wacs_write = 0;
+	uint32_t wacs_adr = 0;
+	uint32_t wacs_cmd = 0;
+	uint32_t return_value = 0;
+
+	if (init_check) {
+		reg_rdata = mmio_read_32((uintptr_t)&mt8173_pwrap->wacs2_rdata);
+		/* Prevent anyone from using pwrap before pwrap init is done */
+		if (((reg_rdata >> RDATA_INIT_DONE_SHIFT) &
+		    RDATA_INIT_DONE_MASK) != WACS_INIT_DONE) {
+			ERROR("initialization isn't finished\n");
+			return E_PWR_NOT_INIT_DONE;
+		}
+	}
+	reg_rdata = 0;
+	/* Check IDLE in advance */
+	return_value = wait_for_state_idle(TIMEOUT_WAIT_IDLE,
+				&mt8173_pwrap->wacs2_rdata,
+				&mt8173_pwrap->wacs2_vldclr,
+				0);
+	if (return_value != 0) {
+		ERROR("wait_for_fsm_idle fail,return_value=%d\n", return_value);
+		goto FAIL;
+	}
+	wacs_write = write << 31;
+	wacs_adr = (adr >> 1) << 16;
+	wacs_cmd = wacs_write | wacs_adr | wdata;
+
+	mmio_write_32((uintptr_t)&mt8173_pwrap->wacs2_cmd, wacs_cmd);
+	if (write == 0) {
+		if (NULL == rdata) {
+			ERROR("rdata is a NULL pointer\n");
+			return_value = E_PWR_INVALID_ARG;
+			goto FAIL;
+		}
+		return_value = wait_for_state_ready(TIMEOUT_READ,
+					&mt8173_pwrap->wacs2_rdata,
+					&reg_rdata);
+		if (return_value != 0) {
+			ERROR("wait_for_fsm_vldclr fail,return_value=%d\n",
+				 return_value);
+			goto FAIL;
+		}
+		*rdata = ((reg_rdata >> RDATA_WACS_RDATA_SHIFT)
+			  & RDATA_WACS_RDATA_MASK);
+		mmio_write_32((uintptr_t)&mt8173_pwrap->wacs2_vldclr, 1);
+	}
+FAIL:
+	return return_value;
+}
+
+/* external API for pmic_wrap user */
+
+int32_t pwrap_read(uint32_t adr, uint32_t *rdata)
+{
+	return pwrap_wacs2(0, adr, 0, rdata, 1);
+}
+
+int32_t pwrap_write(uint32_t adr, uint32_t wdata)
+{
+	return pwrap_wacs2(1, adr, wdata, 0, 1);
+}
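pwrap_read() and pwrap_write() above are the only entry points other drivers need; internally pwrap_wacs2() packs the direction into bit 31, the halved register address into bits 16 and up, and the write data into the low bits of the WACS2 command word. A minimal read-modify-write sketch on top of this API, with a made-up PMIC register address (the RTC driver later in this patch uses the same pattern):

#include <stdint.h>
#include <mt8173_def.h>
#include <pmic_wrap_init.h>

/* Made-up PMIC register address and bit, for illustration only. */
#define EXAMPLE_PMIC_REG	0xE000
#define EXAMPLE_PMIC_BIT	(1U << 3)

/* Read-modify-write one PMIC register through the wrapper. */
static int32_t example_set_pmic_bit(void)
{
	uint32_t val;
	int32_t ret;

	ret = pwrap_read(EXAMPLE_PMIC_REG, &val);
	if (ret != 0)
		return ret;

	return pwrap_write(EXAMPLE_PMIC_REG, val | EXAMPLE_PMIC_BIT);
}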
diff --git a/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.h b/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.h
new file mode 100644
index 0000000..01f1b25
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/pmic/pmic_wrap_init.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMIC_WRAP_INIT_H__
+#define __PMIC_WRAP_INIT_H__
+
+/* external API */
+int32_t pwrap_read(uint32_t adr, uint32_t *rdata);
+int32_t pwrap_write(uint32_t adr, uint32_t wdata);
+
+static struct mt8173_pmic_wrap_regs *const mt8173_pwrap =
+	(void *)PMIC_WRAP_BASE;
+
+/* timeout setting */
+enum {
+	TIMEOUT_RESET       = 50,	/* us */
+	TIMEOUT_READ        = 50,	/* us */
+	TIMEOUT_WAIT_IDLE   = 50	/* us */
+};
+
+/* PMIC_WRAP registers */
+struct mt8173_pmic_wrap_regs {
+	uint32_t mux_sel;
+	uint32_t wrap_en;
+	uint32_t dio_en;
+	uint32_t sidly;
+	uint32_t rddmy;
+	uint32_t si_ck_con;
+	uint32_t cshext_write;
+	uint32_t cshext_read;
+	uint32_t cslext_start;
+	uint32_t cslext_end;
+	uint32_t staupd_prd;
+	uint32_t staupd_grpen;
+	uint32_t reserved[4];
+	uint32_t staupd_man_trig;
+	uint32_t staupd_sta;
+	uint32_t wrap_sta;
+	uint32_t harb_init;
+	uint32_t harb_hprio;
+	uint32_t hiprio_arb_en;
+	uint32_t harb_sta0;
+	uint32_t harb_sta1;
+	uint32_t man_en;
+	uint32_t man_cmd;
+	uint32_t man_rdata;
+	uint32_t man_vldclr;
+	uint32_t wacs0_en;
+	uint32_t init_done0;
+	uint32_t wacs0_cmd;
+	uint32_t wacs0_rdata;
+	uint32_t wacs0_vldclr;
+	uint32_t wacs1_en;
+	uint32_t init_done1;
+	uint32_t wacs1_cmd;
+	uint32_t wacs1_rdata;
+	uint32_t wacs1_vldclr;
+	uint32_t wacs2_en;
+	uint32_t init_done2;
+	uint32_t wacs2_cmd;
+	uint32_t wacs2_rdata;
+	uint32_t wacs2_vldclr;
+	uint32_t int_en;
+	uint32_t int_flg_raw;
+	uint32_t int_flg;
+	uint32_t int_clr;
+	uint32_t sig_adr;
+	uint32_t sig_mode;
+	uint32_t sig_value;
+	uint32_t sig_errval;
+	uint32_t crc_en;
+	uint32_t timer_en;
+	uint32_t timer_sta;
+	uint32_t wdt_unit;
+	uint32_t wdt_src_en;
+	uint32_t wdt_flg;
+	uint32_t debug_int_sel;
+	uint32_t dvfs_adr0;
+	uint32_t dvfs_wdata0;
+	uint32_t dvfs_adr1;
+	uint32_t dvfs_wdata1;
+	uint32_t dvfs_adr2;
+	uint32_t dvfs_wdata2;
+	uint32_t dvfs_adr3;
+	uint32_t dvfs_wdata3;
+	uint32_t dvfs_adr4;
+	uint32_t dvfs_wdata4;
+	uint32_t dvfs_adr5;
+	uint32_t dvfs_wdata5;
+	uint32_t dvfs_adr6;
+	uint32_t dvfs_wdata6;
+	uint32_t dvfs_adr7;
+	uint32_t dvfs_wdata7;
+	uint32_t spminf_sta;
+	uint32_t cipher_key_sel;
+	uint32_t cipher_iv_sel;
+	uint32_t cipher_en;
+	uint32_t cipher_rdy;
+	uint32_t cipher_mode;
+	uint32_t cipher_swrst;
+	uint32_t dcm_en;
+	uint32_t dcm_dbc_prd;
+};
+
+enum {
+	RDATA_WACS_RDATA_SHIFT = 0,
+	RDATA_WACS_FSM_SHIFT = 16,
+	RDATA_WACS_REQ_SHIFT = 19,
+	RDATA_SYNC_IDLE_SHIFT,
+	RDATA_INIT_DONE_SHIFT,
+	RDATA_SYS_IDLE_SHIFT,
+};
+
+enum {
+	RDATA_WACS_RDATA_MASK = 0xffff,
+	RDATA_WACS_FSM_MASK = 0x7,
+	RDATA_WACS_REQ_MASK = 0x1,
+	RDATA_SYNC_IDLE_MASK = 0x1,
+	RDATA_INIT_DONE_MASK = 0x1,
+	RDATA_SYS_IDLE_MASK = 0x1,
+};
+
+/* WACS_FSM */
+enum {
+	WACS_FSM_IDLE            = 0x00,
+	WACS_FSM_REQ             = 0x02,
+	WACS_FSM_WFDLE           = 0x04,
+	WACS_FSM_WFVLDCLR        = 0x06,
+	WACS_INIT_DONE           = 0x01,
+	WACS_SYNC_IDLE           = 0x01,
+	WACS_SYNC_BUSY           = 0x00
+};
+
+/* error information flag */
+enum {
+	E_PWR_INVALID_ARG             = 1,
+	E_PWR_INVALID_RW              = 2,
+	E_PWR_INVALID_ADDR            = 3,
+	E_PWR_INVALID_WDAT            = 4,
+	E_PWR_INVALID_OP_MANUAL       = 5,
+	E_PWR_NOT_IDLE_STATE          = 6,
+	E_PWR_NOT_INIT_DONE           = 7,
+	E_PWR_NOT_INIT_DONE_READ      = 8,
+	E_PWR_WAIT_IDLE_TIMEOUT       = 9,
+	E_PWR_WAIT_IDLE_TIMEOUT_READ  = 10,
+	E_PWR_INIT_SIDLY_FAIL         = 11,
+	E_PWR_RESET_TIMEOUT           = 12,
+	E_PWR_TIMEOUT                 = 13,
+	E_PWR_INIT_RESET_SPI          = 20,
+	E_PWR_INIT_SIDLY              = 21,
+	E_PWR_INIT_REG_CLOCK          = 22,
+	E_PWR_INIT_ENABLE_PMIC        = 23,
+	E_PWR_INIT_DIO                = 24,
+	E_PWR_INIT_CIPHER             = 25,
+	E_PWR_INIT_WRITE_TEST         = 26,
+	E_PWR_INIT_ENABLE_CRC         = 27,
+	E_PWR_INIT_ENABLE_DEWRAP      = 28,
+	E_PWR_INIT_ENABLE_EVENT       = 29,
+	E_PWR_READ_TEST_FAIL          = 30,
+	E_PWR_WRITE_TEST_FAIL         = 31,
+	E_PWR_SWITCH_DIO              = 32
+};
+
+#endif /* __PMIC_WRAP_INIT_H__ */
diff --git a/plat/mediatek/mt8173/drivers/rtc/rtc.c b/plat/mediatek/mt8173/drivers/rtc/rtc.c
new file mode 100644
index 0000000..22fed9e
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/rtc/rtc.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mt8173_def.h>
+#include <pmic_wrap_init.h>
+#include <rtc.h>
+
+/* RTC busy status polling interval and retry count */
+enum {
+	RTC_WRTGR_POLLING_DELAY_MS	= 10,
+	RTC_WRTGR_POLLING_CNT		= 100
+};
+
+static uint16_t RTC_Read(uint32_t addr)
+{
+	uint32_t rdata = 0;
+
+	pwrap_read((uint32_t)addr, &rdata);
+	return (uint16_t)rdata;
+}
+
+static void RTC_Write(uint32_t addr, uint16_t data)
+{
+	pwrap_write((uint32_t)addr, (uint32_t)data);
+}
+
+static inline int32_t rtc_busy_wait(void)
+{
+	uint64_t retry = RTC_WRTGR_POLLING_CNT;
+
+	do {
+		mdelay(RTC_WRTGR_POLLING_DELAY_MS);
+		if (!(RTC_Read(RTC_BBPU) & RTC_BBPU_CBUSY))
+			return 1;
+		retry--;
+	} while (retry);
+
+	ERROR("[RTC] rtc cbusy time out!\n");
+	return 0;
+}
+
+static int32_t Write_trigger(void)
+{
+	RTC_Write(RTC_WRTGR, 1);
+	return rtc_busy_wait();
+}
+
+static int32_t Writeif_unlock(void)
+{
+	RTC_Write(RTC_PROT, RTC_PROT_UNLOCK1);
+	if (!Write_trigger())
+		return 0;
+	RTC_Write(RTC_PROT, RTC_PROT_UNLOCK2);
+	if (!Write_trigger())
+		return 0;
+
+	return 1;
+}
+
+void rtc_bbpu_power_down(void)
+{
+	uint16_t bbpu;
+
+	/* pull PWRBB low */
+	bbpu = RTC_BBPU_KEY | RTC_BBPU_AUTO | RTC_BBPU_PWREN;
+	if (Writeif_unlock()) {
+		RTC_Write(RTC_BBPU, bbpu);
+		if (!Write_trigger())
+			assert(0);
+	} else {
+		assert(0);
+	}
+}
diff --git a/plat/mediatek/mt8173/drivers/rtc/rtc.h b/plat/mediatek/mt8173/drivers/rtc/rtc.h
new file mode 100644
index 0000000..d4f8a56
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/rtc/rtc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_DRIVER_RTC_H__
+#define __PLAT_DRIVER_RTC_H__
+
+/* RTC registers */
+enum {
+	RTC_BBPU = 0xE000,
+	RTC_IRQ_STA = 0xE002,
+	RTC_IRQ_EN = 0xE004,
+	RTC_CII_EN = 0xE006
+};
+
+enum {
+	RTC_OSC32CON = 0xE026,
+	RTC_CON = 0xE03E,
+	RTC_WRTGR = 0xE03C
+};
+
+enum {
+	RTC_PDN1 = 0xE02C,
+	RTC_PDN2 = 0xE02E,
+	RTC_SPAR0 = 0xE030,
+	RTC_SPAR1 = 0xE032,
+	RTC_PROT = 0xE036,
+	RTC_DIFF = 0xE038,
+	RTC_CALI = 0xE03A
+};
+
+enum {
+	RTC_PROT_UNLOCK1 = 0x586A,
+	RTC_PROT_UNLOCK2 = 0x9136
+};
+
+enum {
+	RTC_BBPU_PWREN	= 1U << 0,
+	RTC_BBPU_BBPU	= 1U << 2,
+	RTC_BBPU_AUTO	= 1U << 3,
+	RTC_BBPU_CLRPKY	= 1U << 4,
+	RTC_BBPU_RELOAD	= 1U << 5,
+	RTC_BBPU_CBUSY	= 1U << 6
+};
+
+enum {
+	RTC_BBPU_KEY	= 0x43 << 8
+};
+
+void rtc_bbpu_power_down(void);
+
+#endif /* __PLAT_DRIVER_RTC_H__ */
diff --git a/plat/mediatek/mt8173/drivers/spm/spm.c b/plat/mediatek/mt8173/drivers/spm/spm.c
new file mode 100644
index 0000000..eb40072
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <bakery_lock.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <spm.h>
+#include <spm_suspend.h>
+
+/*
+ * System Power Manager (SPM) is a hardware module, which controls cpu or
+ * system power for different power scenarios using different firmware, i.e.,
+ * - spm_hotplug.c for cpu power control in cpu hotplug flow.
+ * - spm_mcdi.c for cpu power control in cpu idle power saving state.
+ * - spm_suspend.c for system power control in system suspend scenario.
+ *
+ * This file provide utility functions common to hotplug, mcdi(idle), suspend
+ * power scenarios. A bakery lock (software lock) is incoporated to protect
+ * certain critical sections to avoid kicking different SPM firmware
+ * concurrently.
+ */
+
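As the comment above describes, every SPM firmware load follows the same guarded kick sequence. A condensed sketch of that pattern, using only functions declared in spm.h; the pcmdesc/pwrctrl pair stands in for whichever scenario (hotplug, mcdi or suspend) is being started, and exactly which steps each scenario performs may differ slightly:

#include <mt8173_def.h>
#include <spm.h>

/*
 * Sketch only: load and start one SPM firmware under the bakery lock.
 * 'pcmdesc' and 'pwrctrl' would come from the scenario-specific file
 * (spm_hotplug.c, spm_mcdi.c or spm_suspend.c).
 */
static void example_kick_spm(const struct pcm_desc *pcmdesc,
			     struct pwr_ctrl *pwrctrl)
{
	spm_lock_get();

	spm_reset_and_init_pcm();
	spm_kick_im_to_fetch(pcmdesc);
	spm_init_pcm_register();
	spm_init_event_vector(pcmdesc);
	spm_set_power_control(pwrctrl);
	spm_set_wakeup_event(pwrctrl);
	spm_kick_pcm_to_run(pwrctrl);

	spm_lock_release();
}

spm_go_to_hotplug() later in this patch runs essentially this sequence for the hotplug firmware.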
+#define SPM_SYSCLK_SETTLE       128	/* 3.9ms */
+
+DEFINE_BAKERY_LOCK(spm_lock);
+
+static int spm_hotplug_ready __section("tzfw_coherent_mem");
+static int spm_mcdi_ready __section("tzfw_coherent_mem");
+static int spm_suspend_ready __section("tzfw_coherent_mem");
+
+void spm_lock_init(void)
+{
+	bakery_lock_init(&spm_lock);
+}
+
+void spm_lock_get(void)
+{
+	bakery_lock_get(&spm_lock);
+}
+
+void spm_lock_release(void)
+{
+	bakery_lock_release(&spm_lock);
+}
+
+int is_mcdi_ready(void)
+{
+	return spm_mcdi_ready;
+}
+
+int is_hotplug_ready(void)
+{
+	return spm_hotplug_ready;
+}
+
+int is_suspend_ready(void)
+{
+	return spm_suspend_ready;
+}
+
+void set_mcdi_ready(void)
+{
+	spm_mcdi_ready = 1;
+	spm_hotplug_ready = 0;
+	spm_suspend_ready = 0;
+}
+
+void set_hotplug_ready(void)
+{
+	spm_mcdi_ready = 0;
+	spm_hotplug_ready = 1;
+	spm_suspend_ready = 0;
+}
+
+void set_suspend_ready(void)
+{
+	spm_mcdi_ready = 0;
+	spm_hotplug_ready = 0;
+	spm_suspend_ready = 1;
+}
+
+void clear_all_ready(void)
+{
+	spm_mcdi_ready = 0;
+	spm_hotplug_ready = 0;
+	spm_suspend_ready = 0;
+}
+
+void spm_register_init(void)
+{
+	mmio_write_32(SPM_POWERON_CONFIG_SET, SPM_REGWR_CFG_KEY | SPM_REGWR_EN);
+
+	mmio_write_32(SPM_POWER_ON_VAL0, 0);
+	mmio_write_32(SPM_POWER_ON_VAL1, POWER_ON_VAL1_DEF);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY | CON0_PCM_SW_RESET);
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY);
+	if (mmio_read_32(SPM_PCM_FSM_STA) != PCM_FSM_STA_DEF)
+		WARN("PCM reset failed\n");
+
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY | CON0_IM_SLEEP_DVS);
+	mmio_write_32(SPM_PCM_CON1, CON1_CFG_KEY | CON1_EVENT_LOCK_EN |
+		CON1_SPM_SRAM_ISO_B | CON1_SPM_SRAM_SLP_B | CON1_MIF_APBEN);
+	mmio_write_32(SPM_PCM_IM_PTR, 0);
+	mmio_write_32(SPM_PCM_IM_LEN, 0);
+
+	mmio_write_32(SPM_CLK_CON, CC_SYSCLK0_EN_1 | CC_SYSCLK0_EN_0 |
+		CC_SYSCLK1_EN_0 | CC_SRCLKENA_MASK_0 | CC_CLKSQ1_SEL |
+		CC_CXO32K_RM_EN_MD2 | CC_CXO32K_RM_EN_MD1 | CC_MD32_DCM_EN);
+
+	mmio_write_32(SPM_SLEEP_ISR_MASK, 0xff0c);
+	mmio_write_32(SPM_SLEEP_ISR_STATUS, 0xc);
+	mmio_write_32(SPM_PCM_SW_INT_CLEAR, 0xff);
+	mmio_write_32(SPM_MD32_SRAM_CON, 0xff0);
+}
+
+void spm_reset_and_init_pcm(void)
+{
+	unsigned int con1;
+	int i = 0;
+
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY | CON0_PCM_SW_RESET);
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY);
+	while (mmio_read_32(SPM_PCM_FSM_STA) != PCM_FSM_STA_DEF) {
+		i++;
+		if (i > 1000) {
+			i = 0;
+			WARN("PCM reset failed\n");
+			break;
+		}
+	}
+
+	mmio_write_32(SPM_PCM_CON0, CON0_CFG_KEY | CON0_IM_SLEEP_DVS);
+
+	con1 = mmio_read_32(SPM_PCM_CON1) &
+		(CON1_PCM_WDT_WAKE_MODE | CON1_PCM_WDT_EN);
+	mmio_write_32(SPM_PCM_CON1, con1 | CON1_CFG_KEY | CON1_EVENT_LOCK_EN |
+		CON1_SPM_SRAM_ISO_B | CON1_SPM_SRAM_SLP_B |
+		CON1_IM_NONRP_EN | CON1_MIF_APBEN);
+}
+
+void spm_init_pcm_register(void)
+{
+	mmio_write_32(SPM_PCM_REG_DATA_INI, mmio_read_32(SPM_POWER_ON_VAL0));
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R0);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+
+	mmio_write_32(SPM_PCM_REG_DATA_INI, mmio_read_32(SPM_POWER_ON_VAL1));
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R7);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+}
+
+void spm_set_power_control(const struct pwr_ctrl *pwrctrl)
+{
+	mmio_write_32(SPM_AP_STANBY_CON, (!pwrctrl->md32_req_mask << 21) |
+					 (!pwrctrl->mfg_req_mask << 17) |
+					 (!pwrctrl->disp_req_mask << 16) |
+					 (!!pwrctrl->mcusys_idle_mask << 7) |
+					 (!!pwrctrl->ca15top_idle_mask << 6) |
+					 (!!pwrctrl->ca7top_idle_mask << 5) |
+					 (!!pwrctrl->wfi_op << 4));
+	mmio_write_32(SPM_PCM_SRC_REQ, (!!pwrctrl->pcm_apsrc_req << 0));
+	mmio_write_32(SPM_PCM_PASR_DPD_2, 0);
+
+	mmio_clrsetbits_32(SPM_CLK_CON, CC_SRCLKENA_MASK_0,
+		(pwrctrl->srclkenai_mask ? CC_SRCLKENA_MASK_0 : 0));
+
+	mmio_write_32(SPM_SLEEP_CA15_WFI0_EN, !!pwrctrl->ca15_wfi0_en);
+	mmio_write_32(SPM_SLEEP_CA15_WFI1_EN, !!pwrctrl->ca15_wfi1_en);
+	mmio_write_32(SPM_SLEEP_CA15_WFI2_EN, !!pwrctrl->ca15_wfi2_en);
+	mmio_write_32(SPM_SLEEP_CA15_WFI3_EN, !!pwrctrl->ca15_wfi3_en);
+	mmio_write_32(SPM_SLEEP_CA7_WFI0_EN, !!pwrctrl->ca7_wfi0_en);
+	mmio_write_32(SPM_SLEEP_CA7_WFI1_EN, !!pwrctrl->ca7_wfi1_en);
+	mmio_write_32(SPM_SLEEP_CA7_WFI2_EN, !!pwrctrl->ca7_wfi2_en);
+	mmio_write_32(SPM_SLEEP_CA7_WFI3_EN, !!pwrctrl->ca7_wfi3_en);
+}
+
+void spm_set_wakeup_event(const struct pwr_ctrl *pwrctrl)
+{
+	unsigned int val, mask;
+
+	if (pwrctrl->timer_val_cust == 0)
+		val = pwrctrl->timer_val ? pwrctrl->timer_val : PCM_TIMER_MAX;
+	else
+		val = pwrctrl->timer_val_cust;
+
+	mmio_write_32(SPM_PCM_TIMER_VAL, val);
+	mmio_setbits_32(SPM_PCM_CON1, CON1_CFG_KEY);
+
+	if (pwrctrl->wake_src_cust == 0)
+		mask = pwrctrl->wake_src;
+	else
+		mask = pwrctrl->wake_src_cust;
+
+	if (pwrctrl->syspwreq_mask)
+		mask &= ~WAKE_SRC_SYSPWREQ;
+
+	mmio_write_32(SPM_SLEEP_WAKEUP_EVENT_MASK, ~mask);
+	mmio_write_32(SPM_SLEEP_ISR_MASK, 0xfe04);
+}
+
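SPM_SLEEP_WAKEUP_EVENT_MASK stores a mask of disabled wake sources, which is why the enabled set computed above is written inverted. A small worked example of the value spm_set_wakeup_event() would program, with a made-up wake_src combination:

#include <assert.h>
#include <stdio.h>

/* Wake source bits, as defined in spm.h. */
#define WAKE_SRC_EINT		(1 << 6)
#define WAKE_SRC_SYSPWREQ	(1 << 24)

int main(void)
{
	/*
	 * Illustrative configuration: EINT and SYSPWREQ requested as wake
	 * sources, but syspwreq_mask is set, so SYSPWREQ is dropped.
	 */
	unsigned int mask = WAKE_SRC_EINT | WAKE_SRC_SYSPWREQ;
	unsigned int reg_val;

	mask &= ~WAKE_SRC_SYSPWREQ;

	/* The register stores "1 = masked", hence the inversion. */
	reg_val = ~mask;

	assert(reg_val == 0xffffffbf);
	printf("SPM_SLEEP_WAKEUP_EVENT_MASK: 0x%08x\n", reg_val);
	return 0;
}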
+void spm_get_wakeup_status(struct wake_status *wakesta)
+{
+	wakesta->assert_pc = mmio_read_32(SPM_PCM_REG_DATA_INI);
+	wakesta->r12 = mmio_read_32(SPM_PCM_REG12_DATA);
+	wakesta->raw_sta = mmio_read_32(SPM_SLEEP_ISR_RAW_STA);
+	wakesta->wake_misc = mmio_read_32(SPM_SLEEP_WAKEUP_MISC);
+	wakesta->timer_out = mmio_read_32(SPM_PCM_TIMER_OUT);
+	wakesta->r13 = mmio_read_32(SPM_PCM_REG13_DATA);
+	wakesta->idle_sta = mmio_read_32(SPM_SLEEP_SUBSYS_IDLE_STA);
+	wakesta->debug_flag = mmio_read_32(SPM_PCM_PASR_DPD_3);
+	wakesta->event_reg = mmio_read_32(SPM_PCM_EVENT_REG_STA);
+	wakesta->isr = mmio_read_32(SPM_SLEEP_ISR_STATUS);
+}
+
+void spm_init_event_vector(const struct pcm_desc *pcmdesc)
+{
+	/* init event vector register */
+	mmio_write_32(SPM_PCM_EVENT_VECTOR0, pcmdesc->vec0);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR1, pcmdesc->vec1);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR2, pcmdesc->vec2);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR3, pcmdesc->vec3);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR4, pcmdesc->vec4);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR5, pcmdesc->vec5);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR6, pcmdesc->vec6);
+	mmio_write_32(SPM_PCM_EVENT_VECTOR7, pcmdesc->vec7);
+
+	/* event vector will be enabled by PCM itself */
+}
+
+void spm_kick_im_to_fetch(const struct pcm_desc *pcmdesc)
+{
+	unsigned int ptr = 0, len, con0;
+
+	ptr = (unsigned int)(unsigned long)(pcmdesc->base);
+	len = pcmdesc->size - 1;
+	if (mmio_read_32(SPM_PCM_IM_PTR) != ptr ||
+	    mmio_read_32(SPM_PCM_IM_LEN) != len ||
+	    pcmdesc->sess > 2) {
+		mmio_write_32(SPM_PCM_IM_PTR, ptr);
+		mmio_write_32(SPM_PCM_IM_LEN, len);
+	} else {
+		mmio_setbits_32(SPM_PCM_CON1, CON1_CFG_KEY | CON1_IM_SLAVE);
+	}
+
+	/* kick IM to fetch (only toggle IM_KICK) */
+	con0 = mmio_read_32(SPM_PCM_CON0) & ~(CON0_IM_KICK | CON0_PCM_KICK);
+	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY | CON0_IM_KICK);
+	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY);
+
+	/* kick IM to fetch (only toggle PCM_KICK) */
+	con0 = mmio_read_32(SPM_PCM_CON0) & ~(CON0_IM_KICK | CON0_PCM_KICK);
+	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY | CON0_PCM_KICK);
+	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY);
+}
+
+void spm_set_sysclk_settle(void)
+{
+	mmio_write_32(SPM_CLK_SETTLE, SPM_SYSCLK_SETTLE);
+
+	INFO("settle = %u\n", mmio_read_32(SPM_CLK_SETTLE));
+}
+
+void spm_kick_pcm_to_run(struct pwr_ctrl *pwrctrl)
+{
+	unsigned int con1;
+
+	con1 = mmio_read_32(SPM_PCM_CON1) &
+		~(CON1_PCM_WDT_WAKE_MODE | CON1_PCM_WDT_EN);
+
+	mmio_write_32(SPM_PCM_CON1, CON1_CFG_KEY | con1);
+
+	if (mmio_read_32(SPM_PCM_TIMER_VAL) > PCM_TIMER_MAX)
+		mmio_write_32(SPM_PCM_TIMER_VAL, PCM_TIMER_MAX);
+
+	mmio_write_32(SPM_PCM_WDT_TIMER_VAL,
+		mmio_read_32(SPM_PCM_TIMER_VAL) + PCM_WDT_TIMEOUT);
+
+	mmio_write_32(SPM_PCM_CON1, con1 | CON1_CFG_KEY | CON1_PCM_WDT_EN);
+	mmio_write_32(SPM_PCM_PASR_DPD_0, 0);
+
+	mmio_write_32(SPM_PCM_MAS_PAUSE_MASK, 0xffffffff);
+	mmio_write_32(SPM_PCM_REG_DATA_INI, 0);
+	mmio_clrbits_32(SPM_CLK_CON, CC_DISABLE_DORM_PWR);
+
+	mmio_write_32(SPM_PCM_FLAGS, pwrctrl->pcm_flags);
+
+	mmio_clrsetbits_32(SPM_CLK_CON, CC_LOCK_INFRA_DCM,
+		(pwrctrl->infra_dcm_lock ? CC_LOCK_INFRA_DCM : 0));
+
+	mmio_write_32(SPM_PCM_PWR_IO_EN,
+		(pwrctrl->r0_ctrl_en ? PCM_PWRIO_EN_R0 : 0) |
+		(pwrctrl->r7_ctrl_en ? PCM_PWRIO_EN_R7 : 0));
+}
+
+void spm_clean_after_wakeup(void)
+{
+	mmio_clrsetbits_32(SPM_PCM_CON1, CON1_PCM_WDT_EN, CON1_CFG_KEY);
+
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+	mmio_write_32(SPM_SLEEP_CPU_WAKEUP_EVENT, 0);
+	mmio_clrsetbits_32(SPM_PCM_CON1, CON1_PCM_TIMER_EN, CON1_CFG_KEY);
+
+	mmio_write_32(SPM_SLEEP_WAKEUP_EVENT_MASK, ~0);
+	mmio_write_32(SPM_SLEEP_ISR_MASK, 0xFF0C);
+	mmio_write_32(SPM_SLEEP_ISR_STATUS, 0xC);
+	mmio_write_32(SPM_PCM_SW_INT_CLEAR, 0xFF);
+}
+
+enum wake_reason_t spm_output_wake_reason(struct wake_status *wakesta)
+{
+	enum wake_reason_t wr;
+	int i;
+
+	wr = WR_UNKNOWN;
+
+	if (wakesta->assert_pc != 0) {
+		ERROR("PCM ASSERT AT %u, r12=0x%x, r13=0x%x, debug_flag=0x%x\n",
+		      wakesta->assert_pc, wakesta->r12, wakesta->r13,
+		      wakesta->debug_flag);
+		return WR_PCM_ASSERT;
+	}
+
+	if (wakesta->r12 & WAKE_SRC_SPM_MERGE) {
+		if (wakesta->wake_misc & WAKE_MISC_PCM_TIMER)
+			wr = WR_PCM_TIMER;
+		if (wakesta->wake_misc & WAKE_MISC_CPU_WAKE)
+			wr = WR_WAKE_SRC;
+	}
+
+	for (i = 1; i < 32; i++) {
+		if (wakesta->r12 & (1U << i))
+			wr = WR_WAKE_SRC;
+	}
+
+	if ((wakesta->event_reg & 0x100000) == 0) {
+		INFO("pcm sleep abort!\n");
+		wr = WR_PCM_ABORT;
+	}
+
+	INFO("timer_out = %u, r12 = 0x%x, r13 = 0x%x, debug_flag = 0x%x\n",
+	     wakesta->timer_out, wakesta->r12, wakesta->r13,
+	     wakesta->debug_flag);
+
+	INFO("raw_sta = 0x%x, idle_sta = 0x%x, event_reg = 0x%x, isr = 0x%x\n",
+	     wakesta->raw_sta, wakesta->idle_sta, wakesta->event_reg,
+	     wakesta->isr);
+
+	return wr;
+}
+
+void spm_boot_init(void)
+{
+	/* set spm transaction to secure mode */
+	mmio_write_32(DEVAPC0_APC_CON, 0x0);
+	mmio_write_32(DEVAPC0_MAS_SEC_0, 0x200);
+
+	/* Only CPU0 is online during boot; initialize the cpu online reserve bit */
+	mmio_write_32(SPM_PCM_RESERVE, 0xFE);
+	mmio_clrbits_32(AP_PLL_CON3, 0xFFFFF);
+	mmio_clrbits_32(AP_PLL_CON4, 0xF);
+	spm_lock_init();
+	spm_register_init();
+}
diff --git a/plat/mediatek/mt8173/drivers/spm/spm.h b/plat/mediatek/mt8173/drivers/spm/spm.h
new file mode 100644
index 0000000..abbee2f
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#define SPM_POWERON_CONFIG_SET			(SPM_BASE + 0x000)
+#define SPM_POWER_ON_VAL0			(SPM_BASE + 0x010)
+#define SPM_POWER_ON_VAL1			(SPM_BASE + 0x014)
+#define SPM_CLK_SETTLE				(SPM_BASE + 0x100)
+#define SPM_CA7_CPU1_PWR_CON			(SPM_BASE + 0x218)
+#define SPM_CA7_CPU2_PWR_CON			(SPM_BASE + 0x21c)
+#define SPM_CA7_CPU3_PWR_CON			(SPM_BASE + 0x220)
+#define SPM_CA7_CPU1_L1_PDN			(SPM_BASE + 0x264)
+#define SPM_CA7_CPU2_L1_PDN			(SPM_BASE + 0x26c)
+#define SPM_CA7_CPU3_L1_PDN			(SPM_BASE + 0x274)
+#define SPM_MD32_SRAM_CON			(SPM_BASE + 0x2c8)
+#define SPM_PCM_CON0				(SPM_BASE + 0x310)
+#define SPM_PCM_CON1				(SPM_BASE + 0x314)
+#define SPM_PCM_IM_PTR				(SPM_BASE + 0x318)
+#define SPM_PCM_IM_LEN				(SPM_BASE + 0x31c)
+#define SPM_PCM_REG_DATA_INI			(SPM_BASE + 0x320)
+#define SPM_PCM_EVENT_VECTOR0			(SPM_BASE + 0x340)
+#define SPM_PCM_EVENT_VECTOR1			(SPM_BASE + 0x344)
+#define SPM_PCM_EVENT_VECTOR2			(SPM_BASE + 0x348)
+#define SPM_PCM_EVENT_VECTOR3			(SPM_BASE + 0x34c)
+#define SPM_PCM_MAS_PAUSE_MASK			(SPM_BASE + 0x354)
+#define SPM_PCM_PWR_IO_EN			(SPM_BASE + 0x358)
+#define SPM_PCM_TIMER_VAL			(SPM_BASE + 0x35c)
+#define SPM_PCM_TIMER_OUT			(SPM_BASE + 0x360)
+#define SPM_PCM_REG0_DATA			(SPM_BASE + 0x380)
+#define SPM_PCM_REG1_DATA			(SPM_BASE + 0x384)
+#define SPM_PCM_REG2_DATA			(SPM_BASE + 0x388)
+#define SPM_PCM_REG3_DATA			(SPM_BASE + 0x38c)
+#define SPM_PCM_REG4_DATA			(SPM_BASE + 0x390)
+#define SPM_PCM_REG5_DATA			(SPM_BASE + 0x394)
+#define SPM_PCM_REG6_DATA			(SPM_BASE + 0x398)
+#define SPM_PCM_REG7_DATA			(SPM_BASE + 0x39c)
+#define SPM_PCM_REG8_DATA			(SPM_BASE + 0x3a0)
+#define SPM_PCM_REG9_DATA			(SPM_BASE + 0x3a4)
+#define SPM_PCM_REG10_DATA			(SPM_BASE + 0x3a8)
+#define SPM_PCM_REG11_DATA			(SPM_BASE + 0x3ac)
+#define SPM_PCM_REG12_DATA			(SPM_BASE + 0x3b0)
+#define SPM_PCM_REG13_DATA			(SPM_BASE + 0x3b4)
+#define SPM_PCM_REG14_DATA			(SPM_BASE + 0x3b8)
+#define SPM_PCM_REG15_DATA			(SPM_BASE + 0x3bc)
+#define SPM_PCM_EVENT_REG_STA			(SPM_BASE + 0x3c0)
+#define SPM_PCM_FSM_STA				(SPM_BASE + 0x3c4)
+#define SPM_PCM_IM_HOST_RW_PTR			(SPM_BASE + 0x3c8)
+#define SPM_PCM_IM_HOST_RW_DAT			(SPM_BASE + 0x3cc)
+#define SPM_PCM_EVENT_VECTOR4			(SPM_BASE + 0x3d0)
+#define SPM_PCM_EVENT_VECTOR5			(SPM_BASE + 0x3d4)
+#define SPM_PCM_EVENT_VECTOR6			(SPM_BASE + 0x3d8)
+#define SPM_PCM_EVENT_VECTOR7			(SPM_BASE + 0x3dc)
+#define SPM_PCM_SW_INT_SET			(SPM_BASE + 0x3e0)
+#define SPM_PCM_SW_INT_CLEAR			(SPM_BASE + 0x3e4)
+#define SPM_CLK_CON				(SPM_BASE + 0x400)
+#define SPM_SLEEP_PTPOD2_CON			(SPM_BASE + 0x408)
+#define SPM_APMCU_PWRCTL			(SPM_BASE + 0x600)
+#define SPM_AP_DVFS_CON_SET			(SPM_BASE + 0x604)
+#define SPM_AP_STANBY_CON			(SPM_BASE + 0x608)
+#define SPM_PWR_STATUS				(SPM_BASE + 0x60c)
+#define SPM_PWR_STATUS_2ND			(SPM_BASE + 0x610)
+#define SPM_AP_BSI_REQ				(SPM_BASE + 0x614)
+#define SPM_SLEEP_TIMER_STA			(SPM_BASE + 0x720)
+#define SPM_SLEEP_WAKEUP_EVENT_MASK		(SPM_BASE + 0x810)
+#define SPM_SLEEP_CPU_WAKEUP_EVENT		(SPM_BASE + 0x814)
+#define SPM_SLEEP_MD32_WAKEUP_EVENT_MASK	(SPM_BASE + 0x818)
+#define SPM_PCM_WDT_TIMER_VAL			(SPM_BASE + 0x824)
+#define SPM_PCM_WDT_TIMER_OUT			(SPM_BASE + 0x828)
+#define SPM_PCM_MD32_MAILBOX			(SPM_BASE + 0x830)
+#define SPM_PCM_MD32_IRQ			(SPM_BASE + 0x834)
+#define SPM_SLEEP_ISR_MASK			(SPM_BASE + 0x900)
+#define SPM_SLEEP_ISR_STATUS			(SPM_BASE + 0x904)
+#define SPM_SLEEP_ISR_RAW_STA			(SPM_BASE + 0x910)
+#define SPM_SLEEP_MD32_ISR_RAW_STA		(SPM_BASE + 0x914)
+#define SPM_SLEEP_WAKEUP_MISC			(SPM_BASE + 0x918)
+#define SPM_SLEEP_BUS_PROTECT_RDY		(SPM_BASE + 0x91c)
+#define SPM_SLEEP_SUBSYS_IDLE_STA		(SPM_BASE + 0x920)
+#define SPM_PCM_RESERVE				(SPM_BASE + 0xb00)
+#define SPM_PCM_RESERVE2			(SPM_BASE + 0xb04)
+#define SPM_PCM_FLAGS				(SPM_BASE + 0xb08)
+#define SPM_PCM_SRC_REQ				(SPM_BASE + 0xb0c)
+#define SPM_PCM_DEBUG_CON			(SPM_BASE + 0xb20)
+#define SPM_CA7_CPU0_IRQ_MASK			(SPM_BASE + 0xb30)
+#define SPM_CA7_CPU1_IRQ_MASK			(SPM_BASE + 0xb34)
+#define SPM_CA7_CPU2_IRQ_MASK			(SPM_BASE + 0xb38)
+#define SPM_CA7_CPU3_IRQ_MASK			(SPM_BASE + 0xb3c)
+#define SPM_CA15_CPU0_IRQ_MASK			(SPM_BASE + 0xb40)
+#define SPM_CA15_CPU1_IRQ_MASK			(SPM_BASE + 0xb44)
+#define SPM_CA15_CPU2_IRQ_MASK			(SPM_BASE + 0xb48)
+#define SPM_CA15_CPU3_IRQ_MASK			(SPM_BASE + 0xb4c)
+#define SPM_PCM_PASR_DPD_0			(SPM_BASE + 0xb60)
+#define SPM_PCM_PASR_DPD_1			(SPM_BASE + 0xb64)
+#define SPM_PCM_PASR_DPD_2			(SPM_BASE + 0xb68)
+#define SPM_PCM_PASR_DPD_3			(SPM_BASE + 0xb6c)
+#define SPM_SLEEP_CA7_WFI0_EN			(SPM_BASE + 0xf00)
+#define SPM_SLEEP_CA7_WFI1_EN			(SPM_BASE + 0xf04)
+#define SPM_SLEEP_CA7_WFI2_EN			(SPM_BASE + 0xf08)
+#define SPM_SLEEP_CA7_WFI3_EN			(SPM_BASE + 0xf0c)
+#define SPM_SLEEP_CA15_WFI0_EN			(SPM_BASE + 0xf10)
+#define SPM_SLEEP_CA15_WFI1_EN			(SPM_BASE + 0xf14)
+#define SPM_SLEEP_CA15_WFI2_EN			(SPM_BASE + 0xf18)
+#define SPM_SLEEP_CA15_WFI3_EN			(SPM_BASE + 0xf1c)
+
+#define AP_PLL_CON3		0x1020900c
+#define AP_PLL_CON4		0x10209010
+
+#define SPM_PROJECT_CODE	0xb16
+
+#define SPM_REGWR_EN		(1U << 0)
+#define SPM_REGWR_CFG_KEY	(SPM_PROJECT_CODE << 16)
+
+#define SPM_CPU_PDN_DIS		(1U << 0)
+#define SPM_INFRA_PDN_DIS	(1U << 1)
+#define SPM_DDRPHY_PDN_DIS	(1U << 2)
+#define SPM_DUALVCORE_PDN_DIS	(1U << 3)
+#define SPM_PASR_DIS		(1U << 4)
+#define SPM_DPD_DIS		(1U << 5)
+#define SPM_SODI_DIS		(1U << 6)
+#define SPM_MEMPLL_RESET	(1U << 7)
+#define SPM_MAINPLL_PDN_DIS	(1U << 8)
+#define SPM_CPU_DVS_DIS		(1U << 9)
+#define SPM_CPU_DORMANT		(1U << 10)
+#define SPM_EXT_VSEL_GPIO103	(1U << 11)
+#define SPM_DDR_HIGH_SPEED	(1U << 12)
+#define SPM_OPT			(1U << 13)
+
+#define POWER_ON_VAL1_DEF	0x01011820
+#define PCM_FSM_STA_DEF		0x48490
+#define PCM_END_FSM_STA_DEF	0x08490
+#define PCM_END_FSM_STA_MASK	0x3fff0
+#define PCM_HANDSHAKE_SEND1	0xbeefbeef
+
+#define PCM_WDT_TIMEOUT		(30 * 32768)
+#define PCM_TIMER_MAX		(0xffffffff - PCM_WDT_TIMEOUT)
+
+#define CON0_PCM_KICK		(1U << 0)
+#define CON0_IM_KICK		(1U << 1)
+#define CON0_IM_SLEEP_DVS	(1U << 3)
+#define CON0_PCM_SW_RESET	(1U << 15)
+#define CON0_CFG_KEY		(SPM_PROJECT_CODE << 16)
+
+#define CON1_IM_SLAVE		(1U << 0)
+#define CON1_MIF_APBEN		(1U << 3)
+#define CON1_PCM_TIMER_EN	(1U << 5)
+#define CON1_IM_NONRP_EN	(1U << 6)
+#define CON1_PCM_WDT_EN		(1U << 8)
+#define CON1_PCM_WDT_WAKE_MODE	(1U << 9)
+#define CON1_SPM_SRAM_SLP_B	(1U << 10)
+#define CON1_SPM_SRAM_ISO_B	(1U << 11)
+#define CON1_EVENT_LOCK_EN	(1U << 12)
+#define CON1_CFG_KEY		(SPM_PROJECT_CODE << 16)
+
+#define PCM_PWRIO_EN_R0		(1U << 0)
+#define PCM_PWRIO_EN_R7		(1U << 7)
+#define PCM_RF_SYNC_R0		(1U << 16)
+#define PCM_RF_SYNC_R2		(1U << 18)
+#define PCM_RF_SYNC_R6		(1U << 22)
+#define PCM_RF_SYNC_R7		(1U << 23)
+
+#define CC_SYSCLK0_EN_0		(1U << 0)
+#define CC_SYSCLK0_EN_1		(1U << 1)
+#define CC_SYSCLK1_EN_0		(1U << 2)
+#define CC_SYSCLK1_EN_1		(1U << 3)
+#define CC_SYSSETTLE_SEL	(1U << 4)
+#define CC_LOCK_INFRA_DCM	(1U << 5)
+#define CC_SRCLKENA_MASK_0	(1U << 6)
+#define CC_CXO32K_RM_EN_MD1	(1U << 9)
+#define CC_CXO32K_RM_EN_MD2	(1U << 10)
+#define CC_CLKSQ1_SEL		(1U << 12)
+#define CC_DISABLE_DORM_PWR	(1U << 14)
+#define CC_MD32_DCM_EN		(1U << 18)
+
+#define WFI_OP_AND		1
+#define WFI_OP_OR		0
+
+#define WAKE_MISC_PCM_TIMER	(1U << 19)
+#define WAKE_MISC_CPU_WAKE	(1U << 20)
+
+/* define WAKE_SRC_XXX */
+#define WAKE_SRC_SPM_MERGE	(1 << 0)
+#define WAKE_SRC_KP		(1 << 2)
+#define WAKE_SRC_WDT		(1 << 3)
+#define WAKE_SRC_GPT		(1 << 4)
+#define WAKE_SRC_EINT		(1 << 6)
+#define WAKE_SRC_LOW_BAT	(1 << 9)
+#define WAKE_SRC_MD32		(1 << 10)
+#define WAKE_SRC_USB_CD		(1 << 14)
+#define WAKE_SRC_USB_PDN	(1 << 15)
+#define WAKE_SRC_AFE		(1 << 20)
+#define WAKE_SRC_THERM		(1 << 21)
+#define WAKE_SRC_CIRQ		(1 << 22)
+#define WAKE_SRC_SYSPWREQ	(1 << 24)
+#define WAKE_SRC_SEJ		(1 << 27)
+#define WAKE_SRC_ALL_MD32	(1 << 28)
+#define WAKE_SRC_CPU_IRQ	(1 << 29)
+
+enum wake_reason_t {
+	WR_NONE = 0,
+	WR_UART_BUSY = 1,
+	WR_PCM_ASSERT = 2,
+	WR_PCM_TIMER = 3,
+	WR_PCM_ABORT = 4,
+	WR_WAKE_SRC = 5,
+	WR_UNKNOWN = 6,
+};
+
+struct pwr_ctrl {
+	unsigned int pcm_flags;
+	unsigned int pcm_flags_cust;
+	unsigned int pcm_reserve;
+	unsigned int timer_val;
+	unsigned int timer_val_cust;
+	unsigned int wake_src;
+	unsigned int wake_src_cust;
+	unsigned int wake_src_md32;
+	unsigned short r0_ctrl_en;
+	unsigned short r7_ctrl_en;
+	unsigned short infra_dcm_lock;
+	unsigned short pcm_apsrc_req;
+	unsigned short mcusys_idle_mask;
+	unsigned short ca15top_idle_mask;
+	unsigned short ca7top_idle_mask;
+	unsigned short wfi_op;
+	unsigned short ca15_wfi0_en;
+	unsigned short ca15_wfi1_en;
+	unsigned short ca15_wfi2_en;
+	unsigned short ca15_wfi3_en;
+	unsigned short ca7_wfi0_en;
+	unsigned short ca7_wfi1_en;
+	unsigned short ca7_wfi2_en;
+	unsigned short ca7_wfi3_en;
+	unsigned short disp_req_mask;
+	unsigned short mfg_req_mask;
+	unsigned short md32_req_mask;
+	unsigned short syspwreq_mask;
+	unsigned short srclkenai_mask;
+};
+
+struct wake_status {
+	unsigned int assert_pc;
+	unsigned int r12;
+	unsigned int raw_sta;
+	unsigned int wake_misc;
+	unsigned int timer_out;
+	unsigned int r13;
+	unsigned int idle_sta;
+	unsigned int debug_flag;
+	unsigned int event_reg;
+	unsigned int isr;
+};
+
+struct pcm_desc {
+	const char *version;		/* PCM code version */
+	const unsigned int *base;	/* binary array base */
+	const unsigned int size;	/* binary array size */
+	const unsigned char sess;	/* session number */
+	const unsigned char replace;	/* replace mode */
+
+	unsigned int vec0;		/* event vector 0 config */
+	unsigned int vec1;		/* event vector 1 config */
+	unsigned int vec2;		/* event vector 2 config */
+	unsigned int vec3;		/* event vector 3 config */
+	unsigned int vec4;		/* event vector 4 config */
+	unsigned int vec5;		/* event vector 5 config */
+	unsigned int vec6;		/* event vector 6 config */
+	unsigned int vec7;		/* event vector 7 config */
+};
+
+struct spm_lp_scen {
+	const struct pcm_desc *pcmdesc;
+	struct pwr_ctrl *pwrctrl;
+};
+
+#define EVENT_VEC(event, resume, imme, pc)	\
+	(((pc) << 16) |				\
+	 (!!(imme) << 6) |			\
+	 (!!(resume) << 5) |			\
+	 ((event) & 0x1f))
+
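EVENT_VEC() packs the vector's program counter into bits [31:16], the immediate and resume flags into bits 6 and 5, and the event number into the low five bits. A quick self-checking example of the packing; the operand values are invented purely for illustration:

#include <assert.h>
#include <stdio.h>

#define EVENT_VEC(event, resume, imme, pc)	\
	(((pc) << 16) |				\
	 (!!(imme) << 6) |			\
	 (!!(resume) << 5) |			\
	 ((event) & 0x1f))

int main(void)
{
	/* event 11, resume = 1, immediate = 0, vector PC 0x240 */
	unsigned int vec = EVENT_VEC(11, 1, 0, 0x240);

	/* 0x240 << 16 = 0x02400000, resume bit = 0x20, event = 0xb */
	assert(vec == 0x0240002b);
	printf("vector config: 0x%08x\n", vec);
	return 0;
}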
+#define spm_read(addr)		mmio_read_32(addr)
+#define spm_write(addr, val)	mmio_write_32(addr, val)
+
+#define is_cpu_pdn(flags)	(!((flags) & SPM_CPU_PDN_DIS))
+#define is_infra_pdn(flags)	(!((flags) & SPM_INFRA_PDN_DIS))
+#define is_ddrphy_pdn(flags)	(!((flags) & SPM_DDRPHY_PDN_DIS))
+
+static inline void set_pwrctrl_pcm_flags(struct pwr_ctrl *pwrctrl,
+					 unsigned int flags)
+{
+	flags &= ~SPM_EXT_VSEL_GPIO103;
+
+	if (pwrctrl->pcm_flags_cust == 0)
+		pwrctrl->pcm_flags = flags;
+	else
+		pwrctrl->pcm_flags = pwrctrl->pcm_flags_cust;
+}
+
+static inline void set_pwrctrl_pcm_data(struct pwr_ctrl *pwrctrl,
+					unsigned int data)
+{
+	pwrctrl->pcm_reserve = data;
+}
+
+void spm_reset_and_init_pcm(void);
+
+void spm_init_pcm_register(void);	/* init r0 and r7 */
+void spm_set_power_control(const struct pwr_ctrl *pwrctrl);
+void spm_set_wakeup_event(const struct pwr_ctrl *pwrctrl);
+
+void spm_get_wakeup_status(struct wake_status *wakesta);
+void spm_set_sysclk_settle(void);
+void spm_kick_pcm_to_run(struct pwr_ctrl *pwrctrl);
+void spm_clean_after_wakeup(void);
+enum wake_reason_t spm_output_wake_reason(struct wake_status *wakesta);
+void spm_register_init(void);
+void spm_go_to_hotplug(void);
+void spm_init_event_vector(const struct pcm_desc *pcmdesc);
+void spm_kick_im_to_fetch(const struct pcm_desc *pcmdesc);
+void spm_set_sysclk_settle(void);
+int is_mcdi_ready(void);
+int is_hotplug_ready(void);
+int is_suspend_ready(void);
+void set_mcdi_ready(void);
+void set_hotplug_ready(void);
+void set_suspend_ready(void);
+void clear_all_ready(void);
+void spm_lock_init(void);
+void spm_lock_get(void);
+void spm_lock_release(void);
+void spm_boot_init(void);
+
+#endif /* __SPM_H__ */
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_hotplug.c b/plat/mediatek/mt8173/drivers/spm/spm_hotplug.c
new file mode 100644
index 0000000..6d275d0
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_hotplug.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <platform.h>
+#include <spm.h>
+#include <spm_hotplug.h>
+#include <spm_mcdi.h>
+
+/*
+ * System Power Manager (SPM) is a hardware module, which controls cpu or
+ * system power for different power scenarios using different firmware.
+ * This driver controls the cpu power in cpu hotplug flow.
+ */
+
+#define PCM_HOTPLUG_VALID_MASK	0x0000ff00
+#define PCM_HOTPLUG_VALID_SHIFT	0x8
+
+/**********************************************************
+ * PCM sequence for CPU hotplug
+ **********************************************************/
+static const unsigned int hotplug_binary[] = {
+	0x1900001f, 0x1020020c, 0x1950001f, 0x1020020c, 0xa9400005, 0x00000001,
+	0xe1000005, 0x1910001f, 0x10006720, 0x814c9001, 0xd82000e5, 0x17c07c1f,
+	0x1900001f, 0x10001220, 0x1950001f, 0x10001220, 0xa15f0405, 0xe1000005,
+	0x1900001f, 0x10001228, 0x1950001f, 0x10001228, 0x810f1401, 0xd8200244,
+	0x17c07c1f, 0xe2e0006d, 0xe2e0002d, 0x1a00001f, 0x100062b8, 0x1910001f,
+	0x100062b8, 0xa9000004, 0x00000001, 0xe2000004, 0x1910001f, 0x100062b8,
+	0x81142804, 0xd8200444, 0x17c07c1f, 0xe2e0002c, 0xe2e0003c, 0xe2e0003e,
+	0xe2e0003a, 0xe2e00032, 0x1910001f, 0x1000660c, 0x81079001, 0x1950001f,
+	0x10006610, 0x81479401, 0xa1001404, 0xd8000584, 0x17c07c1f, 0x1900001f,
+	0x10006404, 0x1950001f, 0x10006404, 0xa1568405, 0xe1000005, 0xf0000000,
+	0x17c07c1f, 0x1900001f, 0x10006404, 0x1950001f, 0x10006404, 0x89400005,
+	0x0000dfff, 0xe1000005, 0xe2e00036, 0xe2e0003e, 0x1910001f, 0x1000660c,
+	0x81079001, 0x1950001f, 0x10006610, 0x81479401, 0x81001404, 0xd82008c4,
+	0x17c07c1f, 0xe2e0002e, 0x1a00001f, 0x100062b8, 0x1910001f, 0x100062b8,
+	0x89000004, 0x0000fffe, 0xe2000004, 0x1910001f, 0x100062b8, 0x81142804,
+	0xd8000ae4, 0x17c07c1f, 0xe2e0006e, 0xe2e0004e, 0xe2e0004c, 0xe2e0004d,
+	0x1900001f, 0x10001220, 0x1950001f, 0x10001220, 0x89400005, 0xbfffffff,
+	0xe1000005, 0x1900001f, 0x10001228, 0x1950001f, 0x10001228, 0x810f1401,
+	0xd8000ce4, 0x17c07c1f, 0x1900001f, 0x1020020c, 0x1950001f, 0x1020020c,
+	0x89400005, 0xfffffffe, 0xe1000005, 0xf0000000, 0x17c07c1f, 0x1212841f,
+	0xe2e00036, 0xe2e0003e, 0x1380201f, 0xe2e0003c, 0xe2a00000, 0x1b80001f,
+	0x20000080, 0xe2e0007c, 0x1b80001f, 0x20000003, 0xe2e0005c, 0xe2e0004c,
+	0xe2e0004d, 0xf0000000, 0x17c07c1f, 0xe2e0004f, 0xe2e0006f, 0xe2e0002f,
+	0xe2a00001, 0x1b80001f, 0x20000080, 0xe2e0002e, 0xe2e0003e, 0xe2e00032,
+	0xf0000000, 0x17c07c1f, 0x1212841f, 0xe2e00026, 0xe2e0002e, 0x1380201f,
+	0x1a00001f, 0x100062b4, 0x1910001f, 0x100062b4, 0x81322804, 0xe2000004,
+	0x81202804, 0xe2000004, 0x1b80001f, 0x20000034, 0x1910001f, 0x100062b4,
+	0x81142804, 0xd8001404, 0x17c07c1f, 0xe2e0000e, 0xe2e0000c, 0xe2e0000d,
+	0xf0000000, 0x17c07c1f, 0xe2e0002d, 0x1a00001f, 0x100062b4, 0x1910001f,
+	0x100062b4, 0xa1002804, 0xe2000004, 0xa1122804, 0xe2000004, 0x1b80001f,
+	0x20000080, 0x1910001f, 0x100062b4, 0x81142804, 0xd82016a4, 0x17c07c1f,
+	0xe2e0002f, 0xe2e0002b, 0xe2e00023, 0x1380201f, 0xe2e00022, 0xf0000000,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x1840001f, 0x00000001, 0x1840001f, 0x00000001,
+	0x1840001f, 0x00000001, 0xa1d48407, 0x1b00001f, 0x2f7be75f, 0xe8208000,
+	0x10006354, 0xfffe7b47, 0xa1d10407, 0x1b80001f, 0x20000020, 0x17c07c1f,
+	0x1910001f, 0x10006b00, 0x81461001, 0xb14690a1, 0xd82044e5, 0x17c07c1f,
+	0x1910001f, 0x10006610, 0x81079001, 0xd80044e4, 0x17c07c1f, 0x1990001f,
+	0x10006b00, 0x81421801, 0x82429801, 0x81402405, 0xd80044e5, 0x17c07c1f,
+	0x1a40001f, 0x100062b0, 0x1280041f, 0xc24007a0, 0x17c07c1f, 0x1910001f,
+	0x10006b00, 0x81449001, 0xd8204be5, 0x17c07c1f, 0x1910001f, 0x10006b00,
+	0x81009001, 0xd8204984, 0x17c07c1f, 0x1910001f, 0x10006610, 0x81051001,
+	0xd8204be4, 0x17c07c1f, 0x1910001f, 0x10006720, 0x81489001, 0xd82046c5,
+	0x17c07c1f, 0x1a40001f, 0x10006218, 0x1a80001f, 0x10006264, 0xc24010e0,
+	0x17c07c1f, 0x1910001f, 0x1000660c, 0x81051001, 0x1950001f, 0x10006610,
+	0x81451401, 0xa1001404, 0xd8004824, 0x17c07c1f, 0xd0004b00, 0x17c07c1f,
+	0x17c07c1f, 0x1910001f, 0x10006610, 0x81051001, 0xd8004be4, 0x17c07c1f,
+	0x1a40001f, 0x10006218, 0x1a80001f, 0x10006264, 0xc2400ee0, 0x17c07c1f,
+	0x1910001f, 0x10006b00, 0x89000004, 0xfffffdff, 0x1940001f, 0x10006b00,
+	0xe1400004, 0x17c07c1f, 0x1910001f, 0x10006b00, 0x81451001, 0xd8205305,
+	0x17c07c1f, 0x1910001f, 0x10006b00, 0x81011001, 0xd82050a4, 0x17c07c1f,
+	0x1910001f, 0x10006610, 0x81059001, 0xd8205304, 0x17c07c1f, 0x1910001f,
+	0x10006720, 0x81491001, 0xd8204de5, 0x17c07c1f, 0x1a40001f, 0x1000621c,
+	0x1a80001f, 0x1000626c, 0xc24010e0, 0x17c07c1f, 0x1910001f, 0x1000660c,
+	0x81059001, 0x1950001f, 0x10006610, 0x81459401, 0xa1001404, 0xd8004f44,
+	0x17c07c1f, 0xd0005220, 0x17c07c1f, 0x17c07c1f, 0x1910001f, 0x10006610,
+	0x81059001, 0xd8005304, 0x17c07c1f, 0x1a40001f, 0x1000621c, 0x1a80001f,
+	0x1000626c, 0xc2400ee0, 0x17c07c1f, 0x1910001f, 0x10006b00, 0x89000004,
+	0xfffffbff, 0x1940001f, 0x10006b00, 0xe1400004, 0x17c07c1f, 0x1910001f,
+	0x10006b00, 0x81459001, 0xd8205a25, 0x17c07c1f, 0x1910001f, 0x10006b00,
+	0x81019001, 0xd82057c4, 0x17c07c1f, 0x1910001f, 0x10006610, 0x81061001,
+	0xd8205a24, 0x17c07c1f, 0x1910001f, 0x10006720, 0x81499001, 0xd8205505,
+	0x17c07c1f, 0x1a40001f, 0x10006220, 0x1a80001f, 0x10006274, 0xc24010e0,
+	0x17c07c1f, 0x1910001f, 0x1000660c, 0x81061001, 0x1950001f, 0x10006610,
+	0x81461401, 0xa1001404, 0xd8005664, 0x17c07c1f, 0xd0005940, 0x17c07c1f,
+	0x17c07c1f, 0x1910001f, 0x10006610, 0x81061001, 0xd8005a24, 0x17c07c1f,
+	0x1a40001f, 0x10006220, 0x1a80001f, 0x10006274, 0xc2400ee0, 0x17c07c1f,
+	0x1910001f, 0x10006b00, 0x89000004, 0xfffff7ff, 0x1940001f, 0x10006b00,
+	0xe1400004, 0x17c07c1f, 0x1910001f, 0x10006b00, 0x81461001, 0xd8206185,
+	0x17c07c1f, 0x1910001f, 0x10006b00, 0x81021001, 0xd8205ec4, 0x17c07c1f,
+	0x1910001f, 0x10006610, 0x81081001, 0xd8206184, 0x17c07c1f, 0x1910001f,
+	0x10006720, 0x814a1001, 0xd8205c25, 0x17c07c1f, 0x1a40001f, 0x100062a0,
+	0x1280041f, 0xc2401540, 0x17c07c1f, 0x1910001f, 0x1000660c, 0x81081001,
+	0x1950001f, 0x10006610, 0x81481401, 0xa1001404, 0xd8005d64, 0x17c07c1f,
+	0xd00060a0, 0x17c07c1f, 0x17c07c1f, 0x1910001f, 0x10006610, 0x81479001,
+	0x81881001, 0x69a00006, 0x00000000, 0x81401805, 0xd8206185, 0x17c07c1f,
+	0x1a40001f, 0x100062a0, 0x1280041f, 0xc2401240, 0x17c07c1f, 0x1910001f,
+	0x10006b00, 0x89000004, 0xffffefff, 0x1940001f, 0x10006b00, 0xe1400004,
+	0x17c07c1f, 0x1910001f, 0x10006b00, 0x81469001, 0xd82068e5, 0x17c07c1f,
+	0x1910001f, 0x10006b00, 0x81029001, 0xd8206624, 0x17c07c1f, 0x1910001f,
+	0x10006610, 0x81089001, 0xd82068e4, 0x17c07c1f, 0x1910001f, 0x10006720,
+	0x814a9001, 0xd8206385, 0x17c07c1f, 0x1a40001f, 0x100062a4, 0x1290841f,
+	0xc2401540, 0x17c07c1f, 0x1910001f, 0x1000660c, 0x81089001, 0x1950001f,
+	0x10006610, 0x81489401, 0xa1001404, 0xd80064c4, 0x17c07c1f, 0xd0006800,
+	0x17c07c1f, 0x17c07c1f, 0x1910001f, 0x10006610, 0x81479001, 0x81889001,
+	0x69a00006, 0x00000000, 0x81401805, 0xd82068e5, 0x17c07c1f, 0x1a40001f,
+	0x100062a4, 0x1290841f, 0xc2401240, 0x17c07c1f, 0x1910001f, 0x10006b00,
+	0x89000004, 0xffffdfff, 0x1940001f, 0x10006b00, 0xe1400004, 0x1910001f,
+	0x10006610, 0x81479001, 0x81881001, 0x69600005, 0x00000000, 0xa1401805,
+	0x81889001, 0xa1401805, 0xd8006bc5, 0x17c07c1f, 0x1910001f, 0x10006b00,
+	0x81421001, 0x82429001, 0x82802405, 0xd8206bca, 0x17c07c1f, 0x1a40001f,
+	0x100062b0, 0x1280041f, 0xc2400000, 0x17c07c1f, 0x1990001f, 0x10006b00,
+	0x89800006, 0x00003f00, 0x69200006, 0x00000000, 0xd82041e4, 0x17c07c1f,
+	0x1990001f, 0x10006320, 0x69200006, 0xbeefbeef, 0xd8006dc4, 0x17c07c1f,
+	0xd00041e0, 0x17c07c1f, 0x1910001f, 0x10006358, 0x810b1001, 0xd8006dc4,
+	0x17c07c1f, 0x1980001f, 0xdeaddead, 0x19c0001f, 0x01411820, 0xf0000000
+};
+static const struct pcm_desc hotplug_pcm = {
+	.version	= "pcm_power_down_mt8173_V37",
+	.base		= hotplug_binary,
+	.size		= 888,
+	.sess		= 2,
+	.replace	= 0,
+};
+
+static struct pwr_ctrl hotplug_ctrl = {
+	.wake_src = 0,
+	.wake_src_md32 = 0,
+	.wfi_op = WFI_OP_OR,
+	.mcusys_idle_mask = 1,
+	.ca7top_idle_mask = 1,
+	.ca15top_idle_mask = 1,
+	.disp_req_mask = 1,
+	.mfg_req_mask = 1,
+	.md32_req_mask = 1,
+	.syspwreq_mask = 1,
+	.pcm_flags = 0,
+};
+
+static const struct spm_lp_scen spm_hotplug = {
+	.pcmdesc = &hotplug_pcm,
+	.pwrctrl = &hotplug_ctrl,
+};
+
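+/* Load the hotplug PCM firmware into the SPM and kick it to run. */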
+void spm_go_to_hotplug(void)
+{
+	const struct pcm_desc *pcmdesc = spm_hotplug.pcmdesc;
+	struct pwr_ctrl *pwrctrl = spm_hotplug.pwrctrl;
+
+	set_pwrctrl_pcm_flags(pwrctrl, 0);
+	spm_reset_and_init_pcm();
+	spm_kick_im_to_fetch(pcmdesc);
+	spm_set_power_control(pwrctrl);
+	spm_set_wakeup_event(pwrctrl);
+	spm_kick_pcm_to_run(pwrctrl);
+}
+
+void spm_clear_hotplug(void)
+{
+	/* Inform the SPM that the CPU wants to program CPU_WAKEUP_EVENT and
+	 * DISABLE_CPU_DORM */
+
+	mmio_write_32(SPM_PCM_REG_DATA_INI, PCM_HANDSHAKE_SEND1);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R6);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+
+	/* Wait for the SPM's response; the sleep API cannot be used here */
+	while ((mmio_read_32(SPM_PCM_FSM_STA) & PCM_END_FSM_STA_MASK)
+		!= PCM_END_FSM_STA_DEF)
+		;
+
+	/* no hotplug pcm running */
+	clear_all_ready();
+}
+
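+/*
+ * Power on the CPU identified by mpidr: make sure the hotplug PCM firmware
+ * is running, then program SPM_PCM_RESERVE to request power-on of that CPU.
+ */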
+void spm_hotplug_on(unsigned long mpidr)
+{
+	unsigned long linear_id;
+
+	linear_id = ((mpidr & MPIDR_CLUSTER_MASK) >> 6) |
+			(mpidr & MPIDR_CPU_MASK);
+
+	spm_lock_get();
+	if (is_hotplug_ready() == 0) {
+		spm_mcdi_wakeup_all_cores();
+		mmio_clrbits_32(SPM_PCM_RESERVE, PCM_HOTPLUG_VALID_MASK);
+		spm_go_to_hotplug();
+		set_hotplug_ready();
+	}
+	/* turn on CPUx */
+	mmio_clrsetbits_32(SPM_PCM_RESERVE,
+		PCM_HOTPLUG_VALID_MASK | (1 << linear_id),
+		1 << (linear_id + PCM_HOTPLUG_VALID_SHIFT));
+	spm_lock_release();
+}
+
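+/*
+ * Power off the CPU identified by mpidr: make sure the hotplug PCM firmware
+ * is running, then program SPM_PCM_RESERVE to request power-off of that CPU.
+ */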
+void spm_hotplug_off(unsigned long mpidr)
+{
+	unsigned long linear_id;
+
+	linear_id = ((mpidr & MPIDR_CLUSTER_MASK) >> 6) |
+			(mpidr & MPIDR_CPU_MASK);
+
+	spm_lock_get();
+	if (is_hotplug_ready() == 0) {
+		spm_mcdi_wakeup_all_cores();
+		mmio_clrbits_32(SPM_PCM_RESERVE, PCM_HOTPLUG_VALID_MASK);
+		spm_go_to_hotplug();
+		set_hotplug_ready();
+	}
+	mmio_clrsetbits_32(SPM_PCM_RESERVE, PCM_HOTPLUG_VALID_MASK,
+		(1 << linear_id) |
+		(1 << (linear_id + PCM_HOTPLUG_VALID_SHIFT)));
+	spm_lock_release();
+}
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_hotplug.h b/plat/mediatek/mt8173/drivers/spm/spm_hotplug.h
new file mode 100644
index 0000000..8ebf5c4
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_hotplug.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __SPM_HOTPLUG_H__
+#define __SPM_HOTPLUG_H__
+
+void spm_clear_hotplug(void);
+void spm_hotplug_off(unsigned long mpidr);
+void spm_hotplug_on(unsigned long mpidr);
+
+#endif /* __SPM_HOTPLUG_H__ */
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_mcdi.c b/plat/mediatek/mt8173/drivers/spm/spm_mcdi.c
new file mode 100644
index 0000000..de8d73a
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_mcdi.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <spm.h>
+#include <spm_hotplug.h>
+#include <spm_mcdi.h>
+
+/*
+ * The System Power Manager (SPM) is a hardware module that controls CPU and
+ * system power for different power scenarios using different firmware.
+ * This driver controls CPU power in the CPU-idle (MCDI) power-saving state.
+ */
+
+#define WAKE_SRC_FOR_MCDI \
+	(WAKE_SRC_KP | WAKE_SRC_GPT | WAKE_SRC_EINT |		\
+	 WAKE_SRC_MD32 | WAKE_SRC_USB_CD | WAKE_SRC_USB_PDN |	\
+	 WAKE_SRC_AFE | WAKE_SRC_THERM | WAKE_SRC_CIRQ |	\
+	 WAKE_SRC_SYSPWREQ | WAKE_SRC_CPU_IRQ)
+#define PCM_MCDI_HANDSHAKE_SYNC	0xbeefbeef
+#define PCM_MCDI_HANDSHAKE_ACK	0xdeaddead
+#define PCM_MCDI_UPDATE_INFORM	0xabcdabcd
+#define PCM_MCDI_CHECK_DONE	0x12345678
+#define PCM_MCDI_ALL_CORE_AWAKE	0x0
+#define PCM_MCDI_OFFLOADED	0xaa55aa55
+#define PCM_MCDI_CA72_CPUTOP_PWRCTL	(0x1 << 16)
+#define PCM_MCDI_CA53_CPUTOP_PWRCTL	(0x1 << 17)
+#define PCM_MCDI_CA72_PWRSTA_SHIFT	16
+#define PCM_MCDI_CA53_PWRSTA_SHIFT	9
+
+static const unsigned int mcdi_binary[] = {
+	0x1a10001f, 0x10006b04, 0x1890001f, 0x10006b6c, 0x1a40001f, 0x10006210,
+	0x18d0001f, 0x10006210, 0x81002001, 0xd82001c4, 0x17c07c1f, 0xa0900402,
+	0xc2401540, 0x17c07c1f, 0x81052001, 0xd8200284, 0x17c07c1f, 0xa0950402,
+	0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x10006230, 0x18d0001f, 0x10006230,
+	0x8100a001, 0xd82003c4, 0x17c07c1f, 0xa0908402, 0xc2401540, 0x17c07c1f,
+	0x8105a001, 0xd8200484, 0x17c07c1f, 0xa0958402, 0xc2401b80, 0x17c07c1f,
+	0x1a40001f, 0x10006238, 0x18d0001f, 0x10006238, 0x81012001, 0xd82005c4,
+	0x17c07c1f, 0xa0910402, 0xc2401540, 0x17c07c1f, 0x81062001, 0xd8200684,
+	0x17c07c1f, 0xa0960402, 0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x1000623c,
+	0x18d0001f, 0x1000623c, 0x8101a001, 0xd82007c4, 0x17c07c1f, 0xa0918402,
+	0xc2401540, 0x17c07c1f, 0x8106a001, 0xd8200884, 0x17c07c1f, 0xa0968402,
+	0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x10006298, 0x18d0001f, 0x10006298,
+	0x81022001, 0xd82009c4, 0x17c07c1f, 0xa0920402, 0xc2401540, 0x17c07c1f,
+	0x81072001, 0xd8200a84, 0x17c07c1f, 0xa0970402, 0xc2401b80, 0x17c07c1f,
+	0x1a40001f, 0x1000629c, 0x18d0001f, 0x1000629c, 0x8102a001, 0xd8200bc4,
+	0x17c07c1f, 0xa0928402, 0xc2401540, 0x17c07c1f, 0x8107a001, 0xd8200c84,
+	0x17c07c1f, 0xa0978402, 0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x100062c4,
+	0x18d0001f, 0x100062c4, 0x81032001, 0xd8200dc4, 0x17c07c1f, 0xa0930402,
+	0xc2401540, 0x17c07c1f, 0x81082001, 0xd8200e84, 0x17c07c1f, 0xa0980402,
+	0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x100062c0, 0x18d0001f, 0x100062c0,
+	0x8103a001, 0xd8200fc4, 0x17c07c1f, 0xa0938402, 0xc2401540, 0x17c07c1f,
+	0x8108a001, 0xd8201084, 0x17c07c1f, 0xa0988402, 0xc2401b80, 0x17c07c1f,
+	0x1a40001f, 0x10006214, 0x18d0001f, 0x10006214, 0x81042001, 0xd82011c4,
+	0x17c07c1f, 0xa0940402, 0xc2401540, 0x17c07c1f, 0x81092001, 0xd8201284,
+	0x17c07c1f, 0xa0990402, 0xc2401b80, 0x17c07c1f, 0x1a40001f, 0x100062cc,
+	0x18d0001f, 0x100062cc, 0x8104a001, 0xd82013c4, 0x17c07c1f, 0xa0948402,
+	0xc2401540, 0x17c07c1f, 0x8109a001, 0xd8201484, 0x17c07c1f, 0xa0998402,
+	0xc2401b80, 0x17c07c1f, 0x1900001f, 0x10006b6c, 0x80802002, 0xe1000002,
+	0xf0000000, 0x17c07c1f, 0xa8c00003, 0x00000004, 0xe2400003, 0xa8c00003,
+	0x00000008, 0xe2400003, 0x1b80001f, 0x00000020, 0x88c00003, 0xffffffef,
+	0xe2400003, 0x88c00003, 0xfffffffd, 0xe2400003, 0xa8c00003, 0x00000001,
+	0xe2400003, 0x88c00003, 0xfffff0ff, 0xe2400003, 0x1b80001f, 0x20000080,
+	0x1a90001f, 0x10001220, 0x69200009, 0x1000623c, 0xd8001984, 0x17c07c1f,
+	0x69200009, 0x10006214, 0xd8001a64, 0x17c07c1f, 0xd0001b00, 0x17c07c1f,
+	0x1900001f, 0x10001220, 0x8a80000a, 0xfffffff9, 0xe100000a, 0xd0001b00,
+	0x17c07c1f, 0x1900001f, 0x10001220, 0x8a80000a, 0xff1fbfff, 0xe100000a,
+	0x1b80001f, 0x20000080, 0xf0000000, 0x17c07c1f, 0x1a90001f, 0x10001220,
+	0x69200009, 0x1000623c, 0xd8001d04, 0x17c07c1f, 0x69200009, 0x10006214,
+	0xd8001de4, 0x17c07c1f, 0xd0001e80, 0x17c07c1f, 0x1900001f, 0x10001220,
+	0xaa80000a, 0x00000006, 0xe100000a, 0xd0001e80, 0x17c07c1f, 0x1900001f,
+	0x10001220, 0xaa80000a, 0x00e04000, 0xe100000a, 0x1b80001f, 0x20000080,
+	0x69200009, 0x10006214, 0xd8001fe4, 0x17c07c1f, 0xa8c00003, 0x00000f00,
+	0xe2400003, 0xd0002040, 0x17c07c1f, 0xa8c00003, 0x00003f00, 0xe2400003,
+	0x1b80001f, 0x20000080, 0xa8c00003, 0x00000002, 0xe2400003, 0x88c00003,
+	0xfffffffe, 0xe2400003, 0xa8c00003, 0x00000010, 0xe2400003, 0x88c00003,
+	0xfffffffb, 0xe2400003, 0x88c00003, 0xfffffff7, 0xe2400003, 0xf0000000,
+	0x17c07c1f, 0xe2e00036, 0xe2e0003e, 0x1b80001f, 0x00000020, 0xe2e0003c,
+	0xe8208000, 0x10006244, 0x00000000, 0x1b80001f, 0x20000080, 0xe2e0007c,
+	0x1b80001f, 0x20000003, 0xe2e0005c, 0xe2e0004c, 0xe2e0004d, 0xf0000000,
+	0x17c07c1f, 0xe2e0004f, 0xe2e0006f, 0xe2e0002f, 0xe8208000, 0x10006244,
+	0x00000001, 0x1b80001f, 0x20000080, 0xe2e0002e, 0xe2e0003e, 0xe2e0003a,
+	0xe2e00032, 0x1b80001f, 0x00000020, 0xf0000000, 0x17c07c1f, 0xe2e00036,
+	0xe2e0003e, 0x1b80001f, 0x00000020, 0xe2e0003c, 0xe2a00000, 0x1b80001f,
+	0x20000080, 0xe2e0007c, 0x1b80001f, 0x20000003, 0xe2e0005c, 0xe2e0004c,
+	0xe2e0004d, 0xf0000000, 0x17c07c1f, 0xe2e0004f, 0xe2e0006f, 0xe2e0002f,
+	0xe2a00001, 0x1b80001f, 0x20000080, 0xe2e0002e, 0xe2e0003e, 0xe2e0003a,
+	0xe2e00032, 0xf0000000, 0x17c07c1f, 0xe2e00026, 0xe2e0002e, 0x1b80001f,
+	0x00000020, 0x1a00001f, 0x100062b4, 0x1910001f, 0x100062b4, 0x81322804,
+	0xe2000004, 0x81202804, 0xe2000004, 0x1b80001f, 0x20000080, 0xe2e0000e,
+	0xe2e0000c, 0xe2e0000d, 0xf0000000, 0x17c07c1f, 0xe2e0002d, 0x1a00001f,
+	0x100062b4, 0x1910001f, 0x100062b4, 0xa1002804, 0xe2000004, 0xa1122804,
+	0xe2000004, 0x1b80001f, 0x20000080, 0xe2e0002f, 0xe2e0002b, 0xe2e00023,
+	0x1b80001f, 0x00000020, 0xe2e00022, 0xf0000000, 0x17c07c1f, 0x1910001f,
+	0x1000660c, 0x1a10001f, 0x10006610, 0xa2002004, 0x89000008, 0x00030000,
+	0xd80036c4, 0x17c07c1f, 0x8207a001, 0xd82036c8, 0x17c07c1f, 0x1900001f,
+	0x1020020c, 0x1a10001f, 0x1020020c, 0xaa000008, 0x00000001, 0xe1000008,
+	0x1910001f, 0x1020020c, 0x81001001, 0xd8203184, 0x17c07c1f, 0x1910001f,
+	0x10006720, 0x820c9001, 0xd8203228, 0x17c07c1f, 0x1900001f, 0x10001220,
+	0x1a10001f, 0x10001220, 0xa21f0408, 0xe1000008, 0x1b80001f, 0x20000080,
+	0xe2e0006d, 0xe2e0002d, 0x1a00001f, 0x100062b8, 0x1910001f, 0x100062b8,
+	0xa9000004, 0x00000001, 0xe2000004, 0x1b80001f, 0x20000080, 0xe2e0002c,
+	0xe2e0003c, 0xe2e0003e, 0xe2e0003a, 0xe2e00032, 0x1b80001f, 0x00000020,
+	0x1900001f, 0x10006404, 0x1a10001f, 0x10006404, 0xa2168408, 0xe1000008,
+	0xf0000000, 0x17c07c1f, 0x1a10001f, 0x10006610, 0x8207a001, 0xd8003e68,
+	0x17c07c1f, 0x1a10001f, 0x10006918, 0x8a000008, 0x00003030, 0xb900010c,
+	0x01000001, 0xd8203e64, 0x17c07c1f, 0x1900001f, 0x10006404, 0x1a10001f,
+	0x10006404, 0x8a000008, 0x0000dfff, 0xe1000008, 0xe2e00036, 0xe2e0003e,
+	0x1b80001f, 0x00000020, 0xe2e0002e, 0x1a00001f, 0x100062b8, 0x1910001f,
+	0x100062b8, 0x89000004, 0x0000fffe, 0xe2000004, 0x1b80001f, 0x20000080,
+	0xe2e0006e, 0xe2e0004e, 0xe2e0004c, 0xe2e0004d, 0x1900001f, 0x10001220,
+	0x1a10001f, 0x10001220, 0x8a000008, 0xbfffffff, 0xe1000008, 0x1b80001f,
+	0x20000080, 0x1900001f, 0x1020020c, 0x1a10001f, 0x1020020c, 0x8a000008,
+	0xfffffffe, 0xe1000008, 0x1910001f, 0x1020020c, 0x81001001, 0xd8003dc4,
+	0x17c07c1f, 0xf0000000, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x1840001f, 0x00000001, 0x11407c1f, 0xe8208000,
+	0x10006310, 0x0b160008, 0x1900001f, 0x000f7bde, 0x1a00001f, 0x10200268,
+	0xe2000004, 0xe8208000, 0x10006600, 0x00000000, 0x69200006, 0xbeefbeef,
+	0xd8204584, 0x17c07c1f, 0x1910001f, 0x10006358, 0x810b1001, 0xd8004244,
+	0x17c07c1f, 0x1980001f, 0xdeaddead, 0x69200006, 0xabcdabcd, 0xd8204324,
+	0x17c07c1f, 0x88900001, 0x10006814, 0x1910001f, 0x10006400, 0x81271002,
+	0x1880001f, 0x10006600, 0xe0800004, 0x1910001f, 0x10006358, 0x810b1001,
+	0xd80044a4, 0x17c07c1f, 0x1980001f, 0x12345678, 0x60a07c05, 0x89100002,
+	0x10006600, 0x80801001, 0xd8007bc2, 0x17c07c1f, 0x1890001f, 0x10006b00,
+	0x82090801, 0xc8800008, 0x17c07c1f, 0x1b00001f, 0x3fffe7ff, 0x8a00000c,
+	0x3fffe7ff, 0xd82041c8, 0x17c07c1f, 0x1b80001f, 0xd0010000, 0x1a10001f,
+	0x10006720, 0x82002001, 0x82201408, 0xd8204988, 0x17c07c1f, 0x1a40001f,
+	0x10006200, 0x1a80001f, 0x1000625c, 0xc24028e0, 0x17c07c1f, 0xa1400405,
+	0x1a10001f, 0x10006720, 0x8200a001, 0x82209408, 0xd8204b28, 0x17c07c1f,
+	0x1a40001f, 0x10006218, 0x1a80001f, 0x10006264, 0xc24028e0, 0x17c07c1f,
+	0xa1508405, 0x1a10001f, 0x10006720, 0x82012001, 0x82211408, 0xd8204cc8,
+	0x17c07c1f, 0x1a40001f, 0x1000621c, 0x1a80001f, 0x1000626c, 0xc24028e0,
+	0x17c07c1f, 0xa1510405, 0x1a10001f, 0x10006720, 0x8201a001, 0x82219408,
+	0xd8204e68, 0x17c07c1f, 0x1a40001f, 0x10006220, 0x1a80001f, 0x10006274,
+	0xc24028e0, 0x17c07c1f, 0xa1518405, 0x1a10001f, 0x10006720, 0x82022001,
+	0x82221408, 0xd8204fe8, 0x17c07c1f, 0x1a40001f, 0x100062a0, 0x1280041f,
+	0xc2402cc0, 0x17c07c1f, 0xa1520405, 0x1a10001f, 0x10006720, 0x8202a001,
+	0x82229408, 0xd8205168, 0x17c07c1f, 0x1a40001f, 0x100062a4, 0x1290841f,
+	0xc2402cc0, 0x17c07c1f, 0xa1528405, 0x1a10001f, 0x10006720, 0x82032001,
+	0x82231408, 0xd8205248, 0x17c07c1f, 0xa1530405, 0x1a10001f, 0x10006720,
+	0x8203a001, 0x82239408, 0xd8205328, 0x17c07c1f, 0xa1538405, 0x1a10001f,
+	0x10006b00, 0x8108a001, 0xd8205e84, 0x17c07c1f, 0x1910001f, 0x1000660c,
+	0x1a10001f, 0x10006610, 0xa2002004, 0x89000008, 0x00001e00, 0xd8005944,
+	0x17c07c1f, 0x82042001, 0xd8205948, 0x17c07c1f, 0x1900001f, 0x1020002c,
+	0x1a10001f, 0x1020002c, 0xaa000008, 0x00000010, 0xe1000008, 0x1910001f,
+	0x10006720, 0x820c1001, 0xd8205628, 0x17c07c1f, 0x1900001f, 0x10001250,
+	0x1a10001f, 0x10001250, 0xa2110408, 0xe1000008, 0x1b80001f, 0x20000080,
+	0x1900001f, 0x10001220, 0x1a10001f, 0x10001220, 0xa21e8408, 0xe1000008,
+	0x1b80001f, 0x20000080, 0x1a40001f, 0x10006208, 0xc24024e0, 0x17c07c1f,
+	0x1a10001f, 0x10006610, 0x82042001, 0xd8005e88, 0x17c07c1f, 0x1a10001f,
+	0x10006918, 0x8a000008, 0x00000f0f, 0xba00010c, 0x1fffe7ff, 0xd8205e88,
+	0x17c07c1f, 0x1a40001f, 0x10006208, 0xc24022a0, 0x17c07c1f, 0x1900001f,
+	0x10001250, 0x1a10001f, 0x10001250, 0x8a000008, 0xfffffffb, 0xe1000008,
+	0x1b80001f, 0x20000080, 0x1900001f, 0x10001220, 0x1a10001f, 0x10001220,
+	0x8a000008, 0xdfffffff, 0xe1000008, 0x1b80001f, 0x20000080, 0x1900001f,
+	0x1020002c, 0x1a10001f, 0x1020002c, 0x8a000008, 0xffffffef, 0xe1000008,
+	0x1a10001f, 0x10006b00, 0x81082001, 0xd8205fa4, 0x17c07c1f, 0x1a40001f,
+	0x100062b0, 0xc2402f20, 0x17c07c1f, 0x1b80001f, 0x20000208, 0xd8207b8c,
+	0x17c07c1f, 0x1a40001f, 0x100062b0, 0xc2403700, 0x17c07c1f, 0x81001401,
+	0xd8206424, 0x17c07c1f, 0x1a10001f, 0x10006918, 0x81002001, 0xb1042081,
+	0xb900008c, 0x1fffe7ff, 0xd8206424, 0x17c07c1f, 0x1a40001f, 0x10006200,
+	0x1a80001f, 0x1000625c, 0xc24026e0, 0x17c07c1f, 0x89400005, 0xfffffffe,
+	0xe8208000, 0x10006f00, 0x00000000, 0xe8208000, 0x10006b30, 0x00000000,
+	0xe8208000, 0x100063e0, 0x00000001, 0x81009401, 0xd82067a4, 0x17c07c1f,
+	0x1a10001f, 0x10006918, 0x8100a001, 0xb104a081, 0xb900008c, 0x01000001,
+	0xd82067a4, 0x17c07c1f, 0x1a40001f, 0x10006218, 0x1a80001f, 0x10006264,
+	0xc24026e0, 0x17c07c1f, 0x89400005, 0xfffffffd, 0xe8208000, 0x10006f04,
+	0x00000000, 0xe8208000, 0x10006b34, 0x00000000, 0xe8208000, 0x100063e0,
+	0x00000002, 0x81011401, 0xd8206b24, 0x17c07c1f, 0x1a10001f, 0x10006918,
+	0x81012001, 0xb1052081, 0xb900008c, 0x01000001, 0xd8206b24, 0x17c07c1f,
+	0x1a40001f, 0x1000621c, 0x1a80001f, 0x1000626c, 0xc24026e0, 0x17c07c1f,
+	0x89400005, 0xfffffffb, 0xe8208000, 0x10006f08, 0x00000000, 0xe8208000,
+	0x10006b38, 0x00000000, 0xe8208000, 0x100063e0, 0x00000004, 0x81019401,
+	0xd8206ea4, 0x17c07c1f, 0x1a10001f, 0x10006918, 0x8101a001, 0xb105a081,
+	0xb900008c, 0x01000001, 0xd8206ea4, 0x17c07c1f, 0x1a40001f, 0x10006220,
+	0x1a80001f, 0x10006274, 0xc24026e0, 0x17c07c1f, 0x89400005, 0xfffffff7,
+	0xe8208000, 0x10006f0c, 0x00000000, 0xe8208000, 0x10006b3c, 0x00000000,
+	0xe8208000, 0x100063e0, 0x00000008, 0x1a10001f, 0x10006610, 0x8207a001,
+	0xd8207608, 0x17c07c1f, 0x81021401, 0xd82072a4, 0x17c07c1f, 0x1a10001f,
+	0x10006918, 0x81022001, 0xb1062081, 0xb900008c, 0x01000001, 0xd82072a4,
+	0x17c07c1f, 0x1a40001f, 0x100062a0, 0x1280041f, 0xc2402a60, 0x17c07c1f,
+	0x89400005, 0xffffffef, 0xe8208000, 0x10006f10, 0x00000000, 0xe8208000,
+	0x10006b40, 0x00000000, 0xe8208000, 0x100063e0, 0x00000010, 0x81029401,
+	0xd8207604, 0x17c07c1f, 0x1a10001f, 0x10006918, 0x8102a001, 0xb106a081,
+	0xb900008c, 0x01000001, 0xd8207604, 0x17c07c1f, 0x1a40001f, 0x100062a4,
+	0x1290841f, 0xc2402a60, 0x17c07c1f, 0x89400005, 0xffffffdf, 0xe8208000,
+	0x10006f14, 0x00000000, 0xe8208000, 0x10006b44, 0x00000000, 0xe8208000,
+	0x100063e0, 0x00000020, 0x81031401, 0xd82078c4, 0x17c07c1f, 0x1a10001f,
+	0x10006918, 0x81032001, 0xb1072081, 0xb900008c, 0x01000001, 0xd82078c4,
+	0x17c07c1f, 0x89400005, 0xffffffbf, 0xe8208000, 0x10006f18, 0x00000000,
+	0xe8208000, 0x10006b48, 0x00000000, 0xe8208000, 0x100063e0, 0x00000040,
+	0x81039401, 0xd8207b84, 0x17c07c1f, 0x1a10001f, 0x10006918, 0x8103a001,
+	0xb107a081, 0xb900008c, 0x01000001, 0xd8207b84, 0x17c07c1f, 0x89400005,
+	0xffffff7f, 0xe8208000, 0x10006f1c, 0x00000000, 0xe8208000, 0x10006b4c,
+	0x00000000, 0xe8208000, 0x100063e0, 0x00000080, 0xd00041c0, 0x17c07c1f,
+	0xe8208000, 0x10006600, 0x00000000, 0x1ac0001f, 0x55aa55aa, 0x1940001f,
+	0xaa55aa55, 0x1b80001f, 0x00001000, 0xf0000000, 0x17c07c1f
+};
+
+static const struct pcm_desc mcdi_pcm = {
+	.version = "pcm_mcdi_mt8173_20160401_v1",
+	.base = mcdi_binary,
+	.size = 1001,
+	.sess = 2,
+	.replace = 0,
+};
+
+static struct pwr_ctrl mcdi_ctrl = {
+	.wake_src = WAKE_SRC_FOR_MCDI,
+	.wake_src_md32 = 0,
+	.wfi_op = WFI_OP_OR,
+	.mcusys_idle_mask = 1,
+	.ca7top_idle_mask = 1,
+	.ca15top_idle_mask = 1,
+	.disp_req_mask = 1,
+	.mfg_req_mask = 1,
+	.md32_req_mask = 1,
+};
+
+static const struct spm_lp_scen spm_mcdi = {
+	.pcmdesc = &mcdi_pcm,
+	.pwrctrl = &mcdi_ctrl,
+};
+
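+/*
+ * Handshake with the MCDI PCM firmware to program CPU_WAKEUP_EVENT and to
+ * enable or disable dormant power. Returns early if MCDI is already offloaded.
+ */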
+void spm_mcdi_cpu_wake_up_event(int wake_up_event, int disable_dormant_power)
+{
+	if (((mmio_read_32(SPM_SLEEP_CPU_WAKEUP_EVENT) & 0x1) == 1)
+	    && ((mmio_read_32(SPM_CLK_CON) & CC_DISABLE_DORM_PWR) == 0)) {
+		/* has MCDI already been offloaded? */
+		INFO("%s: SPM_SLEEP_CPU_WAKEUP_EVENT:%x, SPM_CLK_CON %x\n",
+			__func__, mmio_read_32(SPM_SLEEP_CPU_WAKEUP_EVENT),
+			mmio_read_32(SPM_CLK_CON));
+		return;
+	}
+	/* Inform the SPM that the CPU wants to program CPU_WAKEUP_EVENT and
+	 * DISABLE_CPU_DORM */
+	mmio_write_32(SPM_PCM_REG_DATA_INI, PCM_MCDI_HANDSHAKE_SYNC);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R6);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+
+	/* Wait for the SPM's response; the sleep API cannot be used here */
+	while (mmio_read_32(SPM_PCM_REG6_DATA) != PCM_MCDI_HANDSHAKE_ACK)
+		;
+
+	if (disable_dormant_power) {
+		mmio_setbits_32(SPM_CLK_CON, CC_DISABLE_DORM_PWR);
+		while (mmio_read_32(SPM_CLK_CON) !=
+			(mmio_read_32(SPM_CLK_CON) | CC_DISABLE_DORM_PWR))
+			;
+
+	} else {
+		mmio_clrbits_32(SPM_CLK_CON, CC_DISABLE_DORM_PWR);
+		while (mmio_read_32(SPM_CLK_CON) !=
+			(mmio_read_32(SPM_CLK_CON) & ~CC_DISABLE_DORM_PWR))
+			;
+	}
+
+	mmio_write_32(SPM_SLEEP_CPU_WAKEUP_EVENT, wake_up_event);
+
+	while (mmio_read_32(SPM_SLEEP_CPU_WAKEUP_EVENT) != wake_up_event)
+		;
+
+	/* Inform the SPM to pick up the updated setting */
+	mmio_write_32(SPM_PCM_REG_DATA_INI, PCM_MCDI_UPDATE_INFORM);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R6);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+
+	while (mmio_read_32(SPM_PCM_REG6_DATA) != PCM_MCDI_CHECK_DONE)
+		;
+	/* end of the handshake sequence */
+
+	mmio_write_32(SPM_PCM_REG_DATA_INI, 0x0);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, PCM_RF_SYNC_R6);
+	mmio_write_32(SPM_PCM_PWR_IO_EN, 0);
+}
+
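+/*
+ * Wake up every core that entered MCDI, wait until the PCM reports that all
+ * cores are awake, then offload the MCDI firmware and clear the driver state.
+ */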
+void spm_mcdi_wakeup_all_cores(void)
+{
+	if (is_mcdi_ready() == 0)
+		return;
+
+	spm_mcdi_cpu_wake_up_event(1, 1);
+	while (mmio_read_32(SPM_PCM_REG5_DATA) != PCM_MCDI_ALL_CORE_AWAKE)
+		;
+	spm_mcdi_cpu_wake_up_event(1, 0);
+	while (mmio_read_32(SPM_PCM_REG5_DATA) != PCM_MCDI_OFFLOADED)
+		;
+
+	spm_clean_after_wakeup();
+	clear_all_ready();
+}
+
+static void spm_mcdi_wfi_sel_enter(unsigned long mpidr)
+{
+	int core_id_val = mpidr & MPIDR_CPU_MASK;
+	int cluster_id = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	/* SPM WFI Select by core number */
+	if (cluster_id) {
+		switch (core_id_val) {
+		case 0:
+			mmio_write_32(SPM_CA15_CPU0_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA15_WFI0_EN, 1);
+			break;
+		case 1:
+			mmio_write_32(SPM_CA15_CPU1_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA15_WFI1_EN, 1);
+			break;
+		case 2:
+			mmio_write_32(SPM_CA15_CPU2_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA15_WFI2_EN, 1);
+			break;
+		case 3:
+			mmio_write_32(SPM_CA15_CPU3_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA15_WFI3_EN, 1);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (core_id_val) {
+		case 0:
+			mmio_write_32(SPM_CA7_CPU0_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA7_WFI0_EN, 1);
+			break;
+		case 1:
+			mmio_write_32(SPM_CA7_CPU1_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA7_WFI1_EN, 1);
+			break;
+		case 2:
+			mmio_write_32(SPM_CA7_CPU2_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA7_WFI2_EN, 1);
+			break;
+		case 3:
+			mmio_write_32(SPM_CA7_CPU3_IRQ_MASK, 1);
+			mmio_write_32(SPM_SLEEP_CA7_WFI3_EN, 1);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void spm_mcdi_wfi_sel_leave(unsigned long mpidr)
+{
+	int core_id_val = mpidr & MPIDR_CPU_MASK;
+	int cluster_id = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	/* SPM WFI Select by core number */
+	if (cluster_id) {
+		switch (core_id_val) {
+		case 0:
+			mmio_write_32(SPM_SLEEP_CA15_WFI0_EN, 0);
+			mmio_write_32(SPM_CA15_CPU0_IRQ_MASK, 0);
+			break;
+		case 1:
+			mmio_write_32(SPM_SLEEP_CA15_WFI1_EN, 0);
+			mmio_write_32(SPM_CA15_CPU1_IRQ_MASK, 0);
+			break;
+		case 2:
+			mmio_write_32(SPM_SLEEP_CA15_WFI2_EN, 0);
+			mmio_write_32(SPM_CA15_CPU2_IRQ_MASK, 0);
+			break;
+		case 3:
+			mmio_write_32(SPM_SLEEP_CA15_WFI3_EN, 0);
+			mmio_write_32(SPM_CA15_CPU3_IRQ_MASK, 0);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (core_id_val) {
+		case 0:
+			mmio_write_32(SPM_SLEEP_CA7_WFI0_EN, 0);
+			mmio_write_32(SPM_CA7_CPU0_IRQ_MASK, 0);
+			break;
+		case 1:
+			mmio_write_32(SPM_SLEEP_CA7_WFI1_EN, 0);
+			mmio_write_32(SPM_CA7_CPU1_IRQ_MASK, 0);
+			break;
+		case 2:
+			mmio_write_32(SPM_SLEEP_CA7_WFI2_EN, 0);
+			mmio_write_32(SPM_CA7_CPU2_IRQ_MASK, 0);
+			break;
+		case 3:
+			mmio_write_32(SPM_SLEEP_CA7_WFI3_EN, 0);
+			mmio_write_32(SPM_CA7_CPU3_IRQ_MASK, 0);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
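+/*
+ * If the CPU identified by mpidr is the last powered-on core in its cluster,
+ * set the CPUTOP power-control bit so the PCM may power down the cluster too.
+ */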
+static void spm_mcdi_set_cputop_pwrctrl_for_cluster_off(unsigned long mpidr)
+{
+	unsigned long cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+	unsigned long cpu_id = mpidr & MPIDR_CPU_MASK;
+	unsigned int pwr_status, shift, i, flag = 0;
+
+	pwr_status = mmio_read_32(SPM_PWR_STATUS) |
+				 mmio_read_32(SPM_PWR_STATUS_2ND);
+
+	if (cluster_id) {
+		for (i = 0; i < PLATFORM_CLUSTER1_CORE_COUNT; i++) {
+			if (i == cpu_id)
+				continue;
+			shift = i + PCM_MCDI_CA72_PWRSTA_SHIFT;
+			flag |= (pwr_status & (1 << shift)) >> shift;
+		}
+		if (!flag)
+			mmio_setbits_32(SPM_PCM_RESERVE,
+					PCM_MCDI_CA72_CPUTOP_PWRCTL);
+	} else {
+		for (i = 0; i < PLATFORM_CLUSTER0_CORE_COUNT; i++) {
+			if (i == cpu_id)
+				continue;
+			shift = i + PCM_MCDI_CA53_PWRSTA_SHIFT;
+			flag |= (pwr_status & (1 << shift)) >> shift;
+		}
+		if (!flag)
+			mmio_setbits_32(SPM_PCM_RESERVE,
+					PCM_MCDI_CA53_CPUTOP_PWRCTL);
+	}
+}
+
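+/* Clear the cluster's CPUTOP power-control bit when it is powered on again. */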
+static void spm_mcdi_clear_cputop_pwrctrl_for_cluster_on(unsigned long mpidr)
+{
+	unsigned long cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		mmio_clrbits_32(SPM_PCM_RESERVE,
+				PCM_MCDI_CA72_CPUTOP_PWRCTL);
+	else
+		mmio_clrbits_32(SPM_PCM_RESERVE,
+				PCM_MCDI_CA53_CPUTOP_PWRCTL);
+}
+
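+/* Load and start the MCDI PCM firmware if it is not already running. */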
+void spm_mcdi_prepare_for_mtcmos(void)
+{
+	const struct pcm_desc *pcmdesc = spm_mcdi.pcmdesc;
+	struct pwr_ctrl *pwrctrl = spm_mcdi.pwrctrl;
+
+	if (is_mcdi_ready() == 0) {
+		if (is_hotplug_ready() == 1)
+			spm_clear_hotplug();
+		set_pwrctrl_pcm_flags(pwrctrl, 0);
+		spm_reset_and_init_pcm();
+		spm_kick_im_to_fetch(pcmdesc);
+		spm_set_power_control(pwrctrl);
+		spm_set_wakeup_event(pwrctrl);
+		spm_kick_pcm_to_run(pwrctrl);
+		set_mcdi_ready();
+	}
+}
+
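+/*
+ * Prepare the SPM before the CPU goes offline: start the MCDI PCM firmware if
+ * needed, select the CPU's WFI signal and, when the whole cluster goes down,
+ * allow the PCM to power down the cluster as well.
+ */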
+void spm_mcdi_prepare_for_off_state(unsigned long mpidr, unsigned int afflvl)
+{
+	const struct pcm_desc *pcmdesc = spm_mcdi.pcmdesc;
+	struct pwr_ctrl *pwrctrl = spm_mcdi.pwrctrl;
+
+	spm_lock_get();
+	if (is_mcdi_ready() == 0) {
+		if (is_hotplug_ready() == 1)
+			spm_clear_hotplug();
+		set_pwrctrl_pcm_flags(pwrctrl, 0);
+		spm_reset_and_init_pcm();
+		spm_kick_im_to_fetch(pcmdesc);
+		spm_set_power_control(pwrctrl);
+		spm_set_wakeup_event(pwrctrl);
+		spm_kick_pcm_to_run(pwrctrl);
+		set_mcdi_ready();
+	}
+	spm_mcdi_wfi_sel_enter(mpidr);
+	if (afflvl == MPIDR_AFFLVL1)
+		spm_mcdi_set_cputop_pwrctrl_for_cluster_off(mpidr);
+	spm_lock_release();
+}
+
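+/*
+ * Undo the MCDI settings after the CPU comes back online: restore the cluster
+ * power control, deselect the CPU's WFI signal and clear its SW interrupt.
+ */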
+void spm_mcdi_finish_for_on_state(unsigned long mpidr, unsigned int afflvl)
+{
+	unsigned long linear_id;
+
+	linear_id = ((mpidr & MPIDR_CLUSTER_MASK) >> 6) |
+			(mpidr & MPIDR_CPU_MASK);
+
+	spm_lock_get();
+	spm_mcdi_clear_cputop_pwrctrl_for_cluster_on(mpidr);
+	spm_mcdi_wfi_sel_leave(mpidr);
+	mmio_write_32(SPM_PCM_SW_INT_CLEAR, (0x1 << linear_id));
+	spm_lock_release();
+}
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_mcdi.h b/plat/mediatek/mt8173/drivers/spm/spm_mcdi.h
new file mode 100644
index 0000000..e29f565
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_mcdi.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __SPM_MCDI_H__
+#define __SPM_MCDI_H__
+
+void spm_mcdi_wakeup_all_cores(void);
+void spm_mcdi_prepare_for_mtcmos(void);
+void spm_mcdi_prepare_for_off_state(unsigned long mpidr, unsigned int afflvl);
+void spm_mcdi_finish_for_on_state(unsigned long mpidr, unsigned int afflvl);
+
+#endif /* __SPM_MCDI_H__ */
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_suspend.c b/plat/mediatek/mt8173/drivers/spm/spm_suspend.c
new file mode 100644
index 0000000..8c79b3b
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_suspend.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <bakery_lock.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <spm.h>
+#include <spm_suspend.h>
+
+/*
+ * The System Power Manager (SPM) is a hardware module that controls CPU and
+ * system power for different power scenarios using different firmware.
+ * This driver controls system power in the system suspend flow.
+ */
+
+#define WAKE_SRC_FOR_SUSPEND					\
+	(WAKE_SRC_KP | WAKE_SRC_EINT | WAKE_SRC_MD32 |		\
+	WAKE_SRC_USB_CD | WAKE_SRC_USB_PDN | WAKE_SRC_THERM |	\
+	WAKE_SRC_SYSPWREQ | WAKE_SRC_ALL_MD32)
+
+#define WAKE_SRC_FOR_MD32  0
+
+#define spm_is_wakesrc_invalid(wakesrc)	\
+	(!!((unsigned int)(wakesrc) & 0xc0003803))
+
+#define ARMCA15PLL_CON0		(APMIXED_BASE + 0x200)
+#define ARMCA15PLL_CON1		(APMIXED_BASE + 0x204)
+#define ARMCA15PLL_PWR_CON0	(APMIXED_BASE + 0x20c)
+#define ARMCA15PLL_PWR_ON	(1U << 0)
+#define ARMCA15PLL_ISO_EN	(1U << 1)
+#define ARMCA15PLL_EN		(1U << 0)
+
+const unsigned int spm_flags =
+	SPM_DUALVCORE_PDN_DIS | SPM_PASR_DIS | SPM_DPD_DIS |
+	SPM_CPU_DVS_DIS | SPM_OPT | SPM_INFRA_PDN_DIS;
+
+enum wake_reason_t spm_wake_reason = WR_NONE;
+
+/**********************************************************
+ * PCM sequence for cpu suspend
+ **********************************************************/
+static const unsigned int suspend_binary_ca7[] = {
+	0x81f58407, 0x81f68407, 0x803a0400, 0x803a8400, 0x1b80001f, 0x20000000,
+	0x80300400, 0x80318400, 0x80328400, 0xa1d28407, 0x81f20407, 0x81009801,
+	0xd8000244, 0x17c07c1f, 0x18c0001f, 0x10006234, 0xc0c032e0, 0x1200041f,
+	0x80310400, 0x1b80001f, 0x2000000a, 0xa0110400, 0x18c0001f, 0x100062c8,
+	0xe0e00010, 0xe0e00030, 0xe0e00070, 0xe0e000f0, 0x1b80001f, 0x2000001a,
+	0xe0e00ff0, 0xe8208000, 0x10006354, 0xfffe7fff, 0xe8208000, 0x10006834,
+	0x00000010, 0x81f00407, 0xa1dd0407, 0x81fd0407, 0xc2803800, 0x1290041f,
+	0x8880000c, 0x2f7be75f, 0xd8200722, 0x17c07c1f, 0xd82006a9, 0x17c07c1f,
+	0xe8208000, 0x10006814, 0x00000001, 0xc2803800, 0x1293841f, 0x1b00001f,
+	0x7fffe7ff, 0xd0000760, 0x17c07c1f, 0x1b00001f, 0x7ffff7ff, 0xf0000000,
+	0x17c07c1f, 0x80880001, 0xd8000842, 0x17c07c1f, 0xd00028e0, 0x1200041f,
+	0xe8208000, 0x10006834, 0x00000000, 0x1b00001f, 0x3fffe7ff, 0x1b80001f,
+	0x20000004, 0xd8200a0c, 0x17c07c1f, 0xe8208000, 0x10006834, 0x00000010,
+	0xd0001280, 0x17c07c1f, 0x18c0001f, 0x10006608, 0x1910001f, 0x10006608,
+	0x813b0404, 0xe0c00004, 0x1880001f, 0x10006320, 0xc0c03760, 0xe080000f,
+	0xd8200c03, 0x17c07c1f, 0x1b00001f, 0x7ffff7ff, 0xd0001280, 0x17c07c1f,
+	0xe080001f, 0xe8208000, 0x10006354, 0xffffffff, 0x18c0001f, 0x100062c8,
+	0xe0e000f0, 0xe0e00030, 0xe0e00000, 0x81009801, 0xd80010c4, 0x17c07c1f,
+	0x18c0001f, 0x10004094, 0x1910001f, 0x1020e374, 0xe0c00004, 0x18c0001f,
+	0x10004098, 0x1910001f, 0x1020e378, 0xe0c00004, 0x18c0001f, 0x10011094,
+	0x1910001f, 0x10213374, 0xe0c00004, 0x18c0001f, 0x10011098, 0x1910001f,
+	0x10213378, 0xe0c00004, 0x1910001f, 0x10213378, 0x18c0001f, 0x10006234,
+	0xc0c034a0, 0x17c07c1f, 0xc2803800, 0x1290841f, 0xa1d20407, 0x81f28407,
+	0xa1d68407, 0xa0128400, 0xa0118400, 0xa0100400, 0xa01a8400, 0xa01a0400,
+	0x19c0001f, 0x001c239f, 0x1b00001f, 0x3fffefff, 0xf0000000, 0x17c07c1f,
+	0x808d8001, 0xd8201502, 0x17c07c1f, 0x803d8400, 0x1b80001f, 0x2000001a,
+	0x80340400, 0x17c07c1f, 0x17c07c1f, 0x80310400, 0x81fa0407, 0x81f18407,
+	0x81f08407, 0xa1dc0407, 0x1b80001f, 0x200000b6, 0xd0002220, 0x17c07c1f,
+	0x1880001f, 0x20000208, 0x81011801, 0xd80016e4, 0x17c07c1f, 0xe8208000,
+	0x1000f600, 0xd2000000, 0x1380081f, 0x18c0001f, 0x10006240, 0xe0e00016,
+	0xe0e0001e, 0xe0e0000e, 0xe0e0000f, 0x80368400, 0x1380081f, 0x80370400,
+	0x1380081f, 0x80360400, 0x803e0400, 0x1380081f, 0x80380400, 0x803b0400,
+	0xa01d8400, 0x1b80001f, 0x20000034, 0x803d8400, 0x1b80001f, 0x20000152,
+	0x803d0400, 0x1380081f, 0x18c0001f, 0x1000f5c8, 0x1910001f, 0x1000f5c8,
+	0xa1000404, 0xe0c00004, 0x18c0001f, 0x100125c8, 0x1910001f, 0x100125c8,
+	0xa1000404, 0xe0c00004, 0x1910001f, 0x100125c8, 0x80340400, 0x17c07c1f,
+	0x17c07c1f, 0x80310400, 0xe8208000, 0x10000044, 0x00000100, 0x1b80001f,
+	0x20000068, 0x1b80001f, 0x2000000a, 0x18c0001f, 0x10006240, 0xe0e0000d,
+	0x81011801, 0xd8001f64, 0x17c07c1f, 0x18c0001f, 0x100040f4, 0x1910001f,
+	0x100040f4, 0xa11c8404, 0xe0c00004, 0x1b80001f, 0x2000000a, 0x813c8404,
+	0xe0c00004, 0x18c0001f, 0x100110f4, 0x1910001f, 0x100110f4, 0xa11c8404,
+	0xe0c00004, 0x1b80001f, 0x2000000a, 0x813c8404, 0xe0c00004, 0x1b80001f,
+	0x20000100, 0x81fa0407, 0x81f18407, 0x81f08407, 0xe8208000, 0x10006354,
+	0xfffe7b47, 0x18c0001f, 0x65930003, 0xc0c031c0, 0x17c07c1f, 0xc2803800,
+	0x1293041f, 0xa1d80407, 0xa1dc0407, 0x18c0001f, 0x10006608, 0x1910001f,
+	0x10006608, 0xa11b0404, 0xe0c00004, 0xc2803800, 0x1291041f, 0x8880000c,
+	0x2f7be75f, 0xd8202362, 0x17c07c1f, 0x1b00001f, 0x3fffe7ff, 0xd00023a0,
+	0x17c07c1f, 0x1b00001f, 0xbfffe7ff, 0xf0000000, 0x17c07c1f, 0x1890001f,
+	0x10006608, 0x808b0801, 0xd8202642, 0x17c07c1f, 0x1880001f, 0x10006320,
+	0xc0c03540, 0xe080000f, 0xd80027a3, 0x17c07c1f, 0xe080001f, 0xa1da0407,
+	0x81fc0407, 0xa0110400, 0xa0140400, 0xa01d8400, 0xd0003100, 0x17c07c1f,
+	0x1b80001f, 0x20000fdf, 0x1890001f, 0x10006608, 0x80c98801, 0x810a8801,
+	0x10918c1f, 0xa0939002, 0x8080080d, 0xd82028e2, 0x12007c1f, 0x1b00001f,
+	0x3fffe7ff, 0x1b80001f, 0x20000004, 0xd800318c, 0x17c07c1f, 0x1b00001f,
+	0xbfffe7ff, 0xd0003180, 0x17c07c1f, 0x81f80407, 0x81fc0407, 0x18c0001f,
+	0x65930006, 0xc0c031c0, 0x17c07c1f, 0x18c0001f, 0x65930007, 0xc0c031c0,
+	0x17c07c1f, 0x1880001f, 0x10006320, 0xc0c03540, 0xe080000f, 0xd80027a3,
+	0x17c07c1f, 0xe080001f, 0x18c0001f, 0x65930005, 0xc0c031c0, 0x17c07c1f,
+	0xa1da0407, 0xe8208000, 0x10000048, 0x00000100, 0x1b80001f, 0x20000068,
+	0xa0110400, 0xa0140400, 0x18c0001f, 0x1000f5c8, 0x1910001f, 0x1000f5c8,
+	0x81200404, 0xe0c00004, 0x18c0001f, 0x100125c8, 0x1910001f, 0x100125c8,
+	0x81200404, 0xe0c00004, 0x1910001f, 0x100125c8, 0xa01d0400, 0xa01b0400,
+	0xa0180400, 0x803d8400, 0xa01e0400, 0xa0160400, 0xa0170400, 0xa0168400,
+	0x1b80001f, 0x20000104, 0x81011801, 0xd80030c4, 0x17c07c1f, 0x18c0001f,
+	0x10006240, 0xc0c034a0, 0x17c07c1f, 0xe8208000, 0x1000f600, 0xd2000001,
+	0xd8000848, 0x17c07c1f, 0xc2803800, 0x1291841f, 0x1b00001f, 0x7ffff7ff,
+	0xf0000000, 0x17c07c1f, 0x1900001f, 0x10006830, 0xe1000003, 0x18c0001f,
+	0x10006834, 0xe0e00000, 0xe0e00001, 0xf0000000, 0x17c07c1f, 0xe0f07f16,
+	0x1380201f, 0xe0f07f1e, 0x1380201f, 0xe0f07f0e, 0x1b80001f, 0x20000104,
+	0xe0f07f0c, 0xe0f07f0d, 0xe0f07e0d, 0xe0f07c0d, 0xe0f0780d, 0xf0000000,
+	0xe0f0700d, 0xe0f07f0d, 0xe0f07f0f, 0xe0f07f1e, 0xf0000000, 0xe0f07f12,
+	0x11407c1f, 0x81f08407, 0x81f18407, 0x1b80001f, 0x20000001, 0xa1d08407,
+	0xa1d18407, 0x1392841f, 0x812ab401, 0x80ebb401, 0xa0c00c04, 0xd8203743,
+	0x17c07c1f, 0x80c01403, 0xd8203563, 0x01400405, 0xf0000000, 0xa1d00407,
+	0x1b80001f, 0x20000208, 0x80ea3401, 0xf0000000, 0x18c0001f, 0x10006b6c,
+	0x1910001f, 0x10006b6c, 0xa1002804, 0xf0000000, 0xe0c00004, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f, 0x17c07c1f,
+	0x17c07c1f, 0x17c07c1f, 0x1840001f, 0x00000001, 0xa1d48407, 0x1990001f,
+	0x10006b08, 0x1a50001f, 0x10006610, 0x8246a401, 0xe8208000, 0x10006b6c,
+	0x00000000, 0x1b00001f, 0x2f7be75f, 0x81469801, 0xd8004305, 0x17c07c1f,
+	0x1b80001f, 0xd00f0000, 0x8880000c, 0x2f7be75f, 0xd8005fa2, 0x17c07c1f,
+	0xd0004340, 0x17c07c1f, 0x1b80001f, 0x500f0000, 0xe8208000, 0x10006354,
+	0xfffe7b47, 0xc0c06c00, 0x81401801, 0xd80048e5, 0x17c07c1f, 0x81f60407,
+	0x18c0001f, 0x10006200, 0xc0c06060, 0x12807c1f, 0xe8208000, 0x1000625c,
+	0x00000001, 0x1b80001f, 0x20000080, 0xc0c06060, 0x1280041f, 0x18c0001f,
+	0x10006204, 0xc0c06400, 0x1280041f, 0x18c0001f, 0x10006208, 0xc0c06060,
+	0x12807c1f, 0xe8208000, 0x10006244, 0x00000001, 0x1b80001f, 0x20000080,
+	0xc0c06060, 0x1280041f, 0x18d0001f, 0x10200200, 0x18c0001f, 0x10006290,
+	0xc0c06060, 0x1280041f, 0xe8208000, 0x10006404, 0x00003101, 0xc2803800,
+	0x1292041f, 0x81469801, 0xd8204a45, 0x17c07c1f, 0x1b00001f, 0x2f7be75f,
+	0x1b80001f, 0x30000004, 0x8880000c, 0x2f7be75f, 0xd8005a02, 0x17c07c1f,
+	0xc0c06780, 0x17c07c1f, 0x18c0001f, 0x10006294, 0xe0f07fff, 0xe0e00fff,
+	0xe0e000ff, 0x81449801, 0xd8004c85, 0x17c07c1f, 0x1a00001f, 0x10006604,
+	0xe2200003, 0xc0c06840, 0x17c07c1f, 0xe2200005, 0xc0c06840, 0x17c07c1f,
+	0xa1d38407, 0xa1d98407, 0x1800001f, 0x00000012, 0x1800001f, 0x00000e12,
+	0x1800001f, 0x03800e12, 0x1800001f, 0x038e0e12, 0xe8208000, 0x10006310,
+	0x0b1600f8, 0x1940001f, 0x00000000, 0x12407c1f, 0x1b00001f, 0xbfffe7ff,
+	0x1b80001f, 0x90100000, 0x17c07c1f, 0xd8004fc5, 0x17c07c1f, 0x8247b001,
+	0x1940001f, 0xffffffff, 0x80c00400, 0xd82050c3, 0xa1d58407, 0xa1dd8407,
+	0x1b00001f, 0x3fffefff, 0xd0004ec0, 0x17c07c1f, 0x1890001f, 0x100063e8,
+	0x88c0000c, 0x2f7be75f, 0xd80052e3, 0x17c07c1f, 0x80c40001, 0xd8005263,
+	0x17c07c1f, 0x1b00001f, 0xbfffe7ff, 0xd00052a0, 0x17c07c1f, 0x1b00001f,
+	0x7ffff7ff, 0xd0004ec0, 0x17c07c1f, 0x80c40001, 0xd82053e3, 0x17c07c1f,
+	0xa1de0407, 0x1b00001f, 0x7fffe7ff, 0xd0004ec0, 0x17c07c1f, 0xe8208000,
+	0x10006814, 0x00000000, 0x18c0001f, 0x10006b00, 0xe0e00000, 0xe0c00009,
+	0x18c0001f, 0x10006294, 0xe0e001fe, 0xe0e003fc, 0xe0e007f8, 0xe0e00ff0,
+	0x1b80001f, 0x20000020, 0xe0f07ff0, 0xe0f07f00, 0x81449801, 0xd80057a5,
+	0x17c07c1f, 0x1a00001f, 0x10006604, 0xe2200002, 0xc0c06840, 0x17c07c1f,
+	0xe2200004, 0xc0c06840, 0x17c07c1f, 0x1b80001f, 0x200016a8, 0x1800001f,
+	0x03800e12, 0x1b80001f, 0x20000300, 0x1800001f, 0x00000e12, 0x1b80001f,
+	0x20000300, 0x1800001f, 0x00000012, 0x1b80001f, 0x20000104, 0x10007c1f,
+	0x81f38407, 0x81f98407, 0x81f90407, 0x81f40407, 0x1b80001f, 0x200016a8,
+	0x81401801, 0xd8005fa5, 0x17c07c1f, 0xe8208000, 0x10006404, 0x00002101,
+	0x18c0001f, 0x10006290, 0x1212841f, 0xc0c061e0, 0x12807c1f, 0xc0c061e0,
+	0x1280041f, 0x18c0001f, 0x10006208, 0x1212841f, 0xc0c061e0, 0x12807c1f,
+	0xe8208000, 0x10006244, 0x00000000, 0x1b80001f, 0x20000080, 0xc0c061e0,
+	0x1280041f, 0xe8208000, 0x10200268, 0x000ffffe, 0x18c0001f, 0x10006204,
+	0x1212841f, 0xc0c065a0, 0x1280041f, 0x18c0001f, 0x10006200, 0x1212841f,
+	0xc0c061e0, 0x12807c1f, 0xe8208000, 0x1000625c, 0x00000000, 0x1b80001f,
+	0x20000080, 0xc0c061e0, 0x1280041f, 0x19c0001f, 0x01411820, 0x1ac0001f,
+	0x55aa55aa, 0x10007c1f, 0xf0000000, 0xd800610a, 0x17c07c1f, 0xe2e0004f,
+	0xe2e0006f, 0xe2e0002f, 0xd82061aa, 0x17c07c1f, 0xe2e0002e, 0xe2e0003e,
+	0xe2e00032, 0xf0000000, 0x17c07c1f, 0xd80062aa, 0x17c07c1f, 0xe2e00036,
+	0xe2e0003e, 0x1380201f, 0xe2e0003c, 0xd82063ca, 0x17c07c1f, 0x1380201f,
+	0xe2e0007c, 0x1b80001f, 0x20000003, 0xe2e0005c, 0xe2e0004c, 0xe2e0004d,
+	0xf0000000, 0x17c07c1f, 0x1a50001f, 0x10006610, 0x8246a401, 0xd8206569,
+	0x17c07c1f, 0xe2e0000d, 0xe2e0000c, 0xe2e0001c, 0xe2e0001e, 0xe2e00016,
+	0xe2e00012, 0xf0000000, 0x17c07c1f, 0x1a50001f, 0x10006610, 0x8246a401,
+	0xd8206749, 0x17c07c1f, 0xe2e00016, 0x1380201f, 0xe2e0001e, 0x1380201f,
+	0xe2e0001c, 0x1380201f, 0xe2e0000c, 0xe2e0000d, 0xf0000000, 0x17c07c1f,
+	0xa1d40407, 0x1391841f, 0xa1d90407, 0x1393041f, 0xf0000000, 0x17c07c1f,
+	0x18d0001f, 0x10006604, 0x10cf8c1f, 0xd8206843, 0x17c07c1f, 0xf0000000,
+	0x17c07c1f, 0xe8208000, 0x11008014, 0x00000002, 0xe8208000, 0x11008020,
+	0x00000101, 0xe8208000, 0x11008004, 0x000000d0, 0x1a00001f, 0x11008000,
+	0xd8006b0a, 0xe220005d, 0xd8206b2a, 0xe2200000, 0xe2200001, 0xe8208000,
+	0x11008024, 0x00000001, 0x1b80001f, 0x20000424, 0xf0000000, 0x17c07c1f,
+	0xa1d10407, 0x1b80001f, 0x20000020, 0xf0000000, 0x17c07c1f
+};
+
+/*
+ * PCM binary for suspend scenario
+ */
+static const struct pcm_desc suspend_pcm_ca7 = {
+	.version = "pcm_suspend_20150917_V4",
+	.base = suspend_binary_ca7,
+	.size = 869,
+	.sess = 2,
+	.replace = 0,
+	.vec0 = EVENT_VEC(11, 1, 0, 0),
+	.vec1 = EVENT_VEC(12, 1, 0, 61),
+	.vec2 = EVENT_VEC(30, 1, 0, 150),
+	.vec3 = EVENT_VEC(31, 1, 0, 287),
+};
+
+/*
+ * SPM settings for suspend scenario
+ */
+static struct pwr_ctrl spm_ctrl = {
+	.wake_src = WAKE_SRC_FOR_SUSPEND,
+	.wake_src_md32 = WAKE_SRC_FOR_MD32,
+	.r0_ctrl_en = 1,
+	.r7_ctrl_en = 1,
+	.infra_dcm_lock = 1,
+	.wfi_op = WFI_OP_AND,
+	.pcm_apsrc_req = 0,
+	.ca7top_idle_mask = 0,
+	.ca15top_idle_mask = 0,
+	.mcusys_idle_mask = 0,
+	.disp_req_mask = 0,
+	.mfg_req_mask = 0,
+	.md32_req_mask = 1,
+	.srclkenai_mask = 1,
+	.ca7_wfi0_en = 1,
+	.ca7_wfi1_en = 1,
+	.ca7_wfi2_en = 1,
+	.ca7_wfi3_en = 1,
+	.ca15_wfi0_en = 1,
+	.ca15_wfi1_en = 1,
+	.ca15_wfi2_en = 1,
+	.ca15_wfi3_en = 1,
+};
+
+/*
+ * go_to_sleep_before_wfi() - trigger SPM to enter suspend scenario
+ */
+static void go_to_sleep_before_wfi(const unsigned int spm_flags)
+{
+	struct pwr_ctrl *pwrctrl;
+
+	pwrctrl = &spm_ctrl;
+
+	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);
+
+	spm_set_sysclk_settle();
+
+	INFO("sec = %u, wakesrc = 0x%x (%u)(%u)\n",
+	     pwrctrl->timer_val, pwrctrl->wake_src,
+	     is_cpu_pdn(pwrctrl->pcm_flags),
+	     is_infra_pdn(pwrctrl->pcm_flags));
+
+	spm_reset_and_init_pcm();
+	spm_init_pcm_register();
+	spm_set_power_control(pwrctrl);
+	spm_set_wakeup_event(pwrctrl);
+	spm_kick_pcm_to_run(pwrctrl);
+	spm_init_event_vector(&suspend_pcm_ca7);
+	spm_kick_im_to_fetch(&suspend_pcm_ca7);
+}
+
+/*
+ * go_to_sleep_after_wfi() - get wakeup reason after
+ * leaving suspend scenario and clean up SPM settings
+ */
+static enum wake_reason_t go_to_sleep_after_wfi(void)
+{
+	struct wake_status wakesta;
+	static enum wake_reason_t last_wr = WR_NONE;
+
+	spm_get_wakeup_status(&wakesta);
+	spm_clean_after_wakeup();
+	last_wr = spm_output_wake_reason(&wakesta);
+
+	return last_wr;
+}
+
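+/* Power up the big-cluster ARMCA15PLL: power on, release isolation, enable. */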
+static void bigcore_pll_on(void)
+{
+	mmio_setbits_32(ARMCA15PLL_PWR_CON0, ARMCA15PLL_PWR_ON);
+	mmio_clrbits_32(ARMCA15PLL_PWR_CON0, ARMCA15PLL_ISO_EN);
+	mmio_setbits_32(ARMCA15PLL_CON0, ARMCA15PLL_EN);
+}
+
+static void bigcore_pll_off(void)
+{
+	mmio_clrbits_32(ARMCA15PLL_CON0, ARMCA15PLL_EN);
+	mmio_setbits_32(ARMCA15PLL_PWR_CON0, ARMCA15PLL_ISO_EN);
+	mmio_clrbits_32(ARMCA15PLL_PWR_CON0, ARMCA15PLL_PWR_ON);
+}
+
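+/*
+ * Enter the system suspend scenario: power down the big-cluster PLL, then
+ * program the SPM with the suspend PCM firmware and its power settings.
+ */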
+void spm_system_suspend(void)
+{
+	bigcore_pll_off();
+	spm_lock_get();
+	go_to_sleep_before_wfi(spm_flags);
+	set_suspend_ready();
+	spm_lock_release();
+}
+
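+/*
+ * Leave the system suspend scenario: retrieve the wakeup reason, clean up the
+ * SPM settings and turn the big-cluster PLL back on.
+ */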
+void spm_system_suspend_finish(void)
+{
+	spm_lock_get();
+	spm_wake_reason = go_to_sleep_after_wfi();
+	INFO("spm_wake_reason=%d\n", spm_wake_reason);
+	clear_all_ready();
+	spm_lock_release();
+	bigcore_pll_on();
+	/* wait 20us for the PLL to stabilize after turning it on */
+	udelay(20);
+}
diff --git a/plat/mediatek/mt8173/drivers/spm/spm_suspend.h b/plat/mediatek/mt8173/drivers/spm/spm_suspend.h
new file mode 100644
index 0000000..4041cfe
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/spm/spm_suspend.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __SPM_SUSPEND_H__
+#define __SPM_SUSPEND_H__
+
+/* cpu dormant return code */
+#define CPU_DORMANT_RESET        0
+#define CPU_DORMANT_ABORT        1
+
+void spm_system_suspend(void);
+void spm_system_suspend_finish(void);
+
+#endif /* __SPM_SUSPEND_H__*/
diff --git a/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.c b/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.c
new file mode 100644
index 0000000..8a30d8a
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <mt_cpuxgpt.h>
+
+static void write_cpuxgpt(unsigned int reg_index, unsigned int value)
+{
+	mmio_write_32((uintptr_t)&mt8173_mcucfg->xgpt_idx, reg_index);
+	mmio_write_32((uintptr_t)&mt8173_mcucfg->xgpt_ctl, value);
+}
+
+static void cpuxgpt_set_init_cnt(unsigned int countH, unsigned int countL)
+{
+	write_cpuxgpt(INDEX_CNT_H_INIT, countH);
+	/* the count is updated only when countL is programmed */
+	write_cpuxgpt(INDEX_CNT_L_INIT, countL);
+}
+
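+/*
+ * Back up the current generic timer count into the CPUXGPT initial-count
+ * registers so the counter continues from the same value.
+ */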
+void generic_timer_backup(void)
+{
+	uint64_t cval;
+
+	cval = read_cntpct_el0();
+	cpuxgpt_set_init_cnt((uint32_t)(cval >> 32),
+			       (uint32_t)(cval & 0xffffffff));
+}
diff --git a/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.h b/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.h
new file mode 100644
index 0000000..0364bba
--- /dev/null
+++ b/plat/mediatek/mt8173/drivers/timer/mt_cpuxgpt.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MT_CPUXGPT_H__
+#define __MT_CPUXGPT_H__
+
+/* REG */
+#define INDEX_CNT_L_INIT    0x008
+#define INDEX_CNT_H_INIT    0x00C
+
+void generic_timer_backup(void);
+
+#endif /* __MT_CPUXGPT_H__ */
diff --git a/plat/mediatek/mt8173/include/mcucfg.h b/plat/mediatek/mt8173/include/mcucfg.h
new file mode 100644
index 0000000..355c276
--- /dev/null
+++ b/plat/mediatek/mt8173/include/mcucfg.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __MCUCFG_H__
+#define __MCUCFG_H__
+
+#include <mt8173_def.h>
+#include <stdint.h>
+
+struct mt8173_mcucfg_regs {
+	uint32_t mp0_ca7l_cache_config;
+	struct {
+		uint32_t mem_delsel0;
+		uint32_t mem_delsel1;
+	} mp0_cpu[4];
+	uint32_t mp0_cache_mem_delsel0;
+	uint32_t mp0_cache_mem_delsel1;
+	uint32_t mp0_axi_config;
+	uint32_t mp0_misc_config[2];
+	struct {
+		uint32_t rv_addr_lw;
+		uint32_t rv_addr_hw;
+	} mp0_rv_addr[4];
+	uint32_t mp0_ca7l_cfg_dis;
+	uint32_t mp0_ca7l_clken_ctrl;
+	uint32_t mp0_ca7l_rst_ctrl;
+	uint32_t mp0_ca7l_misc_config;
+	uint32_t mp0_ca7l_dbg_pwr_ctrl;
+	uint32_t mp0_rw_rsvd0;
+	uint32_t mp0_rw_rsvd1;
+	uint32_t mp0_ro_rsvd;
+	uint32_t reserved0_0[100];
+	uint32_t mp1_cpucfg;
+	uint32_t mp1_miscdbg;
+	uint32_t reserved0_1[13];
+	uint32_t mp1_rst_ctl;
+	uint32_t mp1_clkenm_div;
+	uint32_t reserved0_2[7];
+	uint32_t mp1_config_res;
+	uint32_t reserved0_3[13];
+	struct {
+		uint32_t rv_addr_lw;
+		uint32_t rv_addr_hw;
+	} mp1_rv_addr[2];
+	uint32_t reserved0_4[84];
+	uint32_t mp0_rst_status;		/* 0x400 */
+	uint32_t mp0_dbg_ctrl;
+	uint32_t mp0_dbg_flag;
+	uint32_t mp0_ca7l_ir_mon;
+	struct {
+		uint32_t pc_lw;
+		uint32_t pc_hw;
+		uint32_t fp_arch32;
+		uint32_t sp_arch32;
+		uint32_t fp_arch64_lw;
+		uint32_t fp_arch64_hw;
+		uint32_t sp_arch64_lw;
+		uint32_t sp_arch64_hw;
+	} mp0_dbg_core[4];
+	uint32_t dfd_ctrl;
+	uint32_t dfd_cnt_l;
+	uint32_t dfd_cnt_h;
+	uint32_t misccfg_mp0_rw_rsvd;
+	uint32_t misccfg_sec_vio_status0;
+	uint32_t misccfg_sec_vio_status1;
+	uint32_t reserved1[22];
+	uint32_t misccfg_rw_rsvd;		/* 0x500 */
+	uint32_t mcusys_dbg_mon_sel_a;
+	uint32_t mcusys_dbg_mon;
+	uint32_t reserved2[61];
+	uint32_t mcusys_config_a;		/* 0x600 */
+	uint32_t mcusys_config1_a;
+	uint32_t mcusys_gic_peribase_a;
+	uint32_t reserved3;
+	uint32_t sec_range0_start;		/* 0x610 */
+	uint32_t sec_range0_end;
+	uint32_t sec_range_enable;
+	uint32_t reserved4;
+	uint32_t int_pol_ctl[8];		/* 0x620 */
+	uint32_t aclken_div;			/* 0x640 */
+	uint32_t pclken_div;
+	uint32_t l2c_sram_ctrl;
+	uint32_t armpll_jit_ctrl;
+	uint32_t cci_addrmap;			/* 0x650 */
+	uint32_t cci_config;
+	uint32_t cci_periphbase;
+	uint32_t cci_nevntcntovfl;
+	uint32_t cci_clk_ctrl;			/* 0x660 */
+	uint32_t cci_acel_s1_ctrl;
+	uint32_t bus_fabric_dcm_ctrl;
+	uint32_t reserved5;
+	uint32_t xgpt_ctl;			/* 0x670 */
+	uint32_t xgpt_idx;
+	uint32_t ptpod2_ctl0;
+	uint32_t ptpod2_ctl1;
+	uint32_t mcusys_revid;
+	uint32_t mcusys_rw_rsvd0;
+	uint32_t mcusys_rw_rsvd1;
+};
+
+static struct mt8173_mcucfg_regs *const mt8173_mcucfg = (void *)MCUCFG_BASE;
+
+/* cpu boot mode */
+#define	MP0_CPUCFG_64BIT_SHIFT	12
+#define	MP1_CPUCFG_64BIT_SHIFT	28
+#define	MP0_CPUCFG_64BIT	(U(0xf) << MP0_CPUCFG_64BIT_SHIFT)
+#define	MP1_CPUCFG_64BIT	(U(0xf) << MP1_CPUCFG_64BIT_SHIFT)
+
+/* scu related */
+enum {
+	MP0_ACINACTM_SHIFT = 4,
+	MP1_ACINACTM_SHIFT = 0,
+	MP0_ACINACTM = 1 << MP0_ACINACTM_SHIFT,
+	MP1_ACINACTM = 1 << MP1_ACINACTM_SHIFT
+};
+
+enum {
+	MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT = 0,
+	MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT = 4,
+	MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT = 8,
+	MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT = 12,
+	MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT = 16,
+
+	MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT,
+	MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK =
+		0xf << MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT
+};
+
+enum {
+	MP1_AINACTS_SHIFT = 4,
+	MP1_AINACTS = 1 << MP1_AINACTS_SHIFT
+};
+
+enum {
+	MP1_SW_CG_GEN_SHIFT = 12,
+	MP1_SW_CG_GEN = 1 << MP1_SW_CG_GEN_SHIFT
+};
+
+enum {
+	MP1_L2RSTDISABLE_SHIFT = 14,
+	MP1_L2RSTDISABLE = 1 << MP1_L2RSTDISABLE_SHIFT
+};
+
+/* cci clock control related */
+enum {
+	MCU_BUS_DCM_EN	= 1 << 8
+};
+
+/* l2c sram control related */
+enum {
+	L2C_SRAM_DCM_EN = 1 << 0
+};
+
+/* bus fabric dcm control related */
+enum {
+	PSYS_ADB400_DCM_EN		= 1 << 29,
+	GPU_ADB400_DCM_EN		= 1 << 28,
+
+	EMI1_ADB400_DCM_EN		= 1 << 27,
+	EMI_ADB400_DCM_EN		= 1 << 26,
+	INFRA_ADB400_DCM_EN		= 1 << 25,
+	L2C_ADB400_DCM_EN		= 1 << 24,
+
+	MP0_ADB400_DCM_EN		= 1 << 23,
+	CCI400_CK_ONLY_DCM_EN		= 1 << 22,
+	L2C_IDLE_DCM_EN			= 1 << 21,
+
+	CA15U_ADB_DYNAMIC_CG_EN		= 1 << 19,
+	CA7L_ADB_DYNAMIC_CG_EN		= 1 << 18,
+	L2C_ADB_DYNAMIC_CG_EN		= 1 << 17,
+
+	EMICLK_EMI1_DYNAMIC_CG_EN	= 1 << 12,
+
+	INFRACLK_PSYS_DYNAMIC_CG_EN	= 1 << 11,
+	EMICLK_GPU_DYNAMIC_CG_EN	= 1 << 10,
+	EMICLK_EMI_DYNAMIC_CG_EN	= 1 << 8,
+
+	CCI400_SLV_RW_DCM_EN		= 1 << 7,
+	CCI400_SLV_DCM_EN		= 1 << 5,
+
+	ACLK_PSYS_DYNAMIC_CG_EN		= 1 << 3,
+	ACLK_GPU_DYNAMIC_CG_EN		= 1 << 2,
+	ACLK_EMI_DYNAMIC_CG_EN		= 1 << 1,
+	ACLK_INFRA_DYNAMIC_CG_EN	= 1 << 0,
+
+	/* adb400 related */
+	ADB400_GRP_DCM_EN = PSYS_ADB400_DCM_EN | GPU_ADB400_DCM_EN |
+			    EMI1_ADB400_DCM_EN | EMI_ADB400_DCM_EN |
+			    INFRA_ADB400_DCM_EN | L2C_ADB400_DCM_EN |
+			    MP0_ADB400_DCM_EN,
+
+	/* cci400 related */
+	CCI400_GRP_DCM_EN = CCI400_CK_ONLY_DCM_EN | CCI400_SLV_RW_DCM_EN |
+			    CCI400_SLV_DCM_EN,
+
+	/* adb clock related */
+	ADBCLK_GRP_DCM_EN = CA15U_ADB_DYNAMIC_CG_EN | CA7L_ADB_DYNAMIC_CG_EN |
+			    L2C_ADB_DYNAMIC_CG_EN,
+
+	/* emi clock related */
+	EMICLK_GRP_DCM_EN = EMICLK_EMI1_DYNAMIC_CG_EN |
+			    EMICLK_GPU_DYNAMIC_CG_EN |
+			    EMICLK_EMI_DYNAMIC_CG_EN,
+
+	/* bus clock related */
+	ACLK_GRP_DCM_EN = ACLK_PSYS_DYNAMIC_CG_EN | ACLK_GPU_DYNAMIC_CG_EN |
+			  ACLK_EMI_DYNAMIC_CG_EN | ACLK_INFRA_DYNAMIC_CG_EN,
+};
+
+#endif  /* __MCUCFG_H__ */
diff --git a/plat/mediatek/mt8173/include/mt8173_def.h b/plat/mediatek/mt8173/include/mt8173_def.h
new file mode 100644
index 0000000..3289de4
--- /dev/null
+++ b/plat/mediatek/mt8173/include/mt8173_def.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MT8173_DEF_H__
+#define __MT8173_DEF_H__
+
+#if RESET_TO_BL31
+#error "MT8173 is incompatible with RESET_TO_BL31!"
+#endif
+
+#define MT8173_PRIMARY_CPU	0x0
+
+/* Register base address */
+#define IO_PHYS			(0x10000000)
+#define INFRACFG_AO_BASE	(IO_PHYS + 0x1000)
+#define SRAMROM_SEC_BASE	(IO_PHYS + 0x1800)
+#define PERI_CON_BASE		(IO_PHYS + 0x3000)
+#define GPIO_BASE		(IO_PHYS + 0x5000)
+#define SPM_BASE		(IO_PHYS + 0x6000)
+#define RGU_BASE		(IO_PHYS + 0x7000)
+#define PMIC_WRAP_BASE		(IO_PHYS + 0xD000)
+#define DEVAPC0_BASE		(IO_PHYS + 0xE000)
+#define MCUCFG_BASE		(IO_PHYS + 0x200000)
+#define APMIXED_BASE		(IO_PHYS + 0x209000)
+#define TRNG_BASE		(IO_PHYS + 0x20F000)
+#define CRYPT_BASE		(IO_PHYS + 0x210000)
+#define MT_GIC_BASE		(IO_PHYS + 0x220000)
+#define PLAT_MT_CCI_BASE	(IO_PHYS + 0x390000)
+
+/* Aggregate of all devices in the first GB */
+#define MTK_DEV_RNG0_BASE	IO_PHYS
+#define MTK_DEV_RNG0_SIZE	0x400000
+#define MTK_DEV_RNG1_BASE	(IO_PHYS + 0x1000000)
+#define MTK_DEV_RNG1_SIZE	0x4000000
+
+/* SRAMROM related registers */
+#define SRAMROM_SEC_CTRL	(SRAMROM_SEC_BASE + 0x4)
+#define SRAMROM_SEC_ADDR	(SRAMROM_SEC_BASE + 0x8)
+
+/* DEVAPC0 related registers */
+#define DEVAPC0_MAS_SEC_0	(DEVAPC0_BASE + 0x500)
+#define DEVAPC0_APC_CON		(DEVAPC0_BASE + 0xF00)
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define MT8173_UART0_BASE	(IO_PHYS + 0x01002000)
+#define MT8173_UART1_BASE	(IO_PHYS + 0x01003000)
+#define MT8173_UART2_BASE	(IO_PHYS + 0x01004000)
+#define MT8173_UART3_BASE	(IO_PHYS + 0x01005000)
+
+#define MT8173_BAUDRATE		(115200)
+#define MT8173_UART_CLOCK	(26000000)
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	13000000
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base MTK_platform compatible GIC memory map */
+#define BASE_GICD_BASE		(MT_GIC_BASE + 0x1000)
+#define BASE_GICC_BASE		(MT_GIC_BASE + 0x2000)
+#define BASE_GICR_BASE		0	/* no GICR in GIC-400 */
+#define BASE_GICH_BASE		(MT_GIC_BASE + 0x4000)
+#define BASE_GICV_BASE		(MT_GIC_BASE + 0x6000)
+#define INT_POL_CTL0		0x10200620
+
+#define GIC_PRIVATE_SIGNALS	(32)
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX	4
+#define PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX	3
+
+/*******************************************************************************
+ * WDT related constants
+ ******************************************************************************/
+#define MTK_WDT_BASE		(RGU_BASE + 0)
+#define MTK_WDT_SWRST		(MTK_WDT_BASE + 0x0014)
+
+#define MTK_WDT_MODE_DUAL_MODE	0x0040
+#define MTK_WDT_MODE_IRQ	0x0008
+#define MTK_WDT_MODE_KEY	0x22000000
+#define MTK_WDT_MODE_EXTEN	0x0004
+#define MTK_WDT_SWRST_KEY	0x1209
+
+/* FIQ platform related define */
+#define MT_IRQ_SEC_SGI_0	8
+#define MT_IRQ_SEC_SGI_1	9
+#define MT_IRQ_SEC_SGI_2	10
+#define MT_IRQ_SEC_SGI_3	11
+#define MT_IRQ_SEC_SGI_4	12
+#define MT_IRQ_SEC_SGI_5	13
+#define MT_IRQ_SEC_SGI_6	14
+#define MT_IRQ_SEC_SGI_7	15
+
+/*
+ *  Macros for local power states in MTK platforms encoded by State-ID field
+ *  within the power-state parameter.
+ */
+/* Local power state for power domains in Run state. */
+#define MTK_LOCAL_STATE_RUN     0
+/* Local power state for retention. Valid only for CPU power domains */
+#define MTK_LOCAL_STATE_RET     1
+/* Local power state for OFF/power-down. Valid for CPU and cluster power
+ * domains
+ */
+#define MTK_LOCAL_STATE_OFF     2
+
+#if PSCI_EXTENDED_STATE_ID
+/*
+ * Macros used to parse state information from State-ID if it is using the
+ * recommended encoding for State-ID.
+ */
+#define MTK_LOCAL_PSTATE_WIDTH		4
+#define MTK_LOCAL_PSTATE_MASK		((1 << MTK_LOCAL_PSTATE_WIDTH) - 1)
+
+/* Macros to construct the composite power state */
+
+/* Make composite power state parameter till power level 0 */
+
+#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+	(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
+#else
+#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | \
+		((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
+		((type) << PSTATE_TYPE_SHIFT))
+
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+/* Make composite power state parameter till power level 1 */
+#define mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
+		(((lvl1_state) << MTK_LOCAL_PSTATE_WIDTH) | \
+		mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
+
+/* Make composite power state parameter till power level 2 */
+#define mtk_make_pwrstate_lvl2( \
+		lvl2_state, lvl1_state, lvl0_state, pwr_lvl, type) \
+		(((lvl2_state) << (MTK_LOCAL_PSTATE_WIDTH * 2)) | \
+		mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type))
+
+
+#endif /* __MT8173_DEF_H__ */
diff --git a/plat/mediatek/mt8173/include/plat_macros.S b/plat/mediatek/mt8173/include/plat_macros.S
new file mode 100644
index 0000000..5eb4913
--- /dev/null
+++ b/plat/mediatek/mt8173/include/plat_macros.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cci.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <mt8173_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
+		" Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1", ""
+
+	/* ---------------------------------------------
+	 * The below macro prints out relevant GIC and
+	 * CCI registers whenever an unhandled exception
+	 * is taken in BL3-1.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	mov_imm x16, BASE_GICD_BASE
+	mov_imm x17, BASE_GICC_BASE
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+gicd_ispendr_loop:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	exit_print_gic_regs
+	bl	asm_print_hex
+
+	adr	x4, spacer
+	bl	asm_print_str
+
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+
+	adr	x4, newline
+	bl	asm_print_str
+	b	gicd_ispendr_loop
+exit_print_gic_regs:
+
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+	.endm
diff --git a/plat/mediatek/mt8173/include/plat_private.h b/plat/mediatek/mt8173/include/plat_private.h
new file mode 100644
index 0000000..87ffbfc
--- /dev/null
+++ b/plat/mediatek/mt8173/include/plat_private.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_PRIVATE_H__
+#define __PLAT_PRIVATE_H__
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+void plat_configure_mmu_el3(unsigned long total_base,
+			    unsigned long total_size,
+			    unsigned long,
+			    unsigned long,
+			    unsigned long,
+			    unsigned long);
+
+void plat_cci_init(void);
+void plat_cci_enable(void);
+void plat_cci_disable(void);
+
+/* Declarations for plat_topology.c */
+int mt_setup_topology(void);
+
+#endif /* __PLAT_PRIVATE_H__ */
diff --git a/plat/mediatek/mt8173/include/plat_sip_calls.h b/plat/mediatek/mt8173/include/plat_sip_calls.h
new file mode 100644
index 0000000..2593709
--- /dev/null
+++ b/plat/mediatek/mt8173/include/plat_sip_calls.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+/*******************************************************************************
+ * Plat SiP function constants
+ ******************************************************************************/
+#define MTK_PLAT_SIP_NUM_CALLS	6
+
+#define MTK_SIP_PWR_ON_MTCMOS			0x82000402
+#define MTK_SIP_PWR_OFF_MTCMOS			0x82000403
+#define MTK_SIP_PWR_MTCMOS_SUPPORT		0x82000404
+#define MTK_SIP_SET_HDCP_KEY_NUM		0x82000405
+#define MTK_SIP_CLR_HDCP_KEY			0x82000406
+#define MTK_SIP_SET_HDCP_KEY_EX			0x82000407
+
+#endif /* __PLAT_SIP_CALLS_H__ */
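These function IDs follow the SMC Calling Convention layout for fast, 32-bit SiP service calls. The short sketch below (illustrative only, not part of the patch) decodes one of them field by field.

/* Decode MTK_SIP_PWR_ON_MTCMOS (0x82000402) per the SMC Calling Convention. */
#include <stdio.h>

int main(void)
{
	unsigned int fid = 0x82000402;

	printf("fast call: %u\n", (fid >> 31) & 1U);	/* 1: fast call   */
	printf("smc64:     %u\n", (fid >> 30) & 1U);	/* 0: SMC32 range */
	printf("owner:     %u\n", (fid >> 24) & 0x3fU);	/* 2: SiP service */
	printf("function:  0x%x\n", fid & 0xffffU);	/* 0x0402         */
	return 0;
}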
diff --git a/plat/mediatek/mt8173/include/platform_def.h b/plat/mediatek/mt8173/include/platform_def.h
new file mode 100644
index 0000000..76e694b
--- /dev/null
+++ b/plat/mediatek/mt8173/include/platform_def.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include "mt8173_def.h"
+
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if defined(IMAGE_BL1)
+#define PLATFORM_STACK_SIZE 0x440
+#elif defined(IMAGE_BL2)
+#define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL32)
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_MAX_AFFLVL		MPIDR_AFFLVL2
+#if !ENABLE_PLAT_COMPAT
+#define PLAT_MAX_PWR_LVL		2
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+#endif
+#define PLATFORM_SYSTEM_COUNT		1
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	2
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT +	\
+					 PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
+#define PLATFORM_NUM_AFFS		(PLATFORM_SYSTEM_COUNT +	\
+					 PLATFORM_CLUSTER_COUNT +	\
+					 PLATFORM_CORE_COUNT)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/*
+ * MT8173 SRAM memory layout
+ * 0x100000 +-------------------+
+ *          | shared mem (4KB)  |
+ * 0x101000 +-------------------+
+ *          |                   |
+ *          |   BL3-1 (124KB)   |
+ *          |                   |
+ * 0x120000 +-------------------+
+ *          |  reserved (64KB)  |
+ * 0x130000 +-------------------+
+ */
+/* TF text, ro, rw, xlat table, coherent memory ... etc.
+ * Size: release: 128KB, debug: 128KB
+ */
+#define TZRAM_BASE		(0x100000)
+#if DEBUG
+#define TZRAM_SIZE		(0x20000)
+#else
+#define TZRAM_SIZE		(0x20000)
+#endif
+
+/* Reserved: 64KB */
+#define TZRAM2_BASE		(TZRAM_BASE + TZRAM_SIZE)
+#define TZRAM2_SIZE		(0x10000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL3-1 debug size plus a
+ * little space for growth.
+ */
+#define BL31_BASE		(TZRAM_BASE + 0x1000)
+#define BL31_LIMIT		(TZRAM_BASE + TZRAM_SIZE)
+#define TZRAM2_LIMIT		(TZRAM2_BASE + TZRAM2_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)
+#define MAX_XLAT_TABLES		4
+#define MAX_MMAP_REGIONS	16
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT	6
+#define CACHE_WRITEBACK_GRANULE	(1 << CACHE_WRITEBACK_SHIFT)
+
+
+#define PLAT_ARM_GICD_BASE      BASE_GICD_BASE
+#define PLAT_ARM_GICC_BASE      BASE_GICC_BASE
+
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(MT_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+#define PLAT_ARM_G0_IRQ_PROPS(grp)
+
+#endif /* __PLATFORM_DEF_H__ */
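A quick arithmetic check of the SRAM constants above (a sketch for illustration, not an addition to the build): BL3-1 starts one 4KB shared page above TZRAM_BASE and runs up to the 128KB mark, i.e. the 124KB region shown in the layout diagram.

/* Sanity-check the BL31 window derived from the constants in this header. */
#include <assert.h>

int main(void)
{
	unsigned long tzram_base = 0x100000, tzram_size = 0x20000;
	unsigned long bl31_base  = tzram_base + 0x1000;		/* 0x101000 */
	unsigned long bl31_limit = tzram_base + tzram_size;	/* 0x120000 */

	assert(bl31_limit - bl31_base == 124 * 1024);
	return 0;
}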
diff --git a/plat/mediatek/mt8173/include/power_tracer.h b/plat/mediatek/mt8173/include/power_tracer.h
new file mode 100644
index 0000000..e8c0552
--- /dev/null
+++ b/plat/mediatek/mt8173/include/power_tracer.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __POWER_TRACER_H__
+#define __POWER_TRACER_H__
+
+#define CPU_UP		0
+#define CPU_DOWN	1
+#define CPU_SUSPEND	2
+#define CLUSTER_UP	3
+#define CLUSTER_DOWN	4
+#define CLUSTER_SUSPEND	5
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode);
+
+#endif
diff --git a/plat/mediatek/mt8173/include/scu.h b/plat/mediatek/mt8173/include/scu.h
new file mode 100644
index 0000000..2ce4b23
--- /dev/null
+++ b/plat/mediatek/mt8173/include/scu.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SCU_H__
+#define __SCU_H__
+
+void disable_scu(unsigned long mpidr);
+void enable_scu(unsigned long mpidr);
+
+#endif
diff --git a/plat/mediatek/mt8173/plat_mt_gic.c b/plat/mediatek/mt8173/plat_mt_gic.c
new file mode 100644
index 0000000..c955d61
--- /dev/null
+++ b/plat/mediatek/mt8173/plat_mt_gic.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arm_gic.h>
+#include <bl_common.h>
+#include <mt8173_def.h>
+#include <utils.h>
+
+const unsigned int mt_irq_sec_array[] = {
+	MT_IRQ_SEC_SGI_0,
+	MT_IRQ_SEC_SGI_1,
+	MT_IRQ_SEC_SGI_2,
+	MT_IRQ_SEC_SGI_3,
+	MT_IRQ_SEC_SGI_4,
+	MT_IRQ_SEC_SGI_5,
+	MT_IRQ_SEC_SGI_6,
+	MT_IRQ_SEC_SGI_7
+};
+
+void plat_mt_gic_init(void)
+{
+	arm_gic_init(BASE_GICC_BASE,
+		BASE_GICD_BASE,
+		BASE_GICR_BASE,
+		mt_irq_sec_array,
+		ARRAY_SIZE(mt_irq_sec_array));
+}
diff --git a/plat/mediatek/mt8173/plat_pm.c b/plat/mediatek/mt8173/plat_pm.c
new file mode 100644
index 0000000..bc89ad9
--- /dev/null
+++ b/plat/mediatek/mt8173/plat_pm.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <gicv2.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <mt8173_def.h>
+#include <mt_cpuxgpt.h> /* generic_timer_backup() */
+#include <plat_arm.h>
+#include <plat_private.h>
+#include <power_tracer.h>
+#include <psci.h>
+#include <rtc.h>
+#include <scu.h>
+#include <spm_hotplug.h>
+#include <spm_mcdi.h>
+#include <spm_suspend.h>
+
+#if !ENABLE_PLAT_COMPAT
+#define MTK_PWR_LVL0	0
+#define MTK_PWR_LVL1	1
+#define MTK_PWR_LVL2	2
+
+/* Macros to read the MTK power domain state */
+#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
+#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
+#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ?\
+			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)
+#endif
+
+#if PSCI_EXTENDED_STATE_ID
+/*
+ *  The table storing the valid idle power states. Ensure that the
+ *  array entries are populated in ascending order of state-id to
+ *  enable us to use binary search during power state validation.
+ *  The table must be terminated by a NULL entry.
+ */
+const unsigned int mtk_pm_idle_states[] = {
+	/* State-id - 0x001 */
+	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
+		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
+	/* State-id - 0x002 */
+	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
+		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
+	/* State-id - 0x022 */
+	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
+		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
+#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
+	/* State-id - 0x222 */
+	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
+		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
+#endif
+	0,
+};
+#endif
+
+struct core_context {
+	unsigned long timer_data[8];
+	unsigned int count;
+	unsigned int rst;
+	unsigned int abt;
+	unsigned int brk;
+};
+
+struct cluster_context {
+	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
+};
+
+/*
+ * Top level structure to hold the complete context of a multi cluster system
+ */
+struct system_context {
+	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
+};
+
+/*
+ * Top level structure which encapsulates the context of the entire system
+ */
+static struct system_context dormant_data[1];
+
+static inline struct cluster_context *system_cluster(
+						struct system_context *system,
+						uint32_t clusterid)
+{
+	return &system->cluster[clusterid];
+}
+
+static inline struct core_context *cluster_core(struct cluster_context *cluster,
+						uint32_t cpuid)
+{
+	return &cluster->core[cpuid];
+}
+
+static struct cluster_context *get_cluster_data(unsigned long mpidr)
+{
+	uint32_t clusterid;
+
+	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+	return system_cluster(dormant_data, clusterid);
+}
+
+static struct core_context *get_core_data(unsigned long mpidr)
+{
+	struct cluster_context *cluster;
+	uint32_t cpuid;
+
+	cluster = get_cluster_data(mpidr);
+	cpuid = mpidr & MPIDR_CPU_MASK;
+
+	return cluster_core(cluster, cpuid);
+}
+
+static void mt_save_generic_timer(unsigned long *container)
+{
+	uint64_t ctl;
+	uint64_t val;
+
+	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
+			 "mrs	%x1, cntp_cval_el0\n\t"
+			 "stp	%x0, %x1, [%2, #0]"
+			 : "=&r" (ctl), "=&r" (val)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
+			 "mrs	%x1, cntp_ctl_el0\n\t"
+			 "stp	%x0, %x1, [%2, #16]"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
+			 "mrs	%x1, cntv_ctl_el0\n\t"
+			 "stp	%x0, %x1, [%2, #32]"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+}
+
+static void mt_restore_generic_timer(unsigned long *container)
+{
+	uint64_t ctl;
+	uint64_t val;
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
+			 "msr	cntkctl_el1, %x0\n\t"
+			 "msr	cntp_cval_el0, %x1"
+			 : "=&r" (ctl), "=&r" (val)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
+			 "msr	cntp_tval_el0, %x0\n\t"
+			 "msr	cntp_ctl_el0, %x1"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+
+	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
+			 "msr	cntv_tval_el0, %x0\n\t"
+			 "msr	cntv_ctl_el0, %x1"
+			 : "=&r" (val), "=&r" (ctl)
+			 : "r" (container)
+			 : "memory");
+}
+
+static inline uint64_t read_cntpctl(void)
+{
+	uint64_t cntpctl;
+
+	__asm__ volatile("mrs	%x0, cntp_ctl_el0"
+			 : "=r" (cntpctl) : : "memory");
+
+	return cntpctl;
+}
+
+static inline void write_cntpctl(uint64_t cntpctl)
+{
+	__asm__ volatile("msr	cntp_ctl_el0, %x0" : : "r"(cntpctl));
+}
+
+static void stop_generic_timer(void)
+{
+	/*
+	 * Disable the timer and mask the irq to prevent
+	 * spurious interrupts on this cpu interface. It
+	 * will bite us when we come back if we don't. It
+	 * will be replayed on the inbound cluster.
+	 */
+	uint64_t cntpctl = read_cntpctl();
+
+	write_cntpctl(clr_cntp_ctl_enable(cntpctl));
+}
+
+static void mt_cpu_save(unsigned long mpidr)
+{
+	struct core_context *core;
+
+	core = get_core_data(mpidr);
+	mt_save_generic_timer(core->timer_data);
+
+	/* Disable the timer IRQ; the upper layer is expected to re-enable it. */
+	stop_generic_timer();
+}
+
+static void mt_cpu_restore(unsigned long mpidr)
+{
+	struct core_context *core;
+
+	core = get_core_data(mpidr);
+	mt_restore_generic_timer(core->timer_data);
+}
+
+static void mt_platform_save_context(unsigned long mpidr)
+{
+	/* mcusys_save_context: */
+	mt_cpu_save(mpidr);
+}
+
+static void mt_platform_restore_context(unsigned long mpidr)
+{
+	/* mcusys_restore_context: */
+	mt_cpu_restore(mpidr);
+}
+
+#if ENABLE_PLAT_COMPAT
+/*******************************************************************************
+* Private function which is used to determine if any platform actions
+* should be performed for the specified affinity instance given its
+* state. Nothing needs to be done if the 'state' is not off or if this is not
+* the highest affinity level which will enter the 'state'.
+*******************************************************************************/
+static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
+{
+	unsigned int max_phys_off_afflvl;
+
+	assert(afflvl <= MPIDR_AFFLVL2);
+
+	if (state != PSCI_STATE_OFF)
+		return -EAGAIN;
+
+	/*
+	 * Find the highest affinity level which will be suspended and postpone
+	 * all the platform specific actions until that level is hit.
+	 */
+	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
+	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+	if (afflvl != max_phys_off_afflvl)
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to enter
+ * standby.
+ ******************************************************************************/
+static void plat_affinst_standby(unsigned int power_state)
+{
+	unsigned int target_afflvl;
+
+	/* Sanity check the requested state */
+	target_afflvl = psci_get_pstate_afflvl(power_state);
+
+	/*
+	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
+	 * on the MTK_platform. Ignore any other affinity level.
+	 */
+	if (target_afflvl == MPIDR_AFFLVL0) {
+		/*
+		 * Enter standby state. dsb is good practice before using wfi
+		 * to enter low power states.
+		 */
+		dsb();
+		wfi();
+	}
+}
+#else
+static void plat_cpu_standby(plat_local_state_t cpu_state)
+{
+	unsigned int scr;
+
+	scr = read_scr_el3();
+	write_scr_el3(scr | SCR_IRQ_BIT);
+	isb();
+	dsb();
+	wfi();
+	write_scr_el3(scr);
+}
+#endif
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * on. The level and mpidr determine the affinity instance.
+ ******************************************************************************/
+#if ENABLE_PLAT_COMPAT
+static int plat_affinst_on(unsigned long mpidr,
+		    unsigned long sec_entrypoint,
+		    unsigned int afflvl,
+		    unsigned int state)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned long cpu_id;
+	unsigned long cluster_id;
+	uintptr_t rv;
+
+	/*
+	 * It's possible to turn on only affinity level 0 i.e. a cpu
+	 * on the MTK_platform. Ignore any other affinity level.
+	 */
+	if (afflvl != MPIDR_AFFLVL0)
+		return rc;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, sec_entrypoint);
+	INFO("mt_on[%ld:%ld], entry %x\n",
+		cluster_id, cpu_id, mmio_read_32(rv));
+
+	spm_hotplug_on(mpidr);
+
+	return rc;
+}
+#else
+static uintptr_t secure_entrypoint;
+
+static int plat_power_domain_on(unsigned long mpidr)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned long cpu_id;
+	unsigned long cluster_id;
+	uintptr_t rv;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, secure_entrypoint);
+	INFO("mt_on[%ld:%ld], entry %x\n",
+		cluster_id, cpu_id, mmio_read_32(rv));
+
+	spm_hotplug_on(mpidr);
+	return rc;
+}
+#endif
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * off. The level and mpidr determine the affinity instance. The 'state' arg.
+ * allows the platform to decide whether the cluster is being turned off and
+ * take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It is
+ * wise to flush a write to the global to prevent unpredictable results.
+ ******************************************************************************/
+#if ENABLE_PLAT_COMPAT
+static void plat_affinst_off(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	gicv2_cpuif_disable();
+
+	spm_hotplug_off(mpidr);
+
+	trace_power_flow(mpidr, CPU_DOWN);
+
+	if (afflvl != MPIDR_AFFLVL0) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+
+		trace_power_flow(mpidr, CLUSTER_DOWN);
+	}
+}
+#else
+static void plat_power_domain_off(const psci_power_state_t *state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	gicv2_cpuif_disable();
+
+	spm_hotplug_off(mpidr);
+
+	trace_power_flow(mpidr, CPU_DOWN);
+
+	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+
+		trace_power_flow(mpidr, CLUSTER_DOWN);
+	}
+}
+#endif
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be
+ * suspended. The level and mpidr determine the affinity instance. The 'state'
+ * arg. allows the platform to decide whether the cluster is being turned off
+ * and take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It is
+ * wise to flush a write to the global to prevent unpredictable results.
+ ******************************************************************************/
+#if ENABLE_PLAT_COMPAT
+static void plat_affinst_suspend(unsigned long sec_entrypoint,
+			  unsigned int afflvl,
+			  unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+	unsigned long cluster_id;
+	unsigned long cpu_id;
+	uintptr_t rv;
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, sec_entrypoint);
+
+	if (afflvl < MPIDR_AFFLVL2)
+		spm_mcdi_prepare_for_off_state(mpidr, afflvl);
+
+	if (afflvl >= MPIDR_AFFLVL0)
+		mt_platform_save_context(mpidr);
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+	}
+
+	if (afflvl >= MPIDR_AFFLVL2) {
+		disable_scu(mpidr);
+		generic_timer_backup();
+		spm_system_suspend();
+		/* Prevent interrupts from spuriously waking up this cpu */
+		gicv2_cpuif_disable();
+	}
+}
+#else
+static void plat_power_domain_suspend(const psci_power_state_t *state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+	unsigned long cluster_id;
+	unsigned long cpu_id;
+	uintptr_t rv;
+
+	cpu_id = mpidr & MPIDR_CPU_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	if (cluster_id)
+		rv = (uintptr_t)&mt8173_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+	else
+		rv = (uintptr_t)&mt8173_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+	mmio_write_32(rv, secure_entrypoint);
+
+	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
+		spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL0);
+		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
+			spm_mcdi_prepare_for_off_state(mpidr, MTK_PWR_LVL1);
+	}
+
+	mt_platform_save_context(mpidr);
+
+	/* Perform the common cluster specific operations */
+	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
+		/* Disable coherency if this cluster is to be turned off */
+		plat_cci_disable();
+	}
+
+	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
+		disable_scu(mpidr);
+		generic_timer_backup();
+		spm_system_suspend();
+		/* Prevent interrupts from spuriously waking up this cpu */
+		gicv2_cpuif_disable();
+	}
+}
+#endif
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after being turned off earlier. The level and mpidr determine the affinity
+ * instance. The 'state' arg. allows the platform to decide whether the cluster
+ * was turned off prior to wakeup and do what's necessary to set it up
+ * correctly.
+ ******************************************************************************/
+#if ENABLE_PLAT_COMPAT
+static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+		trace_power_flow(mpidr, CLUSTER_UP);
+	}
+
+	/* Enable the gic cpu interface */
+	gicv2_cpuif_enable();
+	gicv2_pcpu_distif_init();
+	trace_power_flow(mpidr, CPU_UP);
+}
+#else
+void mtk_system_pwr_domain_resume(void);
+
+static void plat_power_domain_on_finish(const psci_power_state_t *state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	assert(state->pwr_domain_state[MPIDR_AFFLVL0] == MTK_LOCAL_STATE_OFF);
+
+	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
+		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
+		mtk_system_pwr_domain_resume();
+
+	if (state->pwr_domain_state[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF) {
+		plat_cci_enable();
+		trace_power_flow(mpidr, CLUSTER_UP);
+	}
+
+	if ((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) &&
+		(state->pwr_domain_state[MTK_PWR_LVL2] == MTK_LOCAL_STATE_OFF))
+		return;
+
+	/* Enable the gic cpu interface */
+	gicv2_cpuif_enable();
+	gicv2_pcpu_distif_init();
+	trace_power_flow(mpidr, CPU_UP);
+}
+#endif
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after having been suspended earlier. The level and mpidr determine the
+ * affinity instance.
+ ******************************************************************************/
+#if ENABLE_PLAT_COMPAT
+static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	/* Determine if any platform actions need to be executed. */
+	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+		return;
+
+	if (afflvl >= MPIDR_AFFLVL2) {
+		/* Enable the gic cpu interface */
+		plat_arm_gic_init();
+		spm_system_suspend_finish();
+		enable_scu(mpidr);
+	}
+
+	/* Perform the common cluster specific operations */
+	if (afflvl >= MPIDR_AFFLVL1) {
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+	}
+
+	if (afflvl >= MPIDR_AFFLVL0)
+		mt_platform_restore_context(mpidr);
+
+	if (afflvl < MPIDR_AFFLVL2)
+		spm_mcdi_finish_for_on_state(mpidr, afflvl);
+
+	gicv2_pcpu_distif_init();
+}
+#else
+static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
+{
+	unsigned long mpidr = read_mpidr_el1();
+
+	if (state->pwr_domain_state[MTK_PWR_LVL0] == MTK_LOCAL_STATE_RET)
+		return;
+
+	if (MTK_SYSTEM_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
+		/* Enable the gic cpu interface */
+		plat_arm_gic_init();
+		spm_system_suspend_finish();
+		enable_scu(mpidr);
+	}
+
+	/* Perform the common cluster specific operations */
+	if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF) {
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+	}
+
+	mt_platform_restore_context(mpidr);
+
+	if (MTK_SYSTEM_PWR_STATE(state) != MTK_LOCAL_STATE_OFF) {
+		spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL0);
+		if (MTK_CLUSTER_PWR_STATE(state) == MTK_LOCAL_STATE_OFF)
+			spm_mcdi_finish_for_on_state(mpidr, MTK_PWR_LVL1);
+	}
+
+	gicv2_pcpu_distif_init();
+}
+#endif
+
+#if ENABLE_PLAT_COMPAT
+static unsigned int plat_get_sys_suspend_power_state(void)
+{
+	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
+	return psci_make_powerstate(0, 1, 2);
+}
+#else
+static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	assert(PLAT_MAX_PWR_LVL >= 2);
+
+	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
+}
+#endif
+
+/*******************************************************************************
+ * MTK handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 plat_system_off(void)
+{
+	INFO("MTK System Off\n");
+
+	rtc_bbpu_power_down();
+
+	wfi();
+	ERROR("MTK System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 plat_system_reset(void)
+{
+	/* Write the System Configuration Control Register */
+	INFO("MTK System Reset\n");
+
+	mmio_clrsetbits_32(MTK_WDT_BASE,
+		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ),
+		MTK_WDT_MODE_KEY);
+	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
+	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);
+
+	wfi();
+	ERROR("MTK System Reset: operation not handled.\n");
+	panic();
+}
+
+#if !ENABLE_PLAT_COMPAT
+#if !PSCI_EXTENDED_STATE_ID
+static int plat_validate_power_state(unsigned int power_state,
+					psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It's possible to enter standby only on power level 0
+		 * Ignore any other power level.
+		 */
+		if (pwr_lvl != 0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MTK_PWR_LVL0] =
+					MTK_LOCAL_STATE_RET;
+	} else {
+		for (i = 0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					MTK_LOCAL_STATE_OFF;
+	}
+
+	/*
+	 * We expect the 'state id' to be zero.
+	 */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+#else
+int plat_validate_power_state(unsigned int power_state,
+				psci_power_state_t *req_state)
+{
+	unsigned int state_id;
+	int i;
+
+	assert(req_state);
+
+	/*
+	 *  Currently we are using a linear search for finding the matching
+	 *  entry in the idle power state array. This can be made a binary
+	 *  search if the number of entries justifies the additional complexity.
+	 */
+	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
+		if (power_state == mtk_pm_idle_states[i])
+			break;
+	}
+
+	/* Return error if entry not found in the idle state array */
+	if (!mtk_pm_idle_states[i])
+		return PSCI_E_INVALID_PARAMS;
+
+	i = 0;
+	state_id = psci_get_pstate_id(power_state);
+
+	/* Parse the State ID and populate the state info parameter */
+	while (state_id) {
+		req_state->pwr_domain_state[i++] = state_id &
+						MTK_LOCAL_PSTATE_MASK;
+		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+#endif
+
+void mtk_system_pwr_domain_resume(void)
+{
+	console_init(MT8173_UART0_BASE, MT8173_UART_CLOCK, MT8173_BAUDRATE);
+
+	/* Assert system power domain is available on the platform */
+	assert(PLAT_MAX_PWR_LVL >= MTK_PWR_LVL2);
+
+	plat_arm_gic_init();
+}
+#endif
+
+#if ENABLE_PLAT_COMPAT
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const plat_pm_ops_t plat_plat_pm_ops = {
+	.affinst_standby		= plat_affinst_standby,
+	.affinst_on			= plat_affinst_on,
+	.affinst_off			= plat_affinst_off,
+	.affinst_suspend		= plat_affinst_suspend,
+	.affinst_on_finish		= plat_affinst_on_finish,
+	.affinst_suspend_finish		= plat_affinst_suspend_finish,
+	.system_off			= plat_system_off,
+	.system_reset			= plat_system_reset,
+	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops & initialize the mtk_platform power
+ * controller
+ ******************************************************************************/
+int platform_setup_pm(const plat_pm_ops_t **plat_ops)
+{
+	*plat_ops = &plat_plat_pm_ops;
+	return 0;
+}
+#else
+static const plat_psci_ops_t plat_plat_pm_ops = {
+	.cpu_standby			= plat_cpu_standby,
+	.pwr_domain_on			= plat_power_domain_on,
+	.pwr_domain_on_finish		= plat_power_domain_on_finish,
+	.pwr_domain_off			= plat_power_domain_off,
+	.pwr_domain_suspend		= plat_power_domain_suspend,
+	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
+	.system_off			= plat_system_off,
+	.system_reset			= plat_system_reset,
+	.validate_power_state		= plat_validate_power_state,
+	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &plat_plat_pm_ops;
+	secure_entrypoint = sec_entrypoint;
+	return 0;
+}
+
+/*
+ * The PSCI generic code uses this API to let the platform participate in state
+ * coordination during a power management operation. It compares the platform
+ * specific local power states requested by each cpu for a given power domain
+ * and returns the coordinated target power state that the domain should
+ * enter. A platform assigns a number to a local power state. This default
+ * implementation assumes that the platform assigns these numbers in order of
+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y
+ * then X represents a shallower power state than Y. As a result, the
+ * coordinated target local power state for a power domain will be the minimum
+ * of the requested local power states.
+ */
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+	assert(ncpu);
+
+	do {
+		temp = *states++;
+		if (temp < target)
+			target = temp;
+	} while (--ncpu);
+
+	return target;
+}
+#endif
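Usage sketch for the coordination rule described above (illustrative only; it reuses this file's constants and would not be built): if one CPU in a cluster requests OFF while its sibling stays in RUN, the coordinated cluster state is the shallower of the two, so the cluster remains powered.

/* Sketch: minimum-of-requests coordination at the cluster level. */
static plat_local_state_t sketch_cluster_coordination(void)
{
	plat_local_state_t requests[2] = {
		MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_RUN
	};

	/* Returns MTK_LOCAL_STATE_RUN (0): the cluster is kept on. */
	return plat_get_target_pwr_state(MTK_PWR_LVL1, requests, 2);
}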
diff --git a/plat/mediatek/mt8173/plat_sip_calls.c b/plat/mediatek/mt8173/plat_sip_calls.c
new file mode 100644
index 0000000..1d51cb5
--- /dev/null
+++ b/plat/mediatek/mt8173/plat_sip_calls.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <crypt.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mtcmos.h>
+#include <mtk_sip_svc.h>
+#include <plat_sip_calls.h>
+#include <runtime_svc.h>
+
+/* Authorized secure register list */
+enum {
+	SREG_HDMI_COLOR_EN = 0x14000904
+};
+
+static const uint32_t authorized_sreg[] = {
+	SREG_HDMI_COLOR_EN
+};
+
+#define authorized_sreg_cnt	\
+	(sizeof(authorized_sreg) / sizeof(authorized_sreg[0]))
+
+uint64_t mt_sip_set_authorized_sreg(uint32_t sreg, uint32_t val)
+{
+	uint64_t i;
+
+	for (i = 0; i < authorized_sreg_cnt; i++) {
+		if (authorized_sreg[i] == sreg) {
+			mmio_write_32(sreg, val);
+			return MTK_SIP_E_SUCCESS;
+		}
+	}
+
+	return MTK_SIP_E_INVALID_PARAM;
+}
+
+static uint64_t mt_sip_pwr_on_mtcmos(uint32_t val)
+{
+	uint32_t ret;
+
+	ret = mtcmos_non_cpu_ctrl(1, val);
+	if (ret)
+		return MTK_SIP_E_INVALID_PARAM;
+	else
+		return MTK_SIP_E_SUCCESS;
+}
+
+static uint64_t mt_sip_pwr_off_mtcmos(uint32_t val)
+{
+	uint32_t ret;
+
+	ret = mtcmos_non_cpu_ctrl(0, val);
+	if (ret)
+		return MTK_SIP_E_INVALID_PARAM;
+	else
+		return MTK_SIP_E_SUCCESS;
+}
+
+static uint64_t mt_sip_pwr_mtcmos_support(void)
+{
+	return MTK_SIP_E_SUCCESS;
+}
+
+uint64_t mediatek_plat_sip_handler(uint32_t smc_fid,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	uint64_t ret;
+
+	switch (smc_fid) {
+	case MTK_SIP_PWR_ON_MTCMOS:
+		ret = mt_sip_pwr_on_mtcmos((uint32_t)x1);
+		SMC_RET1(handle, ret);
+
+	case MTK_SIP_PWR_OFF_MTCMOS:
+		ret = mt_sip_pwr_off_mtcmos((uint32_t)x1);
+		SMC_RET1(handle, ret);
+
+	case MTK_SIP_PWR_MTCMOS_SUPPORT:
+		ret = mt_sip_pwr_mtcmos_support();
+		SMC_RET1(handle, ret);
+
+	case MTK_SIP_SET_HDCP_KEY_EX:
+		ret = crypt_set_hdcp_key_ex(x1, x2, x3);
+		SMC_RET1(handle, ret);
+
+	case MTK_SIP_SET_HDCP_KEY_NUM:
+		ret = crypt_set_hdcp_key_num((uint32_t)x1);
+		SMC_RET1(handle, ret);
+
+	case MTK_SIP_CLR_HDCP_KEY:
+		ret = crypt_clear_hdcp_key();
+		SMC_RET1(handle, ret);
+
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		break;
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
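For context (not part of this patch), a normal-world caller reaches this handler through a standard SMC. A hedged Linux-side sketch, assuming the kernel's arm_smccc_smc() helper from <linux/arm-smccc.h>, could look like the following; real drivers may differ.

/* Hypothetical normal-world caller powering on one MTCMOS domain. */
#include <linux/arm-smccc.h>
#include <linux/errno.h>

static int sketch_mtcmos_power_on(unsigned long domain)
{
	struct arm_smccc_res res;

	arm_smccc_smc(0x82000402 /* MTK_SIP_PWR_ON_MTCMOS */,
		      domain, 0, 0, 0, 0, 0, 0, &res);

	return res.a0 ? -EINVAL : 0;
}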
diff --git a/plat/mediatek/mt8173/plat_topology.c b/plat/mediatek/mt8173/plat_topology.c
new file mode 100644
index 0000000..5bb0451
--- /dev/null
+++ b/plat/mediatek/mt8173/plat_topology.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+#if ENABLE_PLAT_COMPAT
+unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
+{
+	/* Report 1 (absent) instance at levels higher than the cluster level */
+	if (aff_lvl > MPIDR_AFFLVL1)
+		return PLATFORM_SYSTEM_COUNT;
+
+	if (aff_lvl == MPIDR_AFFLVL1)
+		return PLATFORM_CLUSTER_COUNT;
+
+	return mpidr & 0x100 ? PLATFORM_CLUSTER1_CORE_COUNT :
+			       PLATFORM_CLUSTER0_CORE_COUNT;
+}
+
+unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
+{
+	return aff_lvl <= MPIDR_AFFLVL2 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
+}
+
+int mt_setup_topology(void)
+{
+	/* [TODO] Make topology configurable via SCC */
+	return 0;
+}
+#else
+
+const unsigned char mtk_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	PLATFORM_SYSTEM_COUNT,
+	/* No of children for the root node */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* No of children for the second cluster node */
+	PLATFORM_CLUSTER1_CORE_COUNT
+};
+
+/*******************************************************************************
+ * This function returns the MT8173 default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return mtk_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+
+	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+		return -1;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return -1;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the two clusters present on the platform.
+	 */
+	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+		return -1;
+
+	return (cpu_id + (cluster_id * 4));
+}
+#endif
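As a quick check of the linear index mapping above (illustrative sketch only): cluster 0 contributes positions 0-3 and cluster 1 positions 4-5, so the core with Aff1 = 1 and Aff0 = 1 lands at index 5.

/* Sketch: expected linear positions for the six MT8173 cores. */
#include <assert.h>

static int sketch_core_pos(unsigned int cluster_id, unsigned int cpu_id)
{
	return cpu_id + (cluster_id * 4);	/* mirrors plat_core_pos_by_mpidr() */
}

int main(void)
{
	assert(sketch_core_pos(0, 3) == 3);	/* last core of cluster 0  */
	assert(sketch_core_pos(1, 0) == 4);	/* first core of cluster 1 */
	assert(sketch_core_pos(1, 1) == 5);
	return 0;
}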
diff --git a/plat/mediatek/mt8173/platform.mk b/plat/mediatek/mt8173/platform.mk
new file mode 100644
index 0000000..cd01645
--- /dev/null
+++ b/plat/mediatek/mt8173/platform.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+MTK_PLAT		:=	plat/mediatek
+MTK_PLAT_SOC		:=	${MTK_PLAT}/${PLAT}
+
+PLAT_INCLUDES		:=	-I${MTK_PLAT}/common/				\
+				-I${MTK_PLAT}/common/drivers/uart/		\
+				-Iinclude/plat/arm/common			\
+				-Iinclude/plat/arm/common/aarch64		\
+				-I${MTK_PLAT_SOC}/drivers/crypt/		\
+				-I${MTK_PLAT_SOC}/drivers/mtcmos/		\
+				-I${MTK_PLAT_SOC}/drivers/pmic/			\
+				-I${MTK_PLAT_SOC}/drivers/rtc/			\
+				-I${MTK_PLAT_SOC}/drivers/spm/			\
+				-I${MTK_PLAT_SOC}/drivers/timer/		\
+				-I${MTK_PLAT_SOC}/include/
+
+PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
+				lib/xlat_tables/aarch64/xlat_tables.c		\
+				plat/arm/common/arm_gicv2.c			\
+				plat/common/plat_gicv2.c
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c				\
+				drivers/arm/gic/common/gic_common.c		\
+				drivers/arm/gic/v2/gicv2_main.c			\
+				drivers/arm/gic/v2/gicv2_helpers.c		\
+				drivers/console/aarch64/console.S		\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				lib/cpus/aarch64/aem_generic.S			\
+				lib/cpus/aarch64/cortex_a53.S			\
+				lib/cpus/aarch64/cortex_a57.S			\
+				lib/cpus/aarch64/cortex_a72.S			\
+				${MTK_PLAT}/common/drivers/uart/8250_console.S	\
+				${MTK_PLAT}/common/mtk_plat_common.c		\
+				${MTK_PLAT}/common/mtk_sip_svc.c		\
+				${MTK_PLAT_SOC}/aarch64/plat_helpers.S		\
+				${MTK_PLAT_SOC}/aarch64/platform_common.c	\
+				${MTK_PLAT_SOC}/bl31_plat_setup.c		\
+				${MTK_PLAT_SOC}/drivers/crypt/crypt.c		\
+				${MTK_PLAT_SOC}/drivers/mtcmos/mtcmos.c		\
+				${MTK_PLAT_SOC}/drivers/pmic/pmic_wrap_init.c	\
+				${MTK_PLAT_SOC}/drivers/rtc/rtc.c		\
+				${MTK_PLAT_SOC}/drivers/spm/spm.c		\
+				${MTK_PLAT_SOC}/drivers/spm/spm_hotplug.c	\
+				${MTK_PLAT_SOC}/drivers/spm/spm_mcdi.c		\
+				${MTK_PLAT_SOC}/drivers/spm/spm_suspend.c	\
+				${MTK_PLAT_SOC}/drivers/timer/mt_cpuxgpt.c	\
+				${MTK_PLAT_SOC}/plat_pm.c			\
+				${MTK_PLAT_SOC}/plat_sip_calls.c		\
+				${MTK_PLAT_SOC}/plat_topology.c			\
+				${MTK_PLAT_SOC}/power_tracer.c			\
+				${MTK_PLAT_SOC}/scu.c
+
+# Flag used by the MTK_platform port to determine the version of ARM GIC
+# architecture to use for interrupt management in EL3.
+ARM_GIC_ARCH		:=	2
+$(eval $(call add_define,ARM_GIC_ARCH))
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_826319	:=	1
+ERRATA_A53_836870	:=	1
+ERRATA_A53_855873	:=	1
+
+# Indicate that the reset vector address can be programmed
+PROGRAMMABLE_RESET_ADDRESS	:=	1
+
+$(eval $(call add_define,MTK_SIP_SET_AUTHORIZED_SECURE_REG_ENABLE))
diff --git a/plat/mediatek/mt8173/power_tracer.c b/plat/mediatek/mt8173/power_tracer.c
new file mode 100644
index 0000000..5c0a468
--- /dev/null
+++ b/plat/mediatek/mt8173/power_tracer.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <power_tracer.h>
+
+#define trace_log(...)  INFO("psci: " __VA_ARGS__)
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode)
+{
+	switch (mode) {
+	case CPU_UP:
+		trace_log("core %ld:%ld ON\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CPU_DOWN:
+		trace_log("core %ld:%ld OFF\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CPU_SUSPEND:
+		trace_log("core %ld:%ld SUSPEND\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+			  (mpidr & MPIDR_CPU_MASK));
+		break;
+	case CLUSTER_UP:
+		trace_log("cluster %ld ON\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	case CLUSTER_DOWN:
+		trace_log("cluster %ld OFF\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	case CLUSTER_SUSPEND:
+		trace_log("cluster %ld SUSPEND\n",
+			  (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+		break;
+	default:
+		trace_log("unknown power mode\n");
+		break;
+	}
+}
diff --git a/plat/mediatek/mt8173/scu.c b/plat/mediatek/mt8173/scu.c
new file mode 100644
index 0000000..4daa9e5
--- /dev/null
+++ b/plat/mediatek/mt8173/scu.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <mcucfg.h>
+#include <mmio.h>
+
+void disable_scu(unsigned long mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_miscdbg,
+			MP1_ACINACTM);
+	else
+		mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp0_axi_config,
+			MP0_ACINACTM);
+}
+
+void enable_scu(unsigned long mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		mmio_clrbits_32((uintptr_t)&mt8173_mcucfg->mp1_miscdbg,
+			MP1_ACINACTM);
+	else
+		mmio_clrbits_32((uintptr_t)&mt8173_mcucfg->mp0_axi_config,
+			MP0_ACINACTM);
+}
diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
new file mode 100644
index 0000000..3c490d0
--- /dev/null
+++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <platform_def.h>
+#include <tegra_def.h>
+
+#define MIDR_PN_CORTEX_A57		0xD07
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL3 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL3_L2ACTLR_BIT		(1 << 6)
+#define ACTLR_EL3_L2ECTLR_BIT		(1 << 5)
+#define ACTLR_EL3_L2CTLR_BIT		(1 << 4)
+#define ACTLR_EL3_CPUECTLR_BIT		(1 << 1)
+#define ACTLR_EL3_CPUACTLR_BIT		(1 << 0)
+#define ACTLR_EL3_ENABLE_ALL_ACCESS	(ACTLR_EL3_L2ACTLR_BIT | \
+					 ACTLR_EL3_L2ECTLR_BIT | \
+					 ACTLR_EL3_L2CTLR_BIT | \
+					 ACTLR_EL3_CPUECTLR_BIT | \
+					 ACTLR_EL3_CPUACTLR_BIT)
+
+	/* Global functions */
+	.globl	plat_is_my_cpu_primary
+	.globl	plat_my_core_pos
+	.globl	plat_get_my_entrypoint
+	.globl	plat_secondary_cold_boot_setup
+	.globl	platform_mem_init
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	tegra_secure_entrypoint
+	.globl	plat_reset_handler
+
+	/* Global variables */
+	.globl	tegra_sec_entry_point
+	.globl	ns_image_entrypoint
+	.globl	tegra_bl31_phys_base
+	.globl	tegra_console_base
+	.globl	tegra_enable_l2_ecc_parity_prot
+
+	/* ---------------------
+	 * Common CPU init code
+	 * ---------------------
+	 */
+.macro	cpu_init_common
+
+	/* ------------------------------------------------
+	 * We enable processor retention, L2/CPUECTLR NS
+	 * access and ECC/Parity protection for A57 CPUs
+	 * ------------------------------------------------
+	 */
+	mrs	x0, midr_el1
+	mov	x1, #(MIDR_PN_MASK << MIDR_PN_SHIFT)
+	and	x0, x0, x1
+	lsr	x0, x0, #MIDR_PN_SHIFT
+	cmp	x0, #MIDR_PN_CORTEX_A57
+	b.ne	1f
+
+	/* ---------------------------
+	 * Enable processor retention
+	 * ---------------------------
+	 */
+	mrs	x0, CORTEX_A57_L2ECTLR_EL1
+	mov	x1, #RETENTION_ENTRY_TICKS_512
+	bic	x0, x0, #CORTEX_A57_L2ECTLR_RET_CTRL_MASK
+	orr	x0, x0, x1
+	msr	CORTEX_A57_L2ECTLR_EL1, x0
+	isb
+
+	mrs	x0, CORTEX_A57_ECTLR_EL1
+	mov	x1, #RETENTION_ENTRY_TICKS_512
+	bic	x0, x0, #CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK
+	orr	x0, x0, x1
+	msr	CORTEX_A57_ECTLR_EL1, x0
+	isb
+
+	/* -------------------------------------------------------
+	 * Enable L2 and CPU ECTLR RW access from non-secure world
+	 * -------------------------------------------------------
+	 */
+	mov	x0, #ACTLR_EL3_ENABLE_ALL_ACCESS
+	msr	actlr_el3, x0
+	msr	actlr_el2, x0
+	isb
+
+	/* -------------------------------------------------------
+	 * Enable L2 ECC and Parity Protection
+	 * -------------------------------------------------------
+	 */
+	adr	x0, tegra_enable_l2_ecc_parity_prot
+	ldr	x0, [x0]
+	cbz	x0, 1f
+	mrs	x0, CORTEX_A57_L2CTLR_EL1
+	and	x1, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
+	cbnz	x1, 1f
+	orr	x0, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
+	msr	CORTEX_A57_L2CTLR_EL1, x0
+	isb
+
+	/* --------------------------------
+	 * Enable the cycle count register
+	 * --------------------------------
+	 */
+1:	mrs	x0, pmcr_el0
+	ubfx	x0, x0, #11, #5		// read PMCR.N field
+	mov	x1, #1
+	lsl	x0, x1, x0
+	sub	x0, x0, #1		// mask of event counters
+	orr	x0, x0, #0x80000000	// disable overflow intrs
+	msr	pmintenclr_el1, x0
+	msr	pmuserenr_el0, x1	// enable user mode access
+
+	/* ----------------------------------------------------------------
+	 * Allow non-privileged access to CNTVCT: Set CNTKCTL (Kernel Count
+	 * register), bit 1 (EL0VCTEN) to enable access to CNTVCT/CNTFRQ
+	 * registers from EL0.
+	 * ----------------------------------------------------------------
+	 */
+	mrs	x0, cntkctl_el1
+	orr	x0, x0, #EL0VCTEN_BIT
+	msr	cntkctl_el1, x0
+.endm
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary(void);
+	 *
+	 * This function checks if this is the Primary CPU
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #TEGRA_PRIMARY_CPU
+	cset	x0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_my_core_pos(void);
+	 *
+	 * result: CorePos = CoreId + (ClusterId << 2)
+	 * -----------------------------------------------------
+	 */
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc plat_my_core_pos
+
+	/* -----------------------------------------------------
+	 * unsigned long plat_get_my_entrypoint (void);
+	 *
+	 * Main job of this routine is to distinguish between
+	 * a cold and warm boot. If the tegra_sec_entry_point for
+	 * this CPU is present, then it's a warm boot.
+	 *
+	 * -----------------------------------------------------
+	 */
+func plat_get_my_entrypoint
+	adr	x1, tegra_sec_entry_point
+	ldr	x0, [x1]
+	ret
+endfunc plat_get_my_entrypoint
+
+	/* -----------------------------------------------------
+	 * int platform_get_core_pos(int mpidr);
+	 *
+	 * With this function: CorePos = (ClusterId * 4) +
+	 *                                CoreId
+	 * -----------------------------------------------------
+	 */
+func platform_get_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc platform_get_core_pos
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset. Right
+	 * now this is a stub function.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	mov	x0, #0
+	ret
+endfunc plat_secondary_cold_boot_setup
+
+	/* --------------------------------------------------------
+	 * void platform_mem_init (void);
+	 *
+	 * Any memory init, relocation to be done before the
+	 * platform boots. Called very early in the boot process.
+	 * --------------------------------------------------------
+	 */
+func platform_mem_init
+	mov	x0, #0
+	ret
+endfunc platform_mem_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0 - x4
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov	x0, #0
+	adr	x1, tegra_console_base
+	ldr	x1, [x1]
+	cbz	x1, 1f
+	mov	w0, #1
+1:	ret
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(void)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	adr	x1, tegra_console_base
+	ldr	x1, [x1]
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* ---------------------------------------------------
+	 * Function to handle a platform reset and store
+	 * input parameters passed by BL2.
+	 * ---------------------------------------------------
+	 */
+func plat_reset_handler
+
+	/* ----------------------------------------------------
+	 * Verify if we are running from BL31_BASE address
+	 * ----------------------------------------------------
+	 */
+	adr	x18, bl31_entrypoint
+	mov	x17, #BL31_BASE
+	cmp	x18, x17
+	b.eq	1f
+
+	/* ----------------------------------------------------
+	 * Copy the entire BL31 code to BL31_BASE if we are not
+	 * running from it already
+	 * ----------------------------------------------------
+	 */
+	mov	x0, x17
+	mov	x1, x18
+	mov	x2, #BL31_SIZE
+_loop16:
+	cmp	x2, #16
+	b.lo	_loop1
+	ldp	x3, x4, [x1], #16
+	stp	x3, x4, [x0], #16
+	sub	x2, x2, #16
+	b	_loop16
+	/* copy byte per byte */
+_loop1:
+	cbz	x2, _end
+	ldrb	w3, [x1], #1
+	strb	w3, [x0], #1
+	subs	x2, x2, #1
+	b.ne	_loop1
+
+	/* ----------------------------------------------------
+	 * Jump to BL31_BASE and start execution again
+	 * ----------------------------------------------------
+	 */
+_end:	mov	x0, x20
+	mov	x1, x21
+	br	x17
+1:
+
+	/* -----------------------------------
+	 * derive and save the phys_base addr
+	 * -----------------------------------
+	 */
+	adr	x17, tegra_bl31_phys_base
+	ldr	x18, [x17]
+	cbnz	x18, 1f
+	adr	x18, bl31_entrypoint
+	str	x18, [x17]
+
+1:	cpu_init_common
+
+	ret
+endfunc plat_reset_handler
+
+	/* ----------------------------------------
+	 * Secure entrypoint function for CPU boot
+	 * ----------------------------------------
+	 */
+func tegra_secure_entrypoint _align=6
+
+#if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT
+
+	/* -------------------------------------------------------
+	 * Invalidate BTB along with I$ to remove any stale
+	 * entries from the branch predictor array.
+	 * -------------------------------------------------------
+	 */
+	mrs	x0, CORTEX_A57_CPUACTLR_EL1
+	orr	x0, x0, #1
+	msr	CORTEX_A57_CPUACTLR_EL1, x0	/* invalidate BTB and I$ together */
+	dsb	sy
+	isb
+	ic	iallu			/* actual invalidate */
+	dsb	sy
+	isb
+
+	mrs	x0, CORTEX_A57_CPUACTLR_EL1
+	bic	x0, x0, #1
+	msr	CORTEX_A57_CPUACTLR_EL1, x0	/* restore original CPUACTLR_EL1 */
+	dsb	sy
+	isb
+
+	.rept	7
+	nop				/* wait */
+	.endr
+
+	/* -----------------------------------------------
+	 * Extract OSLK bit and check if it is '1'. This
+	 * bit remains '0' for A53 on warm-resets. If '1',
+	 * turn off regional clock gating and request warm
+	 * reset.
+	 * -----------------------------------------------
+	 */
+	mrs	x0, oslsr_el1
+	and	x0, x0, #2
+	mrs	x1, mpidr_el1
+	bics	xzr, x0, x1, lsr #7	/* 0 = slow cluster or warm reset */
+	b.eq	restore_oslock
+	mov	x0, xzr
+	msr	oslar_el1, x0		/* os lock stays 0 across warm reset */
+	mov	x3, #3
+	movz	x4, #0x8000, lsl #48
+	msr	CORTEX_A57_CPUACTLR_EL1, x4	/* turn off RCG */
+	isb
+	msr	rmr_el3, x3		/* request warm reset */
+	isb
+	dsb	sy
+1:	wfi
+	b	1b
+
+	/* --------------------------------------------------
+	 * These nops are here so that speculative execution
+	 * won't harm us before we are done with warm reset.
+	 * --------------------------------------------------
+	 */
+	.rept	65
+	nop
+	.endr
+
+	/* --------------------------------------------------
+	 * Do not insert instructions here
+	 * --------------------------------------------------
+	 */
+#endif
+
+	/* --------------------------------------------------
+	 * Restore OS Lock bit
+	 * --------------------------------------------------
+	 */
+restore_oslock:
+	mov	x0, #1
+	msr	oslar_el1, x0
+
+	cpu_init_common
+
+	/* ---------------------------------------------------------------------
+	 * The initial state of the Architectural feature trap register
+	 * (CPTR_EL3) is unknown and it must be set to a known state. All
+	 * feature traps are disabled. Some bits in this register are marked as
+	 * Reserved and should not be modified.
+	 *
+	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
+	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
+	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
+	 *  access to trace functionality is not supported, this bit is RES0.
+	 * CPTR_EL3.TFP: This causes instructions that access the registers
+	 *  associated with Floating Point and Advanced SIMD execution to trap
+	 *  to EL3 when executed from any exception level, unless trapped to EL1
+	 *  or EL2.
+	 * ---------------------------------------------------------------------
+	 */
+	mrs	x1, cptr_el3
+	bic	w1, w1, #TCPAC_BIT
+	bic	w1, w1, #TTA_BIT
+	bic	w1, w1, #TFP_BIT
+	msr	cptr_el3, x1
+
+	/* --------------------------------------------------
+	 * Get secure world's entry point and jump to it
+	 * --------------------------------------------------
+	 */
+	bl	plat_get_my_entrypoint
+	br	x0
+endfunc tegra_secure_entrypoint
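
As a reading aid only, here is one hedged interpretation of the `bics`-based check above in C, assuming OSLSR_EL1 bit 1 is the OS Lock (OSLK) bit and MPIDR bit 8 (Aff1[0]) is set on the slow cluster; a non-zero result corresponds to the path that turns off RCG and requests the warm reset.

```c
#include <stdint.h>

/*
 * Sketch of: and x0, x0, #2 ; bics xzr, x0, x1, lsr #7
 * Returns non-zero when OSLK is set and the core is not on the slow
 * cluster, i.e. the case in which the code above requests a warm reset.
 */
static int warm_reset_needed(uint64_t oslsr_el1, uint64_t mpidr_el1)
{
	uint64_t oslk = oslsr_el1 & 0x2U;        /* extract the OSLK bit */

	return (oslk & ~(mpidr_el1 >> 7)) != 0U; /* 0 = slow cluster or warm reset */
}
```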
+
+	.data
+	.align 3
+
+	/* --------------------------------------------------
+	 * CPU Secure entry point - resume from suspend
+	 * --------------------------------------------------
+	 */
+tegra_sec_entry_point:
+	.quad	0
+
+	/* --------------------------------------------------
+	 * NS world's cold boot entry point
+	 * --------------------------------------------------
+	 */
+ns_image_entrypoint:
+	.quad	0
+
+	/* --------------------------------------------------
+	 * BL31's physical base address
+	 * --------------------------------------------------
+	 */
+tegra_bl31_phys_base:
+	.quad	0
+
+	/* --------------------------------------------------
+	 * UART controller base for console init
+	 * --------------------------------------------------
+	 */
+tegra_console_base:
+	.quad	0
+
+	/* --------------------------------------------------
+	 * Enable L2 ECC and Parity Protection
+	 * --------------------------------------------------
+	 */
+tegra_enable_l2_ecc_parity_prot:
+	.quad	0
diff --git a/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c b/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c
new file mode 100644
index 0000000..2d827da
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a53.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define CLK_RST_DEV_L_SET		0x300
+#define CLK_RST_DEV_L_CLR		0x304
+#define  CLK_BPMP_RST			(1 << 1)
+
+#define EVP_BPMP_RESET_VECTOR		0x200
+
+static const uint64_t flowctrl_offset_cpu_csr[4] = {
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU0_CSR),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 8),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 16)
+};
+
+static const uint64_t flowctrl_offset_halt_cpu[4] = {
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU0_EVENTS),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 8),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 16)
+};
+
+static const uint64_t flowctrl_offset_cc4_ctrl[4] = {
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 4),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 8),
+	(TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 12)
+};
+
+static inline void tegra_fc_cc4_ctrl(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_cc4_ctrl[cpu_id], val);
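+	/* read back, presumably to make sure the write has reached the flow controller */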
+	val = mmio_read_32(flowctrl_offset_cc4_ctrl[cpu_id]);
+}
+
+static inline void tegra_fc_cpu_csr(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_cpu_csr[cpu_id], val);
+	val = mmio_read_32(flowctrl_offset_cpu_csr[cpu_id]);
+}
+
+static inline void tegra_fc_halt_cpu(int cpu_id, uint32_t val)
+{
+	mmio_write_32(flowctrl_offset_halt_cpu[cpu_id], val);
+	val = mmio_read_32(flowctrl_offset_halt_cpu[cpu_id]);
+}
+
+static void tegra_fc_prepare_suspend(int cpu_id, uint32_t csr)
+{
+	uint32_t val;
+
+	val = FLOWCTRL_HALT_GIC_IRQ | FLOWCTRL_HALT_GIC_FIQ |
+	      FLOWCTRL_HALT_LIC_IRQ | FLOWCTRL_HALT_LIC_FIQ |
+	      FLOWCTRL_WAITEVENT;
+	tegra_fc_halt_cpu(cpu_id, val);
+
+	val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+	      FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu_id);
+	tegra_fc_cpu_csr(cpu_id, val | csr);
+}
+
+/*******************************************************************************
+ * Power down the current CPU
+ ******************************************************************************/
+void tegra_fc_cpu_powerdn(uint32_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+
+	VERBOSE("CPU%d powering down...\n", cpu);
+	tegra_fc_prepare_suspend(cpu, 0);
+}
+
+/*******************************************************************************
+ * Suspend the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_idle(uint32_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t val;
+
+	VERBOSE("Entering cluster idle state...\n");
+
+	tegra_fc_cc4_ctrl(cpu, 0);
+
+	/* hardware L2 flush is faster for A53 only */
+	tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+		!!MPIDR_AFFLVL1_VAL(mpidr));
+
+	/* suspend the CPU cluster */
+	val = FLOWCTRL_PG_CPU_NONCPU << FLOWCTRL_ENABLE_EXT;
+	tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Power down the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_powerdn(uint32_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t val;
+
+	VERBOSE("Entering cluster powerdn state...\n");
+
+	tegra_fc_cc4_ctrl(cpu, 0);
+
+	/* hardware L2 flush is faster for A53 only */
+	tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+		read_midr() == CORTEX_A53_MIDR);
+
+	/* power down the CPU cluster */
+	val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+	tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Suspend the entire SoC
+ ******************************************************************************/
+void tegra_fc_soc_powerdn(uint32_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t val;
+
+	VERBOSE("Entering SoC powerdn state...\n");
+
+	tegra_fc_cc4_ctrl(cpu, 0);
+
+	tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL, 1);
+
+	val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+	tegra_fc_prepare_suspend(cpu, val);
+
+	/* overwrite HALT register */
+	tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+}
+
+/*******************************************************************************
+ * Power up the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_on(int cpu)
+{
+	tegra_fc_cpu_csr(cpu, FLOWCTRL_CSR_ENABLE);
+	tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT | FLOWCTRL_HALT_SCLK);
+}
+
+/*******************************************************************************
+ * Power down the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_off(int cpu)
+{
+	uint32_t val;
+
+	/*
+	 * The flow controller powers down the CPU when it executes WFI. The
+	 * CPU is powered up again when it receives any interrupt.
+	 */
+	val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+		FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu);
+	tegra_fc_cpu_csr(cpu, val);
+	tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+	tegra_fc_cc4_ctrl(cpu, 0);
+}
+
+/*******************************************************************************
+ * Inform the BPMP that we have completed the cluster power up
+ ******************************************************************************/
+void tegra_fc_lock_active_cluster(void)
+{
+	uint32_t val;
+
+	val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+	val |= FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK;
+	tegra_fc_write_32(FLOWCTRL_BPMP_CLUSTER_CONTROL, val);
+	val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+}
+
+/*******************************************************************************
+ * Reset BPMP processor
+ ******************************************************************************/
+void tegra_fc_reset_bpmp(void)
+{
+	uint32_t val;
+
+	/* halt BPMP */
+	tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
+
+	/* Assert BPMP reset */
+	mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
+
+	/* Restore reset address (stored in PMC_SCRATCH39) */
+	val = tegra_pmc_read_32(PMC_SCRATCH39);
+	mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, val);
+	while (val != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
+		; /* wait till value reaches EVP_BPMP_RESET_VECTOR */
+
+	/* Wait for 2us before de-asserting the reset signal. */
+	udelay(2);
+
+	/* De-assert BPMP reset */
+	mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_CLR, CLK_BPMP_RST);
+
+	/* Un-halt BPMP */
+	tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, 0);
+}
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c
new file mode 100644
index 0000000..9944e72
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <memctrl.h>
+#include <memctrl_v1.h>
+#include <mmio.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size;
+
+/*
+ * Init SMMU.
+ */
+void tegra_memctrl_setup(void)
+{
+	/*
+	 * Setup the Memory controller to allow only secure accesses to
+	 * the TZDRAM carveout
+	 */
+	INFO("Tegra Memory Controller (v1)\n");
+
+	/* allow translations for all MC engines */
+	tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_0_0,
+			(unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+	tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_1_0,
+			(unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+	tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_2_0,
+			(unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+	tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_3_0,
+			(unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+	tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_4_0,
+			(unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+
+	tegra_mc_write_32(MC_SMMU_ASID_SECURITY_0, MC_SMMU_ASID_SECURITY);
+
+	tegra_mc_write_32(MC_SMMU_TLB_CONFIG_0, MC_SMMU_TLB_CONFIG_0_RESET_VAL);
+	tegra_mc_write_32(MC_SMMU_PTC_CONFIG_0, MC_SMMU_PTC_CONFIG_0_RESET_VAL);
+
+	/* flush PTC and TLB */
+	tegra_mc_write_32(MC_SMMU_PTC_FLUSH_0, MC_SMMU_PTC_FLUSH_ALL);
+	(void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+	tegra_mc_write_32(MC_SMMU_TLB_FLUSH_0, MC_SMMU_TLB_FLUSH_ALL);
+
+	/* enable SMMU */
+	tegra_mc_write_32(MC_SMMU_CONFIG_0,
+			  MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE);
+	(void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+
+	/* video memory carveout */
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+			  (uint32_t)(video_mem_base >> 32));
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)video_mem_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size);
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+	tegra_memctrl_setup();
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	/*
+	 * Setup the Memory controller to allow only secure accesses to
+	 * the TZDRAM carveout
+	 */
+	INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+	tegra_mc_write_32(MC_SECURITY_CFG0_0, phys_base);
+	tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	/*
+	 * The v1 hardware controller does not have any registers
+	 * for setting up the on-chip TZRAM.
+	 */
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+				 unsigned long long non_overlap_area_size)
+{
+	/*
+	 * Map the NS memory first, clean it and then unmap it.
+	 */
+	mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+				non_overlap_area_start, /* VA */
+				non_overlap_area_size, /* size */
+				MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
+
+	zeromem((void *)non_overlap_area_start, non_overlap_area_size);
+	flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+	mmap_remove_dynamic_region(non_overlap_area_start,
+		non_overlap_area_size);
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	uintptr_t vmem_end_old = video_mem_base + (video_mem_size << 20);
+	uintptr_t vmem_end_new = phys_base + size_in_bytes;
+	unsigned long long non_overlap_area_size;
+
+	/*
+	 * Setup the Memory controller to restrict CPU accesses to the Video
+	 * Memory region
+	 */
+	INFO("Configuring Video Memory Carveout\n");
+
+	/*
+	 * First call: no previous carveout to clean, program the MC directly.
+	 */
+	if (video_mem_base == 0)
+		goto done;
+
+	/*
+	 * Clear the old regions now being exposed. The following cases
+	 * can occur -
+	 *
+	 * 1. clear whole old region (no overlap with new region)
+	 * 2. clear old sub-region below new base
+	 * 3. clear old sub-region above new end
+	 */
+	INFO("Cleaning previous Video Memory Carveout\n");
+
+	if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
+		tegra_clear_videomem(video_mem_base, video_mem_size << 20);
+	} else {
+		if (video_mem_base < phys_base) {
+			non_overlap_area_size = phys_base - video_mem_base;
+			tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+		}
+		if (vmem_end_old > vmem_end_new) {
+			non_overlap_area_size = vmem_end_old - vmem_end_new;
+			tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+		}
+	}
+
+done:
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32));
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+	/* store new values */
+	video_mem_base = phys_base;
+	video_mem_size = size_in_bytes >> 20;
+}
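
To make the three clearing cases above concrete, here is a small, self-contained illustration (hypothetical addresses only; it mirrors the overlap checks in tegra_memctrl_videomem_setup but does not touch hardware):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Print which parts of the old carveout would be cleared for a new one. */
static void show_cleared_ranges(uint64_t old_base, uint64_t old_size,
				uint64_t new_base, uint64_t new_size)
{
	uint64_t old_end = old_base + old_size;
	uint64_t new_end = new_base + new_size;

	if (new_base > old_end || old_base > new_end) {
		/* case 1: no overlap, clear the whole old region */
		printf("clear [0x%" PRIx64 ", 0x%" PRIx64 ")\n", old_base, old_end);
		return;
	}
	if (old_base < new_base)	/* case 2: old sub-region below new base */
		printf("clear [0x%" PRIx64 ", 0x%" PRIx64 ")\n", old_base, new_base);
	if (old_end > new_end)		/* case 3: old sub-region above new end */
		printf("clear [0x%" PRIx64 ", 0x%" PRIx64 ")\n", new_end, old_end);
}

int main(void)
{
	/* e.g. old = [2 GB, +256 MB), new = [2 GB + 128 MB, +256 MB) */
	show_cleared_ranges(0x80000000ULL, 0x10000000ULL,
			    0x88000000ULL, 0x10000000ULL);
	return 0;
}
```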
+
+/*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need access to
+ * IRAM. Because these clients connect to the MC and do not have a direct
+ * path to the IRAM, the MC implements AHB redirection during boot to provide
+ * a path to IRAM. In this mode, accesses to a programmed memory address aperture
+ * are directed to the AHB bus, allowing access to the IRAM. The AHB aperture
+ * is defined by the IRAM_BASE_LO and IRAM_BASE_HI registers, which are
+ * initialized to disable this aperture.
+ *
+ * Once bootup is complete, we must program IRAM base to 0xffffffff and
+ * IRAM top to 0x00000000, thus disabling access to IRAM. DRAM is then
+ * potentially accessible in this address range. These aperture registers
+ * also have an access_control/lock bit. After disabling the aperture, the
+ * access_control register should be programmed to lock the registers.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+	/* program the aperture registers */
+	tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFF);
+	tegra_mc_write_32(MC_IRAM_TOP_LO, 0);
+	tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0);
+
+	/* lock the aperture registers */
+	tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);
+}
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
new file mode 100644
index 0000000..92fdadc
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <memctrl_v2.h>
+#include <mmio.h>
+#include <smmu.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size_mb;
+
+static void tegra_memctrl_reconfig_mss_clients(void)
+{
+#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
+	uint32_t val, wdata_0, wdata_1;
+
+	/*
+	 * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
+	 * boot and strongly ordered MSS clients to flush existing memory
+	 * traffic and stall future requests.
+	 */
+	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+	assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);
+
+	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
+#if ENABLE_AFI_DEVICE
+		  MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
+#endif
+		  MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
+	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+	do {
+		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+	} while ((val & wdata_0) != wdata_0);
+
+	/* Wait one more time due to SW WAR for known legacy issue */
+	do {
+		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+	} while ((val & wdata_0) != wdata_0);
+
+	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+	assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);
+
+	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
+		  MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
+	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+	do {
+		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+	} while ((val & wdata_1) != wdata_1);
+
+	/* Wait one more time due to SW WAR for known legacy issue */
+	do {
+		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+	} while ((val & wdata_1) != wdata_1);
+
+	/*
+	 * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
+	 * strongly ordered MSS clients. ROC needs to be single point
+	 * of control on overriding the memory type. So, remove TSA's
+	 * memtype override.
+	 */
+#if ENABLE_AFI_DEVICE
+	mc_set_tsa_passthrough(AFIW);
+#endif
+	mc_set_tsa_passthrough(HDAW);
+	mc_set_tsa_passthrough(SATAW);
+	mc_set_tsa_passthrough(XUSB_HOSTW);
+	mc_set_tsa_passthrough(XUSB_DEVW);
+	mc_set_tsa_passthrough(SDMMCWAB);
+	mc_set_tsa_passthrough(APEDMAW);
+	mc_set_tsa_passthrough(SESWR);
+	mc_set_tsa_passthrough(ETRW);
+	mc_set_tsa_passthrough(AXISW);
+	mc_set_tsa_passthrough(EQOSW);
+	mc_set_tsa_passthrough(UFSHCW);
+	mc_set_tsa_passthrough(BPMPDMAW);
+	mc_set_tsa_passthrough(AONDMAW);
+	mc_set_tsa_passthrough(SCEDMAW);
+
+	/*
+	 * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT
+	 * for boot and strongly ordered MSS clients. This steers all sodev
+	 * transactions to ROC.
+	 *
+	 * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients
+	 * whose AXI IDs we know and trust.
+	 */
+
+#if ENABLE_AFI_DEVICE
+	/* Match AFIW */
+	mc_set_forced_coherent_so_dev_cfg(AFIR);
+#endif
+
+	/*
+	 * See bug 200131110 comment #35 - there are no normal requests
+	 * and AWID for SO/DEV requests is hardcoded in RTL for a
+	 * particular PCIE controller
+	 */
+#if ENABLE_AFI_DEVICE
+	mc_set_forced_coherent_so_dev_cfg(AFIW);
+#endif
+	mc_set_forced_coherent_cfg(HDAR);
+	mc_set_forced_coherent_cfg(HDAW);
+	mc_set_forced_coherent_cfg(SATAR);
+	mc_set_forced_coherent_cfg(SATAW);
+	mc_set_forced_coherent_cfg(XUSB_HOSTR);
+	mc_set_forced_coherent_cfg(XUSB_HOSTW);
+	mc_set_forced_coherent_cfg(XUSB_DEVR);
+	mc_set_forced_coherent_cfg(XUSB_DEVW);
+	mc_set_forced_coherent_cfg(SDMMCRAB);
+	mc_set_forced_coherent_cfg(SDMMCWAB);
+
+	/* Match APEDMAW */
+	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR);
+
+	/*
+	 * See bug 200131110 comment #35 - AWID for normal requests
+	 * is 0x80 and AWID for SO/DEV requests is 0x01
+	 */
+	mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW);
+	mc_set_forced_coherent_cfg(SESRD);
+	mc_set_forced_coherent_cfg(SESWR);
+	mc_set_forced_coherent_cfg(ETRR);
+	mc_set_forced_coherent_cfg(ETRW);
+	mc_set_forced_coherent_cfg(AXISR);
+	mc_set_forced_coherent_cfg(AXISW);
+	mc_set_forced_coherent_cfg(EQOSR);
+	mc_set_forced_coherent_cfg(EQOSW);
+	mc_set_forced_coherent_cfg(UFSHCR);
+	mc_set_forced_coherent_cfg(UFSHCW);
+	mc_set_forced_coherent_cfg(BPMPDMAR);
+	mc_set_forced_coherent_cfg(BPMPDMAW);
+	mc_set_forced_coherent_cfg(AONDMAR);
+	mc_set_forced_coherent_cfg(AONDMAW);
+	mc_set_forced_coherent_cfg(SCEDMAR);
+	mc_set_forced_coherent_cfg(SCEDMAW);
+
+	/*
+	 * At this point, ordering can occur at ROC. So, remove PCFIFO's
+	 * control over ordering requests.
+	 *
+	 * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
+	 * boot and strongly ordered MSS clients
+	 */
+	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
+#if ENABLE_AFI_DEVICE
+		mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
+#endif
+		mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
+		mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
+	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
+
+	val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
+		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
+		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
+	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);
+
+	val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
+		mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
+	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);
+
+	val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
+		mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
+		mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
+	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);
+
+	val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
+		mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
+	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
+
+	/*
+	 * At this point, ordering can occur at ROC. The SMMU need not
+	 * reorder any requests.
+	 *
+	 * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED
+	 * for boot and strongly ordered MSS clients
+	 */
+	val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL &
+#if ENABLE_AFI_DEVICE
+		mc_set_smmu_unordered_boot_so_mss(1, AFIW) &
+#endif
+		mc_set_smmu_unordered_boot_so_mss(1, HDAW) &
+		mc_set_smmu_unordered_boot_so_mss(1, SATAW);
+	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val);
+
+	val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL &
+		mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) &
+		mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW);
+	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val);
+
+	val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL &
+		mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB);
+	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val);
+
+	val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL &
+		mc_set_smmu_unordered_boot_so_mss(4, SESWR) &
+		mc_set_smmu_unordered_boot_so_mss(4, ETRW) &
+		mc_set_smmu_unordered_boot_so_mss(4, AXISW) &
+		mc_set_smmu_unordered_boot_so_mss(4, EQOSW) &
+		mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) &
+		mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) &
+		mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) &
+		mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW);
+	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val);
+
+	val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL &
+		mc_set_smmu_unordered_boot_so_mss(5, APEDMAW);
+	tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val);
+
+	/*
+	 * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
+	 * clients to allow memory traffic from all clients to start passing
+	 * through ROC
+	 */
+	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+	assert(val == wdata_0);
+
+	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
+	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+	assert(val == wdata_1);
+
+	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
+	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+#endif
+}
+
+static void tegra_memctrl_set_overrides(void)
+{
+	tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
+	const mc_txn_override_cfg_t *mc_txn_override_cfgs;
+	uint32_t num_txn_override_cfgs;
+	uint32_t i, val;
+
+	/* Get the settings from the platform */
+	assert(plat_mc_settings);
+	mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
+	num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
+
+	/*
+	 * Set the MC_TXN_OVERRIDE registers for write clients.
+	 */
+	if ((tegra_chipid_is_t186()) &&
+	    (!tegra_platform_is_silicon() ||
+	    (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {
+
+		/*
+		 * GPU and NVENC settings for Tegra186 simulation and
+		 * Silicon rev. A01
+		 */
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
+			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
+			val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
+		val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
+			val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
+
+	} else {
+
+		/*
+		 * Settings for Tegra186 silicon rev. A02 and onwards.
+		 */
+		for (i = 0; i < num_txn_override_cfgs; i++) {
+			val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
+			val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+			tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
+				val | mc_txn_override_cfgs[i].cgid_tag);
+		}
+	}
+}
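
For illustration, the per-platform override table consumed by the loop above might look like the following sketch. The field names mirror the usage in the loop (`offset`, `cgid_tag`); the offsets, tag values and type name are made up.

```c
#include <stdint.h>

/* Hypothetical shape of one MC_TXN_OVERRIDE entry, as used by the loop above. */
typedef struct mc_txn_override_cfg_sketch {
	uint32_t offset;	/* MC_TXN_OVERRIDE_CONFIG_* register offset */
	uint32_t cgid_tag;	/* CGID tag value OR'ed into the register */
} mc_txn_override_cfg_sketch_t;

static const mc_txn_override_cfg_sketch_t example_txn_override_cfgs[] = {
	{ .offset = 0x1000U, .cgid_tag = 0x2U },	/* made-up write client #1 */
	{ .offset = 0x1008U, .cgid_tag = 0x2U },	/* made-up write client #2 */
};
```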
+
+/*
+ * Init Memory controller during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+	uint32_t val;
+	const uint32_t *mc_streamid_override_regs;
+	uint32_t num_streamid_override_regs;
+	const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
+	uint32_t num_streamid_sec_cfgs;
+	tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
+	uint32_t i;
+
+	INFO("Tegra Memory Controller (v2)\n");
+
+#if ENABLE_SMMU_DEVICE
+	/* Program the SMMU pagesize */
+	tegra_smmu_init();
+#endif
+	/* Get the settings from the platform */
+	assert(plat_mc_settings);
+	mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
+	num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
+	mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
+	num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs;
+
+	/* Program all the Stream ID overrides */
+	for (i = 0; i < num_streamid_override_regs; i++)
+		tegra_mc_streamid_write_32(mc_streamid_override_regs[i],
+			MC_STREAM_ID_MAX);
+
+	/* Program the security config settings for all Stream IDs */
+	for (i = 0; i < num_streamid_sec_cfgs; i++) {
+		val = mc_streamid_sec_cfgs[i].override_enable << 16 |
+		      mc_streamid_sec_cfgs[i].override_client_inputs << 8 |
+		      mc_streamid_sec_cfgs[i].override_client_ns_flag << 0;
+		tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val);
+	}
+
+	/*
+	 * All requests at boot time, and certain requests during
+	 * normal run time, are physically addressed and must bypass
+	 * the SMMU. The client hub logic implements a hardware bypass
+	 * path around the Translation Buffer Units (TBU). During
+	 * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
+	 * TBU_BYPASS mode) will be used to steer all requests around
+	 * the uninitialized TBUs. During normal operation, this register
+	 * is locked into TBU_BYPASS_SID config, which routes requests
+	 * with special StreamID 0x7f on the bypass path and all others
+	 * through the selected TBU. This is done to disable SMMU Bypass
+	 * mode, as it could be used to circumvent SMMU security checks.
+	 */
+	tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
+			  MC_SMMU_BYPASS_CONFIG_SETTINGS);
+
+	/*
+	 * Re-configure MSS to allow ROC to deal with ordering of the
+	 * Memory Controller traffic. This is needed as the Memory Controller
+	 * boots with MSS having all control, but ROC provides a performance
+	 * boost as compared to MSS.
+	 */
+	tegra_memctrl_reconfig_mss_clients();
+
+	/* Program overrides for MC transactions */
+	tegra_memctrl_set_overrides();
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+	/*
+	 * Re-configure MSS to allow ROC to deal with ordering of the
+	 * Memory Controller traffic. This is needed as the Memory Controller
+	 * resets during System Suspend with MSS having all control, but ROC
+	 * provides a performance boost as compared to MSS.
+	 */
+	tegra_memctrl_reconfig_mss_clients();
+
+	/* Program overrides for MC transactions */
+	tegra_memctrl_set_overrides();
+
+	/* video memory carveout region */
+	if (video_mem_base) {
+		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
+				  (uint32_t)video_mem_base);
+		tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+				  (uint32_t)(video_mem_base >> 32));
+		tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size_mb);
+
+		/*
+		 * MCE propagates the VideoMem configuration values across the
+		 * CCPLEX.
+		 */
+		mce_update_gsc_videomem();
+	}
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	/*
+	 * Setup the Memory controller to allow only secure accesses to
+	 * the TZDRAM carveout
+	 */
+	INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
+	tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+
+	/*
+	 * When TZ encryption is enabled, the TZDRAM aperture must be set up
+	 * before the CPU accesses the TZ carveout; otherwise the CPU will
+	 * fetch non-decrypted data. Save the TZDRAM settings here so that
+	 * the SC7 resume firmware can restore them.
+	 */
+
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO,
+					tegra_mc_read_32(MC_SECURITY_CFG0_0));
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI,
+					tegra_mc_read_32(MC_SECURITY_CFG3_0));
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI,
+					tegra_mc_read_32(MC_SECURITY_CFG1_0));
+
+	/*
+	 * MCE propagates the security configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_tzdram();
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	uint32_t index;
+	uint32_t total_128kb_blocks = size_in_bytes >> 17;
+	uint32_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
+	uint32_t val;
+
+	INFO("Configuring TrustZone SRAM Memory Carveout\n");
+
+	/*
+	 * Reset the access configuration registers to restrict access
+	 * to the TZRAM aperture
+	 */
+	for (index = MC_TZRAM_CLIENT_ACCESS_CFG0;
+	     index < ((uint32_t)MC_TZRAM_CARVEOUT_CFG + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
+	     index += 4U) {
+		tegra_mc_write_32(index, 0);
+	}
+
+	/*
+	 * Set the TZRAM base. TZRAM base must be 4k aligned, at least.
+	 */
+	assert((phys_base & (uint64_t)0xFFF) == 0U);
+	tegra_mc_write_32(MC_TZRAM_BASE_LO, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_TZRAM_BASE_HI,
+		(uint32_t)(phys_base >> 32) & MC_GSC_BASE_HI_MASK);
+
+	/*
+	 * Set the TZRAM size
+	 *
+	 * total size = (number of 128KB blocks) + (number of remaining 4KB
+	 * blocks)
+	 *
+	 */
+	val = (residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
+	      total_128kb_blocks;
+	tegra_mc_write_32(MC_TZRAM_SIZE, val);
+
+	/*
+	 * Lock the configuration settings by disabling the TZ-only lock
+	 * and by locking the configuration against any future changes.
+	 */
+	val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
+	val &= ~MC_GSC_ENABLE_TZ_LOCK_BIT;
+	val |= MC_GSC_LOCK_CFG_SETTINGS_BIT;
+	tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);
+
+	/*
+	 * MCE propagates the security configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_tzram();
+}
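
The size programming above splits the aperture into whole 128 KB blocks plus residual 4 KB blocks. A minimal sketch of that arithmetic follows; MC_GSC_SIZE_RANGE_4KB_SHIFT is given a placeholder value here, not the real field offset.

```c
#include <stdint.h>

#ifndef MC_GSC_SIZE_RANGE_4KB_SHIFT
#define MC_GSC_SIZE_RANGE_4KB_SHIFT	27U	/* placeholder for the real definition */
#endif

/* Encode a size as (4KB-block count << shift) | 128KB-block count. */
static uint32_t gsc_size_encoding(uint32_t size_in_bytes)
{
	uint32_t blocks_128kb = size_in_bytes >> 17;            /* whole 128 KB blocks */
	uint32_t blocks_4kb = (size_in_bytes & 0x1FFFFU) >> 12; /* remaining 4 KB blocks */

	return (blocks_4kb << MC_GSC_SIZE_RANGE_4KB_SHIFT) | blocks_128kb;
}

/* e.g. 328 KB is encoded as 2 x 128 KB blocks plus 18 x 4 KB blocks */
```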
+
+static void tegra_lock_videomem_nonoverlap(uint64_t phys_base,
+					   uint64_t size_in_bytes)
+{
+	uint32_t index;
+	uint64_t total_128kb_blocks = size_in_bytes >> 17;
+	uint64_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
+	uint64_t val;
+
+	/*
+	 * Reset the access configuration registers to restrict access to
+	 * old Videomem aperture
+	 */
+	for (index = MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0;
+	     index < ((uint32_t)MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
+	     index += 4U) {
+		tegra_mc_write_32(index, 0);
+	}
+
+	/*
+	 * Set the base. It must be 4k aligned, at least.
+	 */
+	assert((phys_base & (uint64_t)0xFFF) == 0U);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI,
+		(uint32_t)(phys_base >> 32) & (uint32_t)MC_GSC_BASE_HI_MASK);
+
+	/*
+	 * Set the aperture size
+	 *
+	 * total size = (number of 128KB blocks) + (number of remaining 4KB
+	 * blocks)
+	 *
+	 */
+	val = (uint32_t)((residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
+			 total_128kb_blocks);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, (uint32_t)val);
+
+	/*
+	 * Lock the configuration settings by enabling TZ-only lock and
+	 * locking the configuration against any future changes from NS
+	 * world.
+	 */
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_CFG,
+			  (uint32_t)MC_GSC_ENABLE_TZ_LOCK_BIT);
+
+	/*
+	 * MCE propagates the GSC configuration values across the
+	 * CCPLEX.
+	 */
+}
+
+static void tegra_unlock_videomem_nonoverlap(void)
+{
+	/* Clear the base */
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, 0);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI, 0);
+
+	/* Clear the size */
+	tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, 0);
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+				 unsigned long long non_overlap_area_size)
+{
+	/*
+	 * Map the NS memory first, clean it and then unmap it.
+	 */
+	mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+				non_overlap_area_start, /* VA */
+				non_overlap_area_size, /* size */
+				MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
+
+	zero_normalmem((void *)non_overlap_area_start, non_overlap_area_size);
+	flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+	mmap_remove_dynamic_region(non_overlap_area_start,
+		non_overlap_area_size);
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+	uintptr_t vmem_end_old = video_mem_base + (video_mem_size_mb << 20);
+	uintptr_t vmem_end_new = phys_base + size_in_bytes;
+	unsigned long long non_overlap_area_size;
+
+	/*
+	 * Setup the Memory controller to restrict CPU accesses to the Video
+	 * Memory region
+	 */
+	INFO("Configuring Video Memory Carveout\n");
+
+	/*
+	 * First call: no previous carveout to clean, program the MC directly.
+	 */
+	if (video_mem_base == 0U)
+		goto done;
+
+	/*
+	 * Lock the non-overlapping memory being cleared so that other masters
+	 * do not accidentally write to it. The memory is unlocked once the
+	 * non-overlapping region has been cleared and the new memory
+	 * settings take effect.
+	 */
+	tegra_lock_videomem_nonoverlap(video_mem_base,
+				       video_mem_size_mb << 20);
+
+	/*
+	 * Clear the old regions now being exposed. The following cases
+	 * can occur -
+	 *
+	 * 1. clear whole old region (no overlap with new region)
+	 * 2. clear old sub-region below new base
+	 * 3. clear old sub-region above new end
+	 */
+	INFO("Cleaning previous Video Memory Carveout\n");
+
+	if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
+		tegra_clear_videomem(video_mem_base,
+				     (uint64_t)video_mem_size_mb << 20);
+	} else {
+		if (video_mem_base < phys_base) {
+			non_overlap_area_size = phys_base - video_mem_base;
+			tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+		}
+		if (vmem_end_old > vmem_end_new) {
+			non_overlap_area_size = vmem_end_old - vmem_end_new;
+			tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+		}
+	}
+
+done:
+	/* program the Videomem aperture */
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+	tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+			  (uint32_t)(phys_base >> 32));
+	tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+	/* unlock the previous locked nonoverlapping aperture */
+	tegra_unlock_videomem_nonoverlap();
+
+	/* store new values */
+	video_mem_base = phys_base;
+	video_mem_size_mb = size_in_bytes >> 20;
+
+	/*
+	 * MCE propagates the VideoMem configuration values across the
+	 * CCPLEX.
+	 */
+	mce_update_gsc_videomem();
+}
+
+/*
+ * This feature exists only for v1 of the Tegra Memory Controller.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+	; /* do nothing */
+}
diff --git a/plat/nvidia/tegra/common/drivers/pmc/pmc.c b/plat/nvidia/tegra/common/drivers/pmc/pmc.c
new file mode 100644
index 0000000..d8827e1
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/pmc/pmc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define RESET_ENABLE	0x10U
+
+/* Module IDs used during power ungate procedure */
+static const uint32_t pmc_cpu_powergate_id[4] = {
+	0, /* CPU 0 */
+	9, /* CPU 1 */
+	10, /* CPU 2 */
+	11 /* CPU 3 */
+};
+
+/*******************************************************************************
+ * Power ungate CPU to start the boot process. CPU reset vectors must be
+ * populated before calling this function.
+ ******************************************************************************/
+void tegra_pmc_cpu_on(int32_t cpu)
+{
+	uint32_t val;
+
+	/*
+	 * Check if CPU is already power ungated
+	 */
+	val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+	if ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U) {
+		/*
+		 * The PMC deasserts the START bit when it starts the power
+		 * ungate process. Loop till no power toggle is in progress.
+		 */
+		do {
+			val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+		} while ((val & PMC_TOGGLE_START) != 0U);
+
+		/*
+		 * Start the power ungate procedure
+		 */
+		val = pmc_cpu_powergate_id[cpu] | PMC_TOGGLE_START;
+		tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val);
+
+		/*
+		 * The PMC deasserts the START bit once the power ungate
+		 * process has started. Loop till the START bit is deasserted.
+		 */
+		do {
+			val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+		} while ((val & (1U << 8)) != 0U);
+
+		/* loop till the CPU is power ungated */
+		do {
+			val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+		} while ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U);
+	}
+}
+
+/*******************************************************************************
+ * Setup CPU vectors for resume from deep sleep
+ ******************************************************************************/
+void tegra_pmc_cpu_setup(uint64_t reset_addr)
+{
+	uint32_t val;
+
+	tegra_pmc_write_32(PMC_SECURE_SCRATCH34,
+			   ((uint32_t)reset_addr & 0xFFFFFFFFU) | 1U);
+	val = (uint32_t)(reset_addr >> 32U);
+	tegra_pmc_write_32(PMC_SECURE_SCRATCH35, val & 0x7FFU);
+}
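
For clarity, here is a small illustration of how the reset address is split across the two scratch registers above. The meaning of the low bit OR'ed into SCRATCH34 is not documented here, so it is only carried through as the code does; the address is made up.

```c
#include <inttypes.h>
#include <stdio.h>

/* Illustration of the reset-address split performed above. */
int main(void)
{
	uint64_t reset_addr = 0x140000000ULL;                             /* made-up address */
	uint32_t scratch34 = ((uint32_t)reset_addr & 0xFFFFFFFFU) | 1U;   /* low word, bit 0 set */
	uint32_t scratch35 = (uint32_t)(reset_addr >> 32) & 0x7FFU;       /* bits 32..42 */

	printf("SCRATCH34=0x%08" PRIx32 " SCRATCH35=0x%08" PRIx32 "\n",
	       scratch34, scratch35);
	return 0;
}
```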
+
+/*******************************************************************************
+ * Lock CPU vectors to restrict further writes
+ ******************************************************************************/
+void tegra_pmc_lock_cpu_vectors(void)
+{
+	uint32_t val;
+
+	/* lock PMC_SECURE_SCRATCH22 */
+	val = tegra_pmc_read_32(PMC_SECURE_DISABLE2);
+	val |= PMC_SECURE_DISABLE2_WRITE22_ON;
+	tegra_pmc_write_32(PMC_SECURE_DISABLE2, val);
+
+	/* lock PMC_SECURE_SCRATCH34/35 */
+	val = tegra_pmc_read_32(PMC_SECURE_DISABLE3);
+	val |= (PMC_SECURE_DISABLE3_WRITE34_ON |
+		PMC_SECURE_DISABLE3_WRITE35_ON);
+	tegra_pmc_write_32(PMC_SECURE_DISABLE3, val);
+}
+
+/*******************************************************************************
+ * Restart the system
+ ******************************************************************************/
+__dead2 void tegra_pmc_system_reset(void)
+{
+	uint32_t reg;
+
+	reg = tegra_pmc_read_32(PMC_CONFIG);
+	reg |= RESET_ENABLE;		/* restart */
+	tegra_pmc_write_32(PMC_CONFIG, reg);
+	wfi();
+
+	ERROR("Tegra System Reset: operation not handled.\n");
+	panic();
+}
diff --git a/plat/nvidia/tegra/common/drivers/smmu/smmu.c b/plat/nvidia/tegra/common/drivers/smmu/smmu.c
new file mode 100644
index 0000000..fa3f000
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/smmu/smmu.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <smmu.h>
+#include <string.h>
+#include <tegra_private.h>
+
+/* SMMU IDs currently supported by the driver */
+enum {
+	TEGRA_SMMU0,
+	TEGRA_SMMU1,
+	TEGRA_SMMU2
+};
+
+static uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off)
+{
+#if defined(TEGRA_SMMU0_BASE)
+	if (smmu_id == TEGRA_SMMU0)
+		return mmio_read_32(TEGRA_SMMU0_BASE + off);
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+	if (smmu_id == TEGRA_SMMU1)
+		return mmio_read_32(TEGRA_SMMU1_BASE + off);
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+	if (smmu_id == TEGRA_SMMU2)
+		return mmio_read_32(TEGRA_SMMU2_BASE + off);
+#endif
+
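+	/* smmu_id not handled above (base not defined for this SoC): read back as 0 */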
+	return 0;
+}
+
+static void tegra_smmu_write_32(uint32_t smmu_id,
+			uint32_t off, uint32_t val)
+{
+#if defined(TEGRA_SMMU0_BASE)
+	if (smmu_id == TEGRA_SMMU0)
+		mmio_write_32(TEGRA_SMMU0_BASE + off, val);
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+	if (smmu_id == TEGRA_SMMU1)
+		mmio_write_32(TEGRA_SMMU1_BASE + off, val);
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+	if (smmu_id == TEGRA_SMMU2)
+		mmio_write_32(TEGRA_SMMU2_BASE + off, val);
+#endif
+}
+
+/*
+ * Save SMMU settings before "System Suspend" to TZDRAM
+ */
+void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
+{
+	uint32_t i, num_entries = 0;
+	smmu_regs_t *smmu_ctx_regs;
+	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	uint64_t tzdram_base = params_from_bl2->tzdram_base;
+	uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size;
+	uint32_t reg_id1, pgshift, cb_size;
+
+	/* sanity check SMMU settings */
+	reg_id1 = mmio_read_32((TEGRA_SMMU0_BASE + SMMU_GNSR0_IDR1));
+	pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12;
+	cb_size = (2 << pgshift) *
+		  (1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1));
+
+	assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE)));
+	assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end));
+
+	/* get SMMU context table */
+	smmu_ctx_regs = plat_get_smmu_ctx();
+	assert(smmu_ctx_regs);
+
+	/*
+	 * smmu_ctx_regs[0].val contains the size of the context table minus
+	 * the last entry. Sanity check the table size before we start with
+	 * the context save operation.
+	 */
+	while (smmu_ctx_regs[num_entries].val != 0xFFFFFFFFU) {
+		num_entries++;
+	}
+
+	/* panic if the sizes do not match */
+	if (num_entries != smmu_ctx_regs[0].val)
+		panic();
+
+	/* save SMMU register values */
+	for (i = 1; i < num_entries; i++)
+		smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg);
+
+	/* increment by 1 to take care of the last entry */
+	num_entries++;
+
+	/* Save SMMU config settings */
+	memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs,
+		 (sizeof(smmu_regs_t) * num_entries));
+
+	/* save the SMMU table address */
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
+		(uint32_t)smmu_ctx_addr);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI,
+		(uint32_t)(smmu_ctx_addr >> 32));
+}
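
To clarify the table walk above, here is a hypothetical shape for the structure returned by plat_get_smmu_ctx(). The field names follow the usage above; the register offsets, values and type name are made up for illustration.

```c
#include <stdint.h>

typedef struct smmu_regs_sketch {
	uint32_t reg;	/* MMIO register address */
	uint32_t val;	/* saved value / metadata */
} smmu_regs_sketch_t;

/*
 * Entry 0 holds the number of entries before the terminator (counting
 * itself), and the list ends with val == 0xFFFFFFFF, matching the
 * counting and sanity check performed in tegra_smmu_save_context().
 */
static smmu_regs_sketch_t example_smmu_ctx[] = {
	{ .reg = 0x0,        .val = 3 },            /* table size w/o terminator */
	{ .reg = 0x12000000, .val = 0 },            /* made-up SMMU register #1 */
	{ .reg = 0x12000004, .val = 0 },            /* made-up SMMU register #2 */
	{ .reg = 0xFFFFFFFF, .val = 0xFFFFFFFF },   /* terminator */
};
```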
+
+#define SMMU_NUM_CONTEXTS		64
+#define SMMU_CONTEXT_BANK_MAX_IDX	64
+
+/*
+ * Init SMMU during boot or "System Suspend" exit
+ */
+void tegra_smmu_init(void)
+{
+	uint32_t val, cb_idx, smmu_id, ctx_base;
+
+	for (smmu_id = 0; smmu_id < NUM_SMMU_DEVICES; smmu_id++) {
+		/* Program the SMMU pagesize and reset CACHE_LOCK bit */
+		val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+		val |= SMMU_GSR0_PGSIZE_64K;
+		val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+		tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+
+		/* reset CACHE LOCK bit for NS Aux. Config. Register */
+		val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+		val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+		tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+		/* disable TCU prefetch for all contexts */
+		ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS)
+				+ SMMU_CBn_ACTLR;
+		for (cb_idx = 0; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
+			val = tegra_smmu_read_32(smmu_id,
+				ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
+			val &= ~SMMU_CBn_ACTLR_CPRE_BIT;
+			tegra_smmu_write_32(smmu_id, ctx_base +
+				(SMMU_GSR0_PGSIZE_64K * cb_idx), val);
+		}
+
+		/* set CACHE LOCK bit for NS Aux. Config. Register */
+		val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+		val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+		tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+		/* set CACHE LOCK bit for S Aux. Config. Register */
+		val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+		val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+		tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+	}
+}
diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c
new file mode 100644
index 0000000..d5d3d53
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <debug.h>
+#include <denver.h>
+#include <errno.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+extern void zeromem16(void *mem, unsigned int length);
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+extern unsigned long __TEXT_START__;
+extern unsigned long __TEXT_END__;
+extern unsigned long __RW_START__;
+extern unsigned long __RW_END__;
+extern unsigned long __RODATA_START__;
+extern unsigned long __RODATA_END__;
+extern unsigned long __BL31_END__;
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t tegra_console_base;
+
+/*
+ * The following constants identify the extents of the code, RO data region and
+ * the limit of the BL3-1 image.  These addresses are used by the MMU setup code
+ * and therefore they must be page-aligned.  It is the responsibility of the
+ * linker script to ensure that the __RW_START__, __RW_END__, __RODATA_START__,
+ * __RODATA_END__ & __BL31_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RW_START (unsigned long)(&__RW_START__)
+#define BL31_RW_END (unsigned long)(&__RW_END__)
+#define BL31_RODATA_BASE (unsigned long)(&__RODATA_START__)
+#define BL31_RODATA_END (unsigned long)(&__RODATA_END__)
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+static entry_point_info_t bl33_image_ep_info, bl32_image_ep_info;
+static plat_params_from_bl2_t plat_bl31_params_from_bl2 = {
+	.tzdram_size = (uint64_t)TZDRAM_SIZE
+};
+
+/*******************************************************************************
+ * This variable holds the non-secure image entry address
+ ******************************************************************************/
+extern uint64_t ns_image_entrypoint;
+
+/*******************************************************************************
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that will be overridden by a SoC.
+ ******************************************************************************/
+#pragma weak plat_early_platform_setup
+#pragma weak plat_get_bl31_params
+#pragma weak plat_get_bl31_plat_params
+
+void plat_early_platform_setup(void)
+{
+	; /* do nothing */
+}
+
+bl31_params_t *plat_get_bl31_params(void)
+{
+	return NULL;
+}
+
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+	return NULL;
+}
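
A hypothetical example of a SoC port overriding one of the weak hooks above, e.g. for a RESET_TO_BL31 configuration that hard-codes its TZDRAM layout. It assumes the same headers as the file above for plat_params_from_bl2_t; every value is illustrative only, not a real Tegra configuration.

```c
/* Hypothetical SoC-side override of the weak plat_get_bl31_plat_params() hook. */
static plat_params_from_bl2_t soc_plat_params = {
	.tzdram_base = 0xF0000000ULL,	/* made-up TZDRAM base */
	.tzdram_size = 0x00400000ULL,	/* made-up TZDRAM size (4 MB) */
	.uart_id     = 1,		/* made-up UART ID */
};

plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
{
	return &soc_plat_params;
}
```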
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	if (type == NON_SECURE)
+		return &bl33_image_ep_info;
+
+	/* return BL32 entry point info if it is valid */
+	if (type == SECURE && bl32_image_ep_info.pc)
+		return &bl32_image_ep_info;
+
+	return NULL;
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'plat_params_from_bl2_t' structure. The BL2 image
+ * passes this platform specific information.
+ ******************************************************************************/
+plat_params_from_bl2_t *bl31_get_plat_params(void)
+{
+	return &plat_bl31_params_from_bl2;
+}
+
+/*******************************************************************************
+ * Perform any BL31 specific platform actions. Populate the BL33 and BL32 image
+ * info.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+{
+	plat_params_from_bl2_t *plat_params =
+		(plat_params_from_bl2_t *)plat_params_from_bl2;
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+#endif
+	image_info_t bl32_img_info = { {0} };
+	uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
+
+	/*
+	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
+	 * there's no argument to relay from a previous bootloader. Platforms
+	 * might use custom ways to get arguments, so provide handlers which
+	 * they can override.
+	 */
+	if (from_bl2 == NULL)
+		from_bl2 = plat_get_bl31_params();
+	if (plat_params == NULL)
+		plat_params = plat_get_bl31_plat_params();
+
+	/*
+	 * Copy BL3-3, BL3-2 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	assert(from_bl2);
+	assert(from_bl2->bl33_ep_info);
+	bl33_image_ep_info = *from_bl2->bl33_ep_info;
+
+	if (from_bl2->bl32_ep_info)
+		bl32_image_ep_info = *from_bl2->bl32_ep_info;
+
+	/*
+	 * Parse platform specific parameters - TZDRAM aperture base and size
+	 */
+	assert(plat_params);
+	plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
+	plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
+	plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
+
+	/*
+	 * It is very important that we run either from TZDRAM or TZSRAM base.
+	 * Add an explicit check here.
+	 */
+	if ((plat_bl31_params_from_bl2.tzdram_base != BL31_BASE) &&
+	    (TEGRA_TZRAM_BASE != BL31_BASE))
+		panic();
+
+	/*
+	 * Get the base address of the UART controller to be used for the
+	 * console
+	 */
+	tegra_console_base = plat_get_console_from_id(plat_params->uart_id);
+
+	if (tegra_console_base != (uint64_t)0) {
+		/*
+		 * Configure the UART port to be used as the console
+		 */
+		console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+			TEGRA_CONSOLE_BAUDRATE);
+	}
+
+	/*
+	 * Initialize delay timer
+	 */
+	tegra_delay_timer_init();
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access.
+	 */
+	tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
+			plat_bl31_params_from_bl2.tzdram_size);
+
+	/*
+	 * The previous bootloader might not have placed the BL32 image
+	 * inside the TZDRAM. We check the BL32 image info to find out
+	 * the base/PC values and relocate the image if necessary.
+	 */
+	if (from_bl2->bl32_image_info) {
+
+		bl32_img_info = *from_bl2->bl32_image_info;
+
+		/* Relocate BL32 if it resides outside of the TZDRAM */
+		tzdram_start = plat_bl31_params_from_bl2.tzdram_base;
+		tzdram_end = plat_bl31_params_from_bl2.tzdram_base +
+				plat_bl31_params_from_bl2.tzdram_size;
+		bl32_start = bl32_img_info.image_base;
+		bl32_end = bl32_img_info.image_base + bl32_img_info.image_size;
+
+		assert(tzdram_end > tzdram_start);
+		assert(bl32_end > bl32_start);
+		assert(bl32_image_ep_info.pc > tzdram_start);
+		assert(bl32_image_ep_info.pc < tzdram_end);
+
+		/* relocate BL32 */
+		if (bl32_start >= tzdram_end || bl32_end <= tzdram_start) {
+
+			INFO("Relocate BL32 to TZDRAM\n");
+
+			memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
+				 (void *)(uintptr_t)bl32_start,
+				 bl32_img_info.image_size);
+
+			/* clean up non-secure intermediate buffer */
+			zeromem16((void *)(uintptr_t)bl32_start,
+				bl32_img_info.image_size);
+		}
+	}
+
+	/* Early platform setup for Tegra SoCs */
+	plat_early_platform_setup();
+
+	INFO("BL3-1: Boot CPU: %s Processor [%lx]\n", (impl == DENVER_IMPL) ?
+		"Denver" : "ARM", read_mpidr());
+}
+
+/*******************************************************************************
+ * Initialize the GIC and configure the SCR.
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	uint32_t tmp_reg;
+
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_gic_setup();
+
+	/*
+	 * Setup secondary CPU POR infrastructure.
+	 */
+	plat_secondary_setup();
+
+	/*
+	 * Initial Memory Controller configuration.
+	 */
+	tegra_memctrl_setup();
+
+	/*
+	 * Set up the TZRAM memory aperture to allow only secure world
+	 * access
+	 */
+	tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
+
+	/* Set the next EL to be AArch64 */
+	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT;
+	write_scr(tmp_reg);
+
+	INFO("BL3-1: Tegra platform setup complete\n");
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 platform runtime setup prior to BL3-1 cold boot exit
+ ******************************************************************************/
+void bl31_plat_runtime_setup(void)
+{
+	/*
+	 * During boot, USB3 and flash media (SDMMC/SATA) devices need
+	 * access to IRAM. Because these clients connect to the MC and
+	 * do not have a direct path to the IRAM, the MC implements AHB
+	 * redirection during boot to allow path to IRAM. In this mode
+	 * redirection during boot to provide a path to the IRAM. In this
+	 * mode, accesses to a programmed memory address aperture are directed
+	 * disabled before we jump to the non-secure world.
+	 */
+	tegra_memctrl_disable_ahb_redirection();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	unsigned long rw_start = BL31_RW_START;
+	unsigned long rw_size = BL31_RW_END - BL31_RW_START;
+	unsigned long rodata_start = BL31_RODATA_BASE;
+	unsigned long rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
+	unsigned long code_base = (unsigned long)(&__TEXT_START__);
+	unsigned long code_size = (unsigned long)(&__TEXT_END__) - code_base;
+	const mmap_region_t *plat_mmio_map = NULL;
+#if USE_COHERENT_MEM
+	unsigned long coh_start, coh_size;
+#endif
+	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+
+	/* add memory regions */
+	mmap_add_region(rw_start, rw_start,
+			rw_size,
+			MT_MEMORY | MT_RW | MT_SECURE);
+	mmap_add_region(rodata_start, rodata_start,
+			rodata_size,
+			MT_RO_DATA | MT_SECURE);
+	mmap_add_region(code_base, code_base,
+			code_size,
+			MT_CODE | MT_SECURE);
+
+	/* map TZDRAM used by BL31 as coherent memory */
+	if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
+		mmap_add_region(params_from_bl2->tzdram_base,
+				params_from_bl2->tzdram_base,
+				BL31_SIZE,
+				MT_DEVICE | MT_RW | MT_SECURE);
+	}
+
+#if USE_COHERENT_MEM
+	coh_start = total_base + (BL_COHERENT_RAM_BASE - BL31_RO_BASE);
+	coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;
+
+	mmap_add_region(coh_start, coh_start,
+			coh_size,
+			MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+	/* map the on-chip free-running microsecond timer */
+	mmap_add_region(page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
+			page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
+			(uint64_t)TEGRA_TMRUS_SIZE,
+			MT_DEVICE | MT_RO | MT_SECURE);
+
+	/* add MMIO space */
+	plat_mmio_map = plat_get_mmio_map();
+	if (plat_mmio_map)
+		mmap_add(plat_mmio_map);
+	else
+		WARN("MMIO map not available\n");
+
+	/* set up translation tables */
+	init_xlat_tables();
+
+	/* enable the MMU */
+	enable_mmu_el3(0);
+
+	INFO("BL3-1: Tegra: MMU enabled\n");
+}
+
+/*******************************************************************************
+ * Check if the given NS DRAM range is valid
+ ******************************************************************************/
+int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes)
+{
+	uint64_t end = base + size_in_bytes;
+
+	/*
+	 * Check if the NS DRAM address is valid
+	 */
+	if ((base < TEGRA_DRAM_BASE) || (end > TEGRA_DRAM_END)) {
+		ERROR("NS address is out-of-bounds!\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * TZDRAM aperture contains the BL31 and BL32 images, so we need
+	 * to check if the NS DRAM range overlaps the TZDRAM aperture.
+	 */
+	if ((base < TZDRAM_END) && (end > tegra_bl31_phys_base)) {
+		ERROR("NS address overlaps TZDRAM!\n");
+		return -ENOTSUP;
+	}
+
+	/* valid NS address */
+	return 0;
+}
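The overlap test above treats the NS range and the TZDRAM aperture as half-open intervals. A small worked illustration, using hypothetical addresses (and assuming they fall inside the valid Tegra DRAM window):

/*
 * Illustration only: assume TZDRAM occupies [0xF0000000, 0xF0400000).
 *
 *   base = 0xEFF00000, size = 0x200000  -> end = 0xF0100000
 *       base < TZDRAM_END and end > tegra_bl31_phys_base -> -ENOTSUP
 *
 *   base = 0xE0000000, size = 0xFF00000 -> end = 0xEFF00000
 *       end <= tegra_bl31_phys_base                      -> accepted (0)
 */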
diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk
new file mode 100644
index 0000000..8f6c7b8
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_common.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		:=	-Iplat/nvidia/tegra/include/drivers \
+				-Iplat/nvidia/tegra/include \
+				-Iplat/nvidia/tegra/include/${TARGET_SOC}
+
+include lib/xlat_tables_v2/xlat_tables.mk
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}
+
+COMMON_DIR		:=	plat/nvidia/tegra/common
+
+BL31_SOURCES		+=	drivers/arm/gic/gic_v2.c			\
+				drivers/console/aarch64/console.S		\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/ti/uart/aarch64/16550_console.S		\
+				${COMMON_DIR}/aarch64/tegra_helpers.S		\
+				${COMMON_DIR}/drivers/pmc/pmc.c			\
+				${COMMON_DIR}/tegra_bl31_setup.c		\
+				${COMMON_DIR}/tegra_delay_timer.c		\
+				${COMMON_DIR}/tegra_fiq_glue.c			\
+				${COMMON_DIR}/tegra_gic.c			\
+				${COMMON_DIR}/tegra_platform.c			\
+				${COMMON_DIR}/tegra_pm.c			\
+				${COMMON_DIR}/tegra_sip_calls.c			\
+				${COMMON_DIR}/tegra_topology.c
diff --git a/plat/nvidia/tegra/common/tegra_delay_timer.c b/plat/nvidia/tegra/common/tegra_delay_timer.c
new file mode 100644
index 0000000..3bd2b0e
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_delay_timer.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <delay_timer.h>
+#include <mmio.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+static uint32_t tegra_timerus_get_value(void)
+{
+	return mmio_read_32(TEGRA_TMRUS_BASE);
+}
+
+/*
+ * Initialise the on-chip free-running microsecond counter as the delay
+ * timer.
+ */
+void tegra_delay_timer_init(void)
+{
+	static const timer_ops_t tegra_timer_ops = {
+		.get_timer_value	= tegra_timerus_get_value,
+		.clk_mult		= 1,
+		.clk_div		= 1,
+	};
+
+	timer_init(&tegra_timer_ops);
+}
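With clk_mult and clk_div both set to 1, the 1 MHz TMRUS counter is consumed as-is by the generic delay timer driver. A minimal usage sketch, assuming the udelay()/mdelay() helpers exposed by the common delay_timer driver (they are not part of this patch):

#include <delay_timer.h>

static void example_udelay_usage(void)
{
	/* valid once tegra_delay_timer_init() has registered the timer ops */
	udelay(10U);	/* busy-wait roughly 10 microseconds */
	mdelay(1U);	/* busy-wait roughly 1 millisecond */
}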
diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c
new file mode 100644
index 0000000..2f43958
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <gic_v2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+static DEFINE_BAKERY_LOCK(tegra_fiq_lock);
+
+/*******************************************************************************
+ * Static variables
+ ******************************************************************************/
+static uint64_t ns_fiq_handler_addr;
+static uint32_t fiq_handler_active;
+static pcpu_fiq_state_t fiq_state[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Handler for FIQ interrupts
+ ******************************************************************************/
+static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
+					  uint32_t flags,
+					  void *handle,
+					  void *cookie)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	el3_state_t *el3state_ctx = get_el3state_ctx(ctx);
+	uint32_t cpu = plat_my_core_pos();
+	uint32_t irq;
+
+	bakery_lock_get(&tegra_fiq_lock);
+
+	/*
+	 * The FIQ was generated when the execution was in the non-secure
+	 * world. Save the context registers to start with.
+	 */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/*
+	 * Save elr_el3 and spsr_el3 from the saved context, and overwrite
+	 * the context with the NS fiq_handler_addr and SPSR value.
+	 */
+	fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
+	fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
+
+	/*
+	 * Set the new ELR to continue execution in the NS world using the
+	 * FIQ handler registered earlier.
+	 */
+	assert(ns_fiq_handler_addr);
+	write_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3), (ns_fiq_handler_addr));
+
+	/*
+	 * Mark this interrupt as complete to avoid a FIQ storm.
+	 */
+	irq = plat_ic_acknowledge_interrupt();
+	if (irq < 1022U) {
+		plat_ic_end_of_interrupt(irq);
+	}
+
+	bakery_lock_release(&tegra_fiq_lock);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Setup handler for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_handler_setup(void)
+{
+	uint32_t flags;
+	int32_t rc;
+
+	/* return if already registered */
+	if (fiq_handler_active == 0U) {
+		/*
+		 * Register an interrupt handler for FIQ interrupts generated for
+		 * NS interrupt sources
+		 */
+		flags = 0U;
+		set_interrupt_rm_flag((flags), (NON_SECURE));
+		rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+					tegra_fiq_interrupt_handler,
+					flags);
+		if (rc != 0) {
+			panic();
+		}
+
+		/* handler is now active */
+		fiq_handler_active = 1;
+	}
+}
+
+/*******************************************************************************
+ * Validate and store NS world's entrypoint for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint)
+{
+	ns_fiq_handler_addr = entrypoint;
+}
+
+/*******************************************************************************
+ * Handler to return the NS EL1/EL0 CPU context
+ ******************************************************************************/
+int32_t tegra_fiq_get_intr_context(void)
+{
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
+	const el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
+	uint32_t cpu = plat_my_core_pos();
+	uint64_t val;
+
+	/*
+	 * We store the ELR_EL3, SPSR_EL3, SP_EL0 and SP_EL1 registers so
+	 * that el3_exit() sends these values back to the NS world.
+	 */
+	write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X0), (fiq_state[cpu].elr_el3));
+	write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X1), (fiq_state[cpu].spsr_el3));
+
+	val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
+	write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
+
+	val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
+	write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
+
+	return 0;
+}
diff --git a/plat/nvidia/tegra/common/tegra_gic.c b/plat/nvidia/tegra/common/tegra_gic.c
new file mode 100644
index 0000000..3ace554
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_gic.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <gic_v2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <stdint.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/* Value used to initialize Non-Secure IRQ priorities four at a time */
+#define GICD_IPRIORITYR_DEF_VAL \
+	(GIC_HIGHEST_NS_PRIORITY | \
+	(GIC_HIGHEST_NS_PRIORITY << 8) | \
+	(GIC_HIGHEST_NS_PRIORITY << 16) | \
+	(GIC_HIGHEST_NS_PRIORITY << 24))
+
+static const irq_sec_cfg_t *g_irq_sec_ptr;
+static uint32_t g_num_irqs;
+
+/*******************************************************************************
+ * Place the cpu interface in a state where it can never make a cpu exit wfi
+ * as a result of an asserted interrupt. This is critical for powering down a cpu.
+ ******************************************************************************/
+void tegra_gic_cpuif_deactivate(void)
+{
+	uint32_t val;
+
+	/* Disable secure, non-secure interrupts and disable their bypass */
+	val = gicc_read_ctlr(TEGRA_GICC_BASE);
+	val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
+	val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
+	gicc_write_ctlr(TEGRA_GICC_BASE, val);
+}
+
+/*******************************************************************************
+ * Enable secure interrupts and set the priority mask register to allow all
+ * interrupts to trickle in.
+ ******************************************************************************/
+static void tegra_gic_cpuif_setup(uint32_t gicc_base)
+{
+	uint32_t val;
+
+	val = ENABLE_GRP0 | ENABLE_GRP1 | FIQ_EN | FIQ_BYP_DIS_GRP0;
+	val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+
+	gicc_write_ctlr(gicc_base, val);
+	gicc_write_pmr(gicc_base, GIC_PRI_MASK);
+}
+
+/*******************************************************************************
+ * Per cpu gic distributor setup which will be done by all cpus after a cold
+ * boot/hotplug. This marks out the secure interrupts & enables them.
+ ******************************************************************************/
+static void tegra_gic_pcpu_distif_setup(uint32_t gicd_base)
+{
+	uint32_t index, sec_ppi_sgi_mask = 0;
+
+	assert(gicd_base != 0U);
+
+	/* Setup PPI priorities doing four at a time */
+	for (index = 0U; index < 32U; index += 4U) {
+		gicd_write_ipriorityr(gicd_base, index,
+				GICD_IPRIORITYR_DEF_VAL);
+	}
+
+	/*
+	 * Invert the bitmask to create a mask for non-secure PPIs and
+	 * SGIs. Program the GICD_IGROUPR0 with this bit mask. This write will
+	 * update the GICR_IGROUPR0 as well in case we are running on a GICv3
+	 * system. This is critical if GICD_CTLR.ARE_NS=1.
+	 */
+	gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
+}
+
+/*******************************************************************************
+ * Global gic distributor setup which will be done by the primary cpu after a
+ * cold boot. It marks out the non secure SPIs, PPIs & SGIs and enables them.
+ * It then enables the secure GIC distributor interface.
+ ******************************************************************************/
+static void tegra_gic_distif_setup(uint32_t gicd_base)
+{
+	uint32_t index, num_ints, irq_num;
+	uint8_t target_cpus;
+	uint32_t val;
+
+	/*
+	 * Mark out non-secure interrupts. Calculate number of
+	 * IGROUPR registers to consider. Will be equal to the
+	 * number of IT_LINES
+	 */
+	num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
+	num_ints = (num_ints + 1U) << 5;
+	for (index = MIN_SPI_ID; index < num_ints; index += 32U) {
+		gicd_write_igroupr(gicd_base, index, 0xFFFFFFFFU);
+	}
+
+	/* Setup SPI priorities doing four at a time */
+	for (index = MIN_SPI_ID; index < num_ints; index += 4U) {
+		gicd_write_ipriorityr(gicd_base, index,
+				GICD_IPRIORITYR_DEF_VAL);
+	}
+
+	/* Configure SPI secure interrupts now */
+	if (g_irq_sec_ptr != NULL) {
+
+		for (index = 0U; index < g_num_irqs; index++) {
+			irq_num = g_irq_sec_ptr[index].irq;
+			target_cpus = (uint8_t)g_irq_sec_ptr[index].target_cpus;
+
+			if (irq_num >= MIN_SPI_ID) {
+
+				/* Configure as a secure interrupt */
+				gicd_clr_igroupr(gicd_base, irq_num);
+
+				/* Configure SPI priority */
+				mmio_write_8((uint64_t)gicd_base +
+					(uint64_t)GICD_IPRIORITYR +
+					(uint64_t)irq_num,
+					GIC_HIGHEST_SEC_PRIORITY &
+					GIC_PRI_MASK);
+
+				/* Configure as level triggered */
+				val = gicd_read_icfgr(gicd_base, irq_num);
+				val |= (3U << ((irq_num & 0xFU) << 1U));
+				gicd_write_icfgr(gicd_base, irq_num, val);
+
+				/* Route SPI to the target CPUs */
+				gicd_set_itargetsr(gicd_base, irq_num,
+					target_cpus);
+
+				/* Enable this interrupt */
+				gicd_set_isenabler(gicd_base, irq_num);
+			}
+		}
+	}
+
+	/*
+	 * Configure the SGIs and PPIs. This is done in a separate function
+	 * because each CPU is responsible for initializing its own private
+	 * interrupts.
+	 */
+	tegra_gic_pcpu_distif_setup(gicd_base);
+
+	/* enable distributor */
+	gicd_write_ctlr(gicd_base, ENABLE_GRP0 | ENABLE_GRP1);
+}
+
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, uint32_t num_irqs)
+{
+	g_irq_sec_ptr = irq_sec_ptr;
+	g_num_irqs = num_irqs;
+
+	tegra_gic_cpuif_setup(TEGRA_GICC_BASE);
+	tegra_gic_distif_setup(TEGRA_GICD_BASE);
+}
+
+/*******************************************************************************
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. This function provides a common implementation of
+ * plat_interrupt_type_to_line() in an ARM GIC environment for optional re-use
+ * across platforms. It lets the interrupt management framework determine,
+ * for a type of interrupt and security state, which line should be used in the
+ * SCR_EL3 to control its routing to EL3. The interrupt line is represented as
+ * the bit position of the IRQ or FIQ bit in the SCR_EL3.
+ ******************************************************************************/
+static uint32_t tegra_gic_interrupt_type_to_line(uint32_t type,
+				uint32_t security_state)
+{
+	assert((type == INTR_TYPE_S_EL1) ||
+	       (type == INTR_TYPE_EL3) ||
+	       (type == INTR_TYPE_NS));
+
+	assert(sec_state_is_valid(security_state));
+
+	/*
+	 * We ignore the security state parameter under the assumption that
+	 * both normal and secure worlds are using ARM GICv2. This parameter
+	 * will be used when the secure world starts using GICv3.
+	 */
+#if ARM_GIC_ARCH == 2
+	return gicv2_interrupt_type_to_line(TEGRA_GICC_BASE, type);
+#else
+#error "Invalid ARM GIC architecture version specified for platform port"
+#endif /* ARM_GIC_ARCH */
+}
+
+#if ARM_GIC_ARCH == 2
+/*******************************************************************************
+ * This function returns the type of the highest priority pending interrupt at
+ * the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no
+ * interrupt pending.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_pending_interrupt_type(void)
+{
+	uint32_t id;
+	uint32_t index;
+	uint32_t ret = INTR_TYPE_NS;
+
+	id = gicc_read_hppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+
+	/* get the interrupt type */
+	if (id < 1022U) {
+		for (index = 0U; index < g_num_irqs; index++) {
+			if (id == g_irq_sec_ptr[index].irq) {
+				ret = g_irq_sec_ptr[index].type;
+				break;
+			}
+		}
+	} else {
+		if (id == GIC_SPURIOUS_INTERRUPT) {
+			ret = INTR_TYPE_INVAL;
+		}
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * This function returns the id of the highest priority pending interrupt at
+ * the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no
+ * interrupt pending.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_pending_interrupt_id(void)
+{
+	uint32_t id, ret;
+
+	id = gicc_read_hppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+
+	if (id < 1022U) {
+		ret = id;
+	} else if (id == 1023U) {
+		ret = 0xFFFFFFFFU; /* INTR_ID_UNAVAILABLE */
+	} else {
+		/*
+		 * Find out which non-secure interrupt it is under the assumption that
+		 * the GICC_CTLR.AckCtl bit is 0.
+		 */
+		ret = gicc_read_ahppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * This function reads the GIC cpu interface Interrupt Acknowledge register
+ * to start handling the pending interrupt. It returns the contents of the IAR.
+ ******************************************************************************/
+static uint32_t tegra_gic_acknowledge_interrupt(void)
+{
+	return gicc_read_IAR(TEGRA_GICC_BASE);
+}
+
+/*******************************************************************************
+ * This function writes the GIC cpu interface End Of Interrupt register with
+ * the passed value to finish handling the active interrupt.
+ ******************************************************************************/
+static void tegra_gic_end_of_interrupt(uint32_t id)
+{
+	gicc_write_EOIR(TEGRA_GICC_BASE, id);
+}
+
+/*******************************************************************************
+ * This function returns the type of the interrupt id depending upon the group
+ * this interrupt has been configured under by the interrupt controller, i.e.
+ * group0 or group1.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_interrupt_type(uint32_t id)
+{
+	uint32_t group;
+	uint32_t index;
+	uint32_t ret = INTR_TYPE_NS;
+
+	group = gicd_get_igroupr(TEGRA_GICD_BASE, id);
+
+	/* get the interrupt type */
+	if (group == GRP0) {
+		for (index = 0U; index < g_num_irqs; index++) {
+			if (id == g_irq_sec_ptr[index].irq) {
+				ret = g_irq_sec_ptr[index].type;
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+#else
+#error "Invalid ARM GIC architecture version specified for platform port"
+#endif /* ARM_GIC_ARCH */
+
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+	return tegra_gic_get_pending_interrupt_id();
+}
+
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+	return tegra_gic_get_pending_interrupt_type();
+}
+
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+	return tegra_gic_acknowledge_interrupt();
+}
+
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+	return tegra_gic_get_interrupt_type(id);
+}
+
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+	tegra_gic_end_of_interrupt(id);
+}
+
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+				uint32_t security_state)
+{
+	return tegra_gic_interrupt_type_to_line(type, security_state);
+}
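A hedged sketch of how a SoC port might describe its secure SPIs and hand them to tegra_gic_setup(); the interrupt number and CPU mask below are hypothetical, ARRAY_SIZE is assumed to come from the common utils header, and plat_gic_setup() is assumed to take no arguments, as its callers above suggest:

/* Illustration only: one hypothetical watchdog FIQ routed to all CPUs */
static const irq_sec_cfg_t soc_sec_irqs[] = {
	{ .irq = 32U, .target_cpus = 0xFFU, .type = INTR_TYPE_EL3 },
};

void plat_gic_setup(void)
{
	tegra_gic_setup(soc_sec_irqs, ARRAY_SIZE(soc_sec_irqs));
}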
diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c
new file mode 100644
index 0000000..6a906ae
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_platform.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Tegra platforms
+ ******************************************************************************/
+typedef enum tegra_platform {
+	TEGRA_PLATFORM_SILICON = 0,
+	TEGRA_PLATFORM_QT,
+	TEGRA_PLATFORM_FPGA,
+	TEGRA_PLATFORM_EMULATION,
+	TEGRA_PLATFORM_MAX,
+} tegra_platform_t;
+
+/*******************************************************************************
+ * Tegra macros defining all the SoC minor versions
+ ******************************************************************************/
+#define TEGRA_MINOR_QT			0
+#define TEGRA_MINOR_FPGA		1
+#define TEGRA_MINOR_EMULATION_MIN	2
+#define TEGRA_MINOR_EMULATION_MAX	10
+
+/*******************************************************************************
+ * Tegra major, minor version helper macros
+ ******************************************************************************/
+#define MAJOR_VERSION_SHIFT		0x4
+#define MAJOR_VERSION_MASK		0xF
+#define MINOR_VERSION_SHIFT		0x10
+#define MINOR_VERSION_MASK		0xF
+#define CHIP_ID_SHIFT			8
+#define CHIP_ID_MASK			0xFF
+
+/*******************************************************************************
+ * Tegra chip ID values
+ ******************************************************************************/
+typedef enum tegra_chipid {
+	TEGRA_CHIPID_TEGRA13 = 0x13,
+	TEGRA_CHIPID_TEGRA21 = 0x21,
+	TEGRA_CHIPID_TEGRA18 = 0x18,
+} tegra_chipid_t;
+
+/*
+ * Read the chip ID value
+ */
+static uint32_t tegra_get_chipid(void)
+{
+	return mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
+}
+
+/*
+ * Read the chip's major version from chip ID value
+ */
+uint32_t tegra_get_chipid_major(void)
+{
+	return (tegra_get_chipid() >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
+}
+
+/*
+ * Read the chip's minor version from the chip ID value
+ */
+uint32_t tegra_get_chipid_minor(void)
+{
+	return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
+}
+
+uint8_t tegra_chipid_is_t132(void)
+{
+	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+	return (chip_id == TEGRA_CHIPID_TEGRA13);
+}
+
+uint8_t tegra_chipid_is_t210(void)
+{
+	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+	return (chip_id == TEGRA_CHIPID_TEGRA21);
+}
+
+uint8_t tegra_chipid_is_t186(void)
+{
+	uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+	return (chip_id == TEGRA_CHIPID_TEGRA18);
+}
+
+/*
+ * Read the chip ID value and derive the platform
+ */
+static tegra_platform_t tegra_get_platform(void)
+{
+	uint32_t major = tegra_get_chipid_major();
+	uint32_t minor = tegra_get_chipid_minor();
+
+	/* Actual silicon platforms have a non-zero major version */
+	if (major > 0)
+		return TEGRA_PLATFORM_SILICON;
+
+	/*
+	 * The minor version number is used by simulation platforms
+	 */
+
+	/*
+	 * Cadence's QuickTurn is a Solaris-based chip emulation
+	 * system
+	 */
+	if (minor == TEGRA_MINOR_QT)
+		return TEGRA_PLATFORM_QT;
+
+	/*
+	 * FPGAs are used during early software/hardware development
+	 */
+	if (minor == TEGRA_MINOR_FPGA)
+		return TEGRA_PLATFORM_FPGA;
+
+	/* Minor version reserved for other emulation platforms */
+	if ((minor > TEGRA_MINOR_FPGA) && (minor <= TEGRA_MINOR_EMULATION_MAX))
+		return TEGRA_PLATFORM_EMULATION;
+
+	/* unsupported platform */
+	return TEGRA_PLATFORM_MAX;
+}
+
+uint8_t tegra_platform_is_silicon(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_SILICON);
+}
+
+uint8_t tegra_platform_is_qt(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_QT);
+}
+
+uint8_t tegra_platform_is_fpga(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_FPGA);
+}
+
+uint8_t tegra_platform_is_emulation(void)
+{
+	return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION);
+}
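A worked decode of the fields above, using a hypothetical HARDWARE_REVISION value:

/*
 * Illustration only: HARDWARE_REVISION = 0x2113
 *   chip ID = (0x2113 >> 8)  & 0xFF = 0x21 -> tegra_chipid_is_t210() != 0
 *   major   = (0x2113 >> 4)  & 0xF  = 1    -> tegra_platform_is_silicon() != 0
 *   minor   = (0x2113 >> 16) & 0xF  = 0
 */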
diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c
new file mode 100644
index 0000000..86021ba
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_pm.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t tegra_sec_entry_point;
+extern uint64_t tegra_console_base;
+
+/*
+ * tegra_fake_system_suspend acts as a boolean flag controlling whether
+ * we take the fake system suspend path or the normal system suspend
+ * path. This variable is set inside the SIP call handlers, when the kernel
+ * issues a SIP call to set the suspend debug flags.
+ */
+uint8_t tegra_fake_system_suspend;
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that will be overridden by a SoC.
+ */
+#pragma weak tegra_soc_pwr_domain_suspend_pwrdown_early
+#pragma weak tegra_soc_pwr_domain_suspend
+#pragma weak tegra_soc_pwr_domain_on
+#pragma weak tegra_soc_pwr_domain_off
+#pragma weak tegra_soc_pwr_domain_on_finish
+#pragma weak tegra_soc_pwr_domain_power_down_wfi
+#pragma weak tegra_soc_prepare_system_reset
+#pragma weak tegra_soc_prepare_system_off
+#pragma weak tegra_soc_get_target_pwr_state
+
+int tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+	return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+	ERROR("Tegra System Off: operation not handled.\n");
+	panic();
+}
+
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+	assert(ncpu);
+
+	do {
+		temp = *states++;
+		if ((temp < target))
+			target = temp;
+	} while (--ncpu);
+
+	return target;
+}
+
+/*******************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+******************************************************************************/
+void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	/* all affinities use system suspend state id */
+	for (uint32_t i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PSTATE_ID_SOC_POWERDN;
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to enter standby.
+ ******************************************************************************/
+void tegra_cpu_standby(plat_local_state_t cpu_state)
+{
+	/*
+	 * Enter standby state
+	 * dsb is good practice before using wfi to enter low power states
+	 */
+	dsb();
+	wfi();
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to be turned on. The
+ * level and mpidr determine the affinity instance.
+ ******************************************************************************/
+int tegra_pwr_domain_on(u_register_t mpidr)
+{
+	return tegra_soc_pwr_domain_on(mpidr);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void tegra_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	tegra_soc_pwr_domain_off(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ * This handler is called with SMP and data cache enabled, when
+ * HW_ASSISTED_COHERENCY = 0
+ ******************************************************************************/
+void tegra_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+	tegra_soc_pwr_domain_suspend_pwrdown_early(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void tegra_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	tegra_soc_pwr_domain_suspend(target_state);
+
+	/* Disable console if we are entering deep sleep. */
+	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+			PSTATE_ID_SOC_POWERDN)
+		console_uninit();
+
+	/* disable GICC */
+	tegra_gic_cpuif_deactivate();
+}
+
+/*******************************************************************************
+ * Handler called at the end of the power domain suspend sequence. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+__dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t
+					     *target_state)
+{
+	uint8_t pwr_state = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+	uint64_t rmr_el3 = 0;
+
+	/* call the chip's power down handler */
+	tegra_soc_pwr_domain_power_down_wfi(target_state);
+
+	/*
+	 * If we are in fake system suspend mode, request a warm reset so
+	 * that execution loops back towards the system suspend exit path
+	 * instead of calling WFI.
+	 * Otherwise, just call WFI to enter the low power state.
+	 */
+	if ((tegra_fake_system_suspend != 0U) &&
+	    (pwr_state == (uint8_t)PSTATE_ID_SOC_POWERDN)) {
+
+		/* warm reboot */
+		rmr_el3 = read_rmr_el3();
+		write_rmr_el3(rmr_el3 | RMR_WARM_RESET_CPU);
+
+	} else {
+		/* enter power down state */
+		wfi();
+	}
+
+	/* we can never reach here */
+	panic();
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	plat_params_from_bl2_t *plat_params;
+
+	/*
+	 * Initialize the GIC cpu and distributor interfaces
+	 */
+	plat_gic_setup();
+
+	/*
+	 * Check if we are exiting from deep sleep.
+	 */
+	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+			PSTATE_ID_SOC_POWERDN) {
+
+		/* Initialize the runtime console */
+		if (tegra_console_base != (uint64_t)0) {
+			console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+				TEGRA_CONSOLE_BAUDRATE);
+		}
+
+		/*
+		 * Restore Memory Controller settings as it loses state
+		 * during system suspend.
+		 */
+		tegra_memctrl_restore_settings();
+
+		/*
+		 * Security configuration to allow DRAM/device access.
+		 */
+		plat_params = bl31_get_plat_params();
+		tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+			plat_params->tzdram_size);
+
+		/*
+		 * Set up the TZRAM memory aperture to allow only secure world
+		 * access
+		 */
+		tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
+	}
+
+	/*
+	 * Reset hardware settings.
+	 */
+	tegra_soc_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ ******************************************************************************/
+void tegra_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	tegra_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be powered off
+ ******************************************************************************/
+__dead2 void tegra_system_off(void)
+{
+	INFO("Powering down system...\n");
+
+	tegra_soc_prepare_system_off();
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be restarted.
+ ******************************************************************************/
+__dead2 void tegra_system_reset(void)
+{
+	INFO("Restarting system...\n");
+
+	/* per-SoC system reset handler */
+	tegra_soc_prepare_system_reset();
+
+	/*
+	 * Program the PMC in order to restart the system.
+	 */
+	tegra_pmc_system_reset();
+}
+
+/*******************************************************************************
+ * Handler called to check the validity of the power state parameter.
+ ******************************************************************************/
+int32_t tegra_validate_power_state(unsigned int power_state,
+				   psci_power_state_t *req_state)
+{
+	assert(req_state);
+
+	return tegra_soc_validate_power_state(power_state, req_state);
+}
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the non secure entrypoint.
+ ******************************************************************************/
+int tegra_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non secure entrypoint lies within the non
+	 * secure DRAM.
+	 */
+	if ((entrypoint >= TEGRA_DRAM_BASE) && (entrypoint <= TEGRA_DRAM_END))
+		return PSCI_E_SUCCESS;
+
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const plat_psci_ops_t tegra_plat_psci_ops = {
+	.cpu_standby			= tegra_cpu_standby,
+	.pwr_domain_on			= tegra_pwr_domain_on,
+	.pwr_domain_off			= tegra_pwr_domain_off,
+	.pwr_domain_suspend_pwrdown_early = tegra_pwr_domain_suspend_pwrdown_early,
+	.pwr_domain_suspend		= tegra_pwr_domain_suspend,
+	.pwr_domain_on_finish		= tegra_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish	= tegra_pwr_domain_suspend_finish,
+	.pwr_domain_pwr_down_wfi	= tegra_pwr_domain_power_down_wfi,
+	.system_off			= tegra_system_off,
+	.system_reset			= tegra_system_reset,
+	.validate_power_state		= tegra_validate_power_state,
+	.validate_ns_entrypoint		= tegra_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= tegra_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops and initialize Power Controller
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	psci_power_state_t target_state = { { PSCI_LOCAL_STATE_RUN } };
+
+	/*
+	 * Flush entrypoint variable to PoC since it will be
+	 * accessed after a reset with the caches turned off.
+	 */
+	tegra_sec_entry_point = sec_entrypoint;
+	flush_dcache_range((uint64_t)&tegra_sec_entry_point, sizeof(uint64_t));
+
+	/*
+	 * Reset hardware settings.
+	 */
+	tegra_soc_pwr_domain_on_finish(&target_state);
+
+	/*
+	 * Initialize PSCI ops struct
+	 */
+	*psci_ops = &tegra_plat_psci_ops;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	return tegra_soc_get_target_pwr_state(lvl, states, ncpu);
+}
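The weak tegra_soc_* handlers above exist to be overridden by per-SoC code. A minimal sketch of one such override, assuming the tegra_fc_cpu_on() helper declared further down in flowctrl.h and the standard plat_core_pos_by_mpidr() mapping; real SoC handlers do considerably more:

#include <flowctrl.h>
#include <platform.h>
#include <psci.h>

int tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int cpu = plat_core_pos_by_mpidr(mpidr);

	if (cpu < 0)
		return PSCI_E_INVALID_PARAMS;

	/* ask the flow controller to power the core up */
	tegra_fc_cpu_on(cpu);

	return PSCI_E_SUCCESS;
}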
diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c
new file mode 100644
index 0000000..d96ce7a
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_sip_calls.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Common Tegra SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_NEW_VIDEOMEM_REGION		0x82000003
+#define TEGRA_SIP_FIQ_NS_ENTRYPOINT		0x82000005
+#define TEGRA_SIP_FIQ_NS_GET_CONTEXT		0x82000006
+#define TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND	0xC2000007
+
+/*******************************************************************************
+ * Fake system suspend mode control var
+ ******************************************************************************/
+extern uint8_t tegra_fake_system_suspend;
+
+
+/*******************************************************************************
+ * SoC specific SiP handler
+ ******************************************************************************/
+#pragma weak plat_sip_handler
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     void *cookie,
+		     void *handle,
+		     uint64_t flags)
+{
+	return -ENOTSUP;
+}
+
+/*******************************************************************************
+ * This function is responsible for handling all SiP calls
+ ******************************************************************************/
+uint64_t tegra_sip_handler(uint32_t smc_fid,
+			   uint64_t x1,
+			   uint64_t x2,
+			   uint64_t x3,
+			   uint64_t x4,
+			   void *cookie,
+			   void *handle,
+			   uint64_t flags)
+{
+	uint32_t regval;
+	int err;
+
+	/* Check if this is a SoC specific SiP */
+	err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
+	if (err == 0)
+		SMC_RET1(handle, (uint64_t)err);
+
+	switch (smc_fid) {
+
+	case TEGRA_SIP_NEW_VIDEOMEM_REGION:
+
+		/* clean up the high bits */
+		x2 = (uint32_t)x2;
+
+		/*
+		 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
+		 * or falls outside of the valid DRAM range
+		 */
+		err = bl31_check_ns_address(x1, x2);
+		if (err)
+			SMC_RET1(handle, err);
+
+		/*
+		 * Check if Video Memory is aligned to 1MB.
+		 */
+		if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
+			ERROR("Unaligned Video Memory base address!\n");
+			SMC_RET1(handle, -ENOTSUP);
+		}
+
+		/*
+		 * The GPU is the user of the Video Memory region. In order to
+		 * transition to the new memory region smoothly, we program the
+		 * new base/size ONLY if the GPU is in reset mode.
+		 */
+		regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
+				      TEGRA_GPU_RESET_REG_OFFSET);
+		if ((regval & GPU_RESET_BIT) == 0U) {
+			ERROR("GPU not in reset! Video Memory setup failed\n");
+			SMC_RET1(handle, -ENOTSUP);
+		}
+
+		/* new video memory carveout settings */
+		tegra_memctrl_videomem_setup(x1, x2);
+
+		SMC_RET1(handle, 0);
+		break;
+
+	/*
+	 * The NS world registers the address of its handler to be
+	 * used for processing the FIQ. This is normally used by the
+	 * NS FIQ debugger driver to detect system hangs by programming
+	 * a watchdog timer to fire a FIQ interrupt.
+	 */
+	case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
+
+		if (!x1)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * TODO: Check if x1 contains a valid DRAM address
+		 */
+
+		/* store the NS world's entrypoint */
+		tegra_fiq_set_ns_entrypoint(x1);
+
+		SMC_RET1(handle, 0);
+		break;
+
+	/*
+	 * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
+	 * CPU context when the FIQ interrupt was triggered. This allows the
+	 * NS world to understand the CPU state when the watchdog interrupt
+	 * triggered.
+	 */
+	case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
+
+		/* retrieve context registers when FIQ triggered */
+		tegra_fiq_get_intr_context();
+
+		SMC_RET0(handle);
+		break;
+
+	case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND:
+		/*
+		 * System suspend fake mode is set if we are on VDK and we make
+		 * a debug SIP call. This mode ensures that we exercise the debug
+		 * path instead of the regular code path to suit the pre-silicon
+		 * platform needs. These include replacing the call to WFI by
+		 * a warm reset request.
+		 */
+		if (tegra_platform_is_emulation() != 0U) {
+
+			tegra_fake_system_suspend = 1;
+			SMC_RET1(handle, 0);
+		}
+
+		/*
+		 * We return to the external world as if this SIP is not
+		 * implemented if we are not running on VDK.
+		 */
+		break;
+
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		break;
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	tegra_sip_fast,
+
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	NULL,
+	tegra_sip_handler
+);
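For reference, the register convention the handler above implies for the video memory SiP call (illustrative summary of the code shown, not a new interface):

/*
 * TEGRA_SIP_NEW_VIDEOMEM_REGION, as handled above:
 *   x0 = 0x82000003 (function ID)
 *   x1 = carveout physical base, 1MB aligned, outside TZDRAM
 *   x2 = carveout size in bytes, 1MB aligned
 *   the GPU must be held in reset when the call is made
 *   returns x0 = 0 on success, or a negative error code on failure
 */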
diff --git a/plat/nvidia/tegra/common/tegra_topology.c b/plat/nvidia/tegra/common/tegra_topology.c
new file mode 100644
index 0000000..0593053
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_topology.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+extern const unsigned char tegra_power_domain_tree_desc[];
+#pragma weak plat_core_pos_by_mpidr
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return tegra_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return PSCI_E_NOT_PRESENT;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the two clusters present on the platform.
+	 */
+	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+		return PSCI_E_NOT_PRESENT;
+
+	return (cpu_id + (cluster_id * 4));
+}
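A worked mapping for the formula above (MPIDR values are illustrative and assume both clusters and at least three CPUs per cluster are present):

/*
 * MPIDR 0x000 -> cluster 0, cpu 0 -> linear index 0
 * MPIDR 0x002 -> cluster 0, cpu 2 -> linear index 2
 * MPIDR 0x101 -> cluster 1, cpu 1 -> linear index 1 + (1 * 4) = 5
 */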
diff --git a/plat/nvidia/tegra/include/drivers/flowctrl.h b/plat/nvidia/tegra/include/drivers/flowctrl.h
new file mode 100644
index 0000000..2e3bcf0
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/flowctrl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FLOWCTRL_H__
+#define __FLOWCTRL_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+#define FLOWCTRL_HALT_CPU0_EVENTS	0x0U
+#define  FLOWCTRL_WAITEVENT		(2U << 29)
+#define  FLOWCTRL_WAIT_FOR_INTERRUPT	(4U << 29)
+#define  FLOWCTRL_JTAG_RESUME		(1U << 28)
+#define  FLOWCTRL_HALT_SCLK		(1U << 27)
+#define  FLOWCTRL_HALT_LIC_IRQ		(1U << 11)
+#define  FLOWCTRL_HALT_LIC_FIQ		(1U << 10)
+#define  FLOWCTRL_HALT_GIC_IRQ		(1U << 9)
+#define  FLOWCTRL_HALT_GIC_FIQ		(1U << 8)
+#define FLOWCTRL_HALT_BPMP_EVENTS	0x4U
+#define FLOWCTRL_CPU0_CSR		0x8U
+#define  FLOW_CTRL_CSR_PWR_OFF_STS	(1U << 16)
+#define  FLOWCTRL_CSR_INTR_FLAG		(1U << 15)
+#define  FLOWCTRL_CSR_EVENT_FLAG	(1U << 14)
+#define  FLOWCTRL_CSR_IMMEDIATE_WAKE	(1U << 3)
+#define  FLOWCTRL_CSR_ENABLE		(1U << 0)
+#define FLOWCTRL_HALT_CPU1_EVENTS	0x14U
+#define FLOWCTRL_CPU1_CSR		0x18U
+#define FLOWCTRL_CC4_CORE0_CTRL		0x6cU
+#define FLOWCTRL_WAIT_WFI_BITMAP	0x100U
+#define FLOWCTRL_L2_FLUSH_CONTROL	0x94U
+#define FLOWCTRL_BPMP_CLUSTER_CONTROL	0x98U
+#define  FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK	(1U << 2)
+
+#define FLOWCTRL_ENABLE_EXT		12U
+#define FLOWCTRL_ENABLE_EXT_MASK	3U
+#define FLOWCTRL_PG_CPU_NONCPU		0x1U
+#define FLOWCTRL_TURNOFF_CPURAIL	0x2U
+
+static inline uint32_t tegra_fc_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_FLOWCTRL_BASE + off);
+}
+
+static inline void tegra_fc_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_FLOWCTRL_BASE + off, val);
+}
+
+void tegra_fc_cluster_idle(uint32_t midr);
+void tegra_fc_cpu_powerdn(uint32_t mpidr);
+void tegra_fc_cluster_powerdn(uint32_t midr);
+void tegra_fc_soc_powerdn(uint32_t midr);
+void tegra_fc_cpu_on(int cpu);
+void tegra_fc_cpu_off(int cpu);
+void tegra_fc_lock_active_cluster(void);
+void tegra_fc_reset_bpmp(void);
+
+#endif /* __FLOWCTRL_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/mce.h b/plat/nvidia/tegra/include/drivers/mce.h
new file mode 100644
index 0000000..c7867a5
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/mce.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MCE_H__
+#define __MCE_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * MCE commands
+ ******************************************************************************/
+typedef enum mce_cmd {
+	MCE_CMD_ENTER_CSTATE = 0U,
+	MCE_CMD_UPDATE_CSTATE_INFO = 1U,
+	MCE_CMD_UPDATE_CROSSOVER_TIME = 2U,
+	MCE_CMD_READ_CSTATE_STATS = 3U,
+	MCE_CMD_WRITE_CSTATE_STATS = 4U,
+	MCE_CMD_IS_SC7_ALLOWED = 5U,
+	MCE_CMD_ONLINE_CORE = 6U,
+	MCE_CMD_CC3_CTRL = 7U,
+	MCE_CMD_ECHO_DATA = 8U,
+	MCE_CMD_READ_VERSIONS = 9U,
+	MCE_CMD_ENUM_FEATURES = 10U,
+	MCE_CMD_ROC_FLUSH_CACHE_TRBITS = 11U,
+	MCE_CMD_ENUM_READ_MCA = 12U,
+	MCE_CMD_ENUM_WRITE_MCA = 13U,
+	MCE_CMD_ROC_FLUSH_CACHE = 14U,
+	MCE_CMD_ROC_CLEAN_CACHE = 15U,
+	MCE_CMD_ENABLE_LATIC = 16U,
+	MCE_CMD_UNCORE_PERFMON_REQ = 17U,
+	MCE_CMD_MISC_CCPLEX = 18U,
+	MCE_CMD_IS_CCX_ALLOWED = 0xFEU,
+	MCE_CMD_MAX = 0xFFU,
+} mce_cmd_t;
+
+#define MCE_CMD_MASK				0xFFU
+
+/*******************************************************************************
+ * Timeout value used to powerdown a core
+ ******************************************************************************/
+#define MCE_CORE_SLEEP_TIME_INFINITE		0xFFFFFFFFU
+
+/*******************************************************************************
+ * Struct to prepare UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+typedef struct mce_cstate_info {
+	/* cluster cstate value */
+	uint32_t cluster;
+	/* ccplex cstate value */
+	uint32_t ccplex;
+	/* system cstate value */
+	uint32_t system;
+	/* force system state? */
+	uint8_t system_state_force;
+	/* wake mask value */
+	uint32_t wake_mask;
+	/* update the wake mask? */
+	uint8_t update_wake_mask;
+} mce_cstate_info_t;
+
+/* public interfaces */
+int mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+		uint64_t arg2);
+int mce_update_reset_vector(void);
+int mce_update_gsc_videomem(void);
+int mce_update_gsc_tzdram(void);
+int mce_update_gsc_tzram(void);
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx);
+void mce_update_cstate_info(const mce_cstate_info_t *cstate);
+void mce_verify_firmware_version(void);
+
+#endif /* __MCE_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl.h b/plat/nvidia/tegra/include/drivers/memctrl.h
new file mode 100644
index 0000000..8413299
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRL_H__
+#define __MEMCTRL_H__
+
+void tegra_memctrl_setup(void);
+void tegra_memctrl_restore_settings(void);
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_disable_ahb_redirection(void);
+
+#endif /* __MEMCTRL_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v1.h b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
new file mode 100644
index 0000000..78ee2e7
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRLV1_H__
+#define __MEMCTRLV1_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/* SMMU registers */
+#define MC_SMMU_CONFIG_0			0x10U
+#define  MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE	0U
+#define  MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE	1U
+#define MC_SMMU_TLB_CONFIG_0			0x14U
+#define  MC_SMMU_TLB_CONFIG_0_RESET_VAL		0x20000010U
+#define MC_SMMU_PTC_CONFIG_0			0x18U
+#define  MC_SMMU_PTC_CONFIG_0_RESET_VAL		0x2000003fU
+#define MC_SMMU_TLB_FLUSH_0			0x30U
+#define  TLB_FLUSH_VA_MATCH_ALL			0U
+#define  TLB_FLUSH_ASID_MATCH_DISABLE		0U
+#define  TLB_FLUSH_ASID_MATCH_SHIFT		31U
+#define  MC_SMMU_TLB_FLUSH_ALL		\
+	 (TLB_FLUSH_VA_MATCH_ALL | 	\
+	 (TLB_FLUSH_ASID_MATCH_DISABLE << TLB_FLUSH_ASID_MATCH_SHIFT))
+#define MC_SMMU_PTC_FLUSH_0			0x34U
+#define  MC_SMMU_PTC_FLUSH_ALL			0U
+#define MC_SMMU_ASID_SECURITY_0			0x38U
+#define  MC_SMMU_ASID_SECURITY			0U
+#define MC_SMMU_TRANSLATION_ENABLE_0_0		0x228U
+#define MC_SMMU_TRANSLATION_ENABLE_1_0		0x22cU
+#define MC_SMMU_TRANSLATION_ENABLE_2_0		0x230U
+#define MC_SMMU_TRANSLATION_ENABLE_3_0		0x234U
+#define MC_SMMU_TRANSLATION_ENABLE_4_0		0xb98U
+#define  MC_SMMU_TRANSLATION_ENABLE		(~0)
+
+/* MC IRAM aperture registers */
+#define MC_IRAM_BASE_LO				0x65CU
+#define MC_IRAM_TOP_LO				0x660U
+#define MC_IRAM_BASE_TOP_HI			0x980U
+#define MC_IRAM_REG_CTRL			0x964U
+#define  MC_DISABLE_IRAM_CFG_WRITES		1U
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+#endif /* __MEMCTRLV1_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
new file mode 100644
index 0000000..60c8a04
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRLV2_H__
+#define __MEMCTRLV2_H__
+
+#include <tegra_def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <sys/types.h>
+
+/*******************************************************************************
+ * StreamID to indicate no SMMU translations (requests to be steered on the
+ * SMMU bypass path)
+ ******************************************************************************/
+#define MC_STREAM_ID_MAX			0x7F
+
+/*******************************************************************************
+ * Stream ID Override Config registers
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_CFG_PTCR		0x000
+#define MC_STREAMID_OVERRIDE_CFG_AFIR		0x070
+#define MC_STREAMID_OVERRIDE_CFG_HDAR		0x0A8
+#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR	0x0B0
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD	0x0E0
+#define MC_STREAMID_OVERRIDE_CFG_SATAR		0x0F8
+#define MC_STREAMID_OVERRIDE_CFG_MPCORER	0x138
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR	0x158
+#define MC_STREAMID_OVERRIDE_CFG_AFIW		0x188
+#define MC_STREAMID_OVERRIDE_CFG_HDAW		0x1A8
+#define MC_STREAMID_OVERRIDE_CFG_MPCOREW	0x1C8
+#define MC_STREAMID_OVERRIDE_CFG_SATAW		0x1E8
+#define MC_STREAMID_OVERRIDE_CFG_ISPRA		0x220
+#define MC_STREAMID_OVERRIDE_CFG_ISPWA		0x230
+#define MC_STREAMID_OVERRIDE_CFG_ISPWB		0x238
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR	0x250
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW	0x258
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR	0x260
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW	0x268
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRD	0x2A0
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWR	0x2A8
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD		0x2C0
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR		0x2C8
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA	0x300
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA	0x308
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCR		0x310
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB	0x318
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA	0x320
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA	0x328
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCW		0x330
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB	0x338
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD		0x360
+#define MC_STREAMID_OVERRIDE_CFG_VICSWR		0x368
+#define MC_STREAMID_OVERRIDE_CFG_VIW		0x390
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD	0x3C0
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR	0x3C8
+#define MC_STREAMID_OVERRIDE_CFG_APER		0x3D0
+#define MC_STREAMID_OVERRIDE_CFG_APEW		0x3D8
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD	0x3F0
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR	0x3F8
+#define MC_STREAMID_OVERRIDE_CFG_SESRD		0x400
+#define MC_STREAMID_OVERRIDE_CFG_SESWR		0x408
+#define MC_STREAMID_OVERRIDE_CFG_ETRR		0x420
+#define MC_STREAMID_OVERRIDE_CFG_ETRW		0x428
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB	0x430
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB	0x438
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2	0x440
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2	0x448
+#define MC_STREAMID_OVERRIDE_CFG_AXISR		0x460
+#define MC_STREAMID_OVERRIDE_CFG_AXISW		0x468
+#define MC_STREAMID_OVERRIDE_CFG_EQOSR		0x470
+#define MC_STREAMID_OVERRIDE_CFG_EQOSW		0x478
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCR		0x480
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCW		0x488
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR	0x490
+#define MC_STREAMID_OVERRIDE_CFG_BPMPR		0x498
+#define MC_STREAMID_OVERRIDE_CFG_BPMPW		0x4A0
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR	0x4A8
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW	0x4B0
+#define MC_STREAMID_OVERRIDE_CFG_AONR		0x4B8
+#define MC_STREAMID_OVERRIDE_CFG_AONW		0x4C0
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAR	0x4C8
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAW	0x4D0
+#define MC_STREAMID_OVERRIDE_CFG_SCER		0x4D8
+#define MC_STREAMID_OVERRIDE_CFG_SCEW		0x4E0
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR	0x4E8
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW	0x4F0
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAR	0x4F8
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAW	0x500
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1	0x508
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD1	0x510
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1	0x518
+
+/*******************************************************************************
+ * Macro to calculate Security cfg register addr from StreamID Override register
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) (addr + sizeof(uint32_t))
+
+/*******************************************************************************
+ * Memory Controller transaction override config registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CONFIG_HDAR		0x10a8
+#define MC_TXN_OVERRIDE_CONFIG_BPMPW		0x14a0
+#define MC_TXN_OVERRIDE_CONFIG_PTCR		0x1000
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR	0x1490
+#define MC_TXN_OVERRIDE_CONFIG_EQOSW		0x1478
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR		0x13f8
+#define MC_TXN_OVERRIDE_CONFIG_ISPRA		0x1220
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA		0x1328
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD		0x1360
+#define MC_TXN_OVERRIDE_CONFIG_MPCOREW		0x11c8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD		0x12c0
+#define MC_TXN_OVERRIDE_CONFIG_AXISR		0x1460
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW		0x14f0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCW		0x1330
+#define MC_TXN_OVERRIDE_CONFIG_EQOSR		0x1470
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAR		0x14f8
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD		0x10e0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB		0x1318
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD1		0x1510
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR		0x14a8
+#define MC_TXN_OVERRIDE_CONFIG_VIW		0x1390
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA		0x1308
+#define MC_TXN_OVERRIDE_CONFIG_AXISW		0x1468
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR	0x1260
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCR		0x1480
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWR		0x12a8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR		0x12c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAR		0x10f8
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW	0x1258
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB		0x1438
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2		0x1440
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR		0x14e8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2		0x1448
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAW		0x14d0
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAW		0x1500
+#define MC_TXN_OVERRIDE_CONFIG_AONW		0x14c0
+#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR	0x10b0
+#define MC_TXN_OVERRIDE_CONFIG_ETRR		0x1420
+#define MC_TXN_OVERRIDE_CONFIG_SESWR		0x1408
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD		0x13f0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD		0x13c0
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB		0x1430
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW		0x14b0
+#define MC_TXN_OVERRIDE_CONFIG_APER		0x13d0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1	0x1518
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR	0x1250
+#define MC_TXN_OVERRIDE_CONFIG_ISPWA		0x1230
+#define MC_TXN_OVERRIDE_CONFIG_SESRD		0x1400
+#define MC_TXN_OVERRIDE_CONFIG_SCER		0x14d8
+#define MC_TXN_OVERRIDE_CONFIG_AONR		0x14b8
+#define MC_TXN_OVERRIDE_CONFIG_MPCORER		0x1138
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA		0x1320
+#define MC_TXN_OVERRIDE_CONFIG_HDAW		0x11a8
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR		0x13c8
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCW		0x1488
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAR		0x14c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAW		0x11e8
+#define MC_TXN_OVERRIDE_CONFIG_ETRW		0x1428
+#define MC_TXN_OVERRIDE_CONFIG_VICSWR		0x1368
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR		0x1158
+#define MC_TXN_OVERRIDE_CONFIG_AFIR		0x1070
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB		0x1338
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA		0x1300
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1	0x1508
+#define MC_TXN_OVERRIDE_CONFIG_ISPWB		0x1238
+#define MC_TXN_OVERRIDE_CONFIG_BPMPR		0x1498
+#define MC_TXN_OVERRIDE_CONFIG_APEW		0x13d8
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCR		0x1310
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW	0x1268
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRD		0x12a0
+#define MC_TXN_OVERRIDE_CONFIG_AFIW		0x1188
+#define MC_TXN_OVERRIDE_CONFIG_SCEW		0x14e0
+
+/*******************************************************************************
+ * Structure to hold the transaction override settings to use to override
+ * client inputs
+ ******************************************************************************/
+typedef struct mc_txn_override_cfg {
+	uint32_t offset;
+	uint8_t cgid_tag;
+} mc_txn_override_cfg_t;
+
+#define mc_make_txn_override_cfg(off, val) \
+	{ \
+		.offset = MC_TXN_OVERRIDE_CONFIG_ ## off, \
+		.cgid_tag = MC_TXN_OVERRIDE_ ## val \
+	}
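+
+/*
+ * Illustrative example (the array name is a placeholder, not taken from the
+ * SoC sources): a platform port would typically describe its transaction
+ * override table as
+ *
+ *	static const mc_txn_override_cfg_t plat_txn_override_cfgs[] = {
+ *		mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
+ *		mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR)
+ *	};
+ */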
+
+/*******************************************************************************
+ * Structure to hold the Stream ID to use to override client inputs
+ ******************************************************************************/
+typedef struct mc_streamid_override_cfg {
+	uint32_t offset;
+	uint8_t stream_id;
+} mc_streamid_override_cfg_t;
+
+/*******************************************************************************
+ * Structure to hold the Stream ID Security Configuration settings
+ ******************************************************************************/
+typedef struct mc_streamid_security_cfg {
+	char *name;
+	uint32_t offset;
+	int override_enable;
+	int override_client_inputs;
+	int override_client_ns_flag;
+} mc_streamid_security_cfg_t;
+
+#define OVERRIDE_DISABLE				1
+#define OVERRIDE_ENABLE					0
+#define CLIENT_FLAG_SECURE				0
+#define CLIENT_FLAG_NON_SECURE				1
+#define CLIENT_INPUTS_OVERRIDE				1
+#define CLIENT_INPUTS_NO_OVERRIDE			0
+
+#define mc_make_sec_cfg(off, ns, ovrrd, access) \
+	{ \
+		.name = # off, \
+		.offset = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG( \
+				MC_STREAMID_OVERRIDE_CFG_ ## off), \
+		.override_client_ns_flag = CLIENT_FLAG_ ## ns, \
+		.override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \
+		.override_enable = OVERRIDE_ ## access \
+	}
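+
+/*
+ * Illustrative expansion only: mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE,
+ * ENABLE) produces an entry whose offset points at the security register
+ * paired with MC_STREAMID_OVERRIDE_CFG_SCEW and whose client inputs are left
+ * untouched, i.e.
+ *
+ *	{
+ *		.name = "SCEW",
+ *		.offset = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(
+ *				MC_STREAMID_OVERRIDE_CFG_SCEW),
+ *		.override_client_ns_flag = CLIENT_FLAG_NON_SECURE,
+ *		.override_client_inputs = CLIENT_INPUTS_NO_OVERRIDE,
+ *		.override_enable = OVERRIDE_ENABLE
+ *	}
+ */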
+
+/*******************************************************************************
+ * Structure to hold Memory Controller's Configuration settings
+ ******************************************************************************/
+typedef struct tegra_mc_settings {
+	const uint32_t *streamid_override_cfg;
+	uint32_t num_streamid_override_cfgs;
+	const mc_streamid_security_cfg_t *streamid_security_cfg;
+	uint32_t num_streamid_security_cfgs;
+	const mc_txn_override_cfg_t *txn_override_cfg;
+	uint32_t num_txn_override_cfgs;
+} tegra_mc_settings_t;
+
+#endif /* __ASSEMBLY__ */
+
+/*******************************************************************************
+ * Memory Controller SMMU Bypass config register
+ ******************************************************************************/
+#define MC_SMMU_BYPASS_CONFIG			0x1820
+#define MC_SMMU_BYPASS_CTRL_MASK		0x3
+#define MC_SMMU_BYPASS_CTRL_SHIFT		0
+#define MC_SMMU_CTRL_TBU_BYPASS_ALL		(0 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_RSVD			(1 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID	(2 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_NONE		(3 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT	(1 << 31)
+#define MC_SMMU_BYPASS_CONFIG_SETTINGS		(MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
+						 MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
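+
+/*
+ * Illustrative usage (a sketch, not lifted from the driver sources): the
+ * bypass setting above is typically programmed in one shot, e.g.
+ *
+ *	tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG, MC_SMMU_BYPASS_CONFIG_SETTINGS);
+ */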
+
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID	(1 << 0)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV	(2 << 4)
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT	(1 << 12)
+
+/*******************************************************************************
+ * Non-SO_DEV transactions override values for CGID_TAG bitfield for the
+ * MC_TXN_OVERRIDE_CONFIG_{module} registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT	0
+#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID	1
+#define MC_TXN_OVERRIDE_CGID_TAG_ZERO		2
+#define MC_TXN_OVERRIDE_CGID_TAG_ADR		3
+#define MC_TXN_OVERRIDE_CGID_TAG_MASK		3
+
+/*******************************************************************************
+ * Memory Controller Reset Control registers
+ ******************************************************************************/
+#define MC_CLIENT_HOTRESET_CTRL0			0x200
+#define  MC_CLIENT_HOTRESET_CTRL0_RESET_VAL		0
+#define  MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB		(1 << 0)
+#define  MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB		(1 << 6)
+#define  MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB		(1 << 7)
+#define  MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB	(1 << 8)
+#define  MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB	(1 << 9)
+#define  MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB	(1 << 11)
+#define  MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB	(1 << 15)
+#define  MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB		(1 << 17)
+#define  MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB		(1 << 18)
+#define  MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB	(1 << 19)
+#define  MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB	(1 << 20)
+#define  MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB	(1 << 22)
+#define  MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB	(1 << 29)
+#define  MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB	(1 << 30)
+#define  MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB	(1 << 31)
+#define MC_CLIENT_HOTRESET_STATUS0			0x204
+#define MC_CLIENT_HOTRESET_CTRL1			0x970
+#define  MC_CLIENT_HOTRESET_CTRL1_RESET_VAL		0
+#define  MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB	(1 << 0)
+#define  MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB		(1 << 2)
+#define  MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB	(1 << 5)
+#define  MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB		(1 << 6)
+#define  MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB		(1 << 7)
+#define  MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB	(1 << 8)
+#define  MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB		(1 << 12)
+#define  MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB	(1 << 13)
+#define  MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB	(1 << 18)
+#define  MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB	(1 << 19)
+#define  MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB	(1 << 20)
+#define  MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB	(1 << 21)
+#define  MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB	(1 << 22)
+#define  MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB		(1 << 23)
+#define  MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB		(1 << 24)
+#define MC_CLIENT_HOTRESET_STATUS1			0x974
+
+/*******************************************************************************
+ * Memory Controller's PCFIFO client configuration registers
+ ******************************************************************************/
+#define MC_PCFIFO_CLIENT_CONFIG1			0xdd4
+#define  MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL		0x20000
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED	(0 << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK	(1 << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED	(0 << 21)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK	(1 << 21)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0 << 29)
+#define  MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK	(1 << 29)
+
+#define MC_PCFIFO_CLIENT_CONFIG2			0xdd8
+#define  MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL		0x20000
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED	(0 << 11)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK	(1 << 11)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED	(0 << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK	(1 << 13)
+
+#define MC_PCFIFO_CLIENT_CONFIG3			0xddc
+#define  MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL		0
+#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED	(0 << 7)
+#define  MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK	(1 << 7)
+
+#define MC_PCFIFO_CLIENT_CONFIG4		0xde0
+#define  MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL	0
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0 << 1)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK	(1 << 1)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED	(0 << 5)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK	(1 << 5)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0 << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK	(1 << 13)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0 << 15)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK	(1 << 15)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED	(0 << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK	(1 << 17)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED	(0 << 22)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK	(1 << 22)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED	(0 << 26)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK	(1 << 26)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED	(0 << 30)
+#define  MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK	(1 << 30)
+
+#define MC_PCFIFO_CLIENT_CONFIG5		0xbf4
+#define  MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL	0
+#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED	(0 << 0)
+#define  MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK	(1 << 0)
+
+/*******************************************************************************
+ * Memory Controller's SMMU client configuration registers
+ ******************************************************************************/
+#define MC_SMMU_CLIENT_CONFIG1				0x44
+#define  MC_SMMU_CLIENT_CONFIG1_RESET_VAL		0x20000
+#define  MC_SMMU_CLIENT_CONFIG1_AFIW_UNORDERED		(0 << 17)
+#define  MC_SMMU_CLIENT_CONFIG1_AFIW_MASK		(1 << 17)
+#define  MC_SMMU_CLIENT_CONFIG1_HDAW_UNORDERED		(0 << 21)
+#define  MC_SMMU_CLIENT_CONFIG1_HDAW_MASK		(1 << 21)
+#define  MC_SMMU_CLIENT_CONFIG1_SATAW_UNORDERED		(0 << 29)
+#define  MC_SMMU_CLIENT_CONFIG1_SATAW_MASK		(1 << 29)
+
+#define MC_SMMU_CLIENT_CONFIG2				0x48
+#define  MC_SMMU_CLIENT_CONFIG2_RESET_VAL		0x20000
+#define  MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_UNORDERED	(0 << 11)
+#define  MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_MASK		(1 << 11)
+#define  MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_UNORDERED	(0 << 13)
+#define  MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_MASK		(1 << 13)
+
+#define MC_SMMU_CLIENT_CONFIG3				0x4c
+#define  MC_SMMU_CLIENT_CONFIG3_RESET_VAL		0
+#define  MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_UNORDERED	(0 << 7)
+#define  MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_MASK		(1 << 7)
+
+#define MC_SMMU_CLIENT_CONFIG4				0xb9c
+#define  MC_SMMU_CLIENT_CONFIG4_RESET_VAL		0
+#define  MC_SMMU_CLIENT_CONFIG4_SESWR_UNORDERED		(0 << 1)
+#define  MC_SMMU_CLIENT_CONFIG4_SESWR_MASK		(1 << 1)
+#define  MC_SMMU_CLIENT_CONFIG4_ETRW_UNORDERED		(0 << 5)
+#define  MC_SMMU_CLIENT_CONFIG4_ETRW_MASK		(1 << 5)
+#define  MC_SMMU_CLIENT_CONFIG4_AXISW_UNORDERED		(0 << 13)
+#define  MC_SMMU_CLIENT_CONFIG4_AXISW_MASK		(1 << 13)
+#define  MC_SMMU_CLIENT_CONFIG4_EQOSW_UNORDERED		(0 << 15)
+#define  MC_SMMU_CLIENT_CONFIG4_EQOSW_MASK		(1 << 15)
+#define  MC_SMMU_CLIENT_CONFIG4_UFSHCW_UNORDERED	(0 << 17)
+#define  MC_SMMU_CLIENT_CONFIG4_UFSHCW_MASK		(1 << 17)
+#define  MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_UNORDERED	(0 << 22)
+#define  MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_MASK		(1 << 22)
+#define  MC_SMMU_CLIENT_CONFIG4_AONDMAW_UNORDERED	(0 << 26)
+#define  MC_SMMU_CLIENT_CONFIG4_AONDMAW_MASK		(1 << 26)
+#define  MC_SMMU_CLIENT_CONFIG4_SCEDMAW_UNORDERED	(0 << 30)
+#define  MC_SMMU_CLIENT_CONFIG4_SCEDMAW_MASK		(1 << 30)
+
+#define MC_SMMU_CLIENT_CONFIG5				0xbac
+#define  MC_SMMU_CLIENT_CONFIG5_RESET_VAL		0
+#define  MC_SMMU_CLIENT_CONFIG5_APEDMAW_UNORDERED	(0 << 0)
+#define  MC_SMMU_CLIENT_CONFIG5_APEDMAW_MASK	(1 << 0)
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+static inline uint32_t tegra_mc_streamid_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_MC_STREAMID_BASE + off);
+}
+
+static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_MC_STREAMID_BASE + off, val);
+}
+
+#define mc_set_pcfifo_unordered_boot_so_mss(id, client) \
+	(~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
+	 MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
+
+#define mc_set_smmu_unordered_boot_so_mss(id, client) \
+	(~MC_SMMU_CLIENT_CONFIG##id##_##client##_MASK | \
+	 MC_SMMU_CLIENT_CONFIG##id##_##client##_UNORDERED)
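+
+/*
+ * Usage sketch (illustrative only): both helpers above produce AND-style
+ * masks, so the intended pattern is to combine them with the register's
+ * reset value, e.g.
+ *
+ *	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
+ *	      mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
+ *	      mc_set_pcfifo_unordered_boot_so_mss(1, HDAW);
+ *	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
+ */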
+
+#define mc_set_tsa_passthrough(client) \
+	{ \
+		mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
+			(TSA_CONFIG_STATIC0_CSW_##client##_RESET & \
+			 ~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+			TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+	}
+
+#define mc_set_forced_coherent_cfg(client) \
+	{ \
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV); \
+	}
+
+#define mc_set_forced_coherent_so_dev_cfg(client) \
+	{ \
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+	}
+
+#define mc_set_forced_coherent_axid_so_dev_cfg(client) \
+	{ \
+		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+			MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \
+			MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+	}
+
+/*******************************************************************************
+ * Handler to read memory configuration settings
+ *
+ * Implemented by SoCs under tegra/soc/txxx
+ ******************************************************************************/
+tegra_mc_settings_t *tegra_get_mc_settings(void);
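+
+/*
+ * Minimal sketch of a SoC implementation (the names below are placeholders,
+ * not from this tree, and the generic ARRAY_SIZE() helper is assumed):
+ *
+ *	static tegra_mc_settings_t plat_mc_settings = {
+ *		.streamid_override_cfg = plat_streamid_override_regs,
+ *		.num_streamid_override_cfgs = ARRAY_SIZE(plat_streamid_override_regs),
+ *		.streamid_security_cfg = plat_streamid_sec_cfgs,
+ *		.num_streamid_security_cfgs = ARRAY_SIZE(plat_streamid_sec_cfgs),
+ *		.txn_override_cfg = plat_txn_override_cfgs,
+ *		.num_txn_override_cfgs = ARRAY_SIZE(plat_txn_override_cfgs)
+ *	};
+ *
+ *	tegra_mc_settings_t *tegra_get_mc_settings(void)
+ *	{
+ *		return &plat_mc_settings;
+ *	}
+ */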
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __MEMCTRLV2_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/pmc.h b/plat/nvidia/tegra/include/drivers/pmc.h
new file mode 100644
index 0000000..ea9392b
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/pmc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMC_H__
+#define __PMC_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+#include <utils_def.h>
+
+#define PMC_CONFIG				U(0x0)
+#define PMC_PWRGATE_STATUS			U(0x38)
+#define PMC_PWRGATE_TOGGLE			U(0x30)
+#define  PMC_TOGGLE_START			U(0x100)
+#define PMC_SCRATCH39				U(0x138)
+#define PMC_SECURE_DISABLE2			U(0x2c4)
+#define  PMC_SECURE_DISABLE2_WRITE22_ON		(U(1) << 28)
+#define PMC_SECURE_SCRATCH22			U(0x338)
+#define PMC_SECURE_DISABLE3			U(0x2d8)
+#define  PMC_SECURE_DISABLE3_WRITE34_ON		(U(1) << 20)
+#define  PMC_SECURE_DISABLE3_WRITE35_ON		(U(1) << 22)
+#define PMC_SECURE_SCRATCH34			U(0x368)
+#define PMC_SECURE_SCRATCH35			U(0x36c)
+
+static inline uint32_t tegra_pmc_read_32(uint32_t off)
+{
+	return mmio_read_32(TEGRA_PMC_BASE + off);
+}
+
+static inline void tegra_pmc_write_32(uint32_t off, uint32_t val)
+{
+	mmio_write_32(TEGRA_PMC_BASE + off, val);
+}
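+
+/*
+ * Illustrative example (not taken from the driver sources): power-ungating a
+ * partition via the toggle register would look roughly like
+ *
+ *	tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, part_id | PMC_TOGGLE_START);
+ *
+ * where 'part_id' is a hypothetical SoC-specific power partition index.
+ */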
+
+void tegra_pmc_cpu_setup(uint64_t reset_addr);
+void tegra_pmc_lock_cpu_vectors(void);
+void tegra_pmc_cpu_on(int32_t cpu);
+__dead2 void tegra_pmc_system_reset(void);
+
+#endif /* __PMC_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h
new file mode 100644
index 0000000..86e911a
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/smmu.h
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMMU_H
+#define __SMMU_H
+
+#include <memctrl_v2.h>
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * SMMU Register constants
+ ******************************************************************************/
+#define SMMU_CBn_SCTLR				(0x0U)
+#define SMMU_CBn_SCTLR_STAGE2			(0x0U)
+#define SMMU_CBn_ACTLR				(0x4U)
+#define SMMU_CBn_RESUME				(0x8U)
+#define SMMU_CBn_TCR2				(0x10U)
+#define SMMU_CBn_TTBR0_LO			(0x20U)
+#define SMMU_CBn_TTBR0_HI			(0x24U)
+#define SMMU_CBn_TTBR1_LO			(0x28U)
+#define SMMU_CBn_TTBR1_HI			(0x2cU)
+#define SMMU_CBn_TCR_LPAE			(0x30U)
+#define SMMU_CBn_TCR				(0x30U)
+#define SMMU_CBn_TCR_EAE_1			(0x30U)
+#define SMMU_CBn_CONTEXTIDR			(0x34U)
+#define SMMU_CBn_CONTEXTIDR_EAE_1		(0x34U)
+#define SMMU_CBn_PRRR_MAIR0			(0x38U)
+#define SMMU_CBn_NMRR_MAIR1			(0x3cU)
+#define SMMU_CBn_SMMU_CBn_PAR			(0x50U)
+#define SMMU_CBn_SMMU_CBn_PAR0			(0x50U)
+#define SMMU_CBn_SMMU_CBn_PAR1			(0x54U)
+/*      SMMU_CBn_SMMU_CBn_PAR0_Fault		(0x50U) */
+/*      SMMU_CBn_SMMU_CBn_PAR1_Fault		(0x54U) */
+#define SMMU_CBn_FSR				(0x58U)
+#define SMMU_CBn_FSRRESTORE			(0x5cU)
+#define SMMU_CBn_FAR_LO				(0x60U)
+#define SMMU_CBn_FAR_HI				(0x64U)
+#define SMMU_CBn_FSYNR0				(0x68U)
+#define SMMU_CBn_IPAFAR_LO			(0x70U)
+#define SMMU_CBn_IPAFAR_HI			(0x74U)
+#define SMMU_CBn_TLBIVA_LO			(0x600U)
+#define SMMU_CBn_TLBIVA_HI			(0x604U)
+#define SMMU_CBn_TLBIVA_AARCH_32		(0x600U)
+#define SMMU_CBn_TLBIVAA_LO			(0x608U)
+#define SMMU_CBn_TLBIVAA_HI			(0x60cU)
+#define SMMU_CBn_TLBIVAA_AARCH_32		(0x608U)
+#define SMMU_CBn_TLBIASID			(0x610U)
+#define SMMU_CBn_TLBIALL			(0x618U)
+#define SMMU_CBn_TLBIVAL_LO			(0x620U)
+#define SMMU_CBn_TLBIVAL_HI			(0x624U)
+#define SMMU_CBn_TLBIVAL_AARCH_32		(0x618U)
+#define SMMU_CBn_TLBIVAAL_LO			(0x628U)
+#define SMMU_CBn_TLBIVAAL_HI			(0x62cU)
+#define SMMU_CBn_TLBIVAAL_AARCH_32		(0x628U)
+#define SMMU_CBn_TLBIIPAS2_LO			(0x630U)
+#define SMMU_CBn_TLBIIPAS2_HI			(0x634U)
+#define SMMU_CBn_TLBIIPAS2L_LO			(0x638U)
+#define SMMU_CBn_TLBIIPAS2L_HI			(0x63cU)
+#define SMMU_CBn_TLBSYNC			(0x7f0U)
+#define SMMU_CBn_TLBSTATUS			(0x7f4U)
+#define SMMU_CBn_ATSR				(0x800U)
+#define SMMU_CBn_PMEVCNTR0			(0xe00U)
+#define SMMU_CBn_PMEVCNTR1			(0xe04U)
+#define SMMU_CBn_PMEVCNTR2			(0xe08U)
+#define SMMU_CBn_PMEVCNTR3			(0xe0cU)
+#define SMMU_CBn_PMEVTYPER0			(0xe80U)
+#define SMMU_CBn_PMEVTYPER1			(0xe84U)
+#define SMMU_CBn_PMEVTYPER2			(0xe88U)
+#define SMMU_CBn_PMEVTYPER3			(0xe8cU)
+#define SMMU_CBn_PMCFGR				(0xf00U)
+#define SMMU_CBn_PMCR				(0xf04U)
+#define SMMU_CBn_PMCEID				(0xf20U)
+#define SMMU_CBn_PMCNTENSE			(0xf40U)
+#define SMMU_CBn_PMCNTENCLR			(0xf44U)
+#define SMMU_CBn_PMCNTENSET			(0xf48U)
+#define SMMU_CBn_PMINTENCLR			(0xf4cU)
+#define SMMU_CBn_PMOVSCLR			(0xf50U)
+#define SMMU_CBn_PMOVSSET			(0xf58U)
+#define SMMU_CBn_PMAUTHSTATUS			(0xfb8U)
+#define SMMU_GNSR0_CR0				(0x0U)
+#define SMMU_GNSR0_CR2				(0x8U)
+#define SMMU_GNSR0_ACR				(0x10U)
+#define SMMU_GNSR0_IDR0				(0x20U)
+#define SMMU_GNSR0_IDR1				(0x24U)
+#define SMMU_GNSR0_IDR2				(0x28U)
+#define SMMU_GNSR0_IDR7				(0x3cU)
+#define SMMU_GNSR0_GFAR_LO			(0x40U)
+#define SMMU_GNSR0_GFAR_HI			(0x44U)
+#define SMMU_GNSR0_GFSR				(0x48U)
+#define SMMU_GNSR0_GFSRRESTORE			(0x4cU)
+#define SMMU_GNSR0_GFSYNR0			(0x50U)
+#define SMMU_GNSR0_GFSYNR1			(0x54U)
+#define SMMU_GNSR0_GFSYNR1_v2			(0x54U)
+#define SMMU_GNSR0_TLBIVMID			(0x64U)
+#define SMMU_GNSR0_TLBIALLNSNH			(0x68U)
+#define SMMU_GNSR0_TLBIALLH			(0x6cU)
+#define SMMU_GNSR0_TLBGSYNC			(0x70U)
+#define SMMU_GNSR0_TLBGSTATUS			(0x74U)
+#define SMMU_GNSR0_TLBIVAH_LO			(0x78U)
+#define SMMU_GNSR0_TLBIVALH64_LO		(0xb0U)
+#define SMMU_GNSR0_TLBIVALH64_HI		(0xb4U)
+#define SMMU_GNSR0_TLBIVMIDS1			(0xb8U)
+#define SMMU_GNSR0_TLBIVAH64_LO			(0xc0U)
+#define SMMU_GNSR0_TLBIVAH64_HI			(0xc4U)
+#define SMMU_GNSR0_SMR0				(0x800U)
+#define SMMU_GNSR0_SMRn				(0x800U)
+#define SMMU_GNSR0_SMR1				(0x804U)
+#define SMMU_GNSR0_SMR2				(0x808U)
+#define SMMU_GNSR0_SMR3				(0x80cU)
+#define SMMU_GNSR0_SMR4				(0x810U)
+#define SMMU_GNSR0_SMR5				(0x814U)
+#define SMMU_GNSR0_SMR6				(0x818U)
+#define SMMU_GNSR0_SMR7				(0x81cU)
+#define SMMU_GNSR0_SMR8				(0x820U)
+#define SMMU_GNSR0_SMR9				(0x824U)
+#define SMMU_GNSR0_SMR10			(0x828U)
+#define SMMU_GNSR0_SMR11			(0x82cU)
+#define SMMU_GNSR0_SMR12			(0x830U)
+#define SMMU_GNSR0_SMR13			(0x834U)
+#define SMMU_GNSR0_SMR14			(0x838U)
+#define SMMU_GNSR0_SMR15			(0x83cU)
+#define SMMU_GNSR0_SMR16			(0x840U)
+#define SMMU_GNSR0_SMR17			(0x844U)
+#define SMMU_GNSR0_SMR18			(0x848U)
+#define SMMU_GNSR0_SMR19			(0x84cU)
+#define SMMU_GNSR0_SMR20			(0x850U)
+#define SMMU_GNSR0_SMR21			(0x854U)
+#define SMMU_GNSR0_SMR22			(0x858U)
+#define SMMU_GNSR0_SMR23			(0x85cU)
+#define SMMU_GNSR0_SMR24			(0x860U)
+#define SMMU_GNSR0_SMR25			(0x864U)
+#define SMMU_GNSR0_SMR26			(0x868U)
+#define SMMU_GNSR0_SMR27			(0x86cU)
+#define SMMU_GNSR0_SMR28			(0x870U)
+#define SMMU_GNSR0_SMR29			(0x874U)
+#define SMMU_GNSR0_SMR30			(0x878U)
+#define SMMU_GNSR0_SMR31			(0x87cU)
+#define SMMU_GNSR0_SMR32			(0x880U)
+#define SMMU_GNSR0_SMR33			(0x884U)
+#define SMMU_GNSR0_SMR34			(0x888U)
+#define SMMU_GNSR0_SMR35			(0x88cU)
+#define SMMU_GNSR0_SMR36			(0x890U)
+#define SMMU_GNSR0_SMR37			(0x894U)
+#define SMMU_GNSR0_SMR38			(0x898U)
+#define SMMU_GNSR0_SMR39			(0x89cU)
+#define SMMU_GNSR0_SMR40			(0x8a0U)
+#define SMMU_GNSR0_SMR41			(0x8a4U)
+#define SMMU_GNSR0_SMR42			(0x8a8U)
+#define SMMU_GNSR0_SMR43			(0x8acU)
+#define SMMU_GNSR0_SMR44			(0x8b0U)
+#define SMMU_GNSR0_SMR45			(0x8b4U)
+#define SMMU_GNSR0_SMR46			(0x8b8U)
+#define SMMU_GNSR0_SMR47			(0x8bcU)
+#define SMMU_GNSR0_SMR48			(0x8c0U)
+#define SMMU_GNSR0_SMR49			(0x8c4U)
+#define SMMU_GNSR0_SMR50			(0x8c8U)
+#define SMMU_GNSR0_SMR51			(0x8ccU)
+#define SMMU_GNSR0_SMR52			(0x8d0U)
+#define SMMU_GNSR0_SMR53			(0x8d4U)
+#define SMMU_GNSR0_SMR54			(0x8d8U)
+#define SMMU_GNSR0_SMR55			(0x8dcU)
+#define SMMU_GNSR0_SMR56			(0x8e0U)
+#define SMMU_GNSR0_SMR57			(0x8e4U)
+#define SMMU_GNSR0_SMR58			(0x8e8U)
+#define SMMU_GNSR0_SMR59			(0x8ecU)
+#define SMMU_GNSR0_SMR60			(0x8f0U)
+#define SMMU_GNSR0_SMR61			(0x8f4U)
+#define SMMU_GNSR0_SMR62			(0x8f8U)
+#define SMMU_GNSR0_SMR63			(0x8fcU)
+#define SMMU_GNSR0_SMR64			(0x900U)
+#define SMMU_GNSR0_SMR65			(0x904U)
+#define SMMU_GNSR0_SMR66			(0x908U)
+#define SMMU_GNSR0_SMR67			(0x90cU)
+#define SMMU_GNSR0_SMR68			(0x910U)
+#define SMMU_GNSR0_SMR69			(0x914U)
+#define SMMU_GNSR0_SMR70			(0x918U)
+#define SMMU_GNSR0_SMR71			(0x91cU)
+#define SMMU_GNSR0_SMR72			(0x920U)
+#define SMMU_GNSR0_SMR73			(0x924U)
+#define SMMU_GNSR0_SMR74			(0x928U)
+#define SMMU_GNSR0_SMR75			(0x92cU)
+#define SMMU_GNSR0_SMR76			(0x930U)
+#define SMMU_GNSR0_SMR77			(0x934U)
+#define SMMU_GNSR0_SMR78			(0x938U)
+#define SMMU_GNSR0_SMR79			(0x93cU)
+#define SMMU_GNSR0_SMR80			(0x940U)
+#define SMMU_GNSR0_SMR81			(0x944U)
+#define SMMU_GNSR0_SMR82			(0x948U)
+#define SMMU_GNSR0_SMR83			(0x94cU)
+#define SMMU_GNSR0_SMR84			(0x950U)
+#define SMMU_GNSR0_SMR85			(0x954U)
+#define SMMU_GNSR0_SMR86			(0x958U)
+#define SMMU_GNSR0_SMR87			(0x95cU)
+#define SMMU_GNSR0_SMR88			(0x960U)
+#define SMMU_GNSR0_SMR89			(0x964U)
+#define SMMU_GNSR0_SMR90			(0x968U)
+#define SMMU_GNSR0_SMR91			(0x96cU)
+#define SMMU_GNSR0_SMR92			(0x970U)
+#define SMMU_GNSR0_SMR93			(0x974U)
+#define SMMU_GNSR0_SMR94			(0x978U)
+#define SMMU_GNSR0_SMR95			(0x97cU)
+#define SMMU_GNSR0_SMR96			(0x980U)
+#define SMMU_GNSR0_SMR97			(0x984U)
+#define SMMU_GNSR0_SMR98			(0x988U)
+#define SMMU_GNSR0_SMR99			(0x98cU)
+#define SMMU_GNSR0_SMR100			(0x990U)
+#define SMMU_GNSR0_SMR101			(0x994U)
+#define SMMU_GNSR0_SMR102			(0x998U)
+#define SMMU_GNSR0_SMR103			(0x99cU)
+#define SMMU_GNSR0_SMR104			(0x9a0U)
+#define SMMU_GNSR0_SMR105			(0x9a4U)
+#define SMMU_GNSR0_SMR106			(0x9a8U)
+#define SMMU_GNSR0_SMR107			(0x9acU)
+#define SMMU_GNSR0_SMR108			(0x9b0U)
+#define SMMU_GNSR0_SMR109			(0x9b4U)
+#define SMMU_GNSR0_SMR110			(0x9b8U)
+#define SMMU_GNSR0_SMR111			(0x9bcU)
+#define SMMU_GNSR0_SMR112			(0x9c0U)
+#define SMMU_GNSR0_SMR113			(0x9c4U)
+#define SMMU_GNSR0_SMR114			(0x9c8U)
+#define SMMU_GNSR0_SMR115			(0x9ccU)
+#define SMMU_GNSR0_SMR116			(0x9d0U)
+#define SMMU_GNSR0_SMR117			(0x9d4U)
+#define SMMU_GNSR0_SMR118			(0x9d8U)
+#define SMMU_GNSR0_SMR119			(0x9dcU)
+#define SMMU_GNSR0_SMR120			(0x9e0U)
+#define SMMU_GNSR0_SMR121			(0x9e4U)
+#define SMMU_GNSR0_SMR122			(0x9e8U)
+#define SMMU_GNSR0_SMR123			(0x9ecU)
+#define SMMU_GNSR0_SMR124			(0x9f0U)
+#define SMMU_GNSR0_SMR125			(0x9f4U)
+#define SMMU_GNSR0_SMR126			(0x9f8U)
+#define SMMU_GNSR0_SMR127			(0x9fcU)
+#define SMMU_GNSR0_S2CR0			(0xc00U)
+#define SMMU_GNSR0_S2CRn			(0xc00U)
+#define SMMU_GNSR0_S2CR1			(0xc04U)
+#define SMMU_GNSR0_S2CR2			(0xc08U)
+#define SMMU_GNSR0_S2CR3			(0xc0cU)
+#define SMMU_GNSR0_S2CR4			(0xc10U)
+#define SMMU_GNSR0_S2CR5			(0xc14U)
+#define SMMU_GNSR0_S2CR6			(0xc18U)
+#define SMMU_GNSR0_S2CR7			(0xc1cU)
+#define SMMU_GNSR0_S2CR8			(0xc20U)
+#define SMMU_GNSR0_S2CR9			(0xc24U)
+#define SMMU_GNSR0_S2CR10			(0xc28U)
+#define SMMU_GNSR0_S2CR11			(0xc2cU)
+#define SMMU_GNSR0_S2CR12			(0xc30U)
+#define SMMU_GNSR0_S2CR13			(0xc34U)
+#define SMMU_GNSR0_S2CR14			(0xc38U)
+#define SMMU_GNSR0_S2CR15			(0xc3cU)
+#define SMMU_GNSR0_S2CR16			(0xc40U)
+#define SMMU_GNSR0_S2CR17			(0xc44U)
+#define SMMU_GNSR0_S2CR18			(0xc48U)
+#define SMMU_GNSR0_S2CR19			(0xc4cU)
+#define SMMU_GNSR0_S2CR20			(0xc50U)
+#define SMMU_GNSR0_S2CR21			(0xc54U)
+#define SMMU_GNSR0_S2CR22			(0xc58U)
+#define SMMU_GNSR0_S2CR23			(0xc5cU)
+#define SMMU_GNSR0_S2CR24			(0xc60U)
+#define SMMU_GNSR0_S2CR25			(0xc64U)
+#define SMMU_GNSR0_S2CR26			(0xc68U)
+#define SMMU_GNSR0_S2CR27			(0xc6cU)
+#define SMMU_GNSR0_S2CR28			(0xc70U)
+#define SMMU_GNSR0_S2CR29			(0xc74U)
+#define SMMU_GNSR0_S2CR30			(0xc78U)
+#define SMMU_GNSR0_S2CR31			(0xc7cU)
+#define SMMU_GNSR0_S2CR32			(0xc80U)
+#define SMMU_GNSR0_S2CR33			(0xc84U)
+#define SMMU_GNSR0_S2CR34			(0xc88U)
+#define SMMU_GNSR0_S2CR35			(0xc8cU)
+#define SMMU_GNSR0_S2CR36			(0xc90U)
+#define SMMU_GNSR0_S2CR37			(0xc94U)
+#define SMMU_GNSR0_S2CR38			(0xc98U)
+#define SMMU_GNSR0_S2CR39			(0xc9cU)
+#define SMMU_GNSR0_S2CR40			(0xca0U)
+#define SMMU_GNSR0_S2CR41			(0xca4U)
+#define SMMU_GNSR0_S2CR42			(0xca8U)
+#define SMMU_GNSR0_S2CR43			(0xcacU)
+#define SMMU_GNSR0_S2CR44			(0xcb0U)
+#define SMMU_GNSR0_S2CR45			(0xcb4U)
+#define SMMU_GNSR0_S2CR46			(0xcb8U)
+#define SMMU_GNSR0_S2CR47			(0xcbcU)
+#define SMMU_GNSR0_S2CR48			(0xcc0U)
+#define SMMU_GNSR0_S2CR49			(0xcc4U)
+#define SMMU_GNSR0_S2CR50			(0xcc8U)
+#define SMMU_GNSR0_S2CR51			(0xcccU)
+#define SMMU_GNSR0_S2CR52			(0xcd0U)
+#define SMMU_GNSR0_S2CR53			(0xcd4U)
+#define SMMU_GNSR0_S2CR54			(0xcd8U)
+#define SMMU_GNSR0_S2CR55			(0xcdcU)
+#define SMMU_GNSR0_S2CR56			(0xce0U)
+#define SMMU_GNSR0_S2CR57			(0xce4U)
+#define SMMU_GNSR0_S2CR58			(0xce8U)
+#define SMMU_GNSR0_S2CR59			(0xcecU)
+#define SMMU_GNSR0_S2CR60			(0xcf0U)
+#define SMMU_GNSR0_S2CR61			(0xcf4U)
+#define SMMU_GNSR0_S2CR62			(0xcf8U)
+#define SMMU_GNSR0_S2CR63			(0xcfcU)
+#define SMMU_GNSR0_S2CR64			(0xd00U)
+#define SMMU_GNSR0_S2CR65			(0xd04U)
+#define SMMU_GNSR0_S2CR66			(0xd08U)
+#define SMMU_GNSR0_S2CR67			(0xd0cU)
+#define SMMU_GNSR0_S2CR68			(0xd10U)
+#define SMMU_GNSR0_S2CR69			(0xd14U)
+#define SMMU_GNSR0_S2CR70			(0xd18U)
+#define SMMU_GNSR0_S2CR71			(0xd1cU)
+#define SMMU_GNSR0_S2CR72			(0xd20U)
+#define SMMU_GNSR0_S2CR73			(0xd24U)
+#define SMMU_GNSR0_S2CR74			(0xd28U)
+#define SMMU_GNSR0_S2CR75			(0xd2cU)
+#define SMMU_GNSR0_S2CR76			(0xd30U)
+#define SMMU_GNSR0_S2CR77			(0xd34U)
+#define SMMU_GNSR0_S2CR78			(0xd38U)
+#define SMMU_GNSR0_S2CR79			(0xd3cU)
+#define SMMU_GNSR0_S2CR80			(0xd40U)
+#define SMMU_GNSR0_S2CR81			(0xd44U)
+#define SMMU_GNSR0_S2CR82			(0xd48U)
+#define SMMU_GNSR0_S2CR83			(0xd4cU)
+#define SMMU_GNSR0_S2CR84			(0xd50U)
+#define SMMU_GNSR0_S2CR85			(0xd54U)
+#define SMMU_GNSR0_S2CR86			(0xd58U)
+#define SMMU_GNSR0_S2CR87			(0xd5cU)
+#define SMMU_GNSR0_S2CR88			(0xd60U)
+#define SMMU_GNSR0_S2CR89			(0xd64U)
+#define SMMU_GNSR0_S2CR90			(0xd68U)
+#define SMMU_GNSR0_S2CR91			(0xd6cU)
+#define SMMU_GNSR0_S2CR92			(0xd70U)
+#define SMMU_GNSR0_S2CR93			(0xd74U)
+#define SMMU_GNSR0_S2CR94			(0xd78U)
+#define SMMU_GNSR0_S2CR95			(0xd7cU)
+#define SMMU_GNSR0_S2CR96			(0xd80U)
+#define SMMU_GNSR0_S2CR97			(0xd84U)
+#define SMMU_GNSR0_S2CR98			(0xd88U)
+#define SMMU_GNSR0_S2CR99			(0xd8cU)
+#define SMMU_GNSR0_S2CR100			(0xd90U)
+#define SMMU_GNSR0_S2CR101			(0xd94U)
+#define SMMU_GNSR0_S2CR102			(0xd98U)
+#define SMMU_GNSR0_S2CR103			(0xd9cU)
+#define SMMU_GNSR0_S2CR104			(0xda0U)
+#define SMMU_GNSR0_S2CR105			(0xda4U)
+#define SMMU_GNSR0_S2CR106			(0xda8U)
+#define SMMU_GNSR0_S2CR107			(0xdacU)
+#define SMMU_GNSR0_S2CR108			(0xdb0U)
+#define SMMU_GNSR0_S2CR109			(0xdb4U)
+#define SMMU_GNSR0_S2CR110			(0xdb8U)
+#define SMMU_GNSR0_S2CR111			(0xdbcU)
+#define SMMU_GNSR0_S2CR112			(0xdc0U)
+#define SMMU_GNSR0_S2CR113			(0xdc4U)
+#define SMMU_GNSR0_S2CR114			(0xdc8U)
+#define SMMU_GNSR0_S2CR115			(0xdccU)
+#define SMMU_GNSR0_S2CR116			(0xdd0U)
+#define SMMU_GNSR0_S2CR117			(0xdd4U)
+#define SMMU_GNSR0_S2CR118			(0xdd8U)
+#define SMMU_GNSR0_S2CR119			(0xddcU)
+#define SMMU_GNSR0_S2CR120			(0xde0U)
+#define SMMU_GNSR0_S2CR121			(0xde4U)
+#define SMMU_GNSR0_S2CR122			(0xde8U)
+#define SMMU_GNSR0_S2CR123			(0xdecU)
+#define SMMU_GNSR0_S2CR124			(0xdf0U)
+#define SMMU_GNSR0_S2CR125			(0xdf4U)
+#define SMMU_GNSR0_S2CR126			(0xdf8U)
+#define SMMU_GNSR0_S2CR127			(0xdfcU)
+#define SMMU_GNSR0_PIDR0			(0xfe0U)
+#define SMMU_GNSR0_PIDR1			(0xfe4U)
+#define SMMU_GNSR0_PIDR2			(0xfe8U)
+#define SMMU_GNSR0_PIDR3			(0xfecU)
+#define SMMU_GNSR0_PIDR4			(0xfd0U)
+#define SMMU_GNSR0_PIDR5			(0xfd4U)
+#define SMMU_GNSR0_PIDR6			(0xfd8U)
+#define SMMU_GNSR0_PIDR7			(0xfdcU)
+#define SMMU_GNSR0_CIDR0			(0xff0U)
+#define SMMU_GNSR0_CIDR1			(0xff4U)
+#define SMMU_GNSR0_CIDR2			(0xff8U)
+#define SMMU_GNSR0_CIDR3			(0xffcU)
+#define SMMU_GNSR1_CBAR0			(0x0U)
+#define SMMU_GNSR1_CBARn			(0x0U)
+#define SMMU_GNSR1_CBFRSYNRA0			(0x400U)
+#define SMMU_GNSR1_CBA2R0			(0x800U)
+#define SMMU_GNSR1_CBAR1			(0x4U)
+#define SMMU_GNSR1_CBFRSYNRA1			(0x404U)
+#define SMMU_GNSR1_CBA2R1			(0x804U)
+#define SMMU_GNSR1_CBAR2			(0x8U)
+#define SMMU_GNSR1_CBFRSYNRA2			(0x408U)
+#define SMMU_GNSR1_CBA2R2			(0x808U)
+#define SMMU_GNSR1_CBAR3			(0xcU)
+#define SMMU_GNSR1_CBFRSYNRA3			(0x40cU)
+#define SMMU_GNSR1_CBA2R3			(0x80cU)
+#define SMMU_GNSR1_CBAR4			(0x10U)
+#define SMMU_GNSR1_CBFRSYNRA4			(0x410U)
+#define SMMU_GNSR1_CBA2R4			(0x810U)
+#define SMMU_GNSR1_CBAR5			(0x14U)
+#define SMMU_GNSR1_CBFRSYNRA5			(0x414U)
+#define SMMU_GNSR1_CBA2R5			(0x814U)
+#define SMMU_GNSR1_CBAR6			(0x18U)
+#define SMMU_GNSR1_CBFRSYNRA6			(0x418U)
+#define SMMU_GNSR1_CBA2R6			(0x818U)
+#define SMMU_GNSR1_CBAR7			(0x1cU)
+#define SMMU_GNSR1_CBFRSYNRA7			(0x41cU)
+#define SMMU_GNSR1_CBA2R7			(0x81cU)
+#define SMMU_GNSR1_CBAR8			(0x20U)
+#define SMMU_GNSR1_CBFRSYNRA8			(0x420U)
+#define SMMU_GNSR1_CBA2R8			(0x820U)
+#define SMMU_GNSR1_CBAR9			(0x24U)
+#define SMMU_GNSR1_CBFRSYNRA9			(0x424U)
+#define SMMU_GNSR1_CBA2R9			(0x824U)
+#define SMMU_GNSR1_CBAR10			(0x28U)
+#define SMMU_GNSR1_CBFRSYNRA10			(0x428U)
+#define SMMU_GNSR1_CBA2R10			(0x828U)
+#define SMMU_GNSR1_CBAR11			(0x2cU)
+#define SMMU_GNSR1_CBFRSYNRA11			(0x42cU)
+#define SMMU_GNSR1_CBA2R11			(0x82cU)
+#define SMMU_GNSR1_CBAR12			(0x30U)
+#define SMMU_GNSR1_CBFRSYNRA12			(0x430U)
+#define SMMU_GNSR1_CBA2R12			(0x830U)
+#define SMMU_GNSR1_CBAR13			(0x34U)
+#define SMMU_GNSR1_CBFRSYNRA13			(0x434U)
+#define SMMU_GNSR1_CBA2R13			(0x834U)
+#define SMMU_GNSR1_CBAR14			(0x38U)
+#define SMMU_GNSR1_CBFRSYNRA14			(0x438U)
+#define SMMU_GNSR1_CBA2R14			(0x838U)
+#define SMMU_GNSR1_CBAR15			(0x3cU)
+#define SMMU_GNSR1_CBFRSYNRA15			(0x43cU)
+#define SMMU_GNSR1_CBA2R15			(0x83cU)
+#define SMMU_GNSR1_CBAR16			(0x40U)
+#define SMMU_GNSR1_CBFRSYNRA16			(0x440U)
+#define SMMU_GNSR1_CBA2R16			(0x840U)
+#define SMMU_GNSR1_CBAR17			(0x44U)
+#define SMMU_GNSR1_CBFRSYNRA17			(0x444U)
+#define SMMU_GNSR1_CBA2R17			(0x844U)
+#define SMMU_GNSR1_CBAR18			(0x48U)
+#define SMMU_GNSR1_CBFRSYNRA18			(0x448U)
+#define SMMU_GNSR1_CBA2R18			(0x848U)
+#define SMMU_GNSR1_CBAR19			(0x4cU)
+#define SMMU_GNSR1_CBFRSYNRA19			(0x44cU)
+#define SMMU_GNSR1_CBA2R19			(0x84cU)
+#define SMMU_GNSR1_CBAR20			(0x50U)
+#define SMMU_GNSR1_CBFRSYNRA20			(0x450U)
+#define SMMU_GNSR1_CBA2R20			(0x850U)
+#define SMMU_GNSR1_CBAR21			(0x54U)
+#define SMMU_GNSR1_CBFRSYNRA21			(0x454U)
+#define SMMU_GNSR1_CBA2R21			(0x854U)
+#define SMMU_GNSR1_CBAR22			(0x58U)
+#define SMMU_GNSR1_CBFRSYNRA22			(0x458U)
+#define SMMU_GNSR1_CBA2R22			(0x858U)
+#define SMMU_GNSR1_CBAR23			(0x5cU)
+#define SMMU_GNSR1_CBFRSYNRA23			(0x45cU)
+#define SMMU_GNSR1_CBA2R23			(0x85cU)
+#define SMMU_GNSR1_CBAR24			(0x60U)
+#define SMMU_GNSR1_CBFRSYNRA24			(0x460U)
+#define SMMU_GNSR1_CBA2R24			(0x860U)
+#define SMMU_GNSR1_CBAR25			(0x64U)
+#define SMMU_GNSR1_CBFRSYNRA25			(0x464U)
+#define SMMU_GNSR1_CBA2R25			(0x864U)
+#define SMMU_GNSR1_CBAR26			(0x68U)
+#define SMMU_GNSR1_CBFRSYNRA26			(0x468U)
+#define SMMU_GNSR1_CBA2R26			(0x868U)
+#define SMMU_GNSR1_CBAR27			(0x6cU)
+#define SMMU_GNSR1_CBFRSYNRA27			(0x46cU)
+#define SMMU_GNSR1_CBA2R27			(0x86cU)
+#define SMMU_GNSR1_CBAR28			(0x70U)
+#define SMMU_GNSR1_CBFRSYNRA28			(0x470U)
+#define SMMU_GNSR1_CBA2R28			(0x870U)
+#define SMMU_GNSR1_CBAR29			(0x74U)
+#define SMMU_GNSR1_CBFRSYNRA29			(0x474U)
+#define SMMU_GNSR1_CBA2R29			(0x874U)
+#define SMMU_GNSR1_CBAR30			(0x78U)
+#define SMMU_GNSR1_CBFRSYNRA30			(0x478U)
+#define SMMU_GNSR1_CBA2R30			(0x878U)
+#define SMMU_GNSR1_CBAR31			(0x7cU)
+#define SMMU_GNSR1_CBFRSYNRA31			(0x47cU)
+#define SMMU_GNSR1_CBA2R31			(0x87cU)
+#define SMMU_GNSR1_CBAR32			(0x80U)
+#define SMMU_GNSR1_CBFRSYNRA32			(0x480U)
+#define SMMU_GNSR1_CBA2R32			(0x880U)
+#define SMMU_GNSR1_CBAR33			(0x84U)
+#define SMMU_GNSR1_CBFRSYNRA33			(0x484U)
+#define SMMU_GNSR1_CBA2R33			(0x884U)
+#define SMMU_GNSR1_CBAR34			(0x88U)
+#define SMMU_GNSR1_CBFRSYNRA34			(0x488U)
+#define SMMU_GNSR1_CBA2R34			(0x888U)
+#define SMMU_GNSR1_CBAR35			(0x8cU)
+#define SMMU_GNSR1_CBFRSYNRA35			(0x48cU)
+#define SMMU_GNSR1_CBA2R35			(0x88cU)
+#define SMMU_GNSR1_CBAR36			(0x90U)
+#define SMMU_GNSR1_CBFRSYNRA36			(0x490U)
+#define SMMU_GNSR1_CBA2R36			(0x890U)
+#define SMMU_GNSR1_CBAR37			(0x94U)
+#define SMMU_GNSR1_CBFRSYNRA37			(0x494U)
+#define SMMU_GNSR1_CBA2R37			(0x894U)
+#define SMMU_GNSR1_CBAR38			(0x98U)
+#define SMMU_GNSR1_CBFRSYNRA38			(0x498U)
+#define SMMU_GNSR1_CBA2R38			(0x898U)
+#define SMMU_GNSR1_CBAR39			(0x9cU)
+#define SMMU_GNSR1_CBFRSYNRA39			(0x49cU)
+#define SMMU_GNSR1_CBA2R39			(0x89cU)
+#define SMMU_GNSR1_CBAR40			(0xa0U)
+#define SMMU_GNSR1_CBFRSYNRA40			(0x4a0U)
+#define SMMU_GNSR1_CBA2R40			(0x8a0U)
+#define SMMU_GNSR1_CBAR41			(0xa4U)
+#define SMMU_GNSR1_CBFRSYNRA41			(0x4a4U)
+#define SMMU_GNSR1_CBA2R41			(0x8a4U)
+#define SMMU_GNSR1_CBAR42			(0xa8U)
+#define SMMU_GNSR1_CBFRSYNRA42			(0x4a8U)
+#define SMMU_GNSR1_CBA2R42			(0x8a8U)
+#define SMMU_GNSR1_CBAR43			(0xacU)
+#define SMMU_GNSR1_CBFRSYNRA43			(0x4acU)
+#define SMMU_GNSR1_CBA2R43			(0x8acU)
+#define SMMU_GNSR1_CBAR44			(0xb0U)
+#define SMMU_GNSR1_CBFRSYNRA44			(0x4b0U)
+#define SMMU_GNSR1_CBA2R44			(0x8b0U)
+#define SMMU_GNSR1_CBAR45			(0xb4U)
+#define SMMU_GNSR1_CBFRSYNRA45			(0x4b4U)
+#define SMMU_GNSR1_CBA2R45			(0x8b4U)
+#define SMMU_GNSR1_CBAR46			(0xb8U)
+#define SMMU_GNSR1_CBFRSYNRA46			(0x4b8U)
+#define SMMU_GNSR1_CBA2R46			(0x8b8U)
+#define SMMU_GNSR1_CBAR47			(0xbcU)
+#define SMMU_GNSR1_CBFRSYNRA47			(0x4bcU)
+#define SMMU_GNSR1_CBA2R47			(0x8bcU)
+#define SMMU_GNSR1_CBAR48			(0xc0U)
+#define SMMU_GNSR1_CBFRSYNRA48			(0x4c0U)
+#define SMMU_GNSR1_CBA2R48			(0x8c0U)
+#define SMMU_GNSR1_CBAR49			(0xc4U)
+#define SMMU_GNSR1_CBFRSYNRA49			(0x4c4U)
+#define SMMU_GNSR1_CBA2R49			(0x8c4U)
+#define SMMU_GNSR1_CBAR50			(0xc8U)
+#define SMMU_GNSR1_CBFRSYNRA50			(0x4c8U)
+#define SMMU_GNSR1_CBA2R50			(0x8c8U)
+#define SMMU_GNSR1_CBAR51			(0xccU)
+#define SMMU_GNSR1_CBFRSYNRA51			(0x4ccU)
+#define SMMU_GNSR1_CBA2R51			(0x8ccU)
+#define SMMU_GNSR1_CBAR52			(0xd0U)
+#define SMMU_GNSR1_CBFRSYNRA52			(0x4d0U)
+#define SMMU_GNSR1_CBA2R52			(0x8d0U)
+#define SMMU_GNSR1_CBAR53			(0xd4U)
+#define SMMU_GNSR1_CBFRSYNRA53			(0x4d4U)
+#define SMMU_GNSR1_CBA2R53			(0x8d4U)
+#define SMMU_GNSR1_CBAR54			(0xd8U)
+#define SMMU_GNSR1_CBFRSYNRA54			(0x4d8U)
+#define SMMU_GNSR1_CBA2R54			(0x8d8U)
+#define SMMU_GNSR1_CBAR55			(0xdcU)
+#define SMMU_GNSR1_CBFRSYNRA55			(0x4dcU)
+#define SMMU_GNSR1_CBA2R55			(0x8dcU)
+#define SMMU_GNSR1_CBAR56			(0xe0U)
+#define SMMU_GNSR1_CBFRSYNRA56			(0x4e0U)
+#define SMMU_GNSR1_CBA2R56			(0x8e0U)
+#define SMMU_GNSR1_CBAR57			(0xe4U)
+#define SMMU_GNSR1_CBFRSYNRA57			(0x4e4U)
+#define SMMU_GNSR1_CBA2R57			(0x8e4U)
+#define SMMU_GNSR1_CBAR58			(0xe8U)
+#define SMMU_GNSR1_CBFRSYNRA58			(0x4e8U)
+#define SMMU_GNSR1_CBA2R58			(0x8e8U)
+#define SMMU_GNSR1_CBAR59			(0xecU)
+#define SMMU_GNSR1_CBFRSYNRA59			(0x4ecU)
+#define SMMU_GNSR1_CBA2R59			(0x8ecU)
+#define SMMU_GNSR1_CBAR60			(0xf0U)
+#define SMMU_GNSR1_CBFRSYNRA60			(0x4f0U)
+#define SMMU_GNSR1_CBA2R60			(0x8f0U)
+#define SMMU_GNSR1_CBAR61			(0xf4U)
+#define SMMU_GNSR1_CBFRSYNRA61			(0x4f4U)
+#define SMMU_GNSR1_CBA2R61			(0x8f4U)
+#define SMMU_GNSR1_CBAR62			(0xf8U)
+#define SMMU_GNSR1_CBFRSYNRA62			(0x4f8U)
+#define SMMU_GNSR1_CBA2R62			(0x8f8U)
+#define SMMU_GNSR1_CBAR63			(0xfcU)
+#define SMMU_GNSR1_CBFRSYNRA63			(0x4fcU)
+#define SMMU_GNSR1_CBA2R63			(0x8fcU)
+
+/*******************************************************************************
+ * SMMU Global Secure Aux. Configuration Register
+ ******************************************************************************/
+#define SMMU_GSR0_SECURE_ACR			0x10U
+#define SMMU_GNSR_ACR				(SMMU_GSR0_SECURE_ACR + 0x400U)
+#define SMMU_GSR0_PGSIZE_SHIFT			16U
+#define SMMU_GSR0_PGSIZE_4K			(0U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_GSR0_PGSIZE_64K			(1U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT		(1U << 26)
+
+/*******************************************************************************
+ * SMMU Context Bank Aux. Control Register
+ ******************************************************************************/
+#define SMMU_CBn_ACTLR_CPRE_BIT			(1U << 1)
+
+/*******************************************************************************
+ * SMMU configuration constants
+ ******************************************************************************/
+#define ID1_PAGESIZE				(1U << 31)
+#define ID1_NUMPAGENDXB_SHIFT			28U
+#define ID1_NUMPAGENDXB_MASK			7U
+#define ID1_NUMS2CB_SHIFT			16U
+#define ID1_NUMS2CB_MASK			0xffU
+#define ID1_NUMCB_SHIFT				0U
+#define ID1_NUMCB_MASK				0xffU
+#define PGSHIFT					16U
+#define CB_SIZE					0x800000U
+
+typedef struct smmu_regs {
+	uint32_t reg;
+	uint32_t val;
+} smmu_regs_t;
+
+#define mc_make_sid_override_cfg(name) \
+	{ \
+		.reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \
+		.val = 0x00000000U, \
+	}
+
+#define mc_make_sid_security_cfg(name) \
+	{ \
+		.reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_ ## name), \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_gnsr0_sec_cfg(name) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_ ## name, \
+		.val = 0x00000000U, \
+	}
+
+/*
+ * On the ARM SMMU, the offset used to access the secure aliases of the
+ * non-secure registers is 0x400, so add it to the register address.
+ */
+#define smmu_make_gnsr0_nsec_cfg(name) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + 0x400U + SMMU_GNSR0_ ## name, \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_gnsr0_smr_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_SMR ## n, \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_gnsr0_s2cr_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_S2CR ## n, \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_gnsr1_cbar_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_gnsr1_cba2r_cfg(n) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \
+		.val = 0x00000000U, \
+	}
+
+#define make_smmu_cb_cfg(name, n) \
+	{ \
+		.reg = TEGRA_SMMU0_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \
+			+ SMMU_CBn_ ## name, \
+		.val = 0x00000000U, \
+	}
+
+#define smmu_make_smrg_group(n)	\
+	smmu_make_gnsr0_smr_cfg(n),	\
+	smmu_make_gnsr0_s2cr_cfg(n),	\
+	smmu_make_gnsr1_cbar_cfg(n),	\
+	smmu_make_gnsr1_cba2r_cfg(n)	/* don't put "," here. */
+
+#define smmu_make_cb_group(n)		\
+	make_smmu_cb_cfg(SCTLR, n),	\
+	make_smmu_cb_cfg(TCR2, n),	\
+	make_smmu_cb_cfg(TTBR0_LO, n),	\
+	make_smmu_cb_cfg(TTBR0_HI, n),	\
+	make_smmu_cb_cfg(TCR, n),	\
+	make_smmu_cb_cfg(PRRR_MAIR0, n),\
+	make_smmu_cb_cfg(FSR, n),	\
+	make_smmu_cb_cfg(FAR_LO, n),	\
+	make_smmu_cb_cfg(FAR_HI, n),	\
+	make_smmu_cb_cfg(FSYNR0, n)	/* don't put "," here. */
+
+#define smmu_bypass_cfg \
+	{ \
+		.reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \
+		.val = 0x00000000U, \
+	}
+
+#define _START_OF_TABLE_ \
+	{ \
+		.reg = 0xCAFE05C7U, \
+		.val = 0x00000000U, \
+	}
+
+#define _END_OF_TABLE_ \
+	{ \
+		.reg = 0xFFFFFFFFU, \
+		.val = 0xFFFFFFFFU, \
+	}
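+
+/*
+ * Illustrative sketch only (the array name is a placeholder): a SoC port
+ * builds its SMMU context table from the helpers above and returns it from
+ * plat_get_smmu_ctx(), e.g.
+ *
+ *	static smmu_regs_t plat_smmu_context[] = {
+ *		_START_OF_TABLE_,
+ *		mc_make_sid_override_cfg(HDAR),
+ *		mc_make_sid_security_cfg(HDAR),
+ *		smmu_make_gnsr0_sec_cfg(CR0),
+ *		smmu_make_smrg_group(0),
+ *		smmu_make_cb_group(0),
+ *		smmu_bypass_cfg,
+ *		_END_OF_TABLE_
+ *	};
+ */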
+
+
+void tegra_smmu_init(void);
+void tegra_smmu_save_context(uint64_t smmu_ctx_addr);
+smmu_regs_t *plat_get_smmu_ctx(void);
+
+#endif /* __SMMU_H */
diff --git a/plat/nvidia/tegra/include/plat_macros.S b/plat/nvidia/tegra/include/plat_macros.S
new file mode 100644
index 0000000..f54e168
--- /dev/null
+++ b/plat/nvidia/tegra/include/plat_macros.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <gic_v2.h>
+#include <tegra_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+/* ---------------------------------------------
+ * The below macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL31.
+ * ---------------------------------------------
+ */
+.macro plat_crash_print_regs
+	mov_imm	x16, TEGRA_GICC_BASE
+
+	/* gicc base address is now in x16 */
+	adr	x6, gicc_regs	/* Load the gicc reg list to x6 */
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x16, #GICC_HPPIR]
+	ldr	w9, [x16, #GICC_AHPPIR]
+	ldr	w10, [x16, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+	/* Print the GICD_ISPENDR regs */
+	mov_imm	x16, TEGRA_GICD_BASE
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+2:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	1f
+	bl	asm_print_hex
+	adr	x4, spacer
+	bl	asm_print_str
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+	adr	x4, newline
+	bl	asm_print_str
+	b	2b
+1:
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h
new file mode 100644
index 0000000..4894442
--- /dev/null
+++ b/plat/nvidia/tegra/include/platform_def.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <tegra_def.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#ifdef IMAGE_BL31
+#define PLATFORM_STACK_SIZE 		U(0x400)
+#endif
+
+#define TEGRA_PRIMARY_CPU		U(0x0)
+
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT * \
+					 PLATFORM_MAX_CPUS_PER_CLUSTER)
+#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CORE_COUNT + \
+					 PLATFORM_CLUSTER_COUNT + 1)
+
+/*******************************************************************************
+ * Platform console related constants
+ ******************************************************************************/
+#define TEGRA_CONSOLE_BAUDRATE		U(115200)
+#define TEGRA_BOOT_UART_CLK_IN_HZ	U(408000000)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* Size of trusted dram */
+#define TZDRAM_SIZE			U(0x00400000)
+#define TZDRAM_END			(TZDRAM_BASE + TZDRAM_SIZE)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#define BL31_SIZE			U(0x40000)
+#define BL31_BASE			TZDRAM_BASE
+#define BL31_LIMIT			(TZDRAM_BASE + BL31_SIZE - 1)
+#define BL32_BASE			(TZDRAM_BASE + BL31_SIZE)
+#define BL32_LIMIT			TZDRAM_END
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE	(ULL(1) << 35)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(ULL(1) << 35)
+
+/*******************************************************************************
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(U(1) << CACHE_WRITEBACK_SHIFT)
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/nvidia/tegra/include/t132/tegra_def.h b/plat/nvidia/tegra/include/t132/tegra_def.h
new file mode 100644
index 0000000..ae00fb5
--- /dev/null
+++ b/plat/nvidia/tegra/include/t132/tegra_def.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call as the `state-id` field in the 'power state' parameter.
+ ******************************************************************************/
+#define PSTATE_ID_SOC_POWERDN	U(0xD)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE		U(1)
+#define PLAT_MAX_OFF_STATE		(PSTATE_ID_SOC_POWERDN + U(1))
+
+/*******************************************************************************
+ * GIC memory map
+ ******************************************************************************/
+#define TEGRA_GICD_BASE			U(0x50041000)
+#define TEGRA_GICC_BASE			U(0x50042000)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE		U(0x60005010)
+#define TEGRA_TMRUS_SIZE		U(0x1000)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE		U(0x60006000)
+#define TEGRA_GPU_RESET_REG_OFFSET	U(0x28C)
+#define  GPU_RESET_BIT			(U(1) << 24)
+
+/*******************************************************************************
+ * Tegra Flow Controller constants
+ ******************************************************************************/
+#define TEGRA_FLOWCTRL_BASE		U(0x60007000)
+
+/*******************************************************************************
+ * Tegra Secure Boot Controller constants
+ ******************************************************************************/
+#define TEGRA_SB_BASE			U(0x6000C200)
+
+/*******************************************************************************
+ * Tegra Exception Vectors constants
+ ******************************************************************************/
+#define TEGRA_EVP_BASE			U(0x6000F000)
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			U(0x70000000)
+#define  HARDWARE_REVISION_OFFSET	U(0x804)
+
+/*******************************************************************************
+ * Tegra UART controller base addresses
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE		U(0x70006000)
+#define TEGRA_UARTB_BASE		U(0x70006040)
+#define TEGRA_UARTC_BASE		U(0x70006200)
+#define TEGRA_UARTD_BASE		U(0x70006300)
+#define TEGRA_UARTE_BASE		U(0x70006400)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE			U(0x7000E400)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_BASE			U(0x70019000)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0		U(0x70)
+#define MC_SECURITY_CFG1_0		U(0x74)
+#define MC_SECURITY_CFG3_0		U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI	U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO	U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB	U(0x64c)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE		U(0x7C010000)
+#define TEGRA_TZRAM_SIZE		U(0x10000)
+
+#endif /* __TEGRA_DEF_H__ */
diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h
new file mode 100644
index 0000000..d033147
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra_def.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MCE apertures used by the ARI interface
+ *
+ * Aperture 0 - Cpu0 (ARM Cortex-A57)
+ * Aperture 1 - Cpu1 (ARM Cortex-A57)
+ * Aperture 2 - Cpu2 (ARM Cortex-A57)
+ * Aperture 3 - Cpu3 (ARM Cortex-A57)
+ * Aperture 4 - Cpu4 (Denver15)
+ * Aperture 5 - Cpu5 (Denver15)
+ ******************************************************************************/
+#define MCE_ARI_APERTURE_0_OFFSET	U(0x0)
+#define MCE_ARI_APERTURE_1_OFFSET	U(0x10000)
+#define MCE_ARI_APERTURE_2_OFFSET	U(0x20000)
+#define MCE_ARI_APERTURE_3_OFFSET	U(0x30000)
+#define MCE_ARI_APERTURE_4_OFFSET	U(0x40000)
+#define MCE_ARI_APERTURE_5_OFFSET	U(0x50000)
+#define MCE_ARI_APERTURE_OFFSET_MAX	MCE_ARI_APERTURE_5_OFFSET
+
+/* number of apertures */
+#define MCE_ARI_APERTURES_MAX		U(6)
+
+/* each ARI aperture is 64KB */
+#define MCE_ARI_APERTURE_SIZE		U(0x10000)
+
+/*******************************************************************************
+ * CPU core id macros for the MCE_ONLINE_CORE ARI
+ ******************************************************************************/
+#define MCE_CORE_ID_MAX			U(8)
+#define MCE_CORE_ID_MASK		U(0x7)
+
+/*******************************************************************************
+ * These values are used by the PSCI implementation during the `CPU_SUSPEND`
+ * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state'
+ * parameter.
+ ******************************************************************************/
+#define PSTATE_ID_CORE_IDLE		U(6)
+#define PSTATE_ID_CORE_POWERDN		U(7)
+#define PSTATE_ID_SOC_POWERDN		U(2)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE		U(1)
+#define PLAT_MAX_OFF_STATE		U(8)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA186_TOP_WDT_IRQ		U(49)
+#define TEGRA186_AON_WDT_IRQ		U(50)
+
+#define TEGRA186_SEC_IRQ_TARGET_MASK	U(0xF3) /* 4 A57 - 2 Denver */
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			U(0x00100000)
+#define  HARDWARE_REVISION_OFFSET	U(0x4)
+
+#define  MISCREG_PFCFG			U(0x200C)
+
+/*******************************************************************************
+ * Tegra TSA Controller constants
+ ******************************************************************************/
+#define TEGRA_TSA_BASE			U(0x02400000)
+
+/*******************************************************************************
+ * TSA configuration registers
+ ******************************************************************************/
+#define TSA_CONFIG_STATIC0_CSW_SESWR			U(0x4010)
+#define  TSA_CONFIG_STATIC0_CSW_SESWR_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_ETRW			U(0x4038)
+#define  TSA_CONFIG_STATIC0_CSW_ETRW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB			U(0x5010)
+#define  TSA_CONFIG_STATIC0_CSW_SDMMCWAB_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AXISW			U(0x7008)
+#define  TSA_CONFIG_STATIC0_CSW_AXISW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_HDAW			U(0xA008)
+#define  TSA_CONFIG_STATIC0_CSW_HDAW_RESET		U(0x100)
+#define TSA_CONFIG_STATIC0_CSW_AONDMAW			U(0xB018)
+#define  TSA_CONFIG_STATIC0_CSW_AONDMAW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SCEDMAW			U(0xD018)
+#define  TSA_CONFIG_STATIC0_CSW_SCEDMAW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW			U(0xD028)
+#define  TSA_CONFIG_STATIC0_CSW_BPMPDMAW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_APEDMAW			U(0x12018)
+#define  TSA_CONFIG_STATIC0_CSW_APEDMAW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_UFSHCW			U(0x13008)
+#define  TSA_CONFIG_STATIC0_CSW_UFSHCW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AFIW			U(0x13018)
+#define  TSA_CONFIG_STATIC0_CSW_AFIW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SATAW			U(0x13028)
+#define  TSA_CONFIG_STATIC0_CSW_SATAW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_EQOSW			U(0x13038)
+#define  TSA_CONFIG_STATIC0_CSW_EQOSW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW		U(0x15008)
+#define  TSA_CONFIG_STATIC0_CSW_XUSB_DEVW_RESET		U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW		U(0x15018)
+#define  TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET	U(0x1100)
+
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK		(U(0x3) << 11)
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU		(U(0) << 11)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_STREAMID_BASE		U(0x02C00000)
+#define TEGRA_MC_BASE			U(0x02C10000)
+
+/* General Security Carveout register macros */
+#define MC_GSC_CONFIG_REGS_SIZE		U(0x40)
+#define MC_GSC_LOCK_CFG_SETTINGS_BIT	(U(1) << 1)
+#define MC_GSC_ENABLE_TZ_LOCK_BIT	(U(1) << 0)
+#define MC_GSC_SIZE_RANGE_4KB_SHIFT	U(27)
+#define MC_GSC_BASE_LO_SHIFT		U(12)
+#define MC_GSC_BASE_LO_MASK		U(0xFFFFF)
+#define MC_GSC_BASE_HI_SHIFT		U(0)
+#define MC_GSC_BASE_HI_MASK		U(3)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0		U(0x70)
+#define MC_SECURITY_CFG1_0		U(0x74)
+#define MC_SECURITY_CFG3_0		U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI	U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO	U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB	U(0x64C)
+
+/*
+ * Carveout (MC_SECURITY_CARVEOUT24) registers used to clear the
+ * non-overlapping Video memory region
+ */
+#define MC_VIDEO_PROTECT_CLEAR_CFG	U(0x25A0)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_LO	U(0x25A4)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_HI	U(0x25A8)
+#define MC_VIDEO_PROTECT_CLEAR_SIZE	U(0x25AC)
+#define MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0	U(0x25B0)
+
+/* TZRAM carveout (MC_SECURITY_CARVEOUT11) configuration registers */
+#define MC_TZRAM_CARVEOUT_CFG		U(0x2190)
+#define MC_TZRAM_BASE_LO		U(0x2194)
+#define MC_TZRAM_BASE_HI		U(0x2198)
+#define MC_TZRAM_SIZE			U(0x219C)
+#define MC_TZRAM_CLIENT_ACCESS_CFG0	U(0x21A0)
+
+/*******************************************************************************
+ * Tegra UART Controller constants
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE		U(0x03100000)
+#define TEGRA_UARTB_BASE		U(0x03110000)
+#define TEGRA_UARTC_BASE		U(0x0C280000)
+#define TEGRA_UARTD_BASE		U(0x03130000)
+#define TEGRA_UARTE_BASE		U(0x03140000)
+#define TEGRA_UARTF_BASE		U(0x03150000)
+#define TEGRA_UARTG_BASE		U(0x0C290000)
+
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE			U(0x03820000)
+#define  OPT_SUBREVISION		U(0x248)
+#define  SUBREVISION_MASK		U(0xFF)
+
+/*******************************************************************************
+ * GICv2 & interrupt handling related constants
+ ******************************************************************************/
+#define TEGRA_GICD_BASE			U(0x03881000)
+#define TEGRA_GICC_BASE			U(0x03882000)
+
+/*******************************************************************************
+ * Security Engine related constants
+ ******************************************************************************/
+#define TEGRA_SE0_BASE			U(0x03AC0000)
+#define  SE_MUTEX_WATCHDOG_NS_LIMIT	U(0x6C)
+#define TEGRA_PKA1_BASE			U(0x03AD0000)
+#define  PKA_MUTEX_WATCHDOG_NS_LIMIT	U(0x8144)
+#define TEGRA_RNG1_BASE			U(0x03AE0000)
+#define  RNG_MUTEX_WATCHDOG_NS_LIMIT	U(0xFE0)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE		U(0x05000000)
+#define TEGRA_GPU_RESET_REG_OFFSET	U(0x30)
+#define  GPU_RESET_BIT			(U(1) << 0)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE		U(0x0C2E0000)
+#define TEGRA_TMRUS_SIZE		U(0x1000)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE			U(0x0C360000)
+
+/*******************************************************************************
+ * Tegra scratch registers constants
+ ******************************************************************************/
+#define TEGRA_SCRATCH_BASE		U(0x0C390000)
+#define  SECURE_SCRATCH_RSV1_LO		U(0x658)
+#define  SECURE_SCRATCH_RSV1_HI		U(0x65C)
+#define  SECURE_SCRATCH_RSV6		U(0x680)
+#define  SECURE_SCRATCH_RSV11_LO	U(0x6A8)
+#define  SECURE_SCRATCH_RSV11_HI	U(0x6AC)
+#define  SECURE_SCRATCH_RSV53_LO	U(0x7F8)
+#define  SECURE_SCRATCH_RSV53_HI	U(0x7FC)
+#define  SECURE_SCRATCH_RSV54_HI	U(0x804)
+#define  SECURE_SCRATCH_RSV55_LO	U(0x808)
+#define  SECURE_SCRATCH_RSV55_HI	U(0x80C)
+
+/*******************************************************************************
+ * Tegra Memory Mapped Control Register Access constants
+ ******************************************************************************/
+#define TEGRA_MMCRAB_BASE		U(0x0E000000)
+
+/*******************************************************************************
+ * Tegra Memory Mapped Activity Monitor Register Access constants
+ ******************************************************************************/
+#define TEGRA_ARM_ACTMON_CTR_BASE	U(0x0E060000)
+#define TEGRA_DENVER_ACTMON_CTR_BASE	U(0x0E070000)
+
+/*******************************************************************************
+ * Tegra SMMU Controller constants
+ ******************************************************************************/
+#define TEGRA_SMMU0_BASE		U(0x12000000)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE		U(0x30000000)
+#define TEGRA_TZRAM_SIZE		U(0x40000)
+
+#endif /* __TEGRA_DEF_H__ */
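
A minimal sketch of how the ARI aperture macros above might be combined: each of the six cores owns a contiguous 64KB window, so a per-core offset can be derived from a linear core id. The helper below is illustrative and not part of this patch; the base address it would be added to lives in the SoC's MCE driver.

```c
#include <stdint.h>

/* Values mirror the defines above, repeated literally so the sketch is
 * self-contained. */
#define ARI_APERTURE_SIZE	0x10000U	/* each aperture is 64KB */
#define ARI_APERTURES_MAX	6U		/* cores 0..5 */

/* Hypothetical helper: byte offset of a core's ARI aperture from the
 * (unspecified) MCE aperture base. */
static inline uint32_t ari_aperture_offset(uint32_t core)
{
	return (core % ARI_APERTURES_MAX) * ARI_APERTURE_SIZE;
}

/* e.g. core 4 (the first Denver core) maps to offset 0x40000, matching
 * MCE_ARI_APERTURE_4_OFFSET above. */
```
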
diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h
new file mode 100644
index 0000000..454c666
--- /dev/null
+++ b/plat/nvidia/tegra/include/t210/tegra_def.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Power down state IDs
+ ******************************************************************************/
+#define PSTATE_ID_CORE_POWERDN		U(7)
+#define PSTATE_ID_CLUSTER_IDLE		U(16)
+#define PSTATE_ID_CLUSTER_POWERDN	U(17)
+#define PSTATE_ID_SOC_POWERDN		U(27)
+
+/*******************************************************************************
+ * This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call as the `state-id` field in the 'power state' parameter.
+ ******************************************************************************/
+#define PLAT_SYS_SUSPEND_STATE_ID	PSTATE_ID_SOC_POWERDN
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE		U(1)
+#define PLAT_MAX_OFF_STATE		(PSTATE_ID_SOC_POWERDN + U(1))
+
+/*******************************************************************************
+ * GIC memory map
+ ******************************************************************************/
+#define TEGRA_GICD_BASE			U(0x50041000)
+#define TEGRA_GICC_BASE			U(0x50042000)
+
+/*******************************************************************************
+ * Tegra Memory Select Switch Controller constants
+ ******************************************************************************/
+#define TEGRA_MSELECT_BASE		U(0x50060000)
+
+#define MSELECT_CONFIG			U(0x0)
+#define ENABLE_WRAP_INCR_MASTER2_BIT	(U(1) << U(29))
+#define ENABLE_WRAP_INCR_MASTER1_BIT	(U(1) << U(28))
+#define ENABLE_WRAP_INCR_MASTER0_BIT	(U(1) << U(27))
+#define UNSUPPORTED_TX_ERR_MASTER2_BIT	(U(1) << U(25))
+#define UNSUPPORTED_TX_ERR_MASTER1_BIT	(U(1) << U(24))
+#define ENABLE_UNSUP_TX_ERRORS		(UNSUPPORTED_TX_ERR_MASTER2_BIT | \
+					 UNSUPPORTED_TX_ERR_MASTER1_BIT)
+#define ENABLE_WRAP_TO_INCR_BURSTS	(ENABLE_WRAP_INCR_MASTER2_BIT | \
+					 ENABLE_WRAP_INCR_MASTER1_BIT | \
+					 ENABLE_WRAP_INCR_MASTER0_BIT)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE		U(0x60005010)
+#define TEGRA_TMRUS_SIZE		U(0x1000)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE		U(0x60006000)
+#define TEGRA_GPU_RESET_REG_OFFSET	U(0x28C)
+#define  GPU_RESET_BIT			(U(1) << 24)
+
+/*******************************************************************************
+ * Tegra Flow Controller constants
+ ******************************************************************************/
+#define TEGRA_FLOWCTRL_BASE		U(0x60007000)
+
+/*******************************************************************************
+ * Tegra Secure Boot Controller constants
+ ******************************************************************************/
+#define TEGRA_SB_BASE			U(0x6000C200)
+
+/*******************************************************************************
+ * Tegra Exception Vectors constants
+ ******************************************************************************/
+#define TEGRA_EVP_BASE			U(0x6000F000)
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE			U(0x70000000)
+#define  HARDWARE_REVISION_OFFSET	U(0x804)
+
+/*******************************************************************************
+ * Tegra UART controller base addresses
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE		U(0x70006000)
+#define TEGRA_UARTB_BASE		U(0x70006040)
+#define TEGRA_UARTC_BASE		U(0x70006200)
+#define TEGRA_UARTD_BASE		U(0x70006300)
+#define TEGRA_UARTE_BASE		U(0x70006400)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE			U(0x7000E400)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_BASE			U(0x70019000)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0		U(0x70)
+#define MC_SECURITY_CFG1_0		U(0x74)
+#define MC_SECURITY_CFG3_0		U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI	U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO	U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB	U(0x64C)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE		U(0x7C010000)
+#define TEGRA_TZRAM_SIZE		U(0x10000)
+
+#endif /* __TEGRA_DEF_H__ */
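
The CAR macros above are sufficient for the usual read-modify-write of the GPU reset bit. A hedged sketch follows; mmio_read_32()/mmio_write_32() are the generic TF MMIO accessors, and whether or when BL31 actually asserts this reset is platform policy that is not shown here.

```c
#include <mmio.h>
#include <tegra_def.h>

/* Illustrative only: assert the T210 GPU reset by setting bit 24 of the
 * reset register at CAR base + 0x28C, leaving the other bits untouched. */
static void tegra210_assert_gpu_reset(void)
{
	uint32_t reg = mmio_read_32(TEGRA_CAR_RESET_BASE +
				    TEGRA_GPU_RESET_REG_OFFSET);

	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET,
		      reg | GPU_RESET_BIT);
}
```
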
diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h
new file mode 100644
index 0000000..fbaad6e
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_platform.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_PLATFORM_H__
+#define __TEGRA_PLATFORM_H__
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+
+/*
+ * Tegra chip major/minor version
+ */
+uint32_t tegra_get_chipid_major(void);
+uint32_t tegra_get_chipid_minor(void);
+
+/*
+ * Tegra chip identifiers
+ */
+uint8_t tegra_chipid_is_t132(void);
+uint8_t tegra_chipid_is_t210(void);
+uint8_t tegra_chipid_is_t186(void);
+
+
+/*
+ * Tegra platform identifiers
+ */
+uint8_t tegra_platform_is_silicon(void);
+uint8_t tegra_platform_is_qt(void);
+uint8_t tegra_platform_is_emulation(void);
+uint8_t tegra_platform_is_fpga(void);
+
+#endif /* __TEGRA_PLATFORM_H__ */
diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h
new file mode 100644
index 0000000..ec7a277
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_PRIVATE_H__
+#define __TEGRA_PRIVATE_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * Tegra DRAM memory base address
+ ******************************************************************************/
+#define TEGRA_DRAM_BASE		ULL(0x80000000)
+#define TEGRA_DRAM_END		ULL(0x27FFFFFFF)
+
+/*******************************************************************************
+ * Struct for parameters received from BL2
+ ******************************************************************************/
+typedef struct plat_params_from_bl2 {
+	/* TZ memory size */
+	uint64_t tzdram_size;
+	/* TZ memory base */
+	uint64_t tzdram_base;
+	/* UART port ID */
+	int uart_id;
+} plat_params_from_bl2_t;
+
+/*******************************************************************************
+ * Per-CPU struct describing FIQ state to be stored
+ ******************************************************************************/
+typedef struct pcpu_fiq_state {
+	uint64_t elr_el3;
+	uint64_t spsr_el3;
+} pcpu_fiq_state_t;
+
+/*******************************************************************************
+ * Struct describing per-FIQ configuration settings
+ ******************************************************************************/
+typedef struct irq_sec_cfg {
+	/* IRQ number */
+	unsigned int irq;
+	/* Target CPUs servicing this interrupt */
+	unsigned int target_cpus;
+	/* type = INTR_TYPE_S_EL1 or INTR_TYPE_EL3 */
+	uint32_t type;
+} irq_sec_cfg_t;
+
+/* Declarations for plat_psci_handlers.c */
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+		psci_power_state_t *req_state);
+
+/* Declarations for plat_setup.c */
+const mmap_region_t *plat_get_mmio_map(void);
+uint32_t plat_get_console_from_id(int id);
+void plat_gic_setup(void);
+bl31_params_t *plat_get_bl31_params(void);
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
+
+/* Declarations for plat_secondary.c */
+void plat_secondary_setup(void);
+int plat_lock_cpu_vectors(void);
+
+/* Declarations for tegra_fiq_glue.c */
+void tegra_fiq_handler_setup(void);
+int tegra_fiq_get_intr_context(void);
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint);
+
+/* Declarations for tegra_gic.c */
+void tegra_gic_cpuif_deactivate(void);
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, uint32_t num_irqs);
+
+/* Declarations for tegra_security.c */
+void tegra_security_setup(void);
+void tegra_security_setup_videomem(uintptr_t base, uint64_t size);
+
+/* Declarations for tegra_pm.c */
+extern uint8_t tegra_fake_system_suspend;
+
+void tegra_pm_system_suspend_entry(void);
+void tegra_pm_system_suspend_exit(void);
+int tegra_system_suspended(void);
+
+/* Declarations for tegraXXX_pm.c */
+int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl);
+int tegra_prepare_cpu_on_finish(unsigned long mpidr);
+
+/* Declarations for tegra_bl31_setup.c */
+plat_params_from_bl2_t *bl31_get_plat_params(void);
+int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
+void plat_early_platform_setup(void);
+
+/* Declarations for tegra_delay_timer.c */
+void tegra_delay_timer_init(void);
+
+void tegra_secure_entrypoint(void);
+void tegra186_cpu_reset_handler(void);
+
+#endif /* __TEGRA_PRIVATE_H__ */
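
irq_sec_cfg_t together with tegra_gic_setup() is how an SoC layer hands its secure interrupt list to the common GIC code. A minimal sketch using the T186 watchdog IRQs defined earlier; the table contents are illustrative (the real T186 table is populated elsewhere), and INTR_TYPE_EL3 comes from the generic interrupt management framework.

```c
#include <interrupt_mgmt.h>
#include <tegra_def.h>
#include <tegra_private.h>

/* Illustrative secure IRQ table: route both watchdog interrupts to EL3. */
static const irq_sec_cfg_t wdt_sec_irqs[] = {
	{ TEGRA186_TOP_WDT_IRQ, TEGRA186_SEC_IRQ_TARGET_MASK, INTR_TYPE_EL3 },
	{ TEGRA186_AON_WDT_IRQ, TEGRA186_SEC_IRQ_TARGET_MASK, INTR_TYPE_EL3 },
};

void plat_gic_setup(void)
{
	tegra_gic_setup(wdt_sec_irqs,
			sizeof(wdt_sec_irqs) / sizeof(wdt_sec_irqs[0]));
}
```
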
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
new file mode 100644
index 0000000..9a9e79e
--- /dev/null
+++ b/plat/nvidia/tegra/platform.mk
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SOC_DIR			:=	plat/nvidia/tegra/soc/${TARGET_SOC}
+
+# dump the state on crash console
+CRASH_REPORTING		:=	1
+$(eval $(call add_define,CRASH_REPORTING))
+
+# enable assert() for release/debug builds
+ENABLE_ASSERTIONS	:=	1
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT	:=	0
+
+# enable dynamic memory mapping
+PLAT_XLAT_TABLES_DYNAMIC :=	1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
+
+# Enable PSCI v1.0 extended state ID format
+PSCI_EXTENDED_STATE_ID	:=	1
+
+# code and read-only data should be put on separate memory pages
+SEPARATE_CODE_AND_RODATA :=	1
+
+# do not use coherent memory
+USE_COHERENT_MEM	:=	0
+
+include plat/nvidia/tegra/common/tegra_common.mk
+include ${SOC_DIR}/platform_${TARGET_SOC}.mk
+
+# modify BUILD_PLAT to point to SoC specific build directory
+BUILD_PLAT	:=	${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE}
+
+# enable signed comparison checks
+TF_CFLAGS	+= -Wsign-compare
diff --git a/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c
new file mode 100644
index 0000000..1cffb74
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <denver.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/*
+ * Register used to clear CPU reset signals. Each CPU has two reset
+ * signals: CPU reset (3:0) and Core reset (19:16)
+ */
+#define CPU_CMPLX_RESET_CLR		0x344
+#define CPU_CORE_RESET_MASK		0x10001
+
+/* Clock and Reset controller registers for system clock's settings */
+#define SCLK_RATE			0x30
+#define SCLK_BURST_POLICY		0x28
+#define SCLK_BURST_POLICY_DEFAULT	0x10000000
+
+static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+					psci_power_state_t *req_state)
+{
+	int state_id = psci_get_pstate_id(power_state);
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+
+	/*
+	 * Sanity check the requested state id, power level and CPU number.
+	 * Currently T132 only supports SYSTEM_SUSPEND on the last standing
+	 * CPU, i.e. CPU 0.
+	 */
+	if ((state_id != PSTATE_ID_SOC_POWERDN) || (cpu != 0)) {
+		ERROR("unsupported state id @ power level\n");
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	/* Set lower power states to PLAT_MAX_OFF_STATE */
+	for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+
+	/* Set the SYSTEM_SUSPEND state-id */
+	req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] =
+		PSTATE_ID_SOC_POWERDN;
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t mask = CPU_CORE_RESET_MASK << cpu;
+
+	if (cpu_powergate_mask[cpu] == 0) {
+
+		/* Deassert CPU reset signals */
+		mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
+
+		/* Power on CPU using PMC */
+		tegra_pmc_cpu_on(cpu);
+
+		/* Fill in the CPU powergate mask */
+		cpu_powergate_mask[cpu] = 1;
+
+	} else {
+		/* Power on CPU using Flow Controller */
+		tegra_fc_cpu_on(cpu);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	/*
+	 * Lock scratch registers which hold the CPU vectors
+	 */
+	tegra_pmc_lock_cpu_vectors();
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
+
+	/* Disable DCO operations */
+	denver_disable_dco();
+
+	/* Power down the CPU */
+	write_actlr_el1(DENVER_CPU_STATE_POWER_DOWN);
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+#if ENABLE_ASSERTIONS
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+
+	/* SYSTEM_SUSPEND only on CPU0 */
+	assert(cpu == 0);
+#endif
+
+	/* Allow restarting CPU #1 using PMC on suspend exit */
+	cpu_powergate_mask[1] = 0;
+
+	/* Program FC to enter suspend state */
+	tegra_fc_cpu_powerdn(read_mpidr());
+
+	/* Disable DCO operations */
+	denver_disable_dco();
+
+	/* Program the suspend state ID */
+	write_actlr_el1(target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+	/*
+	 * Set System Clock (SCLK) to its POR default so that the clock
+	 * source for the PMC APB clock is not changed by the system reset.
+	 */
+	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
+		       SCLK_BURST_POLICY_DEFAULT);
+	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
+
+	/* Wait 1 ms to make sure clock source/device logic is stabilized. */
+	mdelay(1);
+
+	return PSCI_E_SUCCESS;
+}
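
A quick standalone check of the reset-clear encoding used in tegra_soc_pwr_domain_on() above: the per-CPU write value is CPU_CORE_RESET_MASK shifted by the CPU id, which clears that CPU's reset bit (3:0) and core reset bit (19:16). The constant is repeated literally below so the check compiles on its own.

```c
#include <inttypes.h>
#include <stdio.h>

#define CPU_CORE_RESET_MASK	0x10001U	/* same value as above */

int main(void)
{
	/* For CPU1 the write should clear bit 1 (CPU reset) and bit 17
	 * (core reset), i.e. the value 0x20002. */
	uint32_t cpu = 1U;
	uint32_t mask = CPU_CORE_RESET_MASK << cpu;

	printf("CPU%" PRIu32 " reset-clear value: 0x%05" PRIX32 "\n",
	       cpu, mask);
	return 0;
}
```
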
diff --git a/plat/nvidia/tegra/soc/t132/plat_secondary.c b/plat/nvidia/tegra/soc/t132/plat_secondary.c
new file mode 100644
index 0000000..d5ca30c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_secondary.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <platform.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+
+#define SB_CSR				0x0
+#define  SB_CSR_NS_RST_VEC_WR_DIS	(1 << 1)
+
+/* AARCH64 CPU reset vector */
+#define SB_AA64_RESET_LOW		0x30	/* width = 31:0 */
+#define SB_AA64_RESET_HI		0x34	/* width = 11:0 */
+
+/* AARCH32 CPU reset vector */
+#define EVP_CPU_RESET_VECTOR		0x100
+
+extern void tegra_secure_entrypoint(void);
+
+/*
+ * For T132, CPUs reset to AARCH32, so the reset vector is first
+ * armv8_trampoline which does a warm reset to AARCH64 and starts
+ * execution at the address in SB_AA64_RESET_LOW/SB_AA64_RESET_HI.
+ */
+__aligned(8) const uint32_t armv8_trampoline[] = {
+	0xE3A00003,		/* mov	r0, #3 */
+	0xEE0C0F50,		/* mcr	p15, 0, r0, c12, c0, 2 */
+	0xEAFFFFFE,		/* b	. */
+};
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+	uint32_t val;
+	uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+	/*
+	 * For T132, CPUs reset to AARCH32, so the reset vector is first
+	 * armv8_trampoline, which does a warm reset to AARCH64 and starts
+	 * execution at the address in SCRATCH34/SCRATCH35.
+	 */
+	INFO("Setting up T132 CPU boot\n");
+
+	/* initial AARCH32 reset address */
+	tegra_pmc_write_32(PMC_SECURE_SCRATCH22,
+		(unsigned long)&armv8_trampoline);
+
+	/* set AARCH32 exception vector (read to flush) */
+	mmio_write_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR,
+		(unsigned long)&armv8_trampoline);
+	val = mmio_read_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR);
+
+	/* setup secondary CPU vector */
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+			(reset_addr & 0xFFFFFFFF) | 1);
+	val = reset_addr >> 32;
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+	/* configure PMC */
+	tegra_pmc_cpu_setup(reset_addr);
+	tegra_pmc_lock_cpu_vectors();
+}
diff --git a/plat/nvidia/tegra/soc/t132/plat_setup.c b/plat/nvidia/tegra/soc/t132/plat_setup.c
new file mode 100644
index 0000000..2419965
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_setup.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores */
+	PLATFORM_CORE_COUNT,
+};
+
+/* sets of MMIO ranges setup */
+#define MMIO_RANGE_0_ADDR	0x50000000
+#define MMIO_RANGE_1_ADDR	0x60000000
+#define MMIO_RANGE_2_ADDR	0x70000000
+#define MMIO_RANGE_SIZE		0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Return the platform MMIO map used to set up the page tables and the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+	/* MMIO space */
+	return tegra_mmap;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 12000000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA132_MAX_UART_PORTS		5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra132_uart_addresses[TEGRA132_MAX_UART_PORTS + 1] = {
+	0,	/* undefined - treated as an error case */
+	TEGRA_UARTA_BASE,
+	TEGRA_UARTB_BASE,
+	TEGRA_UARTC_BASE,
+	TEGRA_UARTD_BASE,
+	TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+	if ((id < 0) || (id > TEGRA132_MAX_UART_PORTS))
+		return 0;
+
+	return tegra132_uart_addresses[id];
+}
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(NULL, 0);
+}
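
plat_get_console_from_id() maps the 1-based UART id passed down from BL2 to a controller base address, with 0 reserved as the error value. A small usage sketch; the fall-back-to-UARTA policy is illustrative, not something this patch mandates.

```c
#include <tegra_def.h>
#include <tegra_private.h>

/* Illustrative caller: fall back to UART-A when the id from BL2 does not
 * name a valid T132 console. */
static uint32_t pick_console_base(int uart_id)
{
	uint32_t base = plat_get_console_from_id(uart_id);

	return (base != 0U) ? base : TEGRA_UARTA_BASE;
}
```
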
diff --git a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
new file mode 100644
index 0000000..adc1c71
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <tegra_private.h>
+
+#define NS_SWITCH_AARCH32	1
+#define SCR_RW_BITPOS		__builtin_ctz(SCR_RW_BIT)
+
+/*******************************************************************************
+ * Tegra132 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_AARCH_SWITCH			0x82000004
+
+/*******************************************************************************
+ * SPSR settings for AARCH32/AARCH64 modes
+ ******************************************************************************/
+#define SPSR32		SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, \
+			DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT)
+#define SPSR64		SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)
+
+/*******************************************************************************
+ * This function is responsible for handling all T132 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     void *cookie,
+		     void *handle,
+		     uint64_t flags)
+{
+	switch (smc_fid) {
+
+	case TEGRA_SIP_AARCH_SWITCH:
+
+		/* clean up the high bits */
+		x1 = (uint32_t)x1;
+		x2 = (uint32_t)x2;
+
+		if (!x1 || x2 > NS_SWITCH_AARCH32) {
+			ERROR("%s: invalid parameters\n", __func__);
+			return -EINVAL;
+		}
+
+		/* x1 = ns entry point */
+		cm_set_elr_spsr_el3(NON_SECURE, x1,
+			(x2 == NS_SWITCH_AARCH32) ? SPSR32 : SPSR64);
+
+		/* switch NS world mode */
+		cm_write_scr_el3_bit(NON_SECURE, SCR_RW_BITPOS, !x2);
+
+		INFO("CPU switched to AARCH%s mode\n",
+			(x2 == NS_SWITCH_AARCH32) ? "32" : "64");
+		return 0;
+
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		break;
+	}
+
+	return -ENOTSUP;
+}
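
The handler above relies on two small tricks: SCR_RW_BIT is a mask, so __builtin_ctz() turns it into the bit position expected by cm_write_scr_el3_bit(), and the value written is !x2, so a request for AArch32 (x2 == 1) clears SCR_EL3.RW. A standalone check of that arithmetic; SCR_EL3.RW is bit 10, and the constant is repeated literally here to keep the check self-contained.

```c
#include <stdio.h>

#define SCR_RW_BIT	(1U << 10)	/* SCR_EL3.RW */

int main(void)
{
	unsigned int bitpos = __builtin_ctz(SCR_RW_BIT);	/* -> 10 */
	unsigned int x2_aarch32 = 1U;		/* NS_SWITCH_AARCH32 */
	unsigned int rw_value = !x2_aarch32;	/* value written: 0 */

	printf("RW bit position: %u, RW value for AArch32: %u\n",
	       bitpos, rw_value);
	return 0;
}
```
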
diff --git a/plat/nvidia/tegra/soc/t132/platform_t132.mk b/plat/nvidia/tegra/soc/t132/platform_t132.mk
new file mode 100644
index 0000000..8b3d238
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/platform_t132.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TZDRAM_BASE			:= 0xF5C00000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT		:= 1
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER	:= 2
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES			:= 3
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS		:= 8
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+BL31_SOURCES		+=	lib/cpus/aarch64/denver.S		\
+				${COMMON_DIR}/drivers/flowctrl/flowctrl.c	\
+				${COMMON_DIR}/drivers/memctrl/memctrl_v1.c	\
+				${SOC_DIR}/plat_psci_handlers.c		\
+				${SOC_DIR}/plat_sip_calls.c		\
+				${SOC_DIR}/plat_setup.c			\
+				${SOC_DIR}/plat_secondary.c
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
new file mode 100644
index 0000000..26197e9
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MCE_PRIVATE_H__
+#define __MCE_PRIVATE_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Macros to prepare CSTATE info request
+ ******************************************************************************/
+/* Description of the parameters for UPDATE_CSTATE_INFO request */
+#define CLUSTER_CSTATE_MASK			ULL(0x7)
+#define CLUSTER_CSTATE_SHIFT			U(0)
+#define CLUSTER_CSTATE_UPDATE_BIT		(ULL(1) << 7)
+#define CCPLEX_CSTATE_MASK			ULL(0x3)
+#define CCPLEX_CSTATE_SHIFT			ULL(8)
+#define CCPLEX_CSTATE_UPDATE_BIT		(ULL(1) << 15)
+#define SYSTEM_CSTATE_MASK			ULL(0xF)
+#define SYSTEM_CSTATE_SHIFT			ULL(16)
+#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT	ULL(22)
+#define SYSTEM_CSTATE_FORCE_UPDATE_BIT		(ULL(1) << 22)
+#define SYSTEM_CSTATE_UPDATE_BIT		(ULL(1) << 23)
+#define CSTATE_WAKE_MASK_UPDATE_BIT		(ULL(1) << 31)
+#define CSTATE_WAKE_MASK_SHIFT			ULL(32)
+#define CSTATE_WAKE_MASK_CLEAR			U(0xFFFFFFFF)
+
+/*******************************************************************************
+ * Auto-CC3 control macros
+ ******************************************************************************/
+#define MCE_AUTO_CC3_FREQ_MASK			U(0x1FF)
+#define MCE_AUTO_CC3_FREQ_SHIFT			U(0)
+#define MCE_AUTO_CC3_VTG_MASK			U(0x7F)
+#define MCE_AUTO_CC3_VTG_SHIFT			U(16)
+#define MCE_AUTO_CC3_ENABLE_BIT			(U(1) << 31)
+
+/*******************************************************************************
+ * Macros for the 'IS_SC7_ALLOWED' command
+ ******************************************************************************/
+#define MCE_SC7_ALLOWED_MASK			U(0x7)
+#define MCE_SC7_WAKE_TIME_SHIFT			U(32)
+
+/*******************************************************************************
+ * Macros for 'read/write cstate stats' commands
+ ******************************************************************************/
+#define MCE_CSTATE_STATS_TYPE_SHIFT		ULL(32)
+#define MCE_CSTATE_WRITE_DATA_LO_MASK		U(0xF)
+
+/*******************************************************************************
+ * Macros for 'update crossover threshold' command
+ ******************************************************************************/
+#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT	U(32)
+
+/*******************************************************************************
+ * MCA argument macros
+ ******************************************************************************/
+#define MCA_ARG_ERROR_MASK			U(0xFF)
+#define MCA_ARG_FINISH_SHIFT			U(24)
+#define MCA_ARG_FINISH_MASK			U(0xFF)
+
+/*******************************************************************************
+ * Uncore PERFMON ARI struct
+ ******************************************************************************/
+#define UNCORE_PERFMON_CMD_READ			U(0)
+#define UNCORE_PERFMON_CMD_WRITE		U(1)
+
+#define UNCORE_PERFMON_CMD_MASK			U(0xFF)
+#define UNCORE_PERFMON_CMD_SHIFT		U(24)
+#define UNCORE_PERFMON_UNIT_GRP_MASK		U(0xF)
+#define UNCORE_PERFMON_SELECTOR_MASK		U(0xF)
+#define UNCORE_PERFMON_REG_MASK			U(0xFF)
+#define UNCORE_PERFMON_CTR_MASK			U(0xFF)
+#define UNCORE_PERFMON_RESP_STATUS_MASK		U(0xFF)
+#define UNCORE_PERFMON_RESP_STATUS_SHIFT	U(24)
+
+/*******************************************************************************
+ * Structure populated by arch specific code to export routines which perform
+ * common low level MCE functions
+ ******************************************************************************/
+typedef struct arch_mce_ops {
+	/*
+	 * This ARI request sets up the MCE to start execution on assertion
+	 * of STANDBYWFI, update the core power state and expected wake time,
+	 * then determine the proper power state to enter.
+	 */
+	int32_t (*enter_cstate)(uint32_t ari_base, uint32_t state,
+			    uint32_t wake_time);
+	/*
+	 * This ARI request allows updating of the CLUSTER_CSTATE,
+	 * CCPLEX_CSTATE, and SYSTEM_CSTATE register values.
+	 */
+	int32_t (*update_cstate_info)(uint32_t ari_base,
+				  uint32_t cluster,
+				  uint32_t ccplex,
+				  uint32_t system,
+				  uint8_t sys_state_force,
+				  uint32_t wake_mask,
+				  uint8_t update_wake_mask);
+	/*
+	 * This ARI request allows updating of power state crossover
+	 * threshold times. An index value specifies which crossover
+	 * state is being updated.
+	 */
+	int32_t (*update_crossover_time)(uint32_t ari_base,
+				     uint32_t type,
+				     uint32_t time);
+	/*
+	 * This ARI request allows read access to statistical information
+	 * related to power states.
+	 */
+	uint64_t (*read_cstate_stats)(uint32_t ari_base,
+				     uint32_t state);
+	/*
+	 * This ARI request allows write access to statistical information
+	 * related to power states.
+	 */
+	int32_t (*write_cstate_stats)(uint32_t ari_base,
+				  uint32_t state,
+				  uint32_t stats);
+	/*
+	 * This ARI request allows the CPU to understand the features
+	 * supported by the MCE firmware.
+	 */
+	uint64_t (*call_enum_misc)(uint32_t ari_base, uint32_t cmd,
+				   uint32_t data);
+	/*
+	 * This ARI request allows querying the CCPLEX to determine if
+	 * the CCx state is allowed given a target core C-state and wake
+	 * time. If the CCx state is allowed, the response indicates CCx
+	 * must be entered. If the CCx state is not allowed, the response
+	 * indicates CC6/CC7 can't be entered
+	 */
+	int32_t (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
+			      uint32_t wake_time);
+	/*
+	 * This ARI request allows querying the CCPLEX to determine if
+	 * the SC7 state is allowed given a target core C-state and wake
+	 * time. If the SC7 state is allowed, all cores but the associated
+	 * core are offlined (WAKE_EVENTS are set to 0) and the response
+	 * indicates SC7 must be entered. If the SC7 state is not allowed,
+	 * the response indicates SC7 can't be entered
+	 */
+	int32_t (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
+			      uint32_t wake_time);
+	/*
+	 * This ARI request allows a core to bring another offlined core
+	 * back online to the C0 state. Note that a core is offlined by
+	 * entering a C-state where the WAKE_MASK is all 0.
+	 */
+	int32_t (*online_core)(uint32_t ari_base, uint32_t cpuid);
+	/*
+	 * This ARI request allows the CPU to enable/disable Auto-CC3 idle
+	 * state.
+	 */
+	int32_t (*cc3_ctrl)(uint32_t ari_base,
+			uint32_t freq,
+			uint32_t volt,
+			uint8_t enable);
+	/*
+	 * This ARI request allows updating the reset vector register for
+	 * D15 and A57 CPUs.
+	 */
+	int32_t (*update_reset_vector)(uint32_t ari_base);
+	/*
+	 * This ARI request instructs the ROC to flush A57 data caches in
+	 * order to maintain coherency with the Denver cluster.
+	 */
+	int32_t (*roc_flush_cache)(uint32_t ari_base);
+	/*
+	 * This ARI request instructs the ROC to flush A57 data caches along
+	 * with the caches covering ARM code in order to maintain coherency
+	 * with the Denver cluster.
+	 */
+	int32_t (*roc_flush_cache_trbits)(uint32_t ari_base);
+	/*
+	 * This ARI request instructs the ROC to clean A57 data caches along
+	 * with the caches covering ARM code in order to maintain coherency
+	 * with the Denver cluster.
+	 */
+	int32_t (*roc_clean_cache)(uint32_t ari_base);
+	/*
+	 * This ARI request reads/writes the Machine Check Arch. (MCA)
+	 * registers.
+	 */
+	uint64_t (*read_write_mca)(uint32_t ari_base,
+			      uint64_t cmd,
+			      uint64_t *data);
+	/*
+	 * Some MC GSC (General Security Carveout) register values are
+	 * expected to be changed by TrustZone secure ARM code after boot.
+	 * Since there is no hardware mechanism for the CCPLEX to know
+	 * that an MC GSC register has changed to allow it to update its
+	 * own internal GSC register, there needs to be a mechanism that
+	 * can be used by ARM code to cause the CCPLEX to update its GSC
+	 * register value. This ARI request allows updating the GSC register
+	 * value for a certain carveout in the CCPLEX.
+	 */
+	int32_t (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
+	/*
+	 * This ARI request instructs the CCPLEX to either shutdown or
+	 * reset the entire system
+	 */
+	void (*enter_ccplex_state)(uint32_t ari_base, uint32_t state_idx);
+	/*
+	 * This ARI request reads/writes data from/to Uncore PERFMON
+	 * registers
+	 */
+	int32_t (*read_write_uncore_perfmon)(uint32_t ari_base,
+			uint64_t req, uint64_t *data);
+	/*
+	 * This ARI implements ARI_MISC_CCPLEX commands. This can be
+	 * used to enable/disable coresight clock gating.
+	 */
+	void (*misc_ccplex)(uint32_t ari_base, uint32_t index,
+			uint32_t value);
+} arch_mce_ops_t;
+
+/* declarations for ARI/NVG handler functions */
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+	uint8_t update_wake_mask);
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data);
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_online_core(uint32_t ari_base, uint32_t core);
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+int32_t ari_reset_vector_update(uint32_t ari_base);
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base);
+int32_t ari_roc_flush_cache(uint32_t ari_base);
+int32_t ari_roc_clean_cache(uint32_t ari_base);
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data);
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx);
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base,
+		uint64_t req, uint64_t *data);
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value);
+
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+		uint8_t update_wake_mask);
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core);
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+
+extern void nvg_set_request_data(uint64_t req, uint64_t data);
+extern void nvg_set_request(uint64_t req);
+extern uint64_t nvg_get_result(void);
+#endif /* __MCE_PRIVATE_H__ */
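
The UPDATE_CSTATE_INFO macros above describe a packed 64-bit request: each field is masked, shifted into place, and accompanied by its *_UPDATE_BIT, with the wake mask occupying the upper 32 bits. A hedged sketch of composing such a word; the encoding itself is owned by the MCE firmware, and this merely exercises the macros.

```c
#include <stdint.h>
#include <mce_private.h>

/* Illustrative: build an UPDATE_CSTATE_INFO payload that requests a
 * cluster C-state and also updates the wake mask. */
static uint64_t make_cstate_info(uint32_t cluster_state, uint32_t wake_mask)
{
	uint64_t val = 0;

	val |= ((uint64_t)cluster_state & CLUSTER_CSTATE_MASK) <<
						CLUSTER_CSTATE_SHIFT;
	val |= CLUSTER_CSTATE_UPDATE_BIT;
	val |= (uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT;
	val |= CSTATE_WAKE_MASK_UPDATE_BIT;

	return val;
}
```
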
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
new file mode 100644
index 0000000..8c6f30c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef T18X_TEGRA_ARI_H
+#define T18X_TEGRA_ARI_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * t18x_ari.h
+ *
+ * Global ARI definitions.
+ * ----------------------------------------------------------------------------
+ */
+
+enum {
+	TEGRA_ARI_VERSION_MAJOR = 3U,
+	TEGRA_ARI_VERSION_MINOR = 1U,
+};
+
+typedef enum {
+	/* indexes below get the core lock */
+	TEGRA_ARI_MISC = 0U,
+	/* index 1 is deprecated */
+	/* index 2 is deprecated */
+	/* index 3 is deprecated */
+	TEGRA_ARI_ONLINE_CORE = 4U,
+
+	/* indexes below need cluster lock */
+	TEGRA_ARI_MISC_CLUSTER = 41U,
+	TEGRA_ARI_IS_CCX_ALLOWED = 42U,
+	TEGRA_ARI_CC3_CTRL = 43U,
+
+	/* indexes below need ccplex lock */
+	TEGRA_ARI_ENTER_CSTATE = 80U,
+	TEGRA_ARI_UPDATE_CSTATE_INFO = 81U,
+	TEGRA_ARI_IS_SC7_ALLOWED = 82U,
+	/* index 83 is deprecated */
+	TEGRA_ARI_PERFMON = 84U,
+	TEGRA_ARI_UPDATE_CCPLEX_GSC = 85U,
+	/* index 86 is deprecated */
+	/* index 87 is deprecated */
+	TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88U,
+	TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89U,
+	TEGRA_ARI_MISC_CCPLEX = 90U,
+	TEGRA_ARI_MCA = 91U,
+	TEGRA_ARI_UPDATE_CROSSOVER = 92U,
+	TEGRA_ARI_CSTATE_STATS = 93U,
+	TEGRA_ARI_WRITE_CSTATE_STATS = 94U,
+	TEGRA_ARI_COPY_MISCREG_AA64_RST = 95U,
+	TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96U,
+} tegra_ari_req_id_t;
+
+typedef enum {
+	TEGRA_ARI_MISC_ECHO = 0U,
+	TEGRA_ARI_MISC_VERSION = 1U,
+	TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2U,
+} tegra_ari_misc_index_t;
+
+typedef enum {
+	TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0U,
+	TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1U,
+	TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2U,
+	TEGRA_ARI_MISC_CCPLEX_EDBGREQ = 3U,
+} tegra_ari_misc_ccplex_index_t;
+
+typedef enum {
+	TEGRA_ARI_CORE_C0 = 0U,
+	TEGRA_ARI_CORE_C1 = 1U,
+	TEGRA_ARI_CORE_C6 = 6U,
+	TEGRA_ARI_CORE_C7 = 7U,
+	TEGRA_ARI_CORE_WARMRSTREQ = 8U,
+} tegra_ari_core_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CLUSTER_CC0 = 0U,
+	TEGRA_ARI_CLUSTER_CC1 = 1U,
+	TEGRA_ARI_CLUSTER_CC6 = 6U,
+	TEGRA_ARI_CLUSTER_CC7 = 7U,
+} tegra_ari_cluster_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CCPLEX_CCP0 = 0U,
+	TEGRA_ARI_CCPLEX_CCP1 = 1U,
+	TEGRA_ARI_CCPLEX_CCP3 = 3U,  /* obsoleted */
+} tegra_ari_ccplex_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_SYSTEM_SC0 = 0U,
+	TEGRA_ARI_SYSTEM_SC1 = 1U,  /* obsoleted */
+	TEGRA_ARI_SYSTEM_SC2 = 2U,  /* obsoleted */
+	TEGRA_ARI_SYSTEM_SC3 = 3U,  /* obsoleted */
+	TEGRA_ARI_SYSTEM_SC4 = 4U,  /* obsoleted */
+	TEGRA_ARI_SYSTEM_SC7 = 7U,
+	TEGRA_ARI_SYSTEM_SC8 = 8U,
+} tegra_ari_system_sleep_state_t;
+
+typedef enum {
+	TEGRA_ARI_CROSSOVER_C1_C6 = 0U,
+	TEGRA_ARI_CROSSOVER_CC1_CC6 = 1U,
+	TEGRA_ARI_CROSSOVER_CC1_CC7 = 2U,
+	TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3U,  /* obsoleted */
+	TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4U,  /* obsoleted */
+	TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5U,  /* obsoleted */
+	TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6U,  /* obsoleted */
+	TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7U,  /* obsoleted */
+	TEGRA_ARI_CROSSOVER_SC0_SC7 = 7U,
+	TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8U,  /* obsoleted */
+} tegra_ari_crossover_index_t;
+
+typedef enum {
+	TEGRA_ARI_CSTATE_STATS_CLEAR = 0U,
+	TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES = 1U,
+	TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES, /* obsoleted */
+	TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES, /* obsoleted */
+	TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES, /* obsoleted */
+	TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES, /* obsoleted */
+	TEGRA_ARI_CSTATE_STATS_A57_CC6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_CC7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_CC6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_CC7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_0_C6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_1_C6_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14U,
+	TEGRA_ARI_CSTATE_STATS_D15_1_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18U,
+	TEGRA_ARI_CSTATE_STATS_A57_1_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_2_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_A57_3_C7_ENTRIES,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26U,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2,
+	TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3,
+} tegra_ari_cstate_stats_index_t;
+
+typedef enum {
+	TEGRA_ARI_GSC_ALL = 0U,
+	TEGRA_ARI_GSC_BPMP = 6U,
+	TEGRA_ARI_GSC_APE = 7U,
+	TEGRA_ARI_GSC_SPE = 8U,
+	TEGRA_ARI_GSC_SCE = 9U,
+	TEGRA_ARI_GSC_APR = 10U,
+	TEGRA_ARI_GSC_TZRAM = 11U,
+	TEGRA_ARI_GSC_SE = 12U,
+	TEGRA_ARI_GSC_BPMP_TO_SPE = 16U,
+	TEGRA_ARI_GSC_SPE_TO_BPMP = 17U,
+	TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18U,
+	TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19U,
+	TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20U,
+	TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21U,
+	TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22U,
+	TEGRA_ARI_GSC_SC7_RESUME_FW = 23U,
+	TEGRA_ARI_GSC_TZ_DRAM_IDX = 34U,
+	TEGRA_ARI_GSC_VPR_IDX = 35U,
+} tegra_ari_gsc_index_t;
+
+/* This macro will produce enums for __name##_LSB, __name##_MSB and __name##_MSK */
+#define TEGRA_ARI_ENUM_MASK_LSB_MSB(__name, __lsb, __msb) __name##_LSB = __lsb, __name##_MSB = __msb
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0U, 2U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7U, 7U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8U, 9U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22U, 22U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23U, 23U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31U, 31U),
+} tegra_ari_update_cstate_info_bitmasks_t;
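
The TEGRA_ARI_ENUM_MASK_LSB_MSB() pairs only record bit positions, so pulling a field out of an ARI register is a generic shift-and-mask. A sketch follows; the helper is not part of this header and assumes a field narrower than 64 bits.

```c
#include <stdint.h>

/* Hypothetical helper: extract the field [msb:lsb] described by a
 * __name_LSB / __name_MSB pair from this header. */
static inline uint64_t ari_get_field(uint64_t reg, uint32_t lsb, uint32_t msb)
{
	return (reg >> lsb) & ((1ULL << (msb - lsb + 1U)) - 1ULL);
}

/* e.g. ari_get_field(info, TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_LSB,
 *                    TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_MSB)
 * returns bits [19:16]. */
```
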
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0U, 0U),
+} tegra_ari_misc_ccplex_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0U, 8U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16U, 23U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31U, 31U),
+} tegra_ari_cc3_ctrl_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_NOP = 0U,
+	TEGRA_ARI_MCA_READ_SERR = 1U,
+	TEGRA_ARI_MCA_WRITE_SERR = 2U,
+	TEGRA_ARI_MCA_CLEAR_SERR = 4U,
+	TEGRA_ARI_MCA_REPORT_SERR = 5U,
+	TEGRA_ARI_MCA_READ_INTSTS = 6U,
+	TEGRA_ARI_MCA_WRITE_INTSTS = 7U,
+	TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8U,
+} tegra_ari_mca_commands_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_RD_WR_DPMU = 0U,
+	TEGRA_ARI_MCA_RD_WR_IOB = 1U,
+	TEGRA_ARI_MCA_RD_WR_MCB = 2U,
+	TEGRA_ARI_MCA_RD_WR_CCE = 3U,
+	TEGRA_ARI_MCA_RD_WR_CQX = 4U,
+	TEGRA_ARI_MCA_RD_WR_CTU = 5U,
+	TEGRA_ARI_MCA_RD_WR_JSR_MTS = 7U,
+	TEGRA_ARI_MCA_RD_BANK_INFO = 0x0fU,
+	TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10U,
+	TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11U,
+	TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12U,
+} tegra_ari_mca_rd_wr_indexes_t;
+
+typedef enum {
+	TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0U,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1U,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2U,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3U,
+	TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4U,
+} tegra_ari_mca_read_asserx_subindexes_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0U, 0U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1U, 1U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2U, 2U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3U, 3U),
+} tegra_ari_mca_secure_register_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18U, 18U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20U, 23U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0U, 41U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42U, 52U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0U, 0U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1U, 1U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3U, 3U),
+} tegra_ari_mca_aserr0_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18U, 18U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20U, 20U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21U, 21U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22U, 23U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24U, 25U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0U, 7U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8U, 27U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28U, 31U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32U, 35U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0U, 0U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1U, 1U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2U, 2U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3U, 3U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4U, 4U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0U, 41U),
+} tegra_ari_mca_aserr1_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18U, 21U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22U, 53U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0U, 0U),
+} tegra_ari_mca_aserr2_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18U, 18U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20U, 20U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21U, 21U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22U, 22U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0U, 5U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6U, 47U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0U, 0U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1U, 1U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2U, 11U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12U, 25U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18U, 43U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44U, 45U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46U, 52U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0U, 0U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1U, 1U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2U, 2U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3U, 3U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4U, 4U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5U, 5U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6U, 19U),
+} tegra_ari_mca_aserr3_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18U, 18U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19U, 19U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0U, 0U),
+} tegra_ari_mca_aserr4_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16U, 16U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17U, 17U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0U, 7U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16U, 26U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32U, 35U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36U, 45U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0U, 0U),
+} tegra_ari_mca_aserr5_bitmasks_t;
+
+typedef enum {
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_AV, 58U, 58U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_MV, 59U, 59U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_EN, 60U, 60U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_UC, 61U, 61U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_OVF, 62U, 62U),
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_VAL, 63U, 63U),
+
+	TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_ADDR_TBD_INFO, 0U, 63U),
+} tegra_ari_mca_serr1_bitmasks_t;
+
+#undef TEGRA_ARI_ENUM_MASK_LSB_MSB
+
+typedef enum {
+	TEGRA_NVG_CHANNEL_PMIC = 0U,
+	TEGRA_NVG_CHANNEL_POWER_PERF = 1U,
+	TEGRA_NVG_CHANNEL_POWER_MODES = 2U,
+	TEGRA_NVG_CHANNEL_WAKE_TIME = 3U,
+	TEGRA_NVG_CHANNEL_CSTATE_INFO = 4U,
+	TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5U,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6U,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7U,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12U,  /* obsoleted */
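+	/* note: the next entry shares index 12 with the obsoleted CCP3_SC7 channel above */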
+	TEGRA_NVG_CHANNEL_CROSSOVER_SC0_SC7 = 12U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 25U, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26U, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29U, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30U, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37U, /* Reserved (for Denver15 core 2) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38U, /* Reserved (for Denver15 core 3) */
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41U,
+	TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42U,
+	TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43U,
+	TEGRA_NVG_CHANNEL_ONLINE_CORE = 44U,
+	TEGRA_NVG_CHANNEL_CC3_CTRL = 45U,
+	TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46U,  /* obsoleted */
+	TEGRA_NVG_CHANNEL_LAST_INDEX,
+} tegra_nvg_channel_id_t;
+
+#endif /* T18X_TEGRA_ARI_H */
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 0000000..e3591ce
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	nvg_set_request_data
+	.globl	nvg_set_request
+	.globl	nvg_get_result
+
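+/*
+ * NVG requests are issued through system registers: s3_0_c15_c1_2 holds the
+ * request/channel index, while s3_0_c15_c1_3 holds the request data on writes
+ * and the result on reads.
+ */
+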
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+func nvg_set_request_data
+	msr	s3_0_c15_c1_2, x0
+	msr	s3_0_c15_c1_3, x1
+	ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+func nvg_set_request
+	msr	s3_0_c15_c1_2, x0
+	ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+func nvg_get_result
+	mrs	x0, s3_0_c15_c1_3
+	ret
+endfunc nvg_get_result
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
new file mode 100644
index 0000000..7eb6c6c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <denver.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <platform.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+/*******************************************************************************
+ * Register offsets for ARI request/results
+ ******************************************************************************/
+#define ARI_REQUEST			0x0U
+#define ARI_REQUEST_EVENT_MASK		0x4U
+#define ARI_STATUS			0x8U
+#define ARI_REQUEST_DATA_LO		0xCU
+#define ARI_REQUEST_DATA_HI		0x10U
+#define ARI_RESPONSE_DATA_LO		0x14U
+#define ARI_RESPONSE_DATA_HI		0x18U
+
+/* Status values for the current request */
+#define ARI_REQ_PENDING			1U
+#define ARI_REQ_ONGOING			3U
+#define ARI_REQUEST_VALID_BIT		(1U << 8)
+#define ARI_EVT_MASK_STANDBYWFI_BIT	(1U << 7)
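+/*
+ * Requests issued with a non-zero event mask are not polled for completion;
+ * see ari_request_wait().
+ */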
+
+/* default timeout (ms) to wait for ARI completion */
+#define ARI_MAX_RETRY_COUNT		2000
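+/* ari_request_wait() delays 1ms per retry, so this amounts to ~2 seconds */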
+
+/*******************************************************************************
+ * ARI helper functions
+ ******************************************************************************/
+static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
+{
+	return mmio_read_32((uint64_t)ari_base + (uint64_t)reg);
+}
+
+static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
+{
+	mmio_write_32((uint64_t)ari_base + (uint64_t)reg, val);
+}
+
+static inline uint32_t ari_get_request_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
+}
+
+static inline uint32_t ari_get_request_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
+}
+
+static inline uint32_t ari_get_response_low(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
+}
+
+static inline uint32_t ari_get_response_high(uint32_t ari_base)
+{
+	return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
+}
+
+static inline void ari_clobber_response(uint32_t ari_base)
+{
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
+}
+
+static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
+		uint32_t lo, uint32_t hi)
+{
+	uint32_t retries = ARI_MAX_RETRY_COUNT;
+	uint32_t status;
+	int32_t ret = 0;
+
+	/* program the request, event_mask, hi and lo registers */
+	ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
+	ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
+	ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
+	ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
+
+	/*
+	 * For commands that have an event trigger, we should bypass
+	 * ARI_STATUS polling, since MCE is waiting for SW to trigger
+	 * the event.
+	 */
+	if (evt_mask != 0U) {
+		ret = 0;
+	} else {
+		/* For shutdown/reboot commands, we don't have to check for timeouts */
+		if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) &&
+		    ((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
+		     (lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
+				ret = 0;
+		} else {
+			/*
+			 * Wait for the command response, but no longer than the timeout
+			 */
+			while (retries != 0U) {
+
+				/* read the command status */
+				status = ari_read_32(ari_base, ARI_STATUS);
+				if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U) {
+					break;
+				}
+
+				/* delay 1 ms */
+				mdelay(1);
+
+				/* decrement the retry count */
+				retries--;
+			}
+
+			/* assert if the command timed out */
+			if (retries == 0U) {
+				ERROR("ARI request timed out: req %d on CPU %d\n",
+					req, plat_my_core_pos());
+				assert(retries != 0U);
+			}
+		}
+	}
+
+	return ret;
+}
+
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int32_t ret = 0;
+
+	/* check for allowed power state */
+	if ((state != TEGRA_ARI_CORE_C0) &&
+	    (state != TEGRA_ARI_CORE_C1) &&
+	    (state != TEGRA_ARI_CORE_C6) &&
+	    (state != TEGRA_ARI_CORE_C7)) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		ret = EINVAL;
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+
+		/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
+		ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
+		TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+	}
+
+	return ret;
+}
+
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+	uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+	uint8_t update_wake_mask)
+{
+	uint32_t val = 0U;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster != 0U) {
+		val |= (cluster & (uint32_t)CLUSTER_CSTATE_MASK) |
+			(uint32_t)CLUSTER_CSTATE_UPDATE_BIT;
+	}
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex != 0U) {
+		val |= ((ccplex & (uint32_t)CCPLEX_CSTATE_MASK) << (uint32_t)CCPLEX_CSTATE_SHIFT) |
+			(uint32_t)CCPLEX_CSTATE_UPDATE_BIT;
+	}
+
+	/* update SYSTEM_CSTATE? */
+	if (system != 0U) {
+		val |= ((system & (uint32_t)SYSTEM_CSTATE_MASK) << (uint32_t)SYSTEM_CSTATE_SHIFT) |
+		       (((uint32_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			(uint32_t)SYSTEM_CSTATE_UPDATE_BIT);
+	}
+
+	/* update wake mask value? */
+	if (update_wake_mask != 0U) {
+		val |= (uint32_t)CSTATE_WAKE_MASK_UPDATE_BIT;
+	}
+
+	/* set the updated cstate info */
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
+			wake_mask);
+}
+
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	int32_t ret = 0;
+
+	/* sanity check crossover type */
+	if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
+	    (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)) {
+		ret = EINVAL;
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+
+		/* update crossover threshold time */
+		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
+			type, time);
+	}
+
+	return ret;
+}
+
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	int32_t ret;
+	uint64_t result;
+
+	/* sanity check state */
+	if (state == 0U) {
+		result = EINVAL;
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+
+		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
+		if (ret != 0) {
+			result = EINVAL;
+		} else {
+			result = (uint64_t)ari_get_response_low(ari_base);
+		}
+	}
+	return result;
+}
+
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* write the cstate stats */
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
+			stats);
+}
+
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
+{
+	uint64_t resp;
+	int32_t ret;
+	uint32_t local_data = data;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
+	if (cmd != TEGRA_ARI_MISC_ECHO) {
+		local_data = 0U;
+	}
+
+	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
+	if (ret != 0) {
+		resp = (uint64_t)ret;
+	} else {
+		/* get the command response */
+		resp = ari_get_response_low(ari_base);
+		resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
+	}
+
+	return resp;
+}
+
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int32_t ret;
+	uint32_t result;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
+			wake_time);
+	if (ret != 0) {
+		ERROR("%s: failed (%d)\n", __func__, ret);
+		result = 0U;
+	} else {
+		result = ari_get_response_low(ari_base) & 0x1U;
+	}
+
+	/* 1 = CCx allowed, 0 = CCx not allowed */
+	return (int32_t)result;
+}
+
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int32_t ret, result;
+
+	/* check for allowed power state */
+	if ((state != TEGRA_ARI_CORE_C0) &&
+	    (state != TEGRA_ARI_CORE_C1) &&
+	    (state != TEGRA_ARI_CORE_C6) &&
+	    (state != TEGRA_ARI_CORE_C7)) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		result = EINVAL;
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+
+		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
+				wake_time);
+		if (ret != 0) {
+			ERROR("%s: failed (%d)\n", __func__, ret);
+			result = 0;
+		} else {
+			/* 1 = SC7 allowed, 0 = SC7 not allowed */
+			result = (ari_get_response_low(ari_base) != 0U) ? 1 : 0;
+		}
+	}
+
+	return result;
+}
+
+int32_t ari_online_core(uint32_t ari_base, uint32_t core)
+{
+	uint64_t cpu = read_mpidr() & (uint64_t)(MPIDR_CPU_MASK);
+	uint64_t cluster = (read_mpidr() & (uint64_t)(MPIDR_CLUSTER_MASK)) >>
+			   (uint64_t)(MPIDR_AFFINITY_BITS);
+	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+	int32_t ret;
+
+	/* construct the current CPU # */
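+	/* each cluster has up to 4 cores, so the linear id is (cluster * 4) + cpu */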
+	cpu |= (cluster << 2);
+
+	/* sanity check target core id */
+	if ((core >= MCE_CORE_ID_MAX) || (cpu == (uint64_t)core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		ret = EINVAL;
+	} else {
+		/*
+		 * The Denver cluster has 2 CPUs only - 0, 1.
+		 */
+		if ((impl == (uint32_t)DENVER_IMPL) &&
+		    ((core == 2U) || (core == 3U))) {
+			ERROR("%s: unknown core id (%d)\n", __func__, core);
+			ret = EINVAL;
+		} else {
+			/* clean the previous response state */
+			ari_clobber_response(ari_base);
+			ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
+		}
+	}
+
+	return ret;
+}
+
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	uint32_t val;
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
+}
+
+int32_t ari_reset_vector_update(uint32_t ari_base)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/*
+	 * Need to program the CPU reset vector one time during cold boot
+	 * and SC7 exit
+	 */
+	(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
+
+	return 0;
+}
+
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
+			0U, 0U);
+}
+
+int32_t ari_roc_flush_cache(uint32_t ari_base)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
+			0U, 0U);
+}
+
+int32_t ari_roc_clean_cache(uint32_t ari_base)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
+			0U, 0U);
+}
+
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
+{
+	uint64_t mca_arg_data, result = 0;
+	uint32_t resp_lo, resp_hi;
+	uint32_t mca_arg_err, mca_arg_finish;
+	int32_t ret;
+
+	/* Set data (write) */
+	mca_arg_data = (data != NULL) ? *data : 0ULL;
+
+	/* Set command */
+	ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
+	ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
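+	/*
+	 * Note: the command is passed through the response data registers,
+	 * while ari_request_wait() passes mca_arg_data through the request
+	 * data registers.
+	 */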
+
+	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
+			       (uint32_t)mca_arg_data,
+			       (uint32_t)(mca_arg_data >> 32U));
+	if (ret == 0) {
+		resp_lo = ari_get_response_low(ari_base);
+		resp_hi = ari_get_response_high(ari_base);
+
+		mca_arg_err = resp_lo & MCA_ARG_ERROR_MASK;
+		mca_arg_finish = (resp_hi >> MCA_ARG_FINISH_SHIFT) &
+				 MCA_ARG_FINISH_MASK;
+
+		if (mca_arg_finish == 0U) {
+			result = (uint64_t)mca_arg_err;
+		} else {
+			if (data != NULL) {
+				resp_lo = ari_get_request_low(ari_base);
+				resp_hi = ari_get_request_high(ari_base);
+				*data = ((uint64_t)resp_hi << 32U) |
+					 (uint64_t)resp_lo;
+			}
+		}
+	}
+
+	return result;
+}
+
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
+{
+	int32_t ret = 0;
+	/* sanity check GSC ID */
+	if (gsc_idx > (uint32_t)TEGRA_ARI_GSC_VPR_IDX) {
+		ret = EINVAL;
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+
+		/*
+		 * The MCE code will read the GSC carveout value, corresponding to
+		 * the ID, from the MC registers and update the internal GSC registers
+		 * of the CCPLEX.
+		 */
+		(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
+	}
+
+	return ret;
+}
+
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
+{
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/*
+	 * The MCE will shutdown or restart the entire system
+	 */
+	(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
+}
+
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
+		uint64_t *data)
+{
+	int32_t ret, result;
+	uint32_t val;
+	uint8_t req_cmd, req_status;
+
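+	/* the command type is encoded in the request at UNCORE_PERFMON_CMD_SHIFT */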
+	req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);
+
+	/* clean the previous response state */
+	ari_clobber_response(ari_base);
+
+	/* sanity check input parameters */
+	if ((req_cmd == UNCORE_PERFMON_CMD_READ) && (data == NULL)) {
+		ERROR("invalid parameters\n");
+		result = EINVAL;
+	} else {
+		/*
+		 * For "write" commands get the value that has to be written
+		 * to the uncore perfmon registers
+		 */
+		val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
+			(uint32_t)*data : 0U;
+
+		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
+				       (uint32_t)req);
+		if (ret != 0) {
+			result = ret;
+		} else {
+			/* read the command status value */
+			req_status = (uint8_t)ari_get_response_high(ari_base) &
+					 UNCORE_PERFMON_RESP_STATUS_MASK;
+
+			/*
+			 * For "read" commands get the data from the uncore
+			 * perfmon registers
+			 */
+			req_status >>= UNCORE_PERFMON_RESP_STATUS_SHIFT;
+			if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
+				*data = ari_get_response_low(ari_base);
+			}
+			result = (int32_t)req_status;
+		}
+	}
+
+	return result;
+}
+
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
+{
+	/*
+	 * This invokes the ARI_MISC_CCPLEX command, which can be
+	 * used to enable/disable CoreSight clock gating.
+	 */
+
+	if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) ||
+		((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
+		(value > 1U))) {
+		ERROR("%s: invalid parameters\n", __func__);
+	} else {
+		/* clean the previous response state */
+		ari_clobber_response(ari_base);
+		(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
+	}
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
new file mode 100644
index 0000000..5435ce6
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+
+/* NVG functions handlers */
+static arch_mce_ops_t nvg_mce_ops = {
+	.enter_cstate = nvg_enter_cstate,
+	.update_cstate_info = nvg_update_cstate_info,
+	.update_crossover_time = nvg_update_crossover_time,
+	.read_cstate_stats = nvg_read_cstate_stats,
+	.write_cstate_stats = nvg_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = nvg_is_ccx_allowed,
+	.is_sc7_allowed = nvg_is_sc7_allowed,
+	.online_core = nvg_online_core,
+	.cc3_ctrl = nvg_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state,
+	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+	.misc_ccplex = ari_misc_ccplex
+};
+
+/* ARI functions handlers */
+static arch_mce_ops_t ari_mce_ops = {
+	.enter_cstate = ari_enter_cstate,
+	.update_cstate_info = ari_update_cstate_info,
+	.update_crossover_time = ari_update_crossover_time,
+	.read_cstate_stats = ari_read_cstate_stats,
+	.write_cstate_stats = ari_write_cstate_stats,
+	.call_enum_misc = ari_enumeration_misc,
+	.is_ccx_allowed = ari_is_ccx_allowed,
+	.is_sc7_allowed = ari_is_sc7_allowed,
+	.online_core = ari_online_core,
+	.cc3_ctrl = ari_cc3_ctrl,
+	.update_reset_vector = ari_reset_vector_update,
+	.roc_flush_cache = ari_roc_flush_cache,
+	.roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+	.roc_clean_cache = ari_roc_clean_cache,
+	.read_write_mca = ari_read_write_mca,
+	.update_ccplex_gsc = ari_update_ccplex_gsc,
+	.enter_ccplex_state = ari_enter_ccplex_state,
+	.read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+	.misc_ccplex = ari_misc_ccplex
+};
+
+typedef struct {
+	uint32_t ari_base;
+	arch_mce_ops_t *ops;
+} mce_config_t;
+
+/* Table to hold the per-CPU ARI base address and function handlers */
+static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
+	{
+		/* A57 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 2 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* A57 Core 3 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
+		.ops = &ari_mce_ops,
+	},
+	{
+		/* D15 Core 0 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
+		.ops = &nvg_mce_ops,
+	},
+	{
+		/* D15 Core 1 */
+		.ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
+		.ops = &nvg_mce_ops,
+	}
+};
+
+static uint32_t mce_get_curr_cpu_ari_base(void)
+{
+	uint64_t mpidr = read_mpidr();
+	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper per-CPU ARI base
+	 * address, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_cfg_table array.
+	 */
+	if (impl == DENVER_IMPL) {
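+		/* Denver cores occupy entries 4 and 5 of mce_cfg_table */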
+		cpuid |= 0x4U;
+	}
+
+	return mce_cfg_table[cpuid].ari_base;
+}
+
+static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
+{
+	uint64_t mpidr = read_mpidr();
+	uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
+			(uint64_t)MIDR_IMPL_MASK;
+
+	/*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
+	 * numbers start from 0. In order to get the proper arch_mce_ops_t
+	 * struct, we have to convert the Denver CPU ids to the corresponding
+	 * indices in the mce_cfg_table array.
+	 */
+	if (impl == DENVER_IMPL) {
+		cpuid |= 0x4U;
+	}
+
+	return mce_cfg_table[cpuid].ops;
+}
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ ******************************************************************************/
+int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+			uint64_t arg2)
+{
+	const arch_mce_ops_t *ops;
+	gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
+	uint32_t cpu_ari_base;
+	uint64_t ret64 = 0, arg3, arg4, arg5;
+	int32_t ret = 0;
+
+	assert(gp_regs != NULL);
+
+	/* get a pointer to the CPU's arch_mce_ops_t struct */
+	ops = mce_get_curr_cpu_ops();
+
+	/* get the CPU's ARI base address */
+	cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+	switch (cmd) {
+	case MCE_CMD_ENTER_CSTATE:
+		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_UPDATE_CSTATE_INFO:
+		/*
+		 * get the parameters required for the update cstate info
+		 * command
+		 */
+		arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
+		arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
+		arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));
+
+		ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
+				(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
+				(uint32_t)arg4, (uint8_t)arg5);
+		if (ret < 0) {
+			ERROR("%s: update_cstate_info failed(%d)\n",
+				__func__, ret);
+		}
+
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));
+
+		break;
+
+	case MCE_CMD_UPDATE_CROSSOVER_TIME:
+		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: update_crossover_time failed(%d)\n",
+				__func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_READ_CSTATE_STATS:
+		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
+
+		/* update context to return cstate stats value */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));
+
+		break;
+
+	case MCE_CMD_WRITE_CSTATE_STATS:
+		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: write_cstate_stats failed(%d)\n",
+				__func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_IS_CCX_ALLOWED:
+		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return CCx status value */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+			      (uint64_t)(ret));
+
+		break;
+
+	case MCE_CMD_IS_SC7_ALLOWED:
+		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
+		if (ret < 0) {
+			ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
+			break;
+		}
+
+		/* update context to return SC7 status value */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+			      (uint64_t)(ret));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
+			      (uint64_t)(ret));
+
+		break;
+
+	case MCE_CMD_ONLINE_CORE:
+		ret = ops->online_core(cpu_ari_base, arg0);
+		if (ret < 0) {
+			ERROR("%s: online_core failed(%d)\n", __func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_CC3_CTRL:
+		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
+		if (ret < 0) {
+			ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_ECHO_DATA:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
+				arg0);
+
+		/* update context to return if echo'd data matched source */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+			      ((ret64 == arg0) ? 1ULL : 0ULL));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
+			      ((ret64 == arg0) ? 1ULL : 0ULL));
+
+		break;
+
+	case MCE_CMD_READ_VERSIONS:
+		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
+			arg0);
+
+		/*
+		 * version = minor(63:32) | major(31:0). Update context
+		 * to return major and minor version number.
+		 */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+			      (ret64));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
+			      (ret64 >> 32ULL));
+
+		break;
+
+	case MCE_CMD_ENUM_FEATURES:
+		ret64 = ops->call_enum_misc(cpu_ari_base,
+				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
+
+		/* update context to return features value */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
+		if (ret < 0) {
+			ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
+				ret);
+		}
+
+		break;
+
+	case MCE_CMD_ROC_FLUSH_CACHE:
+		ret = ops->roc_flush_cache(cpu_ari_base);
+		if (ret < 0) {
+			ERROR("%s: flush cache failed(%d)\n", __func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_ROC_CLEAN_CACHE:
+		ret = ops->roc_clean_cache(cpu_ari_base);
+		if (ret < 0) {
+			ERROR("%s: clean cache failed(%d)\n", __func__, ret);
+		}
+
+		break;
+
+	case MCE_CMD_ENUM_READ_MCA:
+		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+		/* update context to return MCA data/error */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+
+		break;
+
+	case MCE_CMD_ENUM_WRITE_MCA:
+		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+		/* update context to return MCA error */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+
+		break;
+
+#if ENABLE_CHIP_VERIFICATION_HARNESS
+	case MCE_CMD_ENABLE_LATIC:
+		/*
+		 * This call is not for production use. The constant value,
+		 * 0xFFFF0000, is specific to allowing LATIC to be enabled on
+		 * pre-production parts for the chip verification harness.
+		 *
+		 * Enabling LATIC allows S/W to read the MINI ISMs in the
+		 * CCPLEX. The ISMs are used for various measurements relevant
+		 * to particular locations in the Silicon. They are small
+		 * counters which can be polled to determine how fast a
+		 * particular location in the Silicon is.
+		 */
+		ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
+			0xFFFF0000);
+
+		break;
+#endif
+
+	case MCE_CMD_UNCORE_PERFMON_REQ:
+		ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
+
+		/* update context to return data */
+		write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
+		break;
+
+	case MCE_CMD_MISC_CCPLEX:
+		ops->misc_ccplex(cpu_ari_base, arg0, arg1);
+
+		break;
+
+	default:
+		ERROR("unknown MCE command (%lu)\n", cmd);
+		ret = EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*******************************************************************************
+ * Handler to update the reset vector for CPUs
+ ******************************************************************************/
+int32_t mce_update_reset_vector(void)
+{
+	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	ops->update_reset_vector(mce_get_curr_cpu_ari_base());
+
+	return 0;
+}
+
+static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
+{
+	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int32_t mce_update_gsc_videomem(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzdram(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZ SysRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzram(void)
+{
+	return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
+}
+
+/*******************************************************************************
+ * Handler to shutdown/reset the entire system
+ ******************************************************************************/
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
+{
+	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	/* sanity check state value */
+	if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
+	    (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
+		panic();
+	}
+
+	ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
+
+	/* wait till the CCPLEX powers down */
+	for (;;) {
+		;
+	}
+
+}
+
+/*******************************************************************************
+ * Handler to issue the UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+void mce_update_cstate_info(const mce_cstate_info_t *cstate)
+{
+	const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+	/* issue the UPDATE_CSTATE_INFO request */
+	ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
+		cstate->ccplex, cstate->system, cstate->system_state_force,
+		cstate->wake_mask, cstate->update_wake_mask);
+}
+
+/*******************************************************************************
+ * Handler to read the MCE firmware version and check if it is compatible
+ * with the interface header the BL3-1 was compiled against
+ ******************************************************************************/
+void mce_verify_firmware_version(void)
+{
+	const arch_mce_ops_t *ops;
+	uint32_t cpu_ari_base;
+	uint64_t version;
+	uint32_t major, minor;
+
+	/*
+	 * MCE firmware is not supported on simulation platforms.
+	 */
+	if (tegra_platform_is_emulation()) {
+
+		INFO("MCE firmware is not supported\n");
+
+	} else {
+		/* get a pointer to the CPU's arch_mce_ops_t struct */
+		ops = mce_get_curr_cpu_ops();
+
+		/* get the CPU's ARI base address */
+		cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+		/*
+		 * Read the MCE firmware version and extract the major and minor
+		 * version fields
+		 */
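+		/* version layout: major in bits 31:0, minor in bits 63:32 */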
+		version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
+		major = (uint32_t)version;
+		minor = (uint32_t)(version >> 32);
+
+		INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
+			TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
+
+		/*
+		 * Verify that the MCE firmware version and the interface header
+		 * match
+		 */
+		if (major != TEGRA_ARI_VERSION_MAJOR) {
+			ERROR("ARI major version mismatch\n");
+			panic();
+		}
+
+		if (minor < TEGRA_ARI_VERSION_MINOR) {
+			ERROR("ARI minor version mismatch\n");
+			panic();
+		}
+	}
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
new file mode 100644
index 0000000..243c8f3
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
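+/*
+ * NVG handlers for the Denver cluster. Requests are issued through the NVG
+ * system register channel (see nvg_helpers.S) instead of the ARI MMIO
+ * aperture, which is why the ari_base argument is unused here.
+ */
+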
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	int32_t ret = 0;
+
+	(void)ari_base;
+
+	/* check for allowed power state */
+	if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+	    (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		ret = EINVAL;
+	} else {
+		/* time (TSC ticks) until the core is expected to get a wake event */
+		nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+
+		/* set the core cstate */
+		write_actlr_el1(state);
+	}
+
+	return ret;
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ */
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+		uint8_t update_wake_mask)
+{
+	uint64_t val = 0ULL;
+
+	(void)ari_base;
+
+	/* update CLUSTER_CSTATE? */
+	if (cluster != 0U) {
+		val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
+			CLUSTER_CSTATE_UPDATE_BIT;
+	}
+
+	/* update CCPLEX_CSTATE? */
+	if (ccplex != 0U) {
+		val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+			CCPLEX_CSTATE_UPDATE_BIT;
+	}
+
+	/* update SYSTEM_CSTATE? */
+	if (system != 0U) {
+		val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+		       (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+			SYSTEM_CSTATE_UPDATE_BIT);
+	}
+
+	/* update wake mask value? */
+	if (update_wake_mask != 0U) {
+		val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+	}
+
+	/* set the wake mask */
+	val &= CSTATE_WAKE_MASK_CLEAR;
+	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
+
+	/* set the updated cstate info */
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+
+	return 0;
+}
+
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+	int32_t ret = 0;
+
+	(void)ari_base;
+
+	/* sanity check crossover type */
+	if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1) {
+		ret = EINVAL;
+	} else {
+		/*
+		 * The crossover threshold limit types range from
+		 * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7.
+		 * The command indices for updating the threshold can be generated
+		 * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
+		 * command index.
+		 */
+		nvg_set_request_data((TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 +
+			(uint64_t)type), (uint64_t)time);
+	}
+
+	return ret;
+}
+
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+	uint64_t ret;
+
+	(void)ari_base;
+
+	/* sanity check state */
+	if (state == 0U) {
+		ret = EINVAL;
+	} else {
+		/*
+		 * The cstate types range from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+		 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+		 * reading the stats can be generated by adding the type to
+		 * the NVG_CLEAR_CSTATE_STATS command index.
+		 */
+		nvg_set_request((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+				(uint64_t)state));
+		ret = nvg_get_result();
+	}
+
+	return ret;
+}
+
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+	uint64_t val;
+
+	(void)ari_base;
+
+	/*
+	 * The only difference between a CSTATE_STATS_WRITE and
+	 * CSTATE_STATS_READ is the usage of bits 63:32 in the request.
+	 * Bits 63:32 are set to '0' for a read, while a write contains the
+	 * actual stats value to be written.
+	 */
+	val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;
+
+	/*
+	 * The cstate types range from NVG_READ_CSTATE_STATS_SC7_ENTRIES
+	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
+	 * writing the stats can be generated by adding the type to
+	 * the NVG_CLEAR_CSTATE_STATS command index.
+	 */
+	nvg_set_request_data((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+			     (uint64_t)state), val);
+
+	return 0;
+}
+
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	(void)ari_base;
+	(void)state;
+	(void)wake_time;
+
+	/* This does not apply to the Denver cluster */
+	return 0;
+}
+
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+	uint64_t val;
+	int32_t ret;
+
+	(void)ari_base;
+
+	/* check for allowed power state */
+	if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+	    (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+		ERROR("%s: unknown cstate (%d)\n", __func__, state);
+		ret = EINVAL;
+	} else {
+		/*
+		 * Request format -
+		 * 63:32 = wake time
+		 * 31:0 = C-state for this core
+		 */
+		val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+				((uint64_t)state & MCE_SC7_ALLOWED_MASK);
+
+		/* issue command to check if SC7 is allowed */
+		nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+		/* 1 = SC7 allowed, 0 = SC7 not allowed */
+		ret = (nvg_get_result() != 0ULL) ? 1 : 0;
+	}
+
+	return ret;
+}
+
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
+{
+	uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
+	uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
+			(uint64_t)MIDR_IMPL_MASK;
+	int32_t ret = 0;
+
+	(void)ari_base;
+
+	/* sanity check core id */
+	if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
+		ERROR("%s: unsupported core id (%d)\n", __func__, core);
+		ret = EINVAL;
+	} else {
+		/*
+		 * The Denver cluster has 2 CPUs only - 0, 1.
+		 */
+		if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
+			ERROR("%s: unknown core id (%d)\n", __func__, core);
+			ret = EINVAL;
+		} else {
+			/* get a core online */
+			nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
+				((uint64_t)core & MCE_CORE_ID_MASK));
+		}
+	}
+
+	return ret;
+}
+
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+	uint32_t val;
+
+	(void)ari_base;
+
+	/*
+	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+	 * the SW visible voltage/frequency request registers for all non
+	 * floorswept cores valid independent of StandbyWFI and disabling
+	 * the IDLE voltage/frequency request register. If set, Auto-CC3
+	 * will be enabled by setting the ARM SW visible voltage/frequency
+	 * request registers for all non floorswept cores to be enabled by
+	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
+	 * voltage/frequency request register enabled.
+	 */
+	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+		((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
+
+	return 0;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_memctrl.c b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
new file mode 100644
index 0000000..957ecf1
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <memctrl_v2.h>
+
+/*******************************************************************************
+ * Array to hold stream_id override config register offsets
+ ******************************************************************************/
+static const uint32_t tegra186_streamid_override_regs[] = {
+	MC_STREAMID_OVERRIDE_CFG_PTCR,
+	MC_STREAMID_OVERRIDE_CFG_AFIR,
+	MC_STREAMID_OVERRIDE_CFG_HDAR,
+	MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR,
+	MC_STREAMID_OVERRIDE_CFG_NVENCSRD,
+	MC_STREAMID_OVERRIDE_CFG_SATAR,
+	MC_STREAMID_OVERRIDE_CFG_MPCORER,
+	MC_STREAMID_OVERRIDE_CFG_NVENCSWR,
+	MC_STREAMID_OVERRIDE_CFG_AFIW,
+	MC_STREAMID_OVERRIDE_CFG_HDAW,
+	MC_STREAMID_OVERRIDE_CFG_MPCOREW,
+	MC_STREAMID_OVERRIDE_CFG_SATAW,
+	MC_STREAMID_OVERRIDE_CFG_ISPRA,
+	MC_STREAMID_OVERRIDE_CFG_ISPWA,
+	MC_STREAMID_OVERRIDE_CFG_ISPWB,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR,
+	MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW,
+	MC_STREAMID_OVERRIDE_CFG_TSECSRD,
+	MC_STREAMID_OVERRIDE_CFG_TSECSWR,
+	MC_STREAMID_OVERRIDE_CFG_GPUSRD,
+	MC_STREAMID_OVERRIDE_CFG_GPUSWR,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRAA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCR,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCRAB,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWAA,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCW,
+	MC_STREAMID_OVERRIDE_CFG_SDMMCWAB,
+	MC_STREAMID_OVERRIDE_CFG_VICSRD,
+	MC_STREAMID_OVERRIDE_CFG_VICSWR,
+	MC_STREAMID_OVERRIDE_CFG_VIW,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSRD,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSWR,
+	MC_STREAMID_OVERRIDE_CFG_APER,
+	MC_STREAMID_OVERRIDE_CFG_APEW,
+	MC_STREAMID_OVERRIDE_CFG_NVJPGSRD,
+	MC_STREAMID_OVERRIDE_CFG_NVJPGSWR,
+	MC_STREAMID_OVERRIDE_CFG_SESRD,
+	MC_STREAMID_OVERRIDE_CFG_SESWR,
+	MC_STREAMID_OVERRIDE_CFG_ETRR,
+	MC_STREAMID_OVERRIDE_CFG_ETRW,
+	MC_STREAMID_OVERRIDE_CFG_TSECSRDB,
+	MC_STREAMID_OVERRIDE_CFG_TSECSWRB,
+	MC_STREAMID_OVERRIDE_CFG_GPUSRD2,
+	MC_STREAMID_OVERRIDE_CFG_GPUSWR2,
+	MC_STREAMID_OVERRIDE_CFG_AXISR,
+	MC_STREAMID_OVERRIDE_CFG_AXISW,
+	MC_STREAMID_OVERRIDE_CFG_EQOSR,
+	MC_STREAMID_OVERRIDE_CFG_EQOSW,
+	MC_STREAMID_OVERRIDE_CFG_UFSHCR,
+	MC_STREAMID_OVERRIDE_CFG_UFSHCW,
+	MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPW,
+	MC_STREAMID_OVERRIDE_CFG_BPMPDMAR,
+	MC_STREAMID_OVERRIDE_CFG_BPMPDMAW,
+	MC_STREAMID_OVERRIDE_CFG_AONR,
+	MC_STREAMID_OVERRIDE_CFG_AONW,
+	MC_STREAMID_OVERRIDE_CFG_AONDMAR,
+	MC_STREAMID_OVERRIDE_CFG_AONDMAW,
+	MC_STREAMID_OVERRIDE_CFG_SCER,
+	MC_STREAMID_OVERRIDE_CFG_SCEW,
+	MC_STREAMID_OVERRIDE_CFG_SCEDMAR,
+	MC_STREAMID_OVERRIDE_CFG_SCEDMAW,
+	MC_STREAMID_OVERRIDE_CFG_APEDMAR,
+	MC_STREAMID_OVERRIDE_CFG_APEDMAW,
+	MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1,
+	MC_STREAMID_OVERRIDE_CFG_VICSRD1,
+	MC_STREAMID_OVERRIDE_CFG_NVDECSRD1
+};
+
+/*******************************************************************************
+ * Array to hold the security configs for stream IDs
+ ******************************************************************************/
+static const mc_streamid_security_cfg_t tegra186_streamid_sec_cfgs[] = {
+	mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE),
+	mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE),
+	mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+	mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+};
+
+/*******************************************************************************
+ * Array to hold the transaction override configs
+ ******************************************************************************/
+static const mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
+	mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AONW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(APEW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR),
+	mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
+};
+
+/*******************************************************************************
+ * Struct to hold the memory controller settings
+ ******************************************************************************/
+static tegra_mc_settings_t tegra186_mc_settings = {
+	.streamid_override_cfg = tegra186_streamid_override_regs,
+	.num_streamid_override_cfgs = ARRAY_SIZE(tegra186_streamid_override_regs),
+	.streamid_security_cfg = tegra186_streamid_sec_cfgs,
+	.num_streamid_security_cfgs = ARRAY_SIZE(tegra186_streamid_sec_cfgs),
+	.txn_override_cfg = tegra186_txn_override_cfgs,
+	.num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs)
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the memory controller's settings struct
+ ******************************************************************************/
+tegra_mc_settings_t *tegra_get_mc_settings(void)
+{
+	return &tegra186_mc_settings;
+}
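The arrays above are exported so the common Tegra memory controller driver can walk them when it programs the MC at boot and again on System Suspend exit. A minimal sketch of such a consumer is shown below; it is illustrative only and not part of this patch, and tegra_mc_streamid_override(), tegra_mc_program_sec_cfg() and tegra_mc_program_txn_override() are placeholder helpers standing in for whatever accessors the common driver actually provides.

static void tegra_mc_apply_platform_settings(void)
{
	uint32_t i;
	tegra_mc_settings_t *cfg = tegra_get_mc_settings();

	/* program one StreamID override register per exported offset */
	for (i = 0; i < cfg->num_streamid_override_cfgs; i++)
		tegra_mc_streamid_override(cfg->streamid_override_cfg[i]);

	/* apply the per-client security settings */
	for (i = 0; i < cfg->num_streamid_security_cfgs; i++)
		tegra_mc_program_sec_cfg(&cfg->streamid_security_cfg[i]);

	/* apply the transaction override settings */
	for (i = 0; i < cfg->num_txn_override_cfgs; i++)
		tegra_mc_program_txn_override(&cfg->txn_override_cfg[i]);
}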
diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
new file mode 100644
index 0000000..095614e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <platform.h>
+#include <psci.h>
+#include <smmu.h>
+#include <string.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern void prepare_cpu_pwr_dwn(void);
+extern void tegra186_cpu_reset_handler(void);
+extern uint32_t __tegra186_cpu_reset_handler_end,
+		__tegra186_smmu_context;
+
+/* state id mask */
+#define TEGRA186_STATE_ID_MASK		0xF
+/* constants to get power state's wake time */
+#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0
+#define TEGRA186_WAKE_TIME_SHIFT	4
+/* default core wake mask for CPU_SUSPEND */
+#define TEGRA186_CORE_WAKE_MASK		0x180c
+/* context size to save during system suspend */
+#define TEGRA186_SE_CONTEXT_SIZE	3
+
+static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
+static struct t18x_psci_percpu_data {
+	unsigned int wake_time;
+} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];
+
+/* System power down state */
+uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+					psci_power_state_t *req_state)
+{
+	int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
+	int cpu = plat_my_core_pos();
+
+	/* save the core wake time (in TSC ticks) */
+	percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
+			<< TEGRA186_WAKE_TIME_SHIFT;
+
+	/*
+	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
+	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
+	 * is called with caches disabled. It is possible to read a stale value
+	 * from DRAM in that function, because the L2 cache is not flushed
+	 * unless the cluster is entering CC6/CC7.
+	 */
+	clean_dcache_range((uint64_t)&percpu_data[cpu],
+			sizeof(percpu_data[cpu]));
+
+	/* Sanity check the requested state id */
+	switch (state_id) {
+	case PSTATE_ID_CORE_IDLE:
+	case PSTATE_ID_CORE_POWERDN:
+
+		/* Core powerdown request */
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+
+		break;
+
+	default:
+		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	return PSCI_E_SUCCESS;
+}
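For reference, the vendor-specific power_state encoding decoded above keeps the core state id in bits[3:0] and the requested wake time in bits[27:4]; after masking and shifting, the recovered wake time effectively has a granularity of 256 TSC ticks. A minimal helper that mirrors the decode in isolation (a sketch, not part of this patch):

static inline uint32_t tegra186_wake_time_from_pstate(uint32_t power_state)
{
	/* mask bits[27:4] and shift back up into TSC ticks */
	return (power_state & TEGRA186_WAKE_TIME_MASK) <<
	       TEGRA186_WAKE_TIME_SHIFT;
}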
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	const plat_local_state_t *pwr_domain_state;
+	unsigned int stateid_afflvl0, stateid_afflvl2;
+	int cpu = plat_my_core_pos();
+	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	mce_cstate_info_t cstate_info = { 0 };
+	uint64_t smmu_ctx_base;
+	uint32_t val;
+
+	/* get the state ID */
+	pwr_domain_state = target_state->pwr_domain_state;
+	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
+		TEGRA186_STATE_ID_MASK;
+	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+		TEGRA186_STATE_ID_MASK;
+
+	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
+	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {
+
+		/* Enter CPU idle/powerdown */
+		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
+			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
+				percpu_data[cpu].wake_time, 0);
+
+	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+		/* save SE registers */
+		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
+				SE_MUTEX_WATCHDOG_NS_LIMIT);
+		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
+				RNG_MUTEX_WATCHDOG_NS_LIMIT);
+		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
+				PKA_MUTEX_WATCHDOG_NS_LIMIT);
+
+		/* save 'Secure Boot' Processor Feature Config Register */
+		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
+		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);
+
+		/* save SMMU context to TZDRAM */
+		smmu_ctx_base = params_from_bl2->tzdram_base +
+			((uintptr_t)&__tegra186_smmu_context -
+			 (uintptr_t)tegra186_cpu_reset_handler);
+		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
+
+		/* Prepare for system suspend */
+		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
+		cstate_info.system_state_force = 1;
+		cstate_info.update_wake_mask = 1;
+		mce_update_cstate_info(&cstate_info);
+
+		/* Loop until system suspend is allowed */
+		do {
+			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+					TEGRA_ARI_CORE_C7,
+					MCE_CORE_SLEEP_TIME_INFINITE,
+					0);
+		} while (val == 0);
+
+		/* Instruct the MCE to enter system suspend state */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = *states;
+	int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
+	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+	mce_cstate_info_t cstate_info = { 0 };
+
+	/* get the power state at this level */
+	if (lvl == MPIDR_AFFLVL1)
+		target = *(states + core_pos);
+	if (lvl == MPIDR_AFFLVL2)
+		target = *(states + cpu);
+
+	/* CPU suspend */
+	if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {
+
+		/* Program default wake mask */
+		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
+		cstate_info.update_wake_mask = 1;
+		mce_update_cstate_info(&cstate_info);
+
+		/* Check if CCx state is allowed. */
+		ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
+				TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
+				0);
+		if (ret)
+			return PSTATE_ID_CORE_POWERDN;
+	}
+
+	/* CPU off */
+	if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {
+
+		/* find out the number of ON cpus in the cluster */
+		do {
+			target = *states++;
+			if (target != PLAT_MAX_OFF_STATE)
+				cluster_powerdn = 0;
+		} while (--ncpu);
+
+		/* Enable cluster powerdn from last CPU in the cluster */
+		if (cluster_powerdn) {
+
+			/* Enable CC7 state and turn off wake mask */
+			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+			cstate_info.update_wake_mask = 1;
+			mce_update_cstate_info(&cstate_info);
+
+			/* Check if CCx state is allowed. */
+			ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
+						  TEGRA_ARI_CORE_C7,
+						  MCE_CORE_SLEEP_TIME_INFINITE,
+						  0);
+			if (ret)
+				return PSTATE_ID_CORE_POWERDN;
+
+		} else {
+
+			/* Turn off wake_mask */
+			cstate_info.update_wake_mask = 1;
+			mce_update_cstate_info(&cstate_info);
+		}
+	}
+
+	/* System Suspend */
+	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+	    (target == PSTATE_ID_SOC_POWERDN))
+		return PSTATE_ID_SOC_POWERDN;
+
+	/* default state */
+	return PSCI_LOCAL_STATE_RUN;
+}
+
+int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+	const plat_local_state_t *pwr_domain_state =
+		target_state->pwr_domain_state;
+	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+		TEGRA186_STATE_ID_MASK;
+	uint64_t val;
+
+	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+		/*
+		 * The TZRAM loses power when we enter system suspend. To
+		 * allow graceful exit from system suspend, we need to copy
+		 * BL3-1 over to TZDRAM.
+		 */
+		val = params_from_bl2->tzdram_base +
+			((uintptr_t)&__tegra186_cpu_reset_handler_end -
+			 (uintptr_t)tegra186_cpu_reset_handler);
+		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
+			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+	uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+			MPIDR_AFFINITY_BITS;
+
+	if (target_cluster > MPIDR_AFFLVL1) {
+		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
+		return PSCI_E_NOT_PRESENT;
+	}
+
+	/* construct the target CPU # */
+	target_cpu |= (target_cluster << 2);
+
+	mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+	int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
+	mce_cstate_info_t cstate_info = { 0 };
+
+	/*
+	 * Reset the power state info for CPUs when onlining. We set the
+	 * deepest power state when offlining a core, but that may not be
+	 * what the non-secure software controlling idle states requested.
+	 * The non-secure software re-initializes this info when the core
+	 * comes back online.
+	 */
+	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {
+
+		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
+		cstate_info.update_wake_mask = 1;
+		mce_update_cstate_info(&cstate_info);
+	}
+
+	/*
+	 * Check if we are exiting from deep sleep and restore SE
+	 * context if we are.
+	 */
+	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[0]);
+		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[1]);
+		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
+			se_regs[2]);
+
+		/* Init SMMU */
+		tegra_smmu_init();
+
+		/*
+		 * Reset the power state info for the last core doing SC7
+		 * entry and exit. We set the deepest power states (CC7 and
+		 * SC7) for SC7 entry, which may not be what the non-secure
+		 * software controlling idle states requested.
+		 */
+		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
+		cstate_info.update_wake_mask = 1;
+		mce_update_cstate_info(&cstate_info);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+	/* Disable Denver's DCO operations */
+	if (impl == DENVER_IMPL)
+		denver_disable_dco();
+
+	/* Turn off CPU */
+	(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
+			MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+	return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+	mce_cstate_info_t cstate_info = { 0 };
+	uint32_t val;
+
+	if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
+
+		/* power off the entire system */
+		mce_enter_ccplex_state(tegra186_system_powerdn_state);
+
+	} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
+
+		/* Prepare for quasi power down */
+		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+		cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
+		cstate_info.system_state_force = 1;
+		cstate_info.update_wake_mask = 1;
+		mce_update_cstate_info(&cstate_info);
+
+		/* loop until other CPUs power down */
+		do {
+			val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+					TEGRA_ARI_CORE_C7,
+					MCE_CORE_SLEEP_TIME_INFINITE,
+					0);
+		} while (val == 0);
+
+		/* Enter quasi power down state */
+		(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+		/* disable GICC */
+		tegra_gic_cpuif_deactivate();
+
+		/* power down core */
+		prepare_cpu_pwr_dwn();
+
+		/* flush L1/L2 data caches */
+		dcsw_op_all(DCCISW);
+
+	} else {
+		ERROR("%s: unsupported power down state (%d)\n", __func__,
+			tegra186_system_powerdn_state);
+	}
+
+	wfi();
+
+	/* wait for the system to power down */
+	for (;;) {
+		;
+	}
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
+
+	return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c
new file mode 100644
index 0000000..52daab2
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mce.h>
+#include <mmio.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+#define MISCREG_CPU_RESET_VECTOR	0x2000
+#define MISCREG_AA64_RST_LOW		0x2004
+#define MISCREG_AA64_RST_HIGH		0x2008
+
+#define SCRATCH_SECURE_RSV1_SCRATCH_0	0x658
+#define SCRATCH_SECURE_RSV1_SCRATCH_1	0x65C
+
+#define CPU_RESET_MODE_AA64		1
+
+extern void tegra186_cpu_reset_handler(void);
+extern void tegra_secure_entrypoint(void);
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t __tegra186_cpu_reset_handler_end;
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+	uint32_t addr_low, addr_high;
+	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+	uint64_t cpu_reset_handler_base;
+
+	INFO("Setting up secondary CPU boot\n");
+
+	if ((tegra_bl31_phys_base >= TEGRA_TZRAM_BASE) &&
+	    (tegra_bl31_phys_base <= (TEGRA_TZRAM_BASE + TEGRA_TZRAM_SIZE))) {
+
+		/*
+		 * The BL31 code resides in the TZSRAM which loses state
+		 * when we enter System Suspend. Copy the wakeup trampoline
+		 * code to TZDRAM to help us exit from System Suspend.
+		 */
+		cpu_reset_handler_base = params_from_bl2->tzdram_base;
+		memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
+			 (void *)(uintptr_t)tegra186_cpu_reset_handler,
+			 (uintptr_t)&__tegra186_cpu_reset_handler_end -
+			 (uintptr_t)tegra186_cpu_reset_handler);
+
+	} else {
+		cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint;
+	}
+
+	addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
+	addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff);
+
+	/* write lower 32 bits first, then the upper 11 bits */
+	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
+	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);
+
+	/* save reset vector to be used during SYSTEM_SUSPEND exit */
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_0,
+			addr_low);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_1,
+			addr_high);
+
+	/* update reset vector address to the CCPLEX */
+	mce_update_reset_vector();
+}
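As a concrete illustration of the vector split above (values are hypothetical and not part of this patch): a handler base of 0x30000000 is programmed as addr_low = 0x30000001, with CPU_RESET_MODE_AA64 ORed into bit 0, and addr_high = 0x0, which carries bits [42:32] of the address.

	uint64_t base = 0x30000000ULL;			/* hypothetical handler base */
	uint32_t lo = (uint32_t)base | CPU_RESET_MODE_AA64;	/* 0x30000001 */
	uint32_t hi = (uint32_t)((base >> 32) & 0x7ff);		/* 0x0 */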
diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c
new file mode 100644
index 0000000..ba24579
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cortex_a57.h>
+#include <debug.h>
+#include <denver.h>
+#include <interrupt_mgmt.h>
+#include <mce.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
+extern uint64_t tegra_enable_l2_ecc_parity_prot;
+
+/*******************************************************************************
+ * Tegra186 CPU numbers in cluster #0
+ *******************************************************************************
+ */
+#define TEGRA186_CLUSTER0_CORE2		2
+#define TEGRA186_CLUSTER0_CORE3		3
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores - cluster0 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER,
+	/* No of CPU cores - cluster1 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 16MB */
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+	/* MMIO space */
+	return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 31250000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA186_MAX_UART_PORTS		7
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = {
+	0,	/* undefined - treated as an error case */
+	TEGRA_UARTA_BASE,
+	TEGRA_UARTB_BASE,
+	TEGRA_UARTC_BASE,
+	TEGRA_UARTD_BASE,
+	TEGRA_UARTE_BASE,
+	TEGRA_UARTF_BASE,
+	TEGRA_UARTG_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+	if ((id <= 0) || (id > TEGRA186_MAX_UART_PORTS))
+		return 0;
+
+	return tegra186_uart_addresses[id];
+}
+
+/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */
+#define TEGRA186_VER_A02P	0x1201
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+	uint32_t chip_subrev, val;
+
+	/* sanity check MCE firmware compatibility */
+	mce_verify_firmware_version();
+
+	/*
+	 * Enable ECC and Parity Protection for Cortex-A57 CPUs
+	 * for Tegra A02p SKUs
+	 */
+	if (impl != DENVER_IMPL) {
+
+		/* get the major, minor and sub-version values */
+		chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) &
+			      SUBREVISION_MASK;
+
+		/* prepare chip version number */
+		val = (tegra_get_chipid_major() << 12) |
+		      (tegra_get_chipid_minor() << 8) |
+		       chip_subrev;
+
+		/* enable L2 ECC for Tegra186 A02P and beyond */
+		if (val >= TEGRA186_VER_A02P) {
+
+			val = read_l2ctlr_el1();
+			val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+			write_l2ctlr_el1(val);
+
+			/*
+			 * Set the flag to enable ECC/Parity Protection
+			 * when we exit System Suspend or Cluster Powerdn
+			 */
+			tegra_enable_l2_ecc_parity_prot = 1;
+		}
+	}
+}
+
+/* Secure IRQs for Tegra186 */
+static const irq_sec_cfg_t tegra186_sec_irqs[] = {
+	{
+		TEGRA186_TOP_WDT_IRQ,
+		TEGRA186_SEC_IRQ_TARGET_MASK,
+		INTR_TYPE_EL3,
+	},
+	{
+		TEGRA186_AON_WDT_IRQ,
+		TEGRA186_SEC_IRQ_TARGET_MASK,
+		INTR_TYPE_EL3,
+	},
+};
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(tegra186_sec_irqs,
+		sizeof(tegra186_sec_irqs) / sizeof(tegra186_sec_irqs[0]));
+
+	/*
+	 * Initialize the FIQ handler only if the platform supports any
+	 * FIQ interrupt sources.
+	 */
+	if (sizeof(tegra186_sec_irqs) > 0)
+		tegra_fiq_handler_setup();
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+bl31_params_t *plat_get_bl31_params(void)
+{
+	uint32_t val;
+
+	val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_LO);
+
+	return (bl31_params_t *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+	uint32_t val;
+
+	val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_HI);
+
+	return (plat_params_from_bl2_t *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id, pos;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	/*
+	 * Validate cluster_id by checking whether it represents
+	 * one of the two clusters present on the platform.
+	 */
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return PSCI_E_NOT_PRESENT;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a CPU in
+	 * one of the two clusters present on the platform.
+	 */
+	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+		return PSCI_E_NOT_PRESENT;
+
+	/* calculate the core position */
+	pos = cpu_id + (cluster_id << 2);
+
+	/* check for non-existent CPUs */
+	if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3)
+		return PSCI_E_NOT_PRESENT;
+
+	return pos;
+}
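A quick sanity check of the mapping implemented above (illustrative only, not part of this patch). Cluster 0 exposes just two cores on Tegra186, so positions 2 and 3 report as not present:

	assert(plat_core_pos_by_mpidr(0x000) == 0);	/* cluster 0, CPU 0 */
	assert(plat_core_pos_by_mpidr(0x001) == 1);	/* cluster 0, CPU 1 */
	assert(plat_core_pos_by_mpidr(0x002) == PSCI_E_NOT_PRESENT);
	assert(plat_core_pos_by_mpidr(0x100) == 4);	/* cluster 1, CPU 0 */
	assert(plat_core_pos_by_mpidr(0x103) == 7);	/* cluster 1, CPU 3 */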
diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
new file mode 100644
index 0000000..dfe1c7d
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <errno.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <runtime_svc.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern uint32_t tegra186_system_powerdn_state;
+
+/*******************************************************************************
+ * Offset to read the ref_clk counter value
+ ******************************************************************************/
+#define REF_CLK_OFFSET		4
+
+/*******************************************************************************
+ * Tegra186 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE			0xC2FFFE01
+#define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS		0xC2FFFE02
+#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE			0xC2FFFF00
+#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO		0xC2FFFF01
+#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME		0xC2FFFF02
+#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS		0xC2FFFF03
+#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS		0xC2FFFF04
+#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED		0xC2FFFF05
+#define TEGRA_SIP_MCE_CMD_ONLINE_CORE			0xC2FFFF06
+#define TEGRA_SIP_MCE_CMD_CC3_CTRL			0xC2FFFF07
+#define TEGRA_SIP_MCE_CMD_ECHO_DATA			0xC2FFFF08
+#define TEGRA_SIP_MCE_CMD_READ_VERSIONS			0xC2FFFF09
+#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES			0xC2FFFF0A
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS	0xC2FFFF0B
+#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA			0xC2FFFF0C
+#define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA		0xC2FFFF0D
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE		0xC2FFFF0E
+#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE		0xC2FFFF0F
+#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC			0xC2FFFF10
+#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ		0xC2FFFF11
+#define TEGRA_SIP_MCE_CMD_MISC_CCPLEX			0xC2FFFF12
+
+/*******************************************************************************
+ * This function is responsible for handling all T186 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+		     uint64_t x1,
+		     uint64_t x2,
+		     uint64_t x3,
+		     uint64_t x4,
+		     void *cookie,
+		     void *handle,
+		     uint64_t flags)
+{
+	int mce_ret;
+	int impl, cpu;
+	uint32_t base, core_clk_ctr, ref_clk_ctr;
+
+	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+		/* 32-bit function, clear top parameter bits */
+
+		x1 = (uint32_t)x1;
+		x2 = (uint32_t)x2;
+		x3 = (uint32_t)x3;
+	}
+
+	/*
+	 * Convert SMC FID to SMC64, to support SMC32/SMC64 configurations
+	 */
+	smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
+
+	switch (smc_fid) {
+	/*
+	 * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
+	 * 0x82FFFFFF SiP SMC space
+	 */
+	case TEGRA_SIP_MCE_CMD_ENTER_CSTATE:
+	case TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO:
+	case TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME:
+	case TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS:
+	case TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS:
+	case TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED:
+	case TEGRA_SIP_MCE_CMD_CC3_CTRL:
+	case TEGRA_SIP_MCE_CMD_ECHO_DATA:
+	case TEGRA_SIP_MCE_CMD_READ_VERSIONS:
+	case TEGRA_SIP_MCE_CMD_ENUM_FEATURES:
+	case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+	case TEGRA_SIP_MCE_CMD_ENUM_READ_MCA:
+	case TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA:
+	case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE:
+	case TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE:
+	case TEGRA_SIP_MCE_CMD_ENABLE_LATIC:
+	case TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ:
+	case TEGRA_SIP_MCE_CMD_MISC_CCPLEX:
+
+		/* clean up the high bits */
+		smc_fid &= MCE_CMD_MASK;
+
+		/* execute the command and store the result */
+		mce_ret = mce_command_handler(smc_fid, x1, x2, x3);
+		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0,
+			      (uint64_t)mce_ret);
+
+		return 0;
+
+	case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE:
+
+		/* clean up the high bits */
+		x1 = (uint32_t)x1;
+
+		/*
+		 * SC8 is a special Tegra186 system state where the CPUs and
+		 * DRAM are powered down but the other subsystems are still
+		 * alive.
+		 */
+		if ((x1 == TEGRA_ARI_SYSTEM_SC8) ||
+		    (x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) {
+
+			tegra186_system_powerdn_state = x1;
+			flush_dcache_range(
+				(uintptr_t)&tegra186_system_powerdn_state,
+				sizeof(tegra186_system_powerdn_state));
+
+		} else {
+
+			ERROR("%s: unhandled powerdn state (%d)\n", __func__,
+				(uint32_t)x1);
+			return -ENOTSUP;
+		}
+
+		return 0;
+
+	/*
+	 * This function ID reads the Activity monitor's core/ref clock
+	 * counter values for a core/cluster.
+	 *
+	 * x1 = MPIDR of the target core
+	 * x2 = MIDR of the target core
+	 */
+	case TEGRA_SIP_GET_ACTMON_CLK_COUNTERS:
+
+		cpu = (uint32_t)x1 & MPIDR_CPU_MASK;
+		impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+		/* sanity check target CPU number */
+		if (cpu >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+			return -EINVAL;
+
+		/* get the base address for the current CPU */
+		base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
+			TEGRA_ARM_ACTMON_CTR_BASE;
+
+		/* read the clock counter values */
+		core_clk_ctr = mmio_read_32(base + (8 * cpu));
+		ref_clk_ctr = mmio_read_32(base + (8 * cpu) + REF_CLK_OFFSET);
+
+		/* return the counter values as two different parameters */
+		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1,
+			      (uint64_t)core_clk_ctr);
+		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2,
+			      (uint64_t)ref_clk_ctr);
+
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUP;
+}
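For context (not part of this patch), non-secure software reaches these handlers through standard SiP SMCs. A rough sketch of a Linux-side caller for TEGRA_SIP_GET_ACTMON_CLK_COUNTERS, using the kernel's arm_smccc_smc() helper, might look like the code below; the handler above returns the two counters in X1 and X2.

#include <linux/arm-smccc.h>

static void tegra186_read_actmon_counters(u64 mpidr, u64 midr,
					  u32 *core_clk, u32 *ref_clk)
{
	struct arm_smccc_res res;

	/* 0xC2FFFE02 == TEGRA_SIP_GET_ACTMON_CLK_COUNTERS */
	arm_smccc_smc(0xC2FFFE02, mpidr, midr, 0, 0, 0, 0, 0, &res);

	*core_clk = (u32)res.a1;	/* core clock counter */
	*ref_clk = (u32)res.a2;		/* ref clock counter */
}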
diff --git a/plat/nvidia/tegra/soc/t186/plat_smmu.c b/plat/nvidia/tegra/soc/t186/plat_smmu.c
new file mode 100644
index 0000000..4a8e1be
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_smmu.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <bl_common.h>
+#include <smmu.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Array to hold SMMU context for Tegra186
+ ******************************************************************************/
+static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = {
+	_START_OF_TABLE_,
+	mc_make_sid_security_cfg(SCEW),
+	mc_make_sid_security_cfg(AFIR),
+	mc_make_sid_security_cfg(NVDISPLAYR1),
+	mc_make_sid_security_cfg(XUSB_DEVR),
+	mc_make_sid_security_cfg(VICSRD1),
+	mc_make_sid_security_cfg(NVENCSWR),
+	mc_make_sid_security_cfg(TSECSRDB),
+	mc_make_sid_security_cfg(AXISW),
+	mc_make_sid_security_cfg(SDMMCWAB),
+	mc_make_sid_security_cfg(AONDMAW),
+	mc_make_sid_security_cfg(GPUSWR2),
+	mc_make_sid_security_cfg(SATAW),
+	mc_make_sid_security_cfg(UFSHCW),
+	mc_make_sid_security_cfg(AFIW),
+	mc_make_sid_security_cfg(SDMMCR),
+	mc_make_sid_security_cfg(SCEDMAW),
+	mc_make_sid_security_cfg(UFSHCR),
+	mc_make_sid_security_cfg(SDMMCWAA),
+	mc_make_sid_security_cfg(APEDMAW),
+	mc_make_sid_security_cfg(SESWR),
+	mc_make_sid_security_cfg(MPCORER),
+	mc_make_sid_security_cfg(PTCR),
+	mc_make_sid_security_cfg(BPMPW),
+	mc_make_sid_security_cfg(ETRW),
+	mc_make_sid_security_cfg(GPUSRD),
+	mc_make_sid_security_cfg(VICSWR),
+	mc_make_sid_security_cfg(SCEDMAR),
+	mc_make_sid_security_cfg(HDAW),
+	mc_make_sid_security_cfg(ISPWA),
+	mc_make_sid_security_cfg(EQOSW),
+	mc_make_sid_security_cfg(XUSB_HOSTW),
+	mc_make_sid_security_cfg(TSECSWR),
+	mc_make_sid_security_cfg(SDMMCRAA),
+	mc_make_sid_security_cfg(APER),
+	mc_make_sid_security_cfg(VIW),
+	mc_make_sid_security_cfg(APEW),
+	mc_make_sid_security_cfg(AXISR),
+	mc_make_sid_security_cfg(SDMMCW),
+	mc_make_sid_security_cfg(BPMPDMAW),
+	mc_make_sid_security_cfg(ISPRA),
+	mc_make_sid_security_cfg(NVDECSWR),
+	mc_make_sid_security_cfg(XUSB_DEVW),
+	mc_make_sid_security_cfg(NVDECSRD),
+	mc_make_sid_security_cfg(MPCOREW),
+	mc_make_sid_security_cfg(NVDISPLAYR),
+	mc_make_sid_security_cfg(BPMPDMAR),
+	mc_make_sid_security_cfg(NVJPGSWR),
+	mc_make_sid_security_cfg(NVDECSRD1),
+	mc_make_sid_security_cfg(TSECSRD),
+	mc_make_sid_security_cfg(NVJPGSRD),
+	mc_make_sid_security_cfg(SDMMCWA),
+	mc_make_sid_security_cfg(SCER),
+	mc_make_sid_security_cfg(XUSB_HOSTR),
+	mc_make_sid_security_cfg(VICSRD),
+	mc_make_sid_security_cfg(AONDMAR),
+	mc_make_sid_security_cfg(AONW),
+	mc_make_sid_security_cfg(SDMMCRA),
+	mc_make_sid_security_cfg(HOST1XDMAR),
+	mc_make_sid_security_cfg(EQOSR),
+	mc_make_sid_security_cfg(SATAR),
+	mc_make_sid_security_cfg(BPMPR),
+	mc_make_sid_security_cfg(HDAR),
+	mc_make_sid_security_cfg(SDMMCRAB),
+	mc_make_sid_security_cfg(ETRR),
+	mc_make_sid_security_cfg(AONR),
+	mc_make_sid_security_cfg(APEDMAR),
+	mc_make_sid_security_cfg(SESRD),
+	mc_make_sid_security_cfg(NVENCSRD),
+	mc_make_sid_security_cfg(GPUSWR),
+	mc_make_sid_security_cfg(TSECSWRB),
+	mc_make_sid_security_cfg(ISPWB),
+	mc_make_sid_security_cfg(GPUSRD2),
+	mc_make_sid_override_cfg(APER),
+	mc_make_sid_override_cfg(VICSRD),
+	mc_make_sid_override_cfg(NVENCSRD),
+	mc_make_sid_override_cfg(NVJPGSWR),
+	mc_make_sid_override_cfg(AONW),
+	mc_make_sid_override_cfg(BPMPR),
+	mc_make_sid_override_cfg(BPMPW),
+	mc_make_sid_override_cfg(HDAW),
+	mc_make_sid_override_cfg(NVDISPLAYR1),
+	mc_make_sid_override_cfg(APEDMAR),
+	mc_make_sid_override_cfg(AFIR),
+	mc_make_sid_override_cfg(AXISR),
+	mc_make_sid_override_cfg(VICSRD1),
+	mc_make_sid_override_cfg(TSECSRD),
+	mc_make_sid_override_cfg(BPMPDMAW),
+	mc_make_sid_override_cfg(MPCOREW),
+	mc_make_sid_override_cfg(XUSB_HOSTR),
+	mc_make_sid_override_cfg(GPUSWR),
+	mc_make_sid_override_cfg(XUSB_DEVR),
+	mc_make_sid_override_cfg(UFSHCW),
+	mc_make_sid_override_cfg(XUSB_HOSTW),
+	mc_make_sid_override_cfg(SDMMCWAB),
+	mc_make_sid_override_cfg(SATAW),
+	mc_make_sid_override_cfg(SCEDMAR),
+	mc_make_sid_override_cfg(HOST1XDMAR),
+	mc_make_sid_override_cfg(SDMMCWA),
+	mc_make_sid_override_cfg(APEDMAW),
+	mc_make_sid_override_cfg(SESWR),
+	mc_make_sid_override_cfg(AXISW),
+	mc_make_sid_override_cfg(AONDMAW),
+	mc_make_sid_override_cfg(TSECSWRB),
+	mc_make_sid_override_cfg(MPCORER),
+	mc_make_sid_override_cfg(ISPWB),
+	mc_make_sid_override_cfg(AONR),
+	mc_make_sid_override_cfg(BPMPDMAR),
+	mc_make_sid_override_cfg(HDAR),
+	mc_make_sid_override_cfg(SDMMCRA),
+	mc_make_sid_override_cfg(ETRW),
+	mc_make_sid_override_cfg(GPUSWR2),
+	mc_make_sid_override_cfg(EQOSR),
+	mc_make_sid_override_cfg(TSECSWR),
+	mc_make_sid_override_cfg(ETRR),
+	mc_make_sid_override_cfg(NVDECSRD),
+	mc_make_sid_override_cfg(TSECSRDB),
+	mc_make_sid_override_cfg(SDMMCRAA),
+	mc_make_sid_override_cfg(NVDECSRD1),
+	mc_make_sid_override_cfg(SDMMCR),
+	mc_make_sid_override_cfg(NVJPGSRD),
+	mc_make_sid_override_cfg(SCEDMAW),
+	mc_make_sid_override_cfg(SDMMCWAA),
+	mc_make_sid_override_cfg(APEW),
+	mc_make_sid_override_cfg(AONDMAR),
+	mc_make_sid_override_cfg(PTCR),
+	mc_make_sid_override_cfg(SCER),
+	mc_make_sid_override_cfg(ISPRA),
+	mc_make_sid_override_cfg(ISPWA),
+	mc_make_sid_override_cfg(VICSWR),
+	mc_make_sid_override_cfg(SESRD),
+	mc_make_sid_override_cfg(SDMMCW),
+	mc_make_sid_override_cfg(SDMMCRAB),
+	mc_make_sid_override_cfg(EQOSW),
+	mc_make_sid_override_cfg(GPUSRD2),
+	mc_make_sid_override_cfg(SCEW),
+	mc_make_sid_override_cfg(GPUSRD),
+	mc_make_sid_override_cfg(NVDECSWR),
+	mc_make_sid_override_cfg(XUSB_DEVW),
+	mc_make_sid_override_cfg(SATAR),
+	mc_make_sid_override_cfg(NVDISPLAYR),
+	mc_make_sid_override_cfg(VIW),
+	mc_make_sid_override_cfg(UFSHCR),
+	mc_make_sid_override_cfg(NVENCSWR),
+	mc_make_sid_override_cfg(AFIW),
+	smmu_make_gnsr0_nsec_cfg(CR0),
+	smmu_make_gnsr0_sec_cfg(IDR0),
+	smmu_make_gnsr0_sec_cfg(IDR1),
+	smmu_make_gnsr0_sec_cfg(IDR2),
+	smmu_make_gnsr0_nsec_cfg(GFSR),
+	smmu_make_gnsr0_nsec_cfg(GFSYNR0),
+	smmu_make_gnsr0_nsec_cfg(GFSYNR1),
+	smmu_make_gnsr0_nsec_cfg(TLBGSTATUS),
+	smmu_make_gnsr0_nsec_cfg(PIDR2),
+	smmu_make_smrg_group(0),
+	smmu_make_smrg_group(1),
+	smmu_make_smrg_group(2),
+	smmu_make_smrg_group(3),
+	smmu_make_smrg_group(4),
+	smmu_make_smrg_group(5),
+	smmu_make_smrg_group(6),
+	smmu_make_smrg_group(7),
+	smmu_make_smrg_group(8),
+	smmu_make_smrg_group(9),
+	smmu_make_smrg_group(10),
+	smmu_make_smrg_group(11),
+	smmu_make_smrg_group(12),
+	smmu_make_smrg_group(13),
+	smmu_make_smrg_group(14),
+	smmu_make_smrg_group(15),
+	smmu_make_smrg_group(16),
+	smmu_make_smrg_group(17),
+	smmu_make_smrg_group(18),
+	smmu_make_smrg_group(19),
+	smmu_make_smrg_group(20),
+	smmu_make_smrg_group(21),
+	smmu_make_smrg_group(22),
+	smmu_make_smrg_group(23),
+	smmu_make_smrg_group(24),
+	smmu_make_smrg_group(25),
+	smmu_make_smrg_group(26),
+	smmu_make_smrg_group(27),
+	smmu_make_smrg_group(28),
+	smmu_make_smrg_group(29),
+	smmu_make_smrg_group(30),
+	smmu_make_smrg_group(31),
+	smmu_make_smrg_group(32),
+	smmu_make_smrg_group(33),
+	smmu_make_smrg_group(34),
+	smmu_make_smrg_group(35),
+	smmu_make_smrg_group(36),
+	smmu_make_smrg_group(37),
+	smmu_make_smrg_group(38),
+	smmu_make_smrg_group(39),
+	smmu_make_smrg_group(40),
+	smmu_make_smrg_group(41),
+	smmu_make_smrg_group(42),
+	smmu_make_smrg_group(43),
+	smmu_make_smrg_group(44),
+	smmu_make_smrg_group(45),
+	smmu_make_smrg_group(46),
+	smmu_make_smrg_group(47),
+	smmu_make_smrg_group(48),
+	smmu_make_smrg_group(49),
+	smmu_make_smrg_group(50),
+	smmu_make_smrg_group(51),
+	smmu_make_smrg_group(52),
+	smmu_make_smrg_group(53),
+	smmu_make_smrg_group(54),
+	smmu_make_smrg_group(55),
+	smmu_make_smrg_group(56),
+	smmu_make_smrg_group(57),
+	smmu_make_smrg_group(58),
+	smmu_make_smrg_group(59),
+	smmu_make_smrg_group(60),
+	smmu_make_smrg_group(61),
+	smmu_make_smrg_group(62),
+	smmu_make_smrg_group(63),
+	smmu_make_cb_group(0),
+	smmu_make_cb_group(1),
+	smmu_make_cb_group(2),
+	smmu_make_cb_group(3),
+	smmu_make_cb_group(4),
+	smmu_make_cb_group(5),
+	smmu_make_cb_group(6),
+	smmu_make_cb_group(7),
+	smmu_make_cb_group(8),
+	smmu_make_cb_group(9),
+	smmu_make_cb_group(10),
+	smmu_make_cb_group(11),
+	smmu_make_cb_group(12),
+	smmu_make_cb_group(13),
+	smmu_make_cb_group(14),
+	smmu_make_cb_group(15),
+	smmu_make_cb_group(16),
+	smmu_make_cb_group(17),
+	smmu_make_cb_group(18),
+	smmu_make_cb_group(19),
+	smmu_make_cb_group(20),
+	smmu_make_cb_group(21),
+	smmu_make_cb_group(22),
+	smmu_make_cb_group(23),
+	smmu_make_cb_group(24),
+	smmu_make_cb_group(25),
+	smmu_make_cb_group(26),
+	smmu_make_cb_group(27),
+	smmu_make_cb_group(28),
+	smmu_make_cb_group(29),
+	smmu_make_cb_group(30),
+	smmu_make_cb_group(31),
+	smmu_make_cb_group(32),
+	smmu_make_cb_group(33),
+	smmu_make_cb_group(34),
+	smmu_make_cb_group(35),
+	smmu_make_cb_group(36),
+	smmu_make_cb_group(37),
+	smmu_make_cb_group(38),
+	smmu_make_cb_group(39),
+	smmu_make_cb_group(40),
+	smmu_make_cb_group(41),
+	smmu_make_cb_group(42),
+	smmu_make_cb_group(43),
+	smmu_make_cb_group(44),
+	smmu_make_cb_group(45),
+	smmu_make_cb_group(46),
+	smmu_make_cb_group(47),
+	smmu_make_cb_group(48),
+	smmu_make_cb_group(49),
+	smmu_make_cb_group(50),
+	smmu_make_cb_group(51),
+	smmu_make_cb_group(52),
+	smmu_make_cb_group(53),
+	smmu_make_cb_group(54),
+	smmu_make_cb_group(55),
+	smmu_make_cb_group(56),
+	smmu_make_cb_group(57),
+	smmu_make_cb_group(58),
+	smmu_make_cb_group(59),
+	smmu_make_cb_group(60),
+	smmu_make_cb_group(61),
+	smmu_make_cb_group(62),
+	smmu_make_cb_group(63),
+	smmu_bypass_cfg,	/* TBU settings */
+	_END_OF_TABLE_,
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the SMMU's context struct
+ ******************************************************************************/
+smmu_regs_t *plat_get_smmu_ctx(void)
+{
+	/* index of _END_OF_TABLE_ */
+	tegra186_smmu_context[0].val = ARRAY_SIZE(tegra186_smmu_context) - 1;
+
+	return tegra186_smmu_context;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_trampoline.S b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
new file mode 100644
index 0000000..6a17c33
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common_def.h>
+#include <memctrl_v2.h>
+#include <tegra_def.h>
+
+#define TEGRA186_SMMU_CTX_SIZE		0x420
+
+	.globl	tegra186_cpu_reset_handler
+
+/* CPU reset handler routine */
+func tegra186_cpu_reset_handler _align=4
+	/*
+	 * The TZRAM loses state during System Suspend, so we check the
+	 * contents at BL31_BASE to decide whether this reset handler is
+	 * running after a System Suspend exit. If TZRAM reads as zero,
+	 * the entire BL31 image must be restored from TZDRAM.
+	 */
+	mov	x0, #BL31_BASE
+	ldr	x0, [x0]
+	cbnz	x0, boot_cpu
+
+	/* resume from system suspend */
+	mov	x0, #BL31_BASE
+	adr	x1, __tegra186_cpu_reset_handler_end
+	adr	x2, __tegra186_cpu_reset_handler_data
+	ldr	x2, [x2, #8]
+
+	/* memcpy16 */
+m_loop16:
+	cmp	x2, #16
+	b.lt	m_loop1
+	ldp	x3, x4, [x1], #16
+	stp	x3, x4, [x0], #16
+	sub	x2, x2, #16
+	b	m_loop16
+	/* copy byte per byte */
+m_loop1:
+	cbz	x2, boot_cpu
+	ldrb	w3, [x1], #1
+	strb	w3, [x0], #1
+	subs	x2, x2, #1
+	b.ne	m_loop1
+
+boot_cpu:
+	adr	x0, __tegra186_cpu_reset_handler_data
+	ldr	x0, [x0]
+	br	x0
+endfunc tegra186_cpu_reset_handler
+
+	/*
+	 * Tegra186 reset data (offset 0x0 - 0x430)
+	 *
+	 * 0x000: secure world's entrypoint
+	 * 0x008: BL31 size (RO + RW)
+	 * 0x00C: SMMU context start
+	 * 0x42C: SMMU context end
+	 */
+
+	.align 4
+	.type	__tegra186_cpu_reset_handler_data, %object
+	.globl	__tegra186_cpu_reset_handler_data
+__tegra186_cpu_reset_handler_data:
+	.quad	tegra_secure_entrypoint
+	.quad	__BL31_END__ - BL31_BASE
+	.globl	__tegra186_smmu_context
+__tegra186_smmu_context:
+	.rept	TEGRA186_SMMU_CTX_SIZE
+	.quad	0
+	.endr
+	.size	__tegra186_cpu_reset_handler_data, \
+		. - __tegra186_cpu_reset_handler_data
+
+	.align 4
+	.globl	__tegra186_cpu_reset_handler_end
+__tegra186_cpu_reset_handler_end:
diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk
new file mode 100644
index 0000000..c905323
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk
@@ -0,0 +1,66 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# platform configs
+ENABLE_AFI_DEVICE			:= 1
+$(eval $(call add_define,ENABLE_AFI_DEVICE))
+
+ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS	:= 1
+$(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS))
+
+RELOCATE_TO_BL31_BASE			:= 1
+$(eval $(call add_define,RELOCATE_TO_BL31_BASE))
+
+ENABLE_CHIP_VERIFICATION_HARNESS	:= 0
+$(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))
+
+ENABLE_SMMU_DEVICE			:= 1
+$(eval $(call add_define,ENABLE_SMMU_DEVICE))
+
+NUM_SMMU_DEVICES			:= 1
+$(eval $(call add_define,NUM_SMMU_DEVICES))
+
+RESET_TO_BL31				:= 1
+
+PROGRAMMABLE_RESET_ADDRESS		:= 1
+
+COLD_BOOT_SINGLE_CPU			:= 1
+
+# platform settings
+TZDRAM_BASE				:= 0x30000000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT			:= 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER		:= 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES				:= 24
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS			:= 24
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+# platform files
+PLAT_INCLUDES		+=	-I${SOC_DIR}/drivers/include
+
+BL31_SOURCES		+=	lib/cpus/aarch64/denver.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \
+				${COMMON_DIR}/drivers/smmu/smmu.c	\
+				${SOC_DIR}/drivers/mce/mce.c		\
+				${SOC_DIR}/drivers/mce/ari.c		\
+				${SOC_DIR}/drivers/mce/nvg.c		\
+				${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
+				${SOC_DIR}/plat_memctrl.c		\
+				${SOC_DIR}/plat_psci_handlers.c		\
+				${SOC_DIR}/plat_setup.c			\
+				${SOC_DIR}/plat_secondary.c		\
+				${SOC_DIR}/plat_sip_calls.c		\
+				${SOC_DIR}/plat_smmu.c			\
+				${SOC_DIR}/plat_trampoline.S
+
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
new file mode 100644
index 0000000..f77746c
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/*
+ * Register used to clear CPU reset signals. Each CPU has two reset
+ * signals: CPU reset (3:0) and Core reset (19:16).
+ */
+#define CPU_CMPLX_RESET_CLR		0x454
+#define CPU_CORE_RESET_MASK		0x10001
+
+/* Clock and Reset controller registers for system clock's settings */
+#define SCLK_RATE			0x30
+#define SCLK_BURST_POLICY		0x28
+#define SCLK_BURST_POLICY_DEFAULT	0x10000000
+
+static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+					psci_power_state_t *req_state)
+{
+	int state_id = psci_get_pstate_id(power_state);
+
+	/* Sanity check the requested state id */
+	switch (state_id) {
+	case PSTATE_ID_CORE_POWERDN:
+		/*
+		 * Core powerdown request only for afflvl 0
+		 */
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id & 0xff;
+
+		break;
+
+	case PSTATE_ID_CLUSTER_IDLE:
+	case PSTATE_ID_CLUSTER_POWERDN:
+		/*
+		 * Cluster powerdown/idle request only for afflvl 1
+		 */
+		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+
+		break;
+
+	case PSTATE_ID_SOC_POWERDN:
+		/*
+		 * System powerdown request only for afflvl 2
+		 */
+		for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+
+		req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] =
+			PLAT_SYS_SUSPEND_STATE_ID;
+
+		break;
+
+	default:
+		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+					     const plat_local_state_t *states,
+					     unsigned int ncpu)
+{
+	plat_local_state_t target = *states;
+	int cpu = plat_my_core_pos();
+	int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+
+	/* get the power state at this level */
+	if (lvl == MPIDR_AFFLVL1)
+		target = *(states + core_pos);
+	if (lvl == MPIDR_AFFLVL2)
+		target = *(states + cpu);
+
+	/* Cluster idle/power-down */
+	if ((lvl == MPIDR_AFFLVL1) && ((target == PSTATE_ID_CLUSTER_IDLE) ||
+	    (target == PSTATE_ID_CLUSTER_POWERDN))) {
+		return target;
+	}
+
+	/* System Suspend */
+	if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+	    (target == PSTATE_ID_SOC_POWERDN))
+		return PSTATE_ID_SOC_POWERDN;
+
+	/* default state */
+	return PSCI_LOCAL_STATE_RUN;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	u_register_t mpidr = read_mpidr();
+	const plat_local_state_t *pwr_domain_state =
+		target_state->pwr_domain_state;
+	unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
+	unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
+	unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
+
+	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+		assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
+		       (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
+		assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
+		       (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
+
+		/* suspend the entire soc */
+		tegra_fc_soc_powerdn(mpidr);
+
+	} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
+
+		assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_IDLE);
+
+		/* Prepare for cluster idle */
+		tegra_fc_cluster_idle(mpidr);
+
+	} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_POWERDN) {
+
+		assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_POWERDN);
+
+		/* Prepare for cluster powerdn */
+		tegra_fc_cluster_powerdn(mpidr);
+
+	} else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
+
+		/* Prepare for cpu powerdn */
+		tegra_fc_cpu_powerdn(mpidr);
+
+	} else {
+		ERROR("%s: Unknown state id\n", __func__);
+		return PSCI_E_NOT_SUPPORTED;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	uint32_t val;
+
+	/*
+	 * Check if we are exiting from SOC_POWERDN.
+	 */
+	if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+			PLAT_SYS_SUSPEND_STATE_ID) {
+
+		/*
+		 * Lock scratch registers which hold the CPU vectors
+		 */
+		tegra_pmc_lock_cpu_vectors();
+
+		/*
+		 * Enable WRAP to INCR burst type conversions for
+		 * incoming requests on the AXI slave ports.
+		 */
+		val = mmio_read_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG);
+		val &= ~ENABLE_UNSUP_TX_ERRORS;
+		val |= ENABLE_WRAP_TO_INCR_BURSTS;
+		mmio_write_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG, val);
+
+		/*
+		 * Restore Boot and Power Management Processor (BPMP) reset
+		 * address and reset it.
+		 */
+		tegra_fc_reset_bpmp();
+	}
+
+	/*
+	 * T210 has a dedicated ARMv7 boot and power management processor,
+	 * the BPMP. Inform the BPMP that we have completed the cluster
+	 * power up.
+	 */
+	tegra_fc_lock_active_cluster();
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+	int cpu = mpidr & MPIDR_CPU_MASK;
+	uint32_t mask = CPU_CORE_RESET_MASK << cpu;
+
+	/* Deassert CPU reset signals */
+	mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
+
+	/* Turn on CPU using flow controller or PMC */
+	if (cpu_powergate_mask[cpu] == 0) {
+		tegra_pmc_cpu_on(cpu);
+		cpu_powergate_mask[cpu] = 1;
+	} else {
+		tegra_fc_cpu_on(cpu);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
+	return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+	/*
+	 * Set System Clock (SCLK) to POR default so that the clock source
+	 * for the PMC APB clock is not changed by the system reset.
+	 */
+	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
+		       SCLK_BURST_POLICY_DEFAULT);
+	mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
+
+	/* Wait 1 ms to make sure clock source/device logic is stabilized. */
+	mdelay(1);
+
+	return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_secondary.c b/plat/nvidia/tegra/soc/t210/plat_secondary.c
new file mode 100644
index 0000000..ecb258b
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_secondary.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define SB_CSR				0x0
+#define  SB_CSR_NS_RST_VEC_WR_DIS	(1 << 1)
+
+/* CPU reset vector */
+#define SB_AA64_RESET_LOW		0x30	/* width = 31:0 */
+#define SB_AA64_RESET_HI		0x34	/* width = 11:0 */
+
+extern void tegra_secure_entrypoint(void);
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+	uint32_t val;
+	uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+	INFO("Setting up secondary CPU boot\n");
+
+	/* setup secondary CPU vector */
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+			(reset_addr & 0xFFFFFFFF) | 1);
+	val = reset_addr >> 32;
+	mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+	/* configure PMC */
+	tegra_pmc_cpu_setup(reset_addr);
+	tegra_pmc_lock_cpu_vectors();
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
new file mode 100644
index 0000000..b058bed
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <console.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain, i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ ******************************************************************************/
+const unsigned char tegra_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	1,
+	/* No of clusters */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of CPU cores - cluster0 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER,
+	/* No of CPU cores - cluster1 */
+	PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/* Sets of MMIO ranges to map */
+#define MMIO_RANGE_0_ADDR	0x50000000
+#define MMIO_RANGE_1_ADDR	0x60000000
+#define MMIO_RANGE_2_ADDR	0x70000000
+#define MMIO_RANGE_SIZE		0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+	MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+	/* MMIO space */
+	return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 19200000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA210_MAX_UART_PORTS		5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra210_uart_addresses[TEGRA210_MAX_UART_PORTS + 1] = {
+	0,	/* undefined - treated as an error case */
+	TEGRA_UARTA_BASE,
+	TEGRA_UARTB_BASE,
+	TEGRA_UARTC_BASE,
+	TEGRA_UARTD_BASE,
+	TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+	/* Reject ids outside 1..TEGRA210_MAX_UART_PORTS (index 0 is the error entry) */
+	if ((id <= 0) || (id > TEGRA210_MAX_UART_PORTS))
+		return 0;
+
+	return tegra210_uart_addresses[id];
+}
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+	tegra_gic_setup(NULL, 0);
+}
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
new file mode 100644
index 0000000..97ca3f1
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
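+# TZDRAM (secure DRAM carveout) base address: 8MB below the 4GB boundary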
+TZDRAM_BASE				:= 0xFF800000
+$(eval $(call add_define,TZDRAM_BASE))
+
+ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT	:= 1
+$(eval $(call add_define,ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT))
+
+PLATFORM_CLUSTER_COUNT			:= 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER		:= 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES				:= 4
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS			:= 8
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				${COMMON_DIR}/drivers/flowctrl/flowctrl.c	\
+				${COMMON_DIR}/drivers/memctrl/memctrl_v1.c	\
+				${SOC_DIR}/plat_psci_handlers.c		\
+				${SOC_DIR}/plat_setup.c			\
+				${SOC_DIR}/plat_secondary.c
+
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT	:=	1
+ERRATA_A57_826974		:=	1
+ERRATA_A57_826977		:=	1
+ERRATA_A57_828024		:=	1
+ERRATA_A57_829520		:=	1
+ERRATA_A57_833471		:=	1
+
+# Enable workarounds for selected Cortex-A53 errata.
+A53_DISABLE_NON_TEMPORAL_HINT	:=	1
+ERRATA_A53_826319		:=	1
+ERRATA_A53_836870		:=	1
+ERRATA_A53_855873		:=	1
diff --git a/plat/qemu/aarch64/plat_helpers.S b/plat/qemu/aarch64/plat_helpers.S
new file mode 100644
index 0000000..ed55379
--- /dev/null
+++ b/plat/qemu/aarch64/plat_helpers.S
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+
+	.globl	plat_my_core_pos
+	.globl	plat_get_my_entrypoint
+	.globl	platform_mem_init
+	.globl	plat_qemu_calc_core_pos
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl  plat_secondary_cold_boot_setup
+	.globl  plat_is_my_cpu_primary
+
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	b	plat_qemu_calc_core_pos
+endfunc plat_my_core_pos
+
+/*
+ *  unsigned int plat_qemu_calc_core_pos(u_register_t mpidr);
+ *  With this function: CorePos = (ClusterId * 4) + CoreId
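+ *  (the cluster affinity field sits at MPIDR bits [15:8], so shifting the
+ *  masked value right by 6 yields ClusterId * 4)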
+ */
+func plat_qemu_calc_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #6
+	ret
+endfunc plat_qemu_calc_core_pos
+
+	/* -----------------------------------------------------
+	 * unsigned int plat_is_my_cpu_primary (void);
+	 *
+	 * Find out whether the current cpu is the primary
+	 * cpu.
+	 * -----------------------------------------------------
+	 */
+func plat_is_my_cpu_primary
+	mrs	x0, mpidr_el1
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #QEMU_PRIMARY_CPU
+	cset	w0, eq
+	ret
+endfunc plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform-specific actions
+	 * needed for a secondary CPU after a cold reset, e.g.
+	 * marking the CPU's presence or placing it in a
+	 * holding pen.
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	/* Calculate address of our hold entry */
+	bl	plat_my_core_pos
+	lsl	x0, x0, #PLAT_QEMU_HOLD_ENTRY_SHIFT
+	mov_imm	x2, PLAT_QEMU_HOLD_BASE
+
+	/* Wait until we have a go */
+poll_mailbox:
+	ldr	x1, [x2, x0]
+	cbz	x1, 1f
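+	/* Hold entry is non-zero: jump to the entrypoint published in the mailbox */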
+	mov_imm	x0, PLAT_QEMU_TRUSTED_MAILBOX_BASE
+	ldr	x1, [x0]
+	br	x1
+1:
+	wfe
+	b	poll_mailbox
+endfunc plat_secondary_cold_boot_setup
+
+func plat_get_my_entrypoint
+	/* TODO support warm boot */
+	mov	x0, #0
+	ret
+endfunc plat_get_my_entrypoint
+
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0, x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, PLAT_QEMU_CRASH_UART_BASE
+	mov_imm	x1, PLAT_QEMU_CRASH_UART_CLK_IN_HZ
+	mov_imm	x2, PLAT_QEMU_CONSOLE_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * ---------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm	x1, PLAT_QEMU_CRASH_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+
diff --git a/plat/qemu/dt.c b/plat/qemu/dt.c
new file mode 100644
index 0000000..c544d9f
--- /dev/null
+++ b/plat/qemu/dt.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <console.h>
+#include <debug.h>
+#include <libfdt.h>
+#include <psci.h>
+#include <string.h>
+#include "qemu_private.h"
+
+static int append_psci_compatible(void *fdt, int offs, const char *str)
+{
+	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
+}
+
+int dt_add_psci_node(void *fdt)
+{
+	int offs;
+
+	if (fdt_path_offset(fdt, "/psci") >= 0) {
+		WARN("PSCI Device Tree node already exists!\n");
+		return 0;
+	}
+
+	offs = fdt_path_offset(fdt, "/");
+	if (offs < 0)
+		return -1;
+	offs = fdt_add_subnode(fdt, offs, "psci");
+	if (offs < 0)
+		return -1;
+	if (append_psci_compatible(fdt, offs, "arm,psci-1.0"))
+		return -1;
+	if (append_psci_compatible(fdt, offs, "arm,psci-0.2"))
+		return -1;
+	if (append_psci_compatible(fdt, offs, "arm,psci"))
+		return -1;
+	if (fdt_setprop_string(fdt, offs, "method", "smc"))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "cpu_suspend", PSCI_CPU_SUSPEND_AARCH64))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "cpu_off", PSCI_CPU_OFF))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "cpu_on", PSCI_CPU_ON_AARCH64))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
+		return -1;
+	if (fdt_setprop_u32(fdt, offs, "sys_reset", PSCI_SYSTEM_RESET))
+		return -1;
+	return 0;
+}
+
+static int check_node_compat_prefix(void *fdt, int offs, const char *prefix)
+{
+	const size_t prefix_len = strlen(prefix);
+	size_t l;
+	int plen;
+	const char *prop;
+
+	prop = fdt_getprop(fdt, offs, "compatible", &plen);
+	if (!prop)
+		return -1;
+
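+	/* "compatible" is a NUL-separated string list; check each entry for the prefix */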
+	while (plen > 0) {
+		if (memcmp(prop, prefix, prefix_len) == 0)
+			return 0; /* match */
+
+		l = strlen(prop) + 1;
+		prop += l;
+		plen -= l;
+	}
+
+	return -1;
+}
+
+int dt_add_psci_cpu_enable_methods(void *fdt)
+{
+	int offs = 0;
+
+	while (1) {
+		offs = fdt_next_node(fdt, offs, NULL);
+		if (offs < 0)
+			break;
+		if (fdt_getprop(fdt, offs, "enable-method", NULL))
+			continue; /* already set */
+		if (check_node_compat_prefix(fdt, offs, "arm,cortex-a"))
+			continue; /* no compatible */
+		if (fdt_setprop_string(fdt, offs, "enable-method", "psci"))
+			return -1;
+		/* Need to restart scanning as offsets may have changed */
+		offs = 0;
+	}
+	return 0;
+}
diff --git a/plat/qemu/include/plat_macros.S b/plat/qemu/include/plat_macros.S
new file mode 100644
index 0000000..93cded2
--- /dev/null
+++ b/plat/qemu/include/plat_macros.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <arm_macros.S>
+#include <platform_def.h>
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant GIC registers
+	 * whenever an unhandled exception is taken in
+	 * BL31.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	mov_imm	x17, GICC_BASE
+	mov_imm	x16, GICD_BASE
+	arm_print_gic_regs
+	.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/qemu/include/platform_def.h b/plat/qemu/include/platform_def.h
new file mode 100644
index 0000000..0ae28ea
--- /dev/null
+++ b/plat/qemu/include/platform_def.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <tbbr_img_def.h>
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define QEMU_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+#define PLATFORM_STACK_SIZE 0x1000
+
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CLUSTER0_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
+#define PLATFORM_CLUSTER1_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER0_CORE_COUNT + \
+					 PLATFORM_CLUSTER1_CORE_COUNT)
+
+#define QEMU_PRIMARY_CPU		0
+
+#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CLUSTER_COUNT + \
+					PLATFORM_CORE_COUNT)
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL1
+
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+
+/* Local power state for power domains in Run state. */
+#define PLAT_LOCAL_STATE_RUN		0
+/* Local power state for retention. Valid only for CPU power domains */
+#define PLAT_LOCAL_STATE_RET		1
+/*
+ * Local power state for OFF/power-down. Valid for CPU and cluster power
+ * domains.
+ */
+#define PLAT_LOCAL_STATE_OFF		2
+
+/*
+ * Macros used to parse state information from State-ID if it is using the
+ * recommended encoding for State-ID.
+ */
+#define PLAT_LOCAL_PSTATE_WIDTH		4
+#define PLAT_LOCAL_PSTATE_MASK		((1 << PLAT_LOCAL_PSTATE_WIDTH) - 1)
+
+/*
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ */
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Partition memory into secure ROM, non-secure DRAM, secure "SRAM",
+ * and secure DRAM.
+ */
+#define SEC_ROM_BASE			0x00000000
+#define SEC_ROM_SIZE			0x00020000
+
+#define NS_DRAM0_BASE			0x40000000
+#define NS_DRAM0_SIZE			0x3de00000
+
+#define SEC_SRAM_BASE			0x0e000000
+#define SEC_SRAM_SIZE			0x00040000
+
+#define SEC_DRAM_BASE			0x0e100000
+#define SEC_DRAM_SIZE			0x00f00000
+
+/* Load pageable part of OP-TEE 2MB above secure DRAM base */
+#define QEMU_OPTEE_PAGEABLE_LOAD_BASE	(SEC_DRAM_BASE + 0x00200000)
+#define QEMU_OPTEE_PAGEABLE_LOAD_SIZE	0x00400000
+
+/*
+ * ARM-TF lives in SRAM, partition it here
+ */
+
+#define SHARED_RAM_BASE			SEC_SRAM_BASE
+#define SHARED_RAM_SIZE			0x00001000
+
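+/*
+ * Trusted mailbox layout: an 8-byte slot for the warm boot entrypoint,
+ * followed by one 8-byte hold entry per core, polled by the secondaries
+ * in their holding pen.
+ */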
+#define PLAT_QEMU_TRUSTED_MAILBOX_BASE	SHARED_RAM_BASE
+#define PLAT_QEMU_TRUSTED_MAILBOX_SIZE	(8 + PLAT_QEMU_HOLD_SIZE)
+#define PLAT_QEMU_HOLD_BASE		(PLAT_QEMU_TRUSTED_MAILBOX_BASE + 8)
+#define PLAT_QEMU_HOLD_SIZE		(PLATFORM_CORE_COUNT * \
+					 PLAT_QEMU_HOLD_ENTRY_SIZE)
+#define PLAT_QEMU_HOLD_ENTRY_SHIFT	3
+#define PLAT_QEMU_HOLD_ENTRY_SIZE	(1 << PLAT_QEMU_HOLD_ENTRY_SHIFT)
+#define PLAT_QEMU_HOLD_STATE_WAIT	0
+#define PLAT_QEMU_HOLD_STATE_GO		1
+
+#define BL_RAM_BASE			(SHARED_RAM_BASE + SHARED_RAM_SIZE)
+#define BL_RAM_SIZE			(SEC_SRAM_SIZE - SHARED_RAM_SIZE)
+
+/*
+ * BL1 specific defines.
+ *
+ * BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
+ * addresses.
+ * Put BL1 RW at the top of the Secure SRAM. BL1_RW_BASE is calculated using
+ * the current BL1 RW debug size plus a little space for growth.
+ */
+#define BL1_RO_BASE			SEC_ROM_BASE
+#define BL1_RO_LIMIT			(SEC_ROM_BASE + SEC_ROM_SIZE)
+#define BL1_RW_BASE			(BL1_RW_LIMIT - 0x12000)
+#define BL1_RW_LIMIT			(BL_RAM_BASE + BL_RAM_SIZE)
+
+/*
+ * BL2 specific defines.
+ *
+ * Put BL2 just below BL3-1. BL2_BASE is calculated using the current BL2 debug
+ * size plus a little space for growth.
+ */
+#define BL2_BASE			(BL31_BASE - 0x1D000)
+#define BL2_LIMIT			BL31_BASE
+
+/*
+ * BL3-1 specific defines.
+ *
+ * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
+ * current BL3-1 debug size plus a little space for growth.
+ */
+#define BL31_BASE			(BL31_LIMIT - 0x20000)
+#define BL31_LIMIT			(BL_RAM_BASE + BL_RAM_SIZE)
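+/* BL3-1 progbits must end below BL1's RW data, which is still live while BL2 loads BL3-1 */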
+#define BL31_PROGBITS_LIMIT		BL1_RW_BASE
+
+
+/*
+ * BL3-2 specific defines.
+ *
+ * BL3-2 can execute from Secure SRAM, or Secure DRAM.
+ */
+#define BL32_SRAM_BASE			BL_RAM_BASE
+#define BL32_SRAM_LIMIT			BL31_BASE
+#define BL32_DRAM_BASE			SEC_DRAM_BASE
+#define BL32_DRAM_LIMIT			(SEC_DRAM_BASE + SEC_DRAM_SIZE)
+
+#define SEC_SRAM_ID			0
+#define SEC_DRAM_ID			1
+
+#if BL32_RAM_LOCATION_ID == SEC_SRAM_ID
+# define BL32_MEM_BASE			BL_RAM_BASE
+# define BL32_MEM_SIZE			BL_RAM_SIZE
+# define BL32_BASE			BL32_SRAM_BASE
+# define BL32_LIMIT			BL32_SRAM_LIMIT
+#elif BL32_RAM_LOCATION_ID == SEC_DRAM_ID
+# define BL32_MEM_BASE			SEC_DRAM_BASE
+# define BL32_MEM_SIZE			SEC_DRAM_SIZE
+# define BL32_BASE			BL32_DRAM_BASE
+# define BL32_LIMIT			BL32_DRAM_LIMIT
+#else
+# error "Unsupported BL32_RAM_LOCATION_ID value"
+#endif
+
+#define NS_IMAGE_OFFSET			0x60000000
+
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)
+#define MAX_MMAP_REGIONS		8
+#define MAX_XLAT_TABLES			6
+#define MAX_IO_DEVICES			3
+#define MAX_IO_HANDLES			4
+
+/*
+ * PL011 related constants
+ */
+#define UART0_BASE			0x09000000
+#define UART1_BASE			0x09040000
+#define UART0_CLK_IN_HZ			1
+#define UART1_CLK_IN_HZ			1
+
+#define PLAT_QEMU_BOOT_UART_BASE	UART0_BASE
+#define PLAT_QEMU_BOOT_UART_CLK_IN_HZ	UART0_CLK_IN_HZ
+
+#define PLAT_QEMU_CRASH_UART_BASE	UART1_BASE
+#define PLAT_QEMU_CRASH_UART_CLK_IN_HZ	UART1_CLK_IN_HZ
+
+#define PLAT_QEMU_CONSOLE_BAUDRATE	115200
+
+#define QEMU_FLASH0_BASE		0x04000000
+#define QEMU_FLASH0_SIZE		0x04000000
+
+#define PLAT_QEMU_FIP_BASE		QEMU_FLASH0_BASE
+#define PLAT_QEMU_FIP_MAX_SIZE		QEMU_FLASH0_SIZE
+
+#define DEVICE0_BASE			0x08000000
+#define DEVICE0_SIZE			0x00021000
+#define DEVICE1_BASE			0x09000000
+#define DEVICE1_SIZE			0x00011000
+
+/*
+ * GIC related constants
+ */
+
+#define GICD_BASE			0x8000000
+#define GICC_BASE			0x8010000
+#define GICR_BASE			0
+
+
+#define QEMU_IRQ_SEC_SGI_0		8
+#define QEMU_IRQ_SEC_SGI_1		9
+#define QEMU_IRQ_SEC_SGI_2		10
+#define QEMU_IRQ_SEC_SGI_3		11
+#define QEMU_IRQ_SEC_SGI_4		12
+#define QEMU_IRQ_SEC_SGI_5		13
+#define QEMU_IRQ_SEC_SGI_6		14
+#define QEMU_IRQ_SEC_SGI_7		15
+
+/*
+ * DT related constants
+ */
+#define PLAT_QEMU_DT_BASE		NS_DRAM0_BASE
+#define PLAT_QEMU_DT_MAX_SIZE		0x10000
+
+/*
+ * System counter
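+ * Frequency: (1000 * 1000 * 1000) / 16 = 62.5 MHz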
+ */
+#define SYS_COUNTER_FREQ_IN_TICKS	((1000 * 1000 * 1000) / 16)
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/qemu/platform.mk b/plat/qemu/platform.mk
new file mode 100644
index 0000000..2a7415f
--- /dev/null
+++ b/plat/qemu/platform.mk
@@ -0,0 +1,155 @@
+#
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include lib/libfdt/libfdt.mk
+
+# Enable new version of image loading on QEMU platforms
+LOAD_IMAGE_V2		:=	1
+
+ifeq ($(NEED_BL32),yes)
+$(eval $(call add_define,QEMU_LOAD_BL32))
+endif
+
+PLAT_PATH               :=      plat/qemu/
+PLAT_INCLUDES		:=	-Iinclude/plat/arm/common/		\
+				-Iinclude/plat/arm/common/aarch64/	\
+				-Iplat/qemu/include			\
+				-Iinclude/common/tbbr
+
+# Use translation tables library v2 by default
+ARM_XLAT_TABLES_LIB_V1		:=	0
+$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
+$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
+
+
+PLAT_BL_COMMON_SOURCES	:=	plat/qemu/qemu_common.c			\
+				drivers/arm/pl011/aarch64/pl011_console.S
+
+ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1)
+PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c		\
+				lib/xlat_tables/aarch64/xlat_tables.c
+else
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}
+endif
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+    include drivers/auth/mbedtls/mbedtls_crypto.mk
+    include drivers/auth/mbedtls/mbedtls_x509.mk
+
+    USE_TBBR_DEFS	:=	1
+
+    AUTH_SOURCES	:=	drivers/auth/auth_mod.c			\
+				drivers/auth/crypto_mod.c		\
+				drivers/auth/img_parser_mod.c		\
+				drivers/auth/tbbr/tbbr_cot.c
+
+    PLAT_INCLUDES	+=	-Iinclude/bl1/tbbr
+
+    BL1_SOURCES		+=	${AUTH_SOURCES}				\
+				bl1/tbbr/tbbr_img_desc.c		\
+				plat/common/tbbr/plat_tbbr.c		\
+				plat/qemu/qemu_trusted_boot.c	     	\
+				$(PLAT_PATH)/qemu_rotpk.S
+
+    BL2_SOURCES		+=	${AUTH_SOURCES}				\
+				plat/common/tbbr/plat_tbbr.c		\
+				plat/qemu/qemu_trusted_boot.c	     	\
+				$(PLAT_PATH)/qemu_rotpk.S
+
+    ROT_KEY             = $(BUILD_PLAT)/rot_key.pem
+    ROTPK_HASH          = $(BUILD_PLAT)/rotpk_sha256.bin
+
+    $(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+
+    $(BUILD_PLAT)/bl1/qemu_rotpk.o: $(ROTPK_HASH)
+    $(BUILD_PLAT)/bl2/qemu_rotpk.o: $(ROTPK_HASH)
+
+    certificates: $(ROT_KEY)
+
+    $(ROT_KEY):
+	@echo "  OPENSSL $@"
+	$(Q)openssl genrsa 2048 > $@ 2>/dev/null
+
+    $(ROTPK_HASH): $(ROT_KEY)
+	@echo "  OPENSSL $@"
+	$(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+	openssl dgst -sha256 -binary > $@ 2>/dev/null
+endif
+
+BL1_SOURCES		+=	drivers/io/io_semihosting.c		\
+				drivers/io/io_storage.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_memmap.c			\
+				lib/semihosting/semihosting.c		\
+				lib/semihosting/aarch64/semihosting_call.S \
+				plat/qemu/qemu_io_storage.c		\
+				lib/cpus/aarch64/aem_generic.S		\
+				lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				plat/qemu/aarch64/plat_helpers.S	\
+				plat/qemu/qemu_bl1_setup.c
+
+BL2_SOURCES		+=	drivers/io/io_semihosting.c		\
+				drivers/io/io_storage.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_memmap.c			\
+				lib/semihosting/semihosting.c		\
+				lib/semihosting/aarch64/semihosting_call.S\
+				plat/qemu/qemu_io_storage.c		\
+				plat/qemu/aarch64/plat_helpers.S	\
+				plat/qemu/qemu_bl2_setup.c		\
+				plat/qemu/dt.c				\
+				$(LIBFDT_SRCS)
+ifeq (${LOAD_IMAGE_V2},1)
+BL2_SOURCES		+=	plat/qemu/qemu_bl2_mem_params_desc.c	\
+				plat/qemu/qemu_image_load.c		\
+				common/desc_image_load.c
+endif
+ifeq (${SPD},opteed)
+BL2_SOURCES		+=	lib/optee/optee_utils.c
+endif
+
+
+BL31_SOURCES		+=	lib/cpus/aarch64/aem_generic.S		\
+				lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a57.S		\
+				drivers/arm/gic/v2/gicv2_helpers.c	\
+				drivers/arm/gic/v2/gicv2_main.c		\
+				drivers/arm/gic/common/gic_common.c	\
+				plat/common/plat_psci_common.c		\
+				plat/qemu/qemu_pm.c			\
+				plat/qemu/topology.c			\
+				plat/qemu/aarch64/plat_helpers.S	\
+				plat/qemu/qemu_bl31_setup.c		\
+				plat/qemu/qemu_gic.c
+
+
+# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
+# in the FIP if the platform requires them.
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call FIP_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT	:= 	0
+
+BL32_RAM_LOCATION	:=	tdram
+ifeq (${BL32_RAM_LOCATION}, tsram)
+  BL32_RAM_LOCATION_ID = SEC_SRAM_ID
+else ifeq (${BL32_RAM_LOCATION}, tdram)
+  BL32_RAM_LOCATION_ID = SEC_DRAM_ID
+else
+  $(error "Unsupported BL32_RAM_LOCATION value")
+endif
+
+# Process flags
+$(eval $(call add_define,BL32_RAM_LOCATION_ID))
diff --git a/plat/qemu/qemu_bl1_setup.c b/plat/qemu/qemu_bl1_setup.c
new file mode 100644
index 0000000..5a70558
--- /dev/null
+++ b/plat/qemu/qemu_bl1_setup.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <platform_def.h>
+#include "qemu_private.h"
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will tell us where BL1 lives
+ * in Trusted RAM
+ ******************************************************************************/
+extern uint64_t __BL1_RAM_START__;
+extern uint64_t __BL1_RAM_END__;
+#define BL1_RAM_BASE (uint64_t)(&__BL1_RAM_START__)
+#define BL1_RAM_LIMIT (uint64_t)(&__BL1_RAM_END__)
+
+/* Data structure which holds the extents of the trusted SRAM for BL1 */
+static meminfo_t bl1_tzram_layout;
+
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &bl1_tzram_layout;
+}
+
+/*******************************************************************************
+ * Perform any BL1 specific platform actions.
+ ******************************************************************************/
+void bl1_early_platform_setup(void)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_QEMU_BOOT_UART_BASE, PLAT_QEMU_BOOT_UART_CLK_IN_HZ,
+		     PLAT_QEMU_CONSOLE_BAUDRATE);
+
+	/* Allow BL1 to see the whole Trusted RAM */
+	bl1_tzram_layout.total_base = BL_RAM_BASE;
+	bl1_tzram_layout.total_size = BL_RAM_SIZE;
+
+#if !LOAD_IMAGE_V2
+	/* Calculate how much RAM BL1 is using and how much remains free */
+	bl1_tzram_layout.free_base = BL_RAM_BASE;
+	bl1_tzram_layout.free_size = BL_RAM_SIZE;
+	reserve_mem(&bl1_tzram_layout.free_base, &bl1_tzram_layout.free_size,
+		    BL1_RAM_BASE, BL1_RAM_LIMIT - BL1_RAM_BASE);
+#endif /* !LOAD_IMAGE_V2 */
+}
+
+/******************************************************************************
+ * Perform the very early platform specific architecture setup.  This only
+ * does basic initialization. Later architectural setup (bl1_arch_setup())
+ * does not do anything platform specific.
+ *****************************************************************************/
+void bl1_plat_arch_setup(void)
+{
+	qemu_configure_mmu_el3(bl1_tzram_layout.total_base,
+				bl1_tzram_layout.total_size,
+				BL1_RO_BASE, BL1_RO_LIMIT,
+				BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END);
+}
+
+void bl1_platform_setup(void)
+{
+	plat_qemu_io_setup();
+}
+
+#if !LOAD_IMAGE_V2
+/*******************************************************************************
+ * Function that takes a memory layout into which BL2 has been loaded and
+ * populates a new memory layout for BL2 that ensures that BL1's data sections
+ * resident in secure RAM are not visible to BL2.
+ ******************************************************************************/
+void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
+			     meminfo_t *bl2_mem_layout)
+{
+	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
+
+	assert(bl1_mem_layout != NULL);
+	assert(bl2_mem_layout != NULL);
+
+	/* Check that BL1's memory is lying outside of the free memory */
+	assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
+	       (BL1_RAM_BASE >= (bl1_mem_layout->free_base +
+				 bl1_mem_layout->free_size)));
+
+	/* Remove BL1 RW data from the scope of memory visible to BL2 */
+	*bl2_mem_layout = *bl1_mem_layout;
+	reserve_mem(&bl2_mem_layout->total_base,
+		    &bl2_mem_layout->total_size,
+		    BL1_RAM_BASE,
+		    bl1_size);
+
+	flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
+}
+
+/*******************************************************************************
+ * Before calling this function BL2 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL2 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl1_plat_set_bl2_ep_info(image_info_t *bl2_image,
+				entry_point_info_t *bl2_ep)
+{
+	SET_SECURITY_STATE(bl2_ep->h.attr, SECURE);
+	bl2_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+}
+#endif /* !LOAD_IMAGE_V2 */
diff --git a/plat/qemu/qemu_bl2_mem_params_desc.c b/plat/qemu/qemu_bl2_mem_params_desc.c
new file mode 100644
index 0000000..47f88ac
--- /dev/null
+++ b/plat/qemu/qemu_bl2_mem_params_desc.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <desc_image_load.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * The following descriptor provides BL image/ep information that is used
+ * by BL2 to load the images; a subset of this information is also
+ * passed to the next BL image. The image loading sequence is managed by
+ * populating the images in the required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with
+ * the next executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef EL3_PAYLOAD_BASE
+	/* Fill EL3 payload related information (BL31 is EL3 payload) */
+	{ .image_id = BL31_IMAGE_ID,
+
+	  SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				entry_point_info_t,
+				SECURE | EXECUTABLE | EP_FIRST_EXE),
+	  .ep_info.pc = EL3_PAYLOAD_BASE,
+	  .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				  DISABLE_ALL_EXCEPTIONS),
+
+	  SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+	  .next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+#else /* EL3_PAYLOAD_BASE */
+	/* Fill BL31 related information */
+	{ .image_id = BL31_IMAGE_ID,
+
+	  SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				entry_point_info_t,
+				SECURE | EXECUTABLE | EP_FIRST_EXE),
+	  .ep_info.pc = BL31_BASE,
+	  .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+				  DISABLE_ALL_EXCEPTIONS),
+# if DEBUG
+	  .ep_info.args.arg1 = QEMU_BL31_PLAT_PARAM_VAL,
+# endif
+	  SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				IMAGE_ATTRIB_PLAT_SETUP),
+	  .image_info.image_base = BL31_BASE,
+	  .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef QEMU_LOAD_BL32
+	  .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+	  .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+	},
+# ifdef QEMU_LOAD_BL32
+	/* Fill BL32 related information */
+	{ .image_id = BL32_IMAGE_ID,
+
+	  SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				entry_point_info_t, SECURE | EXECUTABLE),
+	  .ep_info.pc = BL32_BASE,
+
+	  SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				0),
+	  .image_info.image_base = BL32_BASE,
+	  .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	  .next_handoff_image_id = BL33_IMAGE_ID,
+	},
+
+	/*
+	 * Fill BL32 external 1 related information.
+	 * A typical use for the extra1 image is with OP-TEE, where it is the
+	 * pager image.
+	 */
+	{ .image_id = BL32_EXTRA1_IMAGE_ID,
+
+	   SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				 entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	   SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+				 image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+	   .image_info.image_base = BL32_BASE,
+	   .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+	   .next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+
+	/*
+	 * Fill BL32 external 2 related information.
+	 * A typical use for the extra2 image is with OP-TEE, where it is the
+	 * paged image.
+	 */
+	{ .image_id = BL32_EXTRA2_IMAGE_ID,
+
+	   SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				 entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+	   SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+				 image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+	   .image_info.image_base = QEMU_OPTEE_PAGEABLE_LOAD_BASE,
+	   .image_info.image_max_size = QEMU_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+	   .next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+# endif /* QEMU_LOAD_BL32 */
+
+	/* Fill BL33 related information */
+	{ .image_id = BL33_IMAGE_ID,
+	  SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+				entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+	  .ep_info.pc = PRELOADED_BL33_BASE,
+
+	  SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				IMAGE_ATTRIB_SKIP_LOADING),
+# else /* PRELOADED_BL33_BASE */
+	  .ep_info.pc = NS_IMAGE_OFFSET,
+
+	  SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+				0),
+	  .image_info.image_base = NS_IMAGE_OFFSET,
+	  .image_info.image_max_size = NS_DRAM0_BASE + NS_DRAM0_SIZE -
+				       NS_IMAGE_OFFSET,
+# endif /* !PRELOADED_BL33_BASE */
+
+	  .next_handoff_image_id = INVALID_IMAGE_ID,
+	}
+#endif /* !EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
diff --git a/plat/qemu/qemu_bl2_setup.c b/plat/qemu/qemu_bl2_setup.c
new file mode 100644
index 0000000..60d9623
--- /dev/null
+++ b/plat/qemu/qemu_bl2_setup.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#ifdef SPD_opteed
+#include <optee_utils.h>
+#endif
+#include <libfdt.h>
+#include <platform_def.h>
+#include <string.h>
+#include <utils.h>
+#include "qemu_private.h"
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL2_RO_BASE (unsigned long)(&__RO_START__)
+#define BL2_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+#if !LOAD_IMAGE_V2
+/*******************************************************************************
+ * This structure represents the superset of information that is passed to
+ * BL3-1, e.g. while passing control to it from BL2, bl31_params
+ * and other platform specific params
+ ******************************************************************************/
+typedef struct bl2_to_bl31_params_mem {
+	bl31_params_t bl31_params;
+	image_info_t bl31_image_info;
+	image_info_t bl32_image_info;
+	image_info_t bl33_image_info;
+	entry_point_info_t bl33_ep_info;
+	entry_point_info_t bl32_ep_info;
+	entry_point_info_t bl31_ep_info;
+} bl2_to_bl31_params_mem_t;
+
+
+static bl2_to_bl31_params_mem_t bl31_params_mem;
+
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+	return &bl2_tzram_layout;
+}
+
+/*******************************************************************************
+ * This function assigns a pointer to the memory that the platform has kept
+ * aside to pass platform specific and trusted firmware related information
+ * to BL31. This memory is provided by the statically allocated
+ * bl2_to_bl31_params_mem_t structure, which is a superset of all the
+ * structures whose information is passed to BL31.
+ * NOTE: This function should be called only once, and before generating
+ * the params for BL31.
+ ******************************************************************************/
+bl31_params_t *bl2_plat_get_bl31_params(void)
+{
+	bl31_params_t *bl2_to_bl31_params;
+
+	/*
+	 * Initialise the memory for all the arguments that need to
+	 * be passed to BL3-1
+	 */
+	zeromem(&bl31_params_mem, sizeof(bl2_to_bl31_params_mem_t));
+
+	/* Assign memory for TF related information */
+	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
+	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);
+
+	/* Fill BL3-1 related information */
+	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	/* Fill BL3-2 related information */
+	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
+		VERSION_1, 0);
+	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	/* Fill BL3-3 related information */
+	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info,
+		PARAM_EP, VERSION_1, 0);
+
+	/* BL3-3 expects to receive the primary CPU MPID (through x0) */
+	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();
+
+	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
+	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
+		VERSION_1, 0);
+
+	return bl2_to_bl31_params;
+}
+
+/* Flush the TF params and the TF plat params */
+void bl2_plat_flush_bl31_params(void)
+{
+	flush_dcache_range((unsigned long)&bl31_params_mem,
+			sizeof(bl2_to_bl31_params_mem_t));
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the shared memory that the platform
+ * has kept aside to hold the entry point information of BL31, filled in by BL2
+ ******************************************************************************/
+struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
+{
+#if DEBUG
+	bl31_params_mem.bl31_ep_info.args.arg1 = QEMU_BL31_PLAT_PARAM_VAL;
+#endif
+
+	return &bl31_params_mem.bl31_ep_info;
+}
+#endif /* !LOAD_IMAGE_V2 */
+
+
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_QEMU_BOOT_UART_BASE, PLAT_QEMU_BOOT_UART_CLK_IN_HZ,
+			PLAT_QEMU_CONSOLE_BAUDRATE);
+
+	/* Setup the BL2 memory layout */
+	bl2_tzram_layout = *mem_layout;
+
+	plat_qemu_io_setup();
+}
+
+static void security_setup(void)
+{
+	/*
+	 * This is where a TrustZone address space controller and other
+	 * security-related peripherals would be configured.
+	 */
+}
+
+static void update_dt(void)
+{
+	int ret;
+	void *fdt = (void *)(uintptr_t)PLAT_QEMU_DT_BASE;
+
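+	/* Re-open the DT in place so added nodes/properties fit within PLAT_QEMU_DT_MAX_SIZE */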
+	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
+	if (ret < 0) {
+		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
+		return;
+	}
+
+	if (dt_add_psci_node(fdt)) {
+		ERROR("Failed to add PSCI Device Tree node\n");
+		return;
+	}
+
+	if (dt_add_psci_cpu_enable_methods(fdt)) {
+		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
+		return;
+	}
+
+	ret = fdt_pack(fdt);
+	if (ret < 0)
+		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);
+}
+
+void bl2_platform_setup(void)
+{
+	security_setup();
+	update_dt();
+
+	/* TODO Initialize timer */
+}
+
+void bl2_plat_arch_setup(void)
+{
+	qemu_configure_mmu_el1(bl2_tzram_layout.total_base,
+			      bl2_tzram_layout.total_size,
+			      BL2_RO_BASE, BL2_RO_LIMIT,
+			      BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END);
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+static uint32_t qemu_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL3-2 image.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+static uint32_t qemu_get_spsr_for_bl33_entry(void)
+{
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+
+	/*
+	 * TODO: Consider the possibility of specifying the SPSR in
+	 * the FIP ToC and allowing the platform to have a say as
+	 * well.
+	 */
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+#if LOAD_IMAGE_V2
+static int qemu_bl2_handle_post_image_load(unsigned int image_id)
+{
+	int err = 0;
+	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+#ifdef SPD_opteed
+	bl_mem_params_node_t *pager_mem_params = NULL;
+	bl_mem_params_node_t *paged_mem_params = NULL;
+#endif
+
+	assert(bl_mem_params);
+
+	switch (image_id) {
+# ifdef AARCH64
+	case BL32_IMAGE_ID:
+#ifdef SPD_opteed
+		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+		assert(pager_mem_params);
+
+		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+		assert(paged_mem_params);
+
+		err = parse_optee_header(&bl_mem_params->ep_info,
+					 &pager_mem_params->image_info,
+					 &paged_mem_params->image_info);
+		if (err != 0) {
+			WARN("OPTEE header parse error.\n");
+		}
+
+		/*
+		 * OP-TEE expects to receive the DTB address in x2.
+		 * This will be copied into x2 by the dispatcher.
+		 */
+		bl_mem_params->ep_info.args.arg3 = PLAT_QEMU_DT_BASE;
+#endif
+		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
+		break;
+# endif
+	case BL33_IMAGE_ID:
+		/* BL33 expects to receive the primary CPU MPID (through r0) */
+		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();
+		break;
+	}
+
+	return err;
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for the given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	return qemu_bl2_handle_post_image_load(image_id);
+}
+
+#else /* LOAD_IMAGE_V2 */
+
+/*******************************************************************************
+ * Before calling this function BL3-1 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL3-1 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
+					entry_point_info_t *bl31_ep_info)
+{
+	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
+	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS);
+}
+
+/*******************************************************************************
+ * Before calling this function BL3-2 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL3-2 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
+					entry_point_info_t *bl32_ep_info)
+{
+	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
+	bl32_ep_info->spsr = qemu_get_spsr_for_bl32_entry();
+}
+
+/*******************************************************************************
+ * Before calling this function BL3-3 is loaded in memory and its entrypoint
+ * is set by load_image. This is a placeholder for the platform to change
+ * the entrypoint of BL3-3 and set SPSR and security state.
+ * On ARM standard platforms we only set the security state of the entrypoint
+ ******************************************************************************/
+void bl2_plat_set_bl33_ep_info(image_info_t *image,
+					entry_point_info_t *bl33_ep_info)
+{
+
+	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
+	bl33_ep_info->spsr = qemu_get_spsr_for_bl33_entry();
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL32
+ ******************************************************************************/
+void bl2_plat_get_bl32_meminfo(meminfo_t *bl32_meminfo)
+{
+	/*
+	 * Populate the extents of memory available for loading BL32.
+	 */
+	bl32_meminfo->total_base = BL32_BASE;
+	bl32_meminfo->free_base = BL32_BASE;
+	bl32_meminfo->total_size = (BL32_MEM_BASE + BL32_MEM_SIZE) - BL32_BASE;
+	bl32_meminfo->free_size = (BL32_MEM_BASE + BL32_MEM_SIZE) - BL32_BASE;
+}
+
+/*******************************************************************************
+ * Populate the extents of memory available for loading BL33
+ ******************************************************************************/
+void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
+{
+	bl33_meminfo->total_base = NS_DRAM0_BASE;
+	bl33_meminfo->total_size = NS_DRAM0_SIZE;
+	bl33_meminfo->free_base = NS_DRAM0_BASE;
+	bl33_meminfo->free_size = NS_DRAM0_SIZE;
+}
+#endif /* !LOAD_IMAGE_V2 */
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+	return NS_IMAGE_OFFSET;
+}
diff --git a/plat/qemu/qemu_bl31_setup.c b/plat/qemu/qemu_bl31_setup.c
new file mode 100644
index 0000000..f79a885
--- /dev/null
+++ b/plat/qemu/qemu_bl31_setup.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <platform_def.h>
+#include "qemu_private.h"
+
+/*
+ * The next 3 constants identify the extents of the code, RO data region and the
+ * limit of the BL3-1 image.  These addresses are used by the MMU setup code and
+ * therefore they must be page-aligned.  It is the responsibility of the linker
+ * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
+ * refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL3-1 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/*******************************************************************************
+ * Perform any BL3-1 early platform setup.  Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before
+ * they are lost (potentially). This needs to be done before the MMU is
+ * initialized so that the memory layout can be used while creating page
+ * tables. BL2 has flushed this information to memory, so we are guaranteed
+ * to pick up good data.
+ ******************************************************************************/
+#if LOAD_IMAGE_V2
+void bl31_early_platform_setup(void *from_bl2,
+			       void *plat_params_from_bl2)
+#else
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+#endif
+{
+	/* Initialize the console to provide early debug support */
+	console_init(PLAT_QEMU_BOOT_UART_BASE, PLAT_QEMU_BOOT_UART_CLK_IN_HZ,
+			PLAT_QEMU_CONSOLE_BAUDRATE);
+
+#if LOAD_IMAGE_V2
+	/*
+	 * Check params passed from BL2
+	 */
+	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+	assert(params_from_bl2);
+	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+	assert(params_from_bl2->h.version >= VERSION_2);
+
+	bl_params_node_t *bl_params = params_from_bl2->head;
+
+	/*
+	 * Copy the BL33 and BL32 (if present) entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	while (bl_params) {
+		if (bl_params->image_id == BL32_IMAGE_ID)
+			bl32_image_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_image_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (!bl33_image_ep_info.pc)
+		panic();
+
+#else /* LOAD_IMAGE_V2 */
+
+	/*
+	 * Check that the params passed from BL2 are not NULL.
+	 */
+	assert(from_bl2 != NULL);
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+	/*
+	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
+	 * to verify platform parameters from BL2 to BL3-1.
+	 * In release builds, it's not used.
+	 */
+	assert(((unsigned long long)plat_params_from_bl2) ==
+		QEMU_BL31_PLAT_PARAM_VAL);
+
+	/*
+	 * Copy BL3-2 (if populated by BL2) and BL3-3 entry point information.
+	 * They are stored in Secure RAM, in BL2's address space.
+	 */
+	if (from_bl2->bl32_ep_info)
+		bl32_image_ep_info = *from_bl2->bl32_ep_info;
+	bl33_image_ep_info = *from_bl2->bl33_ep_info;
+
+#endif /* !LOAD_IMAGE_V2 */
+}
+
+void bl31_plat_arch_setup(void)
+{
+	qemu_configure_mmu_el3(BL31_RO_BASE, (BL31_END - BL31_RO_BASE),
+			      BL31_RO_BASE, BL31_RO_LIMIT,
+			      BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END);
+}
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+#define PLATFORM_G1S_PROPS(grp)						\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE),	\
+	INTR_PROP_DESC(QEMU_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY,	\
+					   grp, GIC_INTR_CFG_EDGE)
+
+#define PLATFORM_G0_PROPS(grp)
+
+static const interrupt_prop_t qemu_interrupt_props[] = {
+	PLATFORM_G1S_PROPS(GICV2_INTR_GROUP0),
+	PLATFORM_G0_PROPS(GICV2_INTR_GROUP0)
+};
+
+static const struct gicv2_driver_data plat_gicv2_driver_data = {
+	.gicd_base = GICD_BASE,
+	.gicc_base = GICC_BASE,
+	.interrupt_props = qemu_interrupt_props,
+	.interrupt_props_num = ARRAY_SIZE(qemu_interrupt_props),
+};
+
+void bl31_platform_setup(void)
+{
+	/* Initialize the gic cpu and distributor interfaces */
+	gicv2_driver_init(&plat_gicv2_driver_data);
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image
+ * for the security state specified. BL3-3 corresponds to the non-secure
+ * image type while BL3-2 corresponds to the secure image type. A NULL
+ * pointer is returned if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	assert(sec_state_is_valid(type));
+	next_image_info = (type == NON_SECURE)
+			? &bl33_image_ep_info : &bl32_image_ep_info;
+	/*
+	 * None of the images on this platform can have 0x0
+	 * as the entrypoint.
+	 */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
diff --git a/plat/qemu/qemu_common.c b/plat/qemu/qemu_common.c
new file mode 100644
index 0000000..e34b16f
--- /dev/null
+++ b/plat/qemu/qemu_common.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <platform_def.h>
+#include <arm_xlat_tables.h>
+#include "qemu_private.h"
+
+#define MAP_DEVICE0	MAP_REGION_FLAT(DEVICE0_BASE,			\
+					DEVICE0_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+
+#ifdef DEVICE1_BASE
+#define MAP_DEVICE1	MAP_REGION_FLAT(DEVICE1_BASE,			\
+					DEVICE1_SIZE,			\
+					MT_DEVICE | MT_RW | MT_SECURE)
+#endif
+
+#ifdef DEVICE2_BASE
+#define MAP_DEVICE2	MAP_REGION_FLAT(DEVICE2_BASE,			\
+					DEVICE2_SIZE,			\
+					MT_DEVICE | MT_RO | MT_SECURE)
+#endif
+
+#define MAP_SHARED_RAM	MAP_REGION_FLAT(SHARED_RAM_BASE,		\
+					SHARED_RAM_SIZE,		\
+					MT_DEVICE  | MT_RW | MT_SECURE)
+
+#define MAP_BL32_MEM	MAP_REGION_FLAT(BL32_MEM_BASE, BL32_MEM_SIZE,	\
+					MT_MEMORY | MT_RW | MT_SECURE)
+
+#define MAP_NS_DRAM0	MAP_REGION_FLAT(NS_DRAM0_BASE, NS_DRAM0_SIZE,	\
+					MT_MEMORY | MT_RW | MT_NS)
+
+#define MAP_FLASH0	MAP_REGION_FLAT(QEMU_FLASH0_BASE, QEMU_FLASH0_SIZE, \
+					MT_MEMORY | MT_RO | MT_SECURE)
+
+/*
+ * Table of regions for various BL stages to map using the MMU.
+ * This doesn't include TZRAM as the memory layout arguments passed to
+ * qemu_configure_mmu_elx() give the available subset of that.
+ */
+#ifdef IMAGE_BL1
+static const mmap_region_t plat_qemu_mmap[] = {
+	MAP_FLASH0,
+	MAP_SHARED_RAM,
+	MAP_DEVICE0,
+#ifdef MAP_DEVICE1
+	MAP_DEVICE1,
+#endif
+#ifdef MAP_DEVICE2
+	MAP_DEVICE2,
+#endif
+	{0}
+};
+#endif
+#ifdef IMAGE_BL2
+static const mmap_region_t plat_qemu_mmap[] = {
+	MAP_FLASH0,
+	MAP_SHARED_RAM,
+	MAP_DEVICE0,
+#ifdef MAP_DEVICE1
+	MAP_DEVICE1,
+#endif
+#ifdef MAP_DEVICE2
+	MAP_DEVICE2,
+#endif
+	MAP_NS_DRAM0,
+	MAP_BL32_MEM,
+	{0}
+};
+#endif
+#ifdef IMAGE_BL31
+static const mmap_region_t plat_qemu_mmap[] = {
+	MAP_SHARED_RAM,
+	MAP_DEVICE0,
+#ifdef MAP_DEVICE1
+	MAP_DEVICE1,
+#endif
+	MAP_BL32_MEM,
+	{0}
+};
+#endif
+
+/*******************************************************************************
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialize the mmu, for the given exception level
+ ******************************************************************************/
+
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void qemu_configure_mmu_el##_el(unsigned long total_base,	\
+				   unsigned long total_size,		\
+				   unsigned long ro_start,		\
+				   unsigned long ro_limit,		\
+				   unsigned long coh_start,		\
+				   unsigned long coh_limit)		\
+	{								\
+		mmap_add_region(total_base, total_base,			\
+				total_size,				\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(plat_qemu_mmap);				\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el##_el(0);					\
+	}
+
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(1)
+DEFINE_CONFIGURE_MMU_EL(3)
+
+
diff --git a/plat/qemu/qemu_gic.c b/plat/qemu/qemu_gic.c
new file mode 100644
index 0000000..41b5eb4
--- /dev/null
+++ b/plat/qemu/qemu_gic.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <gicv2.h>
+#include <interrupt_mgmt.h>
+
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+	return gicv2_get_pending_interrupt_id();
+}
+
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+	return gicv2_get_pending_interrupt_type();
+}
+
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+	return gicv2_acknowledge_interrupt();
+}
+
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+	uint32_t group;
+
+	group = gicv2_get_interrupt_group(id);
+
+	/* Assume that all secure interrupts are S-EL1 interrupts */
+	if (!group)
+		return INTR_TYPE_S_EL1;
+	else
+		return INTR_TYPE_NS;
+
+}
+
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+	gicv2_end_of_interrupt(id);
+}
+
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+				uint32_t security_state)
+{
+	assert(type == INTR_TYPE_S_EL1 ||
+	       type == INTR_TYPE_EL3 ||
+	       type == INTR_TYPE_NS);
+
+	assert(sec_state_is_valid(security_state));
+
+	/* Non-secure interrupts are signalled on the IRQ line always */
+	if (type == INTR_TYPE_NS)
+		return __builtin_ctz(SCR_IRQ_BIT);
+
+	/*
+	 * Secure interrupts are signalled using the IRQ line if the FIQ_EN
+	 * bit is not set, else they are signalled using the FIQ line.
+	 */
+	if (gicv2_is_fiq_enabled())
+		return __builtin_ctz(SCR_FIQ_BIT);
+	else
+		return __builtin_ctz(SCR_IRQ_BIT);
+}
+
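A quick worked example of the line mapping above, assuming the conventional SCR_EL3
layout where SCR_IRQ_BIT is bit 1 and SCR_FIQ_BIT is bit 2:

    __builtin_ctz(SCR_IRQ_BIT);   /* == 1, i.e. route the interrupt on the IRQ line */
    __builtin_ctz(SCR_FIQ_BIT);   /* == 2, i.e. route the interrupt on the FIQ line */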
diff --git a/plat/qemu/qemu_image_load.c b/plat/qemu/qemu_image_load.c
new file mode 100644
index 0000000..8e24647
--- /dev/null
+++ b/plat/qemu/qemu_image_load.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <desc_image_load.h>
+
+/*******************************************************************************
+ * This function is a wrapper of a common function which flushes the data
+ * structures so that they are visible in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function is a wrapper of a common function which returns the list of
+ * loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function is a wrapper of a common function which returns the data
+ * structures of the next BL image.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/qemu/qemu_io_storage.c b/plat/qemu/qemu_io_storage.c
new file mode 100644
index 0000000..1918f21
--- /dev/null
+++ b/plat/qemu/qemu_io_storage.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>		/* For ARRAY_SIZE */
+#include <debug.h>
+#include <firmware_image_package.h>
+#include <io_driver.h>
+#include <io_fip.h>
+#include <io_memmap.h>
+#include <io_semihosting.h>
+#include <io_storage.h>
+#include <platform_def.h>
+#include <semihosting.h>
+#include <string.h>
+
+/* Semihosting filenames */
+#define BL2_IMAGE_NAME			"bl2.bin"
+#define BL31_IMAGE_NAME			"bl31.bin"
+#define BL32_IMAGE_NAME			"bl32.bin"
+#define BL32_EXTRA1_IMAGE_NAME		"bl32_extra1.bin"
+#define BL32_EXTRA2_IMAGE_NAME		"bl32_extra2.bin"
+#define BL33_IMAGE_NAME			"bl33.bin"
+
+#if TRUSTED_BOARD_BOOT
+#define TRUSTED_BOOT_FW_CERT_NAME	"tb_fw.crt"
+#define TRUSTED_KEY_CERT_NAME		"trusted_key.crt"
+#define SOC_FW_KEY_CERT_NAME		"soc_fw_key.crt"
+#define TOS_FW_KEY_CERT_NAME		"tos_fw_key.crt"
+#define NT_FW_KEY_CERT_NAME		"nt_fw_key.crt"
+#define SOC_FW_CONTENT_CERT_NAME	"soc_fw_content.crt"
+#define TOS_FW_CONTENT_CERT_NAME	"tos_fw_content.crt"
+#define NT_FW_CONTENT_CERT_NAME		"nt_fw_content.crt"
+#endif /* TRUSTED_BOARD_BOOT */
+
+
+
+/* IO devices */
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+static const io_dev_connector_t *sh_dev_con;
+static uintptr_t sh_dev_handle;
+
+static const io_block_spec_t fip_block_spec = {
+	.offset = PLAT_QEMU_FIP_BASE,
+	.length = PLAT_QEMU_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl32_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl32_extra1_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+};
+
+static const io_uuid_spec_t bl32_extra2_uuid_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
+};
+
+static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_KEY_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
+	.uuid = UUID_SOC_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+};
+#endif /* TRUSTED_BOARD_BOOT */
+
+static const io_file_spec_t sh_file_spec[] = {
+	[BL2_IMAGE_ID] = {
+		.path = BL2_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL31_IMAGE_ID] = {
+		.path = BL31_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL32_IMAGE_ID] = {
+		.path = BL32_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		.path = BL32_EXTRA1_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		.path = BL32_EXTRA2_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[BL33_IMAGE_ID] = {
+		.path = BL33_IMAGE_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		.path = TRUSTED_BOOT_FW_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		.path = TRUSTED_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		.path = SOC_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		.path = TOS_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		.path = NT_FW_KEY_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		.path = SOC_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		.path = TOS_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		.path = NT_FW_CONTENT_CERT_NAME,
+		.mode = FOPEN_MODE_RB
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+
+
+static int open_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	int (*check)(const uintptr_t spec);
+};
+
+/* By default, ARM platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+	[FIP_IMAGE_ID] = {
+		&memmap_dev_handle,
+		(uintptr_t)&fip_block_spec,
+		open_memmap
+	},
+	[BL2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl2_uuid_spec,
+		open_fip
+	},
+	[BL31_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl31_uuid_spec,
+		open_fip
+	},
+	[BL32_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_uuid_spec,
+		open_fip
+	},
+	[BL32_EXTRA1_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra1_uuid_spec,
+		open_fip
+	},
+	[BL32_EXTRA2_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl32_extra2_uuid_spec,
+		open_fip
+	},
+	[BL33_IMAGE_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+		open_fip
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tb_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&trusted_key_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_key_cert_uuid_spec,
+		open_fip
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&soc_fw_cert_uuid_spec,
+		open_fip
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&tos_fw_cert_uuid_spec,
+		open_fip
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		&fip_dev_handle,
+		(uintptr_t)&nt_fw_cert_uuid_spec,
+		open_fip
+	},
+#endif /* TRUSTED_BOARD_BOOT */
+};
+
+static int open_fip(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if a Firmware Image Package is available */
+	result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+	if (result == 0) {
+		result = io_open(fip_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using FIP\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+static int open_memmap(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(memmap_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using Memmap\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+static int open_semihosting(const uintptr_t spec)
+{
+	int result;
+	uintptr_t local_image_handle;
+
+	/* See if the file exists on semi-hosting. */
+	result = io_dev_init(sh_dev_handle, (uintptr_t)NULL);
+	if (result == 0) {
+		result = io_open(sh_dev_handle, spec, &local_image_handle);
+		if (result == 0) {
+			VERBOSE("Using Semi-hosting IO\n");
+			io_close(local_image_handle);
+		}
+	}
+	return result;
+}
+
+void plat_qemu_io_setup(void)
+{
+	int io_result;
+
+	io_result = register_io_dev_fip(&fip_dev_con);
+	assert(io_result == 0);
+
+	io_result = register_io_dev_memmap(&memmap_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
+				&fip_dev_handle);
+	assert(io_result == 0);
+
+	io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+				&memmap_dev_handle);
+	assert(io_result == 0);
+
+	/* Register the additional IO devices on this platform */
+	io_result = register_io_dev_sh(&sh_dev_con);
+	assert(io_result == 0);
+
+	/* Open connections to devices and cache the handles */
+	io_result = io_dev_open(sh_dev_con, (uintptr_t)NULL, &sh_dev_handle);
+	assert(io_result == 0);
+
+	/* Ignore improbable errors in release builds */
+	(void)io_result;
+}
+
+static int get_alt_image_source(unsigned int image_id, uintptr_t *dev_handle,
+				  uintptr_t *image_spec)
+{
+	int result = open_semihosting((const uintptr_t)&sh_file_spec[image_id]);
+
+	if (result == 0) {
+		*dev_handle = sh_dev_handle;
+		*image_spec = (uintptr_t)&sh_file_spec[image_id];
+	}
+
+	return result;
+}
+
+/*
+ * Return an IO device handle and specification which can be used to access
+ * an image. Use this to enforce the platform load policy.
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	int result;
+	const struct plat_io_policy *policy;
+
+	assert(image_id < ARRAY_SIZE(policies));
+
+	policy = &policies[image_id];
+	result = policy->check(policy->image_spec);
+	if (result == 0) {
+		*image_spec = policy->image_spec;
+		*dev_handle = *(policy->dev_handle);
+	} else {
+		VERBOSE("Trying alternative IO\n");
+		result = get_alt_image_source(image_id, dev_handle, image_spec);
+	}
+
+	return result;
+}
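A minimal usage sketch for the policy interface above, as the generic image-loading
code might call it (illustrative only; the real caller lives in the common BL code):

    uintptr_t dev_handle, image_spec, image_handle;

    if (plat_get_image_source(BL31_IMAGE_ID, &dev_handle, &image_spec) == 0) {
        /* the policy picked either the FIP or the semihosting fallback */
        if (io_open(dev_handle, image_spec, &image_handle) == 0) {
            /* ... io_read() the image, then io_close(image_handle) ... */
        }
    }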
diff --git a/plat/qemu/qemu_pm.c b/plat/qemu/qemu_pm.c
new file mode 100644
index 0000000..c184f1c
--- /dev/null
+++ b/plat/qemu/qemu_pm.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <gicv2.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/*
+ * The secure entry point to be used on warm reset.
+ */
+static unsigned long secure_entrypoint;
+
+/* Make composite power state parameter up to power level 0 */
+#if PSCI_EXTENDED_STATE_ID
+
+#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | \
+		 ((type) << PSTATE_TYPE_SHIFT))
+#else
+#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
+		(((lvl0_state) << PSTATE_ID_SHIFT) | \
+		 ((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
+		 ((type) << PSTATE_TYPE_SHIFT))
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+
+#define qemu_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
+		(((lvl1_state) << PLAT_LOCAL_PSTATE_WIDTH) | \
+		 qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
+
+
+
+/*
+ *  The table storing the valid idle power states. Ensure that the
+ *  array entries are populated in ascending order of state-id to
+ *  enable us to use binary search during power state validation.
+ *  The table must be terminated by a NULL entry.
+ */
+static const unsigned int qemu_pm_idle_states[] = {
+	/* State-id - 0x01 */
+	qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_RET,
+				MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY),
+	/* State-id - 0x02 */
+	qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_OFF,
+				MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN),
+	/* State-id - 0x22 */
+	qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_OFF, PLAT_LOCAL_STATE_OFF,
+				MPIDR_AFFLVL1, PSTATE_TYPE_POWERDOWN),
+	0,
+};
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the power state
+ * parameter. The power state parameter has to be a composite power state.
+ ******************************************************************************/
+static int qemu_validate_power_state(unsigned int power_state,
+				psci_power_state_t *req_state)
+{
+	unsigned int state_id;
+	int i;
+
+	assert(req_state);
+
+	/*
+	 *  Currently we are using a linear search for finding the matching
+	 *  entry in the idle power state array. This can be made a binary
+	 *  search if the number of entries justify the additional complexity.
+	 */
+	for (i = 0; !!qemu_pm_idle_states[i]; i++) {
+		if (power_state == qemu_pm_idle_states[i])
+			break;
+	}
+
+	/* Return error if entry not found in the idle state array */
+	if (!qemu_pm_idle_states[i])
+		return PSCI_E_INVALID_PARAMS;
+
+	i = 0;
+	state_id = psci_get_pstate_id(power_state);
+
+	/* Parse the State ID and populate the state info parameter */
+	while (state_id) {
+		req_state->pwr_domain_state[i++] = state_id &
+						PLAT_LOCAL_PSTATE_MASK;
+		state_id >>= PLAT_LOCAL_PSTATE_WIDTH;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the non secure
+ * entrypoint.
+ ******************************************************************************/
+static int qemu_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+	/*
+	 * Check if the non-secure entrypoint lies within the
+	 * non-secure DRAM.
+	 */
+	if ((entrypoint >= NS_DRAM0_BASE) &&
+	    (entrypoint < (NS_DRAM0_BASE + NS_DRAM0_SIZE)))
+		return PSCI_E_SUCCESS;
+	return PSCI_E_INVALID_ADDRESS;
+}
+
+/*******************************************************************************
+ * Platform handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+static void qemu_cpu_standby(plat_local_state_t cpu_state)
+{
+
+	assert(cpu_state == PLAT_LOCAL_STATE_RET);
+
+	/*
+	 * Enter standby state
+	 * dsb is good practice before using wfi to enter low power states
+	 */
+	dsb();
+	wfi();
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+static int qemu_pwr_domain_on(u_register_t mpidr)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned pos = plat_core_pos_by_mpidr(mpidr);
+	uint64_t *hold_base = (uint64_t *)PLAT_QEMU_HOLD_BASE;
+
+	hold_base[pos] = PLAT_QEMU_HOLD_STATE_GO;
+	sev();
+
+	return rc;
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void qemu_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void qemu_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void qemu_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
+					PLAT_LOCAL_STATE_OFF);
+
+	/* TODO: This setup is needed only after a cold boot */
+	gicv2_pcpu_distif_init();
+
+	/* Enable the gic cpu interface */
+	gicv2_cpuif_enable();
+}
+
+/*******************************************************************************
+ * Platform handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ ******************************************************************************/
+void qemu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	assert(0);
+}
+
+/*******************************************************************************
+ * Platform handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 qemu_system_off(void)
+{
+	ERROR("QEMU System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 qemu_system_reset(void)
+{
+	ERROR("QEMU System Reset: operation not handled.\n");
+	panic();
+}
+
+static const plat_psci_ops_t plat_qemu_psci_pm_ops = {
+	.cpu_standby = qemu_cpu_standby,
+	.pwr_domain_on = qemu_pwr_domain_on,
+	.pwr_domain_off = qemu_pwr_domain_off,
+	.pwr_domain_suspend = qemu_pwr_domain_suspend,
+	.pwr_domain_on_finish = qemu_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish = qemu_pwr_domain_suspend_finish,
+	.system_off = qemu_system_off,
+	.system_reset = qemu_system_reset,
+	.validate_power_state = qemu_validate_power_state,
+	.validate_ns_entrypoint = qemu_validate_ns_entrypoint
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	uintptr_t *mailbox = (void *) PLAT_QEMU_TRUSTED_MAILBOX_BASE;
+
+	*mailbox = sec_entrypoint;
+	secure_entrypoint = (unsigned long) sec_entrypoint;
+	*psci_ops = &plat_qemu_psci_pm_ops;
+
+	return 0;
+}
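To make the state-id comments in qemu_pm_idle_states concrete, this is how the 0x22
entry is formed, assuming the usual QEMU platform_def.h values (PLAT_LOCAL_STATE_OFF
== 2, PLAT_LOCAL_PSTATE_WIDTH == 4):

    /* qemu_make_pwrstate_lvl1(OFF, OFF, MPIDR_AFFLVL1, PSTATE_TYPE_POWERDOWN) */
    unsigned int state_id =
        (PLAT_LOCAL_STATE_OFF << PLAT_LOCAL_PSTATE_WIDTH) | /* cluster level: 0x20 */
         PLAT_LOCAL_STATE_OFF;                              /* core level:    0x02 */
    /* state_id == 0x22; the type field (and, without PSCI_EXTENDED_STATE_ID,
       the power level field) is OR'd in on top of this state-id */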
diff --git a/plat/qemu/qemu_private.h b/plat/qemu/qemu_private.h
new file mode 100644
index 0000000..1671ec7
--- /dev/null
+++ b/plat/qemu/qemu_private.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __QEMU_PRIVATE_H
+#define __QEMU_PRIVATE_H
+
+#include <sys/types.h>
+
+void qemu_configure_mmu_el1(unsigned long total_base, unsigned long total_size,
+			unsigned long ro_start, unsigned long ro_limit,
+			unsigned long coh_start, unsigned long coh_limit);
+
+void qemu_configure_mmu_el3(unsigned long total_base, unsigned long total_size,
+			unsigned long ro_start, unsigned long ro_limit,
+			unsigned long coh_start, unsigned long coh_limit);
+
+void plat_qemu_io_setup(void);
+unsigned int plat_qemu_calc_core_pos(u_register_t mpidr);
+
+int dt_add_psci_node(void *fdt);
+int dt_add_psci_cpu_enable_methods(void *fdt);
+
+#endif /*__QEMU_PRIVATE_H*/
diff --git a/plat/qemu/qemu_rotpk.S b/plat/qemu/qemu_rotpk.S
new file mode 100644
index 0000000..5d1b83f
--- /dev/null
+++ b/plat/qemu/qemu_rotpk.S
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+	.global qemu_rotpk_hash
+	.global qemu_rotpk_hash_end
+qemu_rotpk_hash:
+	/* DER header */
+	.byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+	.byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+	/* SHA256 */
+	.incbin ROTPK_HASH
+qemu_rotpk_hash_end:
diff --git a/plat/qemu/qemu_trusted_boot.c b/plat/qemu/qemu_trusted_boot.c
new file mode 100644
index 0000000..7d8fed2
--- /dev/null
+++ b/plat/qemu/qemu_trusted_boot.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+extern char qemu_rotpk_hash[], qemu_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	*key_ptr = qemu_rotpk_hash;
+	*key_len = qemu_rotpk_hash_end - qemu_rotpk_hash;
+	*flags = ROTPK_IS_HASH;
+
+	return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	*nv_ctr = 0;
+
+	return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 1;
+}
diff --git a/plat/qemu/topology.c b/plat/qemu/topology.c
new file mode 100644
index 0000000..d7ba9b7
--- /dev/null
+++ b/plat/qemu/topology.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include "qemu_private.h"
+
+/* The power domain tree descriptor */
+static unsigned char power_domain_tree_desc[] = {
+	/* Number of root nodes */
+	PLATFORM_CLUSTER_COUNT,
+	/* Number of children for the first node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* Number of children for the second node */
+	PLATFORM_CLUSTER1_CORE_COUNT,
+};
+
+/*******************************************************************************
+ * This function returns the ARM default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	mpidr &= MPIDR_AFFINITY_MASK;
+	if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+		return -1;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+		return -1;
+
+	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+		return -1;
+
+	return plat_qemu_calc_core_pos(mpidr);
+}
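plat_qemu_calc_core_pos() is implemented in the platform's assembly helpers and is
not part of this file; a typical implementation of such a helper, shown here only as
an assumption, is:

    unsigned int plat_qemu_calc_core_pos(u_register_t mpidr)
    {
        /* linear index = cluster * cores-per-cluster + core */
        return ((mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK) *
                PLATFORM_MAX_CPUS_PER_CLUSTER +
               ((mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK);
    }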
diff --git a/plat/rockchip/common/aarch64/plat_helpers.S b/plat/rockchip/common/aarch64/plat_helpers.S
new file mode 100644
index 0000000..abfb5a7
--- /dev/null
+++ b/plat/rockchip/common/aarch64/plat_helpers.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <cortex_a53.h>
+#include <cortex_a72.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <plat_pmu_macros.S>
+
+	.globl	cpuson_entry_point
+	.globl	cpuson_flags
+	.globl	platform_cpu_warmboot
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_report_exception
+	.globl	platform_is_primary_cpu
+	.globl	plat_crash_console_init
+	.globl	plat_crash_console_putc
+	.globl	plat_my_core_pos
+	.globl	plat_reset_handler
+
+	/*
+	 * void plat_reset_handler(void);
+	 *
+	 * Determine the SOC type and call the appropriate reset
+	 * handler.
+	 *
+	 */
+func plat_reset_handler
+	mrs x0, midr_el1
+	ubfx x0, x0, MIDR_PN_SHIFT, #12
+	cmp w0, #((CORTEX_A72_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	b.eq	handler_a72
+	b	handler_end
+handler_a72:
+	/*
+	 * This handler does the following:
+	 * Set the L2 Data RAM latency for Cortex-A72.
+	 * Set the L2 Tag RAM latency to for Cortex-A72.
+	 * Set the L2 Tag RAM latency for Cortex-A72.
+	mov x0, #((5 << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
+			 (0x1 << 5))
+	msr	CORTEX_A72_L2CTLR_EL1, x0
+	isb
+handler_end:
+	ret
+endfunc plat_reset_handler
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT
+	ret
+endfunc plat_my_core_pos
+
+	/* --------------------------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary CPU after a cold reset, e.g.
+	 * marking the CPU's presence, or placing it in a
+	 * holding pen.
+	 * --------------------------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	/* rk3368 does not do cold boot for secondary CPU */
+cb_panic:
+	b	cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+func platform_is_primary_cpu
+	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+	cmp	x0, #PLAT_RK_PRIMARY_CPU
+	cset	x0, eq
+	ret
+endfunc platform_is_primary_cpu
+
+	/* --------------------------------------------------------------------
+	 * int plat_crash_console_init(void)
+	 * Function to initialize the crash console
+	 * without a C Runtime to print crash report.
+	 * Clobber list : x0, x1, x2
+	 * --------------------------------------------------------------------
+	 */
+func plat_crash_console_init
+	mov_imm	x0, PLAT_RK_UART_BASE
+	mov_imm	x1, PLAT_RK_UART_CLOCK
+	mov_imm	x2, PLAT_RK_UART_BAUDRATE
+	b	console_core_init
+endfunc plat_crash_console_init
+
+	/* --------------------------------------------------------------------
+	 * int plat_crash_console_putc(void)
+	 * Function to print a character on the crash
+	 * console without a C Runtime.
+	 * Clobber list : x1, x2
+	 * --------------------------------------------------------------------
+	 */
+func plat_crash_console_putc
+	mov_imm x1, PLAT_RK_UART_BASE
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+	/* --------------------------------------------------------------------
+	 * void platform_cpu_warmboot (void);
+	 * CPU online or resume entry point
+	 * --------------------------------------------------------------------
+	 */
+func platform_cpu_warmboot _align=16
+	mrs	x0, MPIDR_EL1
+	and	x19, x0, #MPIDR_CPU_MASK
+	and	x20, x0, #MPIDR_CLUSTER_MASK
+	mov	x0, x20
+	func_rockchip_clst_warmboot
+	/* --------------------------------------------------------------------
+	 * The big cluster ID is 1; big core IDs are 0-3 and
+	 * little core IDs are 4-7.
+	 * --------------------------------------------------------------------
+	 */
+	add	x21, x19, x20, lsr #PLAT_RK_CLST_TO_CPUID_SHIFT
+	/* --------------------------------------------------------------------
+	 * get the per-cpu power-up flag
+	 * --------------------------------------------------------------------
+	 */
+	adr	x4, cpuson_flags
+	add	x4, x4, x21, lsl #2
+	ldr	w1, [x4]
+	/* --------------------------------------------------------------------
+	 * check the CPU power-on reason
+	 * --------------------------------------------------------------------
+	 */
+	cmp	w1, PMU_CPU_AUTO_PWRDN
+	b.eq	boot_entry
+	cmp	w1, PMU_CPU_HOTPLUG
+	b.eq	boot_entry
+	/* --------------------------------------------------------------------
+	 * If this core's cpuson_flags or cpuson_entry_point is not
+	 * as expected, force the core into wfe.
+	 * --------------------------------------------------------------------
+	 */
+wfe_loop:
+	wfe
+	b	wfe_loop
+boot_entry:
+	str	wzr, [x4]
+	/* --------------------------------------------------------------------
+	 * get the per-cpu boot address
+	 * --------------------------------------------------------------------
+	 */
+	adr	x5, cpuson_entry_point
+	ldr	x2, [x5, x21, lsl #3]
+	br	x2
+endfunc platform_cpu_warmboot
+
+	/* --------------------------------------------------------------------
+	 * Per-CPU Secure entry point - resume or power up
+	 * --------------------------------------------------------------------
+	 */
+	.section tzfw_coherent_mem, "a"
+	.align  3
+cpuson_entry_point:
+	.rept	PLATFORM_CORE_COUNT
+	.quad	0
+	.endr
+cpuson_flags:
+	.rept	PLATFORM_CORE_COUNT
+	.word	0
+	.endr
+rockchip_clst_warmboot_data
diff --git a/plat/rockchip/common/aarch64/platform_common.c b/plat/rockchip/common/aarch64/platform_common.c
new file mode 100644
index 0000000..25eab43
--- /dev/null
+++ b/plat/rockchip/common/aarch64/platform_common.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <debug.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <string.h>
+#include <utils.h>
+#include <xlat_tables.h>
+
+#ifdef PLAT_RK_CCI_BASE
+static const int cci_map[] = {
+	PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX,
+	PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX
+};
+#endif
+
+/******************************************************************************
+ * Macro generating the code for the function that sets up the page tables
+ * as per the platform memory map and initialises the MMU, for the given EL.
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)					\
+	void plat_configure_mmu_el ## _el(unsigned long total_base,	\
+					  unsigned long total_size,	\
+					  unsigned long ro_start,	\
+					  unsigned long ro_limit,	\
+					  unsigned long coh_start,	\
+					  unsigned long coh_limit)	\
+	{								\
+		mmap_add_region(total_base, total_base,			\
+				total_size,				\
+				MT_MEMORY | MT_RW | MT_SECURE);		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
+				MT_MEMORY | MT_RO | MT_SECURE);		\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
+				MT_DEVICE | MT_RW | MT_SECURE);		\
+		mmap_add(plat_rk_mmap);					\
+		rockchip_plat_mmu_el##_el();				\
+		init_xlat_tables();					\
+									\
+		enable_mmu_el ## _el(0);				\
+	}
+
+/* Define EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(3)
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+void plat_cci_init(void)
+{
+#ifdef PLAT_RK_CCI_BASE
+	/* Initialize CCI driver */
+	cci_init(PLAT_RK_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+#endif
+}
+
+void plat_cci_enable(void)
+{
+	/*
+	 * Enable CCI coherency for this cluster.
+	 * No need for locks as no other cpu is active at the moment.
+	 */
+#ifdef PLAT_RK_CCI_BASE
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+#endif
+}
+
+void plat_cci_disable(void)
+{
+#ifdef PLAT_RK_CCI_BASE
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+#endif
+}
diff --git a/plat/rockchip/common/bl31_plat_setup.c b/plat/rockchip/common/bl31_plat_setup.c
new file mode 100644
index 0000000..292f0dd
--- /dev/null
+++ b/plat/rockchip/common/bl31_plat_setup.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+unsigned long __RO_START__;
+unsigned long __RO_END__;
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned.  It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+
+static entry_point_info_t bl32_ep_info;
+static entry_point_info_t bl33_ep_info;
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	entry_point_info_t *next_image_info;
+
+	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;
+
+	/* None of the images on this platform can have 0x0 as the entrypoint */
+	if (next_image_info->pc)
+		return next_image_info;
+	else
+		return NULL;
+}
+
+#pragma weak params_early_setup
+void params_early_setup(void *plat_param_from_bl2)
+{
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 early platform setup. Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
+ * are lost (potentially). This needs to be done before the MMU is initialized
+ * so that the memory layout can be used while creating page tables.
+ * BL2 has flushed this information to memory, so we are guaranteed to pick up
+ * good data.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+			       void *plat_params_from_bl2)
+{
+	console_init(PLAT_RK_UART_BASE, PLAT_RK_UART_CLOCK,
+		     PLAT_RK_UART_BAUDRATE);
+
+	VERBOSE("bl31_setup\n");
+
+	/* Passing a NULL context is a critical programming error */
+	assert(from_bl2);
+
+	assert(from_bl2->h.type == PARAM_BL31);
+	assert(from_bl2->h.version >= VERSION_1);
+
+	bl32_ep_info = *from_bl2->bl32_ep_info;
+	bl33_ep_info = *from_bl2->bl33_ep_info;
+
+	/* there may be some board-specific parameters that need initialising */
+	params_early_setup(plat_params_from_bl2);
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+	generic_delay_timer_init();
+	plat_rockchip_soc_init();
+
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_rockchip_gic_driver_init();
+	plat_rockchip_gic_init();
+	plat_rockchip_pmu_init();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initialises the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+	plat_cci_init();
+	plat_cci_enable();
+	plat_configure_mmu_el3(BL31_RO_BASE,
+			       BL_COHERENT_RAM_END - BL31_RO_BASE,
+			       BL31_RO_BASE,
+			       BL31_RO_LIMIT,
+			       BL_COHERENT_RAM_BASE,
+			       BL_COHERENT_RAM_END);
+}
diff --git a/plat/rockchip/common/drivers/parameter/ddr_parameter.c b/plat/rockchip/common/drivers/parameter/ddr_parameter.c
new file mode 100644
index 0000000..ea77757
--- /dev/null
+++ b/plat/rockchip/common/drivers/parameter/ddr_parameter.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <soc.h>
+#include <string.h>
+#include "ddr_parameter.h"
+
+/*
+ * The miniloader delivers the DDR usage parameters at address 0x02000000,
+ * in the data format shown in the figure below. They tell ATF which areas
+ * of DDR are used by the platform; we treat those as non-secure regions by
+ * default. The remaining regions are then derived and configured as secure
+ * regions to prevent illegal access.
+ *
+ *			[ddr usage info data format]
+ * 0x02000000
+ * -----------------------------------------------------------------------------
+ * |       <name>        |  <size>   |      <description>                      |
+ * -----------------------------------------------------------------------------
+ * | count               |  4byte    | the array numbers of the                |
+ * |                     |           | 'addr_array' and 'size_array'           |
+ * -----------------------------------------------------------------------------
+ * | reserved            |  4byte    | just for 'addr_array' 8byte aligned     |
+ * -----------------------------------------------------------------------------
+ * | addr_array[count]   | per 8byte | memory region base address              |
+ * -----------------------------------------------------------------------------
+ * | size_array[count]   | per 8byte | memory region size (byte)               |
+ * -----------------------------------------------------------------------------
+ */
+
+/*
+ * function: read the ns-region parameter info and derive the s-region info
+ *
+ * @addr: base address of the ddr usage struct from the miniloader
+ * @max_mb: the maximum ddr capacity (MB) that the platform supports
+ */
+struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb)
+{
+	uint64_t base, top;
+	uint32_t i, addr_offset, size_offset;
+	struct param_ddr_usage p;
+
+	memset(&p, 0, sizeof(p));
+
+	/* read the number of ns-regions (stored at offset 0x0) */
+	p.ns_nr = mmio_read_32(addr + REGION_NR_OFFSET);
+	if ((p.ns_nr > DDR_REGION_NR_MAX) || (p.ns_nr == 0)) {
+		ERROR("over or zero region, nr=%d, max=%d\n",
+		      p.ns_nr, DDR_REGION_NR_MAX);
+		return p;
+	}
+
+	/* whole ddr boundary; it is used when parsing the s-regions */
+	p.boundary = max_mb;
+
+	/* calculate ns-region base addr and size offset */
+	addr_offset = REGION_ADDR_OFFSET;
+	size_offset = REGION_ADDR_OFFSET + p.ns_nr * REGION_DATA_PER_BYTES;
+
+	/* read all ns-regions base and top address */
+	for (i = 0; i < p.ns_nr; i++) {
+		base = mmio_read_64(addr + addr_offset);
+		top = base + mmio_read_64(addr + size_offset);
+		/*
+		 * translate bytes to MB and store the info; the miniloader
+		 * guarantees that every ns-region is MB aligned.
+		 */
+		p.ns_base[i] = RG_SIZE_MB(base);
+		p.ns_top[i] = RG_SIZE_MB(top);
+
+		addr_offset += REGION_DATA_PER_BYTES;
+		size_offset += REGION_DATA_PER_BYTES;
+	}
+
+	/*
+	 * an s-region's base starts at the previous ns-region's top, and an
+	 * s-region's top ends at the next ns-region's base, e.g.:
+	 *
+	 *	   case 1: ns-regions start from 0MB
+	 *	 -----------------------------------------------
+	 *	 |    ns0   |  S0  |  ns1  |   S1  |    ns2    |
+	 *	0----------------------------------------------- max_mb
+	 *
+	 *
+	 *	   case 2: ns-regions do not start from 0MB
+	 *	 -----------------------------------------------
+	 *	 |    S0   |  ns0  |  ns1  |   ns2  |    S1    |
+	 *	0----------------------------------------------- max_mb
+	 */
+
+	/* as in case 2 above, the first ns-region does not start at 0MB */
+	if (p.ns_base[0] != 0) {
+		p.s_base[p.s_nr] = 0;
+		p.s_top[p.s_nr] = p.ns_base[0];
+		p.s_nr++;
+	}
+
+	/*
+	 * note: if the ns-regions do not start at 0MB, p.s_nr is 1 here, else 0
+	 */
+	for (i = 0; i < p.ns_nr; i++) {
+		/*
+		 * if the current ns-region's top reaches the boundary,
+		 * all s-regions have been parsed, so finish.
+		 */
+		if (p.ns_top[i] == p.boundary)
+			goto out;
+
+		/* s-region's base starts from previous ns-region's top */
+		p.s_base[p.s_nr] = p.ns_top[i];
+
+		/* s-region's top ends with next ns-region's base */
+		if (i + 1 < p.ns_nr)
+			p.s_top[p.s_nr] = p.ns_base[i + 1];
+		else
+			p.s_top[p.s_nr] = p.boundary;
+		p.s_nr++;
+	}
+out:
+	return p;
+}
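A small worked example of the parsing above (the numbers are made up for
illustration): with max_mb = 4096 and two ns-regions covering 0-128MB and
256-4096MB, the loop produces a single secure region:

    /* input:  ns_base[] = { 0, 256 },  ns_top[] = { 128, 4096 }            */
    /* case 1: the first ns-region starts at 0MB, so no leading s-region    */
    /* ns0 does not reach the boundary, so the gap becomes a secure region: */
    /*         s_base[0] = 128, s_top[0] = 256                              */
    /* ns1 reaches the 4096MB boundary, so parsing finishes with s_nr == 1  */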
diff --git a/plat/rockchip/common/drivers/parameter/ddr_parameter.h b/plat/rockchip/common/drivers/parameter/ddr_parameter.h
new file mode 100644
index 0000000..f8e3be9
--- /dev/null
+++ b/plat/rockchip/common/drivers/parameter/ddr_parameter.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PARAMETER_H__
+#define __PARAMETER_H__
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <soc.h>
+#include <string.h>
+
+#define DDR_REGION_NR_MAX		10
+#define REGION_NR_OFFSET		0
+#define REGION_ADDR_OFFSET		8
+#define REGION_DATA_PER_BYTES		8
+#define RG_SIZE_MB(byte)		((byte) >> 20)
+
+/* unit: MB */
+struct param_ddr_usage {
+	uint64_t boundary;
+
+	uint32_t ns_nr;
+	uint64_t ns_base[DDR_REGION_NR_MAX];
+	uint64_t ns_top[DDR_REGION_NR_MAX];
+
+	uint32_t s_nr;
+	uint64_t s_base[DDR_REGION_NR_MAX];
+	uint64_t s_top[DDR_REGION_NR_MAX];
+};
+
+struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb);
+
+#endif /* __PARAMETER_H__ */
diff --git a/plat/rockchip/common/drivers/pmu/pmu_com.h b/plat/rockchip/common/drivers/pmu/pmu_com.h
new file mode 100644
index 0000000..75e924d
--- /dev/null
+++ b/plat/rockchip/common/drivers/pmu/pmu_com.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_COM_H__
+#define __PMU_COM_H__
+
+#ifndef CHECK_CPU_WFIE_BASE
+#define CHECK_CPU_WFIE_BASE (PMU_BASE + PMU_CORE_PWR_ST)
+#endif
+/*
+ * Use this macro to instantiate the lock before it is used by the
+ * rockchip_pd_lock_xxx() macros below
+ */
+DECLARE_BAKERY_LOCK(rockchip_pd_lock);
+
+/*
+ * These are wrapper macros to the power domain bakery lock API.
+ */
+#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock)
+#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock)
+#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock)
+
+/*****************************************************************************
+ * power domain on or off
+ *****************************************************************************/
+enum pmu_pd_state {
+	pmu_pd_on = 0,
+	pmu_pd_off = 1
+};
+
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak pmu_power_domain_ctr
+#pragma weak check_cpu_wfie
+
+static inline uint32_t pmu_power_domain_st(uint32_t pd)
+{
+	uint32_t pwrdn_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &  BIT(pd);
+
+	if (pwrdn_st)
+		return pmu_pd_off;
+	else
+		return pmu_pd_on;
+}
+
+static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
+{
+	uint32_t val;
+	uint32_t loop = 0;
+	int ret = 0;
+
+	rockchip_pd_lock_get();
+
+	val = mmio_read_32(PMU_BASE + PMU_PWRDN_CON);
+	if (pd_state == pmu_pd_off)
+		val |=  BIT(pd);
+	else
+		val &= ~BIT(pd);
+
+	mmio_write_32(PMU_BASE + PMU_PWRDN_CON, val);
+	dsb();
+
+	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
+		udelay(1);
+		loop++;
+	}
+
+	if (pmu_power_domain_st(pd) != pd_state) {
+		WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
+		ret = -EINVAL;
+	}
+
+	rockchip_pd_lock_rls();
+
+	return ret;
+}
+
+static int check_cpu_wfie(uint32_t cpu_id, uint32_t wfie_msk)
+{
+	uint32_t cluster_id, loop = 0;
+
+	if (cpu_id >= PLATFORM_CLUSTER0_CORE_COUNT) {
+		cluster_id = 1;
+		cpu_id -= PLATFORM_CLUSTER0_CORE_COUNT;
+	} else {
+		cluster_id = 0;
+	}
+
+	if (cluster_id)
+		wfie_msk <<= (clstb_cpu_wfe + cpu_id);
+	else
+		wfie_msk <<= (clstl_cpu_wfe + cpu_id);
+
+	while (!(mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) &&
+	       (loop < CHK_CPU_LOOP)) {
+		udelay(1);
+		loop++;
+	}
+
+	if ((mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) == 0) {
+		WARN("%s: %d, %d, %d, error!\n", __func__,
+		     cluster_id, cpu_id, wfie_msk);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#endif /* __PMU_COM_H__ */
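A hedged usage sketch for the helpers above (the power-domain ID PD_SOME_DOMAIN is
hypothetical; real IDs are SoC specific and come from the per-SoC PMU headers):

    /* request power-down and poll PMU_PWRDN_ST until it takes effect;
       the bakery lock is taken and released inside the helper */
    if (pmu_power_domain_ctr(PD_SOME_DOMAIN, pmu_pd_off) != 0)
        WARN("power domain did not reach the requested state\n");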
diff --git a/plat/rockchip/common/include/plat_macros.S b/plat/rockchip/common/include/plat_macros.S
new file mode 100644
index 0000000..be1a9fa
--- /dev/null
+++ b/plat/rockchip/common/include/plat_macros.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ROCKCHIP_PLAT_MACROS_S__
+#define __ROCKCHIP_PLAT_MACROS_S__
+
+#include <cci.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <gicv3.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
+gicc_regs:
+	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+
+/* Applicable only to GICv3 with SRE enabled */
+icc_regs:
+	.asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
+
+/* Registers common to both GICv2 and GICv3 */
+gicd_pend_reg:
+	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
+		" Offset:\t\t\tvalue\n"
+newline:
+	.asciz "\n"
+spacer:
+	.asciz ":\t\t0x"
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""
+
+	/* ---------------------------------------------
+	 * The below utility macro prints out relevant GIC
+	 * and CCI registers whenever an unhandled
+	 * exception is taken in BL31.
+	 * Expects: GICD base in x16, GICC base in x17
+	 * Clobbers: x0 - x10, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+
+	mov_imm	x16, PLAT_RK_GICD_BASE
+	mov_imm	x17, PLAT_RK_GICC_BASE
+
+	/* Check for GICv3 system register access */
+	mrs	x7, id_aa64pfr0_el1
+	ubfx	x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
+	cmp	x7, #1
+	b.ne	print_gicv2
+
+	/* Check for SRE enable */
+	mrs	x8, ICC_SRE_EL3
+	tst	x8, #ICC_SRE_SRE_BIT
+	b.eq	print_gicv2
+
+	/* Load the icc reg list to x6 */
+	adr	x6, icc_regs
+	/* Load the icc regs to gp regs used by str_in_crash_buf_print */
+	mrs	x8, ICC_HPPIR0_EL1
+	mrs	x9, ICC_HPPIR1_EL1
+	mrs	x10, ICC_CTLR_EL3
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+	b	print_gic_common
+
+print_gicv2:
+	/* Load the gicc reg list to x6 */
+	adr	x6, gicc_regs
+	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+	ldr	w8, [x17, #GICC_HPPIR]
+	ldr	w9, [x17, #GICC_AHPPIR]
+	ldr	w10, [x17, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+
+print_gic_common:
+	/* Print the GICD_ISPENDR regs */
+	add	x7, x16, #GICD_ISPENDR
+	adr	x4, gicd_pend_reg
+	bl	asm_print_str
+gicd_ispendr_loop:
+	sub	x4, x7, x16
+	cmp	x4, #0x280
+	b.eq	exit_print_gic_regs
+	bl	asm_print_hex
+
+	adr	x4, spacer
+	bl	asm_print_str
+
+	ldr	x4, [x7], #8
+	bl	asm_print_hex
+
+	adr	x4, newline
+	bl	asm_print_str
+	b	gicd_ispendr_loop
+exit_print_gic_regs:
+
+#if PLATFORM_CLUSTER_COUNT > 1
+	adr	x6, cci_iface_regs
+	/* Store in x7 the base address of the first interface */
+	mov_imm	x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX))
+	ldr	w8, [x7, #SNOOP_CTRL_REG]
+	/* Store in x7 the base address of the second interface */
+	mov_imm	x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET(	\
+			PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX))
+	ldr	w9, [x7, #SNOOP_CTRL_REG]
+	/* Store to the crash buf and print to console */
+	bl	str_in_crash_buf_print
+#endif
+	.endm
+
+#endif /* __ROCKCHIP_PLAT_MACROS_S__ */
diff --git a/plat/rockchip/common/include/plat_params.h b/plat/rockchip/common/include/plat_params.h
new file mode 100644
index 0000000..aa13f87
--- /dev/null
+++ b/plat/rockchip/common/include/plat_params.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_PARAMS_H__
+#define __PLAT_PARAMS_H__
+
+#include <stdint.h>
+
+/*
+ * We define several plat parameter structs for BL2 to pass platform related
+ * parameters to the Rockchip BL31 platform code.  All plat parameters start
+ * with a common header, which has a type field to indicate the parameter type
+ * and a next pointer to the next parameter. For the last parameter in the
+ * list, the next pointer is NULL.  After the header come the variable-sized
+ * members that describe the parameter. The picture below shows
+ * how the parameters are kept in memory.
+ *
+ * head of list  ---> +----------------+ --+
+ *                    |      type      |   |
+ *                    +----------------+   |--> struct bl31_plat_param
+ *               +----|      next      |   |
+ *               |    +----------------+ --+
+ *               |    | parameter data |
+ *               |    +----------------+
+ *               |
+ *               +--> +----------------+ --+
+ *                    |      type      |   |
+ *                    +----------------+   |--> struct bl31_plat_param
+ *           NULL <---|      next      |   |
+ *                    +----------------+ --+
+ *                    | parameter data |
+ *                    +----------------+
+ *
+ * Note: The SCTLR_EL3.A bit (Alignment fault check enable) of ARM TF is set,
+ * so be sure each parameter struct starts on 64-bit aligned address. If not,
+ * alignment fault will occur during accessing its data member.
+ */
+
+#define BL31_GPIO_DIR_OUT		0
+#define BL31_GPIO_DIR_IN		1
+
+#define BL31_GPIO_LEVEL_LOW		0
+#define BL31_GPIO_LEVEL_HIGH		1
+
+#define BL31_GPIO_PULL_NONE		0
+#define BL31_GPIO_PULL_UP		1
+#define BL31_GPIO_PULL_DOWN		2
+
+/* param type */
+enum {
+	PARAM_NONE = 0,
+	PARAM_RESET,
+	PARAM_POWEROFF,
+	PARAM_SUSPEND_GPIO,
+	PARAM_SUSPEND_APIO,
+};
+
+struct apio_info {
+	uint8_t apio1 : 1;
+	uint8_t apio2 : 1;
+	uint8_t apio3 : 1;
+	uint8_t apio4 : 1;
+	uint8_t apio5 : 1;
+};
+
+struct gpio_info {
+	uint8_t polarity;
+	uint8_t direction;
+	uint8_t pull_mode;
+	uint32_t index;
+};
+
+/* common header for all plat parameter type */
+struct bl31_plat_param {
+	uint64_t type;
+	void *next;
+};
+
+struct bl31_gpio_param {
+	struct bl31_plat_param h;
+	struct gpio_info gpio;
+};
+
+struct bl31_apio_param {
+	struct bl31_plat_param h;
+	struct apio_info apio;
+};
+
+#endif /* __PLAT_PARAMS_H__ */
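A minimal sketch of how BL2 might chain these parameters before handing them to
BL31 (the field values are hypothetical; each struct must sit at a 64-bit aligned
address, as noted in the comment above):

    static struct bl31_apio_param apio_param = {
        .h = { .type = PARAM_SUSPEND_APIO, .next = NULL },
        .apio = { .apio1 = 1, .apio2 = 1 },
    };

    static struct bl31_gpio_param reset_param = {
        .h = { .type = PARAM_RESET, .next = &apio_param },
        .gpio = { .index = 5, .polarity = BL31_GPIO_LEVEL_HIGH },
    };

    /* &reset_param is the list head that BL31 later receives as
       plat_params_from_bl2 */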
diff --git a/plat/rockchip/common/include/plat_private.h b/plat/rockchip/common/include/plat_private.h
new file mode 100644
index 0000000..5456773
--- /dev/null
+++ b/plat/rockchip/common/include/plat_private.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_PRIVATE_H__
+#define __PLAT_PRIVATE_H__
+
+#ifndef __ASSEMBLY__
+#include <mmio.h>
+#include <psci.h>
+#include <stdint.h>
+#include <xlat_tables.h>
+
+#define __sramdata __attribute__((section(".sram.data")))
+#define __sramconst __attribute__((section(".sram.rodata")))
+#define __sramfunc __attribute__((section(".sram.text")))
+
+#define __pmusramdata __attribute__((section(".pmusram.data")))
+#define __pmusramconst __attribute__((section(".pmusram.rodata")))
+#define __pmusramfunc __attribute__((section(".pmusram.text")))
+
+extern uint32_t __bl31_sram_text_start, __bl31_sram_text_end;
+extern uint32_t __bl31_sram_data_start, __bl31_sram_data_end;
+extern uint32_t __bl31_sram_stack_start, __bl31_sram_stack_end;
+extern uint32_t __bl31_sram_text_real_end, __bl31_sram_data_real_end;
+extern uint32_t __sram_incbin_start, __sram_incbin_end;
+extern uint32_t __sram_incbin_real_end;
+
+
+/******************************************************************************
+ * These registers have write-mask bits: to set a bit, the corresponding
+ * write-mask bit must be set at the same time. The write-mask bits live in
+ * the high 16 bits of the register.
+ * The following macro definitions help access such write-masked registers.
+ ******************************************************************************/
+#define REG_MSK_SHIFT	16
+
+#ifndef WMSK_BIT
+#define WMSK_BIT(nr)		BIT((nr) + REG_MSK_SHIFT)
+#endif
+
+/* set one bit with write mask */
+#ifndef BIT_WITH_WMSK
+#define BIT_WITH_WMSK(nr)	(BIT(nr) | WMSK_BIT(nr))
+#endif
+
+#ifndef BITS_SHIFT
+#define BITS_SHIFT(bits, shift)	(bits << (shift))
+#endif
+
+#ifndef BITS_WITH_WMASK
+#define BITS_WITH_WMASK(bits, msk, shift)\
+	(BITS_SHIFT(bits, shift) | BITS_SHIFT(msk, (shift + REG_MSK_SHIFT)))
+#endif
+
+/******************************************************************************
+ * Function and variable prototypes
+ *****************************************************************************/
+void plat_configure_mmu_el3(unsigned long total_base,
+			    unsigned long total_size,
+			    unsigned long,
+			    unsigned long,
+			    unsigned long,
+			    unsigned long);
+
+void plat_cci_init(void);
+void plat_cci_enable(void);
+void plat_cci_disable(void);
+
+void plat_delay_timer_init(void);
+
+void params_early_setup(void *plat_params_from_bl2);
+
+void plat_rockchip_gic_driver_init(void);
+void plat_rockchip_gic_init(void);
+void plat_rockchip_gic_cpuif_enable(void);
+void plat_rockchip_gic_cpuif_disable(void);
+void plat_rockchip_gic_pcpu_init(void);
+
+void plat_rockchip_pmu_init(void);
+void plat_rockchip_soc_init(void);
+uintptr_t plat_get_sec_entrypoint(void);
+
+void platform_cpu_warmboot(void);
+
+struct gpio_info *plat_get_rockchip_gpio_reset(void);
+struct gpio_info *plat_get_rockchip_gpio_poweroff(void);
+struct gpio_info *plat_get_rockchip_suspend_gpio(uint32_t *count);
+struct apio_info *plat_get_rockchip_suspend_apio(void);
+void plat_rockchip_gpio_init(void);
+void plat_rockchip_save_gpio(void);
+void plat_rockchip_restore_gpio(void);
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint);
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_off(void);
+int rockchip_soc_sys_pwr_dm_suspend(void);
+int rockchip_soc_cores_pwr_dm_suspend(void);
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl,
+				     plat_local_state_t lvl_state);
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_on_finish(void);
+int rockchip_soc_sys_pwr_dm_resume(void);
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl,
+				    plat_local_state_t lvl_state);
+int rockchip_soc_cores_pwr_dm_resume(void);
+void __dead2 rockchip_soc_soft_reset(void);
+void __dead2 rockchip_soc_system_off(void);
+void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+				const psci_power_state_t *target_state);
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void);
+
+extern const unsigned char rockchip_power_domain_tree_desc[];
+
+extern void *pmu_cpuson_entrypoint;
+extern uint64_t cpuson_entry_point[PLATFORM_CORE_COUNT];
+extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
+
+extern const mmap_region_t plat_rk_mmap[];
+
+void rockchip_plat_mmu_el3(void);
+
+#endif /* __ASSEMBLY__ */
+
+/******************************************************************************
+ * cpu up status
+ * Each macro value must fit in 12 bits so it can be used as an immediate in
+ * cmp instructions.
+ ******************************************************************************/
+#define PMU_CPU_HOTPLUG		0xf00
+#define PMU_CPU_AUTO_PWRDN	0xf0
+#define PMU_CLST_RET	0xa5
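+/*
+ * For example, 0xf00 fits in the 12-bit immediate field of an AArch64 cmp
+ * instruction; a flag value above 0xfff would need an extra register load
+ * before it could be compared.
+ */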
+
+#endif /* __PLAT_PRIVATE_H__ */
diff --git a/plat/rockchip/common/include/rockchip_sip_svc.h b/plat/rockchip/common/include/rockchip_sip_svc.h
new file mode 100644
index 0000000..8125ab0
--- /dev/null
+++ b/plat/rockchip/common/include/rockchip_sip_svc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ROCKCHIP_SIP_SVC_H__
+#define __ROCKCHIP_SIP_SVC_H__
+
+/* SMC function IDs for SiP Service queries */
+#define SIP_SVC_CALL_COUNT		0x8200ff00
+#define SIP_SVC_UID			0x8200ff01
+#define SIP_SVC_VERSION			0x8200ff03
+
+/* rockchip SiP Service Calls version numbers */
+#define RK_SIP_SVC_VERSION_MAJOR	0x0
+#define RK_SIP_SVC_VERSION_MINOR	0x1
+
+/* Number of ROCKCHIP SiP Calls implemented */
+#define RK_COMMON_SIP_NUM_CALLS		0x3
+
+enum {
+	RK_SIP_E_SUCCESS = 0,
+	RK_SIP_E_INVALID_PARAM = -1
+};
+
+#endif
diff --git a/plat/rockchip/common/params_setup.c b/plat/rockchip/common/params_setup.c
new file mode 100644
index 0000000..b37acb7
--- /dev/null
+++ b/plat/rockchip/common/params_setup.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <gpio.h>
+#include <mmio.h>
+#include <plat_params.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <string.h>
+
+static struct gpio_info param_reset;
+static struct gpio_info param_poweroff;
+static struct bl31_apio_param param_apio;
+static struct gpio_info *rst_gpio;
+static struct gpio_info *poweroff_gpio;
+static struct gpio_info suspend_gpio[10];
+uint32_t suspend_gpio_cnt;
+static struct apio_info *suspend_apio;
+
+struct gpio_info *plat_get_rockchip_gpio_reset(void)
+{
+	return rst_gpio;
+}
+
+struct gpio_info *plat_get_rockchip_gpio_poweroff(void)
+{
+	return poweroff_gpio;
+}
+
+struct gpio_info *plat_get_rockchip_suspend_gpio(uint32_t *count)
+{
+	*count = suspend_gpio_cnt;
+
+	return &suspend_gpio[0];
+}
+
+struct apio_info *plat_get_rockchip_suspend_apio(void)
+{
+	return suspend_apio;
+}
+
+void params_early_setup(void *plat_param_from_bl2)
+{
+	struct bl31_plat_param *bl2_param;
+	struct bl31_gpio_param *gpio_param;
+
+	/* keep plat parameters for later processing if needed */
+	bl2_param = (struct bl31_plat_param *)plat_param_from_bl2;
+	while (bl2_param) {
+		switch (bl2_param->type) {
+		case PARAM_RESET:
+			gpio_param = (struct bl31_gpio_param *)bl2_param;
+			memcpy(&param_reset, &gpio_param->gpio,
+			       sizeof(struct gpio_info));
+			rst_gpio = &param_reset;
+			break;
+		case PARAM_POWEROFF:
+			gpio_param = (struct bl31_gpio_param *)bl2_param;
+			memcpy(&param_poweroff, &gpio_param->gpio,
+				sizeof(struct gpio_info));
+			poweroff_gpio = &param_poweroff;
+			break;
+		case PARAM_SUSPEND_GPIO:
+			if (suspend_gpio_cnt >= ARRAY_SIZE(suspend_gpio)) {
+				ERROR("exceeded supported suspend gpio count\n");
+				break;
+			}
+			gpio_param = (struct bl31_gpio_param *)bl2_param;
+			memcpy(&suspend_gpio[suspend_gpio_cnt],
+			       &gpio_param->gpio,
+			       sizeof(struct gpio_info));
+			suspend_gpio_cnt++;
+			break;
+		case PARAM_SUSPEND_APIO:
+			memcpy(&param_apio, bl2_param,
+			       sizeof(struct bl31_apio_param));
+			suspend_apio = &param_apio.apio;
+			break;
+		default:
+			ERROR("unexpected param type found %ld\n",
+			      bl2_param->type);
+			break;
+		}
+		bl2_param = bl2_param->next;
+	}
+}
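+/*
+ * A sketch of the structure walked above (assumed layout, based on the
+ * plat_params types used here): BL2 passes a singly linked list of
+ * bl31_plat_param nodes chained through ->next, e.g. a PARAM_RESET
+ * bl31_gpio_param followed by a PARAM_SUSPEND_APIO bl31_apio_param and a
+ * NULL terminator; each recognised node is copied into the static storage
+ * at the top of this file.
+ */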
diff --git a/plat/rockchip/common/plat_pm.c b/plat/rockchip/common/plat_pm.c
new file mode 100644
index 0000000..cd88f60
--- /dev/null
+++ b/plat/rockchip/common/plat_pm.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/* Macros to read the rk power domain state */
+#define RK_CORE_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define RK_CLUSTER_PWR_STATE(state) \
+	((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define RK_SYSTEM_PWR_STATE(state) \
+	((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+static uintptr_t rockchip_sec_entrypoint;
+
+#pragma weak rockchip_soc_cores_pwr_dm_on
+#pragma weak rockchip_soc_hlvl_pwr_dm_off
+#pragma weak rockchip_soc_cores_pwr_dm_off
+#pragma weak rockchip_soc_sys_pwr_dm_suspend
+#pragma weak rockchip_soc_cores_pwr_dm_suspend
+#pragma weak rockchip_soc_hlvl_pwr_dm_suspend
+#pragma weak rockchip_soc_hlvl_pwr_dm_on_finish
+#pragma weak rockchip_soc_cores_pwr_dm_on_finish
+#pragma weak rockchip_soc_sys_pwr_dm_resume
+#pragma weak rockchip_soc_hlvl_pwr_dm_resume
+#pragma weak rockchip_soc_cores_pwr_dm_resume
+#pragma weak rockchip_soc_soft_reset
+#pragma weak rockchip_soc_system_off
+#pragma weak rockchip_soc_sys_pd_pwr_dn_wfi
+#pragma weak rockchip_soc_cores_pd_pwr_dn_wfi
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl,
+				     plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl,
+				    plat_local_state_t lvl_state)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+	return PSCI_E_NOT_SUPPORTED;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	while (1)
+		;
+}
+
+void __dead2 rockchip_soc_system_off(void)
+{
+	while (1)
+		;
+}
+
+void __dead2 rockchip_soc_cores_pd_pwr_dn_wfi(
+				const psci_power_state_t *target_state)
+{
+	psci_power_down_wfi();
+}
+
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+{
+	psci_power_down_wfi();
+}
+
+/*******************************************************************************
+ * Rockchip standard platform handler called to check the validity of the power
+ * state parameter.
+ ******************************************************************************/
+int rockchip_validate_power_state(unsigned int power_state,
+				  psci_power_state_t *req_state)
+{
+	int pstate = psci_get_pstate_type(power_state);
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int i;
+
+	assert(req_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY) {
+		/*
+		 * It's only possible to enter standby at power level 0;
+		 * ignore any other power level.
+		 */
+		if (pwr_lvl != MPIDR_AFFLVL0)
+			return PSCI_E_INVALID_PARAMS;
+
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+					PLAT_MAX_RET_STATE;
+	} else {
+		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_MAX_OFF_STATE;
+
+		for (i = (pwr_lvl + 1); i <= PLAT_MAX_PWR_LVL; i++)
+			req_state->pwr_domain_state[i] =
+					PLAT_MAX_RET_STATE;
+	}
+
+	/* We expect the 'state id' to be zero */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
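+/*
+ * Worked example: a power_state requesting a power-down at level 1 fills
+ * req_state with PLAT_MAX_OFF_STATE at levels 0 and 1 and PLAT_MAX_RET_STATE
+ * at any remaining levels up to PLAT_MAX_PWR_LVL, while a standby request at
+ * any level other than 0 is rejected with PSCI_E_INVALID_PARAMS.
+ */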
+
+void rockchip_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	int i;
+
+	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+}
+
+/*******************************************************************************
+ * RockChip handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+void rockchip_cpu_standby(plat_local_state_t cpu_state)
+{
+	unsigned int scr;
+
+	assert(cpu_state == PLAT_MAX_RET_STATE);
+
+	scr = read_scr_el3();
+	/* Enable PhysicalIRQ bit for NS world to wake the CPU */
+	write_scr_el3(scr | SCR_IRQ_BIT);
+	isb();
+	dsb();
+	wfi();
+
+	/*
+	 * Restore SCR_EL3 to its original value; synchronisation of scr_el3 is
+	 * done by the eret in el3_exit, which saves some execution cycles.
+	 */
+	write_scr_el3(scr);
+}
+
+/*******************************************************************************
+ * RockChip handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+int rockchip_pwr_domain_on(u_register_t mpidr)
+{
+	return rockchip_soc_cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint);
+}
+
+/*******************************************************************************
+ * RockChip handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+	int ret;
+
+	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
+
+	plat_rockchip_gic_cpuif_disable();
+
+	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		plat_cci_disable();
+
+	rockchip_soc_cores_pwr_dm_off();
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_off(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
+	}
+}
+
+/*******************************************************************************
+ * RockChip handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+	int ret;
+
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		rockchip_soc_sys_pwr_dm_suspend();
+	else
+		rockchip_soc_cores_pwr_dm_suspend();
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	plat_rockchip_gic_cpuif_disable();
+
+	/* Perform the common cluster specific operations */
+	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		plat_cci_disable();
+
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		return;
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_suspend(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
+	}
+}
+
+/*******************************************************************************
+ * RockChip handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+	int ret;
+
+	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_on_finish(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
+	}
+
+	rockchip_soc_cores_pwr_dm_on_finish();
+
+	/* Perform the common cluster specific operations */
+	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+	}
+
+	/* Enable the gic cpu interface */
+	plat_rockchip_gic_pcpu_init();
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	plat_rockchip_gic_cpuif_enable();
+}
+
+/*******************************************************************************
+ * RockChip handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+	int ret;
+
+	/* Nothing to be done on waking up from retention from CPU level */
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;
+
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		rockchip_soc_sys_pwr_dm_resume();
+		goto comm_finish;
+	}
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		ret = rockchip_soc_hlvl_pwr_dm_resume(lvl, lvl_state);
+		if (ret == PSCI_E_NOT_SUPPORTED)
+			break;
+	}
+
+	rockchip_soc_cores_pwr_dm_resume();
+
+	/*
+	 * Program the gic per-cpu distributor or re-distributor interface.
+	 * For system power domain suspend, the GIC must instead be resumed in
+	 * rockchip_soc_sys_pwr_dm_resume(), according to the system power mode
+	 * implementation.
+	 */
+	plat_rockchip_gic_cpuif_enable();
+
+comm_finish:
+	/* Perform the common cluster specific operations */
+	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
+		/* Enable coherency if this cluster was off */
+		plat_cci_enable();
+	}
+}
+
+/*******************************************************************************
+ * RockChip handlers to reboot the system
+ ******************************************************************************/
+static void __dead2 rockchip_system_reset(void)
+{
+	rockchip_soc_soft_reset();
+}
+
+/*******************************************************************************
+ * RockChip handlers to power off the system
+ ******************************************************************************/
+static void __dead2 rockchip_system_poweroff(void)
+{
+	rockchip_soc_system_off();
+}
+
+static void __dead2 rockchip_pd_pwr_down_wfi(
+		const psci_power_state_t *target_state)
+{
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
+		rockchip_soc_sys_pd_pwr_dn_wfi();
+	else
+		rockchip_soc_cores_pd_pwr_dn_wfi(target_state);
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_rockchip_psci_pm_ops. The rockchip
+ * standard platform layer will take care of registering the handlers with
+ * PSCI.
+ ******************************************************************************/
+const plat_psci_ops_t plat_rockchip_psci_pm_ops = {
+	.cpu_standby = rockchip_cpu_standby,
+	.pwr_domain_on = rockchip_pwr_domain_on,
+	.pwr_domain_off = rockchip_pwr_domain_off,
+	.pwr_domain_suspend = rockchip_pwr_domain_suspend,
+	.pwr_domain_on_finish = rockchip_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish = rockchip_pwr_domain_suspend_finish,
+	.pwr_domain_pwr_down_wfi = rockchip_pd_pwr_down_wfi,
+	.system_reset = rockchip_system_reset,
+	.system_off = rockchip_system_poweroff,
+	.validate_power_state = rockchip_validate_power_state,
+	.get_sys_suspend_power_state = rockchip_get_sys_suspend_power_state
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	*psci_ops = &plat_rockchip_psci_pm_ops;
+	rockchip_sec_entrypoint = sec_entrypoint;
+	return 0;
+}
+
+uintptr_t plat_get_sec_entrypoint(void)
+{
+	assert(rockchip_sec_entrypoint);
+	return rockchip_sec_entrypoint;
+}
diff --git a/plat/rockchip/common/plat_topology.c b/plat/rockchip/common/plat_topology.c
new file mode 100644
index 0000000..49d063c
--- /dev/null
+++ b/plat/rockchip/common/plat_topology.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/*******************************************************************************
+ * This function returns the RockChip default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return rockchip_power_domain_tree_desc;
+}
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	cpu_id = mpidr & MPIDR_AFFLVL_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+	cpu_id += (cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT);
+
+	if (cpu_id >= PLATFORM_CORE_COUNT)
+		return -1;
+
+	return cpu_id;
+}
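+/*
+ * For illustration: on a single-cluster part, mpidr 0x0003 (cluster 0,
+ * cpu 3) maps to core position 3; for a second cluster, the affinity-1
+ * field shifted right by PLAT_RK_CLST_TO_CPUID_SHIFT offsets its cpus past
+ * the first cluster's positions.
+ */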
diff --git a/plat/rockchip/common/pmusram/pmu_sram_cpus_on.S b/plat/rockchip/common/pmusram/pmu_sram_cpus_on.S
new file mode 100644
index 0000000..5a1854b
--- /dev/null
+++ b/plat/rockchip/common/pmusram/pmu_sram_cpus_on.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.globl pmu_cpuson_entrypoint
+	.macro pmusram_entry_func _name
+	.section .pmusram.entry, "ax"
+	.type \_name, %function
+	.func \_name
+	.cfi_startproc
+	\_name:
+	.endm
+
+pmusram_entry_func pmu_cpuson_entrypoint
+
+#if PSRAM_CHECK_WAKEUP_CPU
+check_wake_cpus:
+	mrs	x0, MPIDR_EL1
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	orr	x0, x0, x1
+
+	/* primary_cpu */
+	ldr	w1, boot_mpidr
+	cmp	w0, w1
+	b.eq	sys_wakeup
+
+	/*
+	 * If the core is not the primary cpu,
+	 * force the core into wfe.
+	 */
+wfe_loop:
+	wfe
+	b	wfe_loop
+sys_wakeup:
+#endif
+
+#if PSRAM_DO_DDR_RESUME
+ddr_resume:
+	ldr	x2, =__bl31_sram_stack_end
+	mov	sp, x2
+	bl	dmc_resume
+#endif
+	bl	sram_restore
+sys_resume:
+	bl	psci_entrypoint
+endfunc pmu_cpuson_entrypoint
diff --git a/plat/rockchip/common/rockchip_gicv2.c b/plat/rockchip/common/rockchip_gicv2.c
new file mode 100644
index 0000000..afdc6aa
--- /dev/null
+++ b/plat/rockchip/common/rockchip_gicv2.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <gicv2.h>
+#include <platform_def.h>
+#include <utils.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv2 driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_rockchip_gic_driver_init
+#pragma weak plat_rockchip_gic_init
+#pragma weak plat_rockchip_gic_cpuif_enable
+#pragma weak plat_rockchip_gic_cpuif_disable
+#pragma weak plat_rockchip_gic_pcpu_init
+
+/******************************************************************************
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ *****************************************************************************/
+const unsigned int g0_interrupt_array[] = {
+	PLAT_RK_G1S_IRQS,
+};
+
+/*
+ * Ideally `rockchip_gic_data` structure definition should be a `const` but it
+ * is kept as modifiable for overwriting with different GICD and GICC base when
+ * running on FVP with VE memory map.
+ */
+gicv2_driver_data_t rockchip_gic_data = {
+	.gicd_base = PLAT_RK_GICD_BASE,
+	.gicc_base = PLAT_RK_GICC_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+};
+
+/******************************************************************************
+ * RockChip common helper to initialize the GICv2 only driver.
+ *****************************************************************************/
+void plat_rockchip_gic_driver_init(void)
+{
+	gicv2_driver_init(&rockchip_gic_data);
+}
+
+void plat_rockchip_gic_init(void)
+{
+	gicv2_distif_init();
+	gicv2_pcpu_distif_init();
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * RockChip common helper to enable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_rockchip_gic_cpuif_enable(void)
+{
+	gicv2_cpuif_enable();
+}
+
+/******************************************************************************
+ * RockChip common helper to disable the GICv2 CPU interface
+ *****************************************************************************/
+void plat_rockchip_gic_cpuif_disable(void)
+{
+	gicv2_cpuif_disable();
+}
+
+/******************************************************************************
+ * RockChip common helper to initialize the per cpu distributor interface
+ * in GICv2
+ *****************************************************************************/
+void plat_rockchip_gic_pcpu_init(void)
+{
+	gicv2_pcpu_distif_init();
+}
diff --git a/plat/rockchip/common/rockchip_gicv3.c b/plat/rockchip/common/rockchip_gicv3.c
new file mode 100644
index 0000000..0500da6
--- /dev/null
+++ b/plat/rockchip/common/rockchip_gicv3.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <gicv3.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <utils.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv3 driver is initialised and used.
+ *****************************************************************************/
+#pragma weak plat_rockchip_gic_driver_init
+#pragma weak plat_rockchip_gic_init
+#pragma weak plat_rockchip_gic_cpuif_enable
+#pragma weak plat_rockchip_gic_cpuif_disable
+#pragma weak plat_rockchip_gic_pcpu_init
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+/* Array of Group1 secure interrupts to be configured by the gic driver */
+const unsigned int g1s_interrupt_array[] = {
+	PLAT_RK_G1S_IRQS
+};
+
+/* Array of Group0 interrupts to be configured by the gic driver */
+const unsigned int g0_interrupt_array[] = {
+	PLAT_RK_G0_IRQS
+};
+
+static unsigned int plat_rockchip_mpidr_to_core_pos(unsigned long mpidr)
+{
+	return (unsigned int)plat_core_pos_by_mpidr(mpidr);
+}
+
+const gicv3_driver_data_t rockchip_gic_data = {
+	.gicd_base = PLAT_RK_GICD_BASE,
+	.gicr_base = PLAT_RK_GICR_BASE,
+	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+	.g1s_interrupt_num = ARRAY_SIZE(g1s_interrupt_array),
+	.g0_interrupt_array = g0_interrupt_array,
+	.g1s_interrupt_array = g1s_interrupt_array,
+	.rdistif_num = PLATFORM_CORE_COUNT,
+	.rdistif_base_addrs = rdistif_base_addrs,
+	.mpidr_to_core_pos = plat_rockchip_mpidr_to_core_pos,
+};
+
+void plat_rockchip_gic_driver_init(void)
+{
+	/*
+	 * The GICv3 driver is initialized in EL3 and does not need
+	 * to be initialized again in SEL1. This is because the S-EL1
+	 * can use GIC system registers to manage interrupts and does
+	 * not need GIC interface base addresses to be configured.
+	 */
+#ifdef IMAGE_BL31
+	gicv3_driver_init(&rockchip_gic_data);
+#endif
+}
+
+/******************************************************************************
+ * RockChip common helper to initialize the GIC. Only invoked
+ * by BL31
+ *****************************************************************************/
+void plat_rockchip_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * RockChip common helper to enable the GIC CPU interface
+ *****************************************************************************/
+void plat_rockchip_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * RockChip common helper to disable the GIC CPU interface
+ *****************************************************************************/
+void plat_rockchip_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * RockChip common helper to initialize the per-cpu redistributor interface
+ * in GICv3
+ *****************************************************************************/
+void plat_rockchip_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
diff --git a/plat/rockchip/common/rockchip_sip_svc.c b/plat/rockchip/common/rockchip_sip_svc.c
new file mode 100644
index 0000000..40cc94b
--- /dev/null
+++ b/plat/rockchip/common/rockchip_sip_svc.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_sip_calls.h>
+#include <rockchip_sip_svc.h>
+#include <runtime_svc.h>
+#include <uuid.h>
+
+/* Rockchip SiP Service UUID */
+DEFINE_SVC_UUID(rk_sip_svc_uid,
+		0xe86fc7e2, 0x313e, 0x11e6, 0xb7, 0x0d,
+		0x8f, 0x88, 0xee, 0x74, 0x7b, 0x72);
+
+#pragma weak rockchip_plat_sip_handler
+uint64_t rockchip_plat_sip_handler(uint32_t smc_fid,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/*
+ * This function is responsible for handling all SiP calls from the NS world
+ */
+uint64_t sip_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	uint32_t ns;
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+	if (!ns)
+		SMC_RET1(handle, SMC_UNK);
+
+	switch (smc_fid) {
+	case SIP_SVC_CALL_COUNT:
+		/* Return the number of Rockchip SiP Service Calls. */
+		SMC_RET1(handle,
+			 RK_COMMON_SIP_NUM_CALLS + RK_PLAT_SIP_NUM_CALLS);
+
+	case SIP_SVC_UID:
+		/* Return UID to the caller */
+		SMC_UUID_RET(handle, rk_sip_svc_uid);
+		break;
+
+	case SIP_SVC_VERSION:
+		/* Return the version of current implementation */
+		SMC_RET2(handle, RK_SIP_SVC_VERSION_MAJOR,
+			RK_SIP_SVC_VERSION_MINOR);
+		break;
+
+	default:
+		return rockchip_plat_sip_handler(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	}
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	rockchip_sip_svc,
+	OEN_SIP_START,
+	OEN_SIP_END,
+	SMC_TYPE_FAST,
+	NULL,
+	sip_smc_handler
+);
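+/*
+ * Usage sketch (from the non-secure side, not part of this file): an SMC
+ * with function ID SIP_SVC_VERSION (0x8200ff03) lands in sip_smc_handler(),
+ * which returns RK_SIP_SVC_VERSION_MAJOR in x0 and RK_SIP_SVC_VERSION_MINOR
+ * in x1 via SMC_RET2; unknown SiP function IDs fall through to
+ * rockchip_plat_sip_handler().
+ */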
diff --git a/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S
new file mode 100644
index 0000000..cd604d2
--- /dev/null
+++ b/plat/rockchip/rk3328/drivers/pmu/plat_pmu_macros.S
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+.globl	clst_warmboot_data
+
+.macro	func_rockchip_clst_warmboot
+.endm
+
+.macro rockchip_clst_warmboot_data
+clst_warmboot_data:
+	.rept	PLATFORM_CLUSTER_COUNT
+	.word	0
+	.endr
+.endm
diff --git a/plat/rockchip/rk3328/drivers/pmu/pmu.c b/plat/rockchip/rk3328/drivers/pmu/pmu.c
new file mode 100644
index 0000000..f576fe4
--- /dev/null
+++ b/plat/rockchip/rk3328/drivers/pmu/pmu.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl31.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+#include <pmu_com.h>
+#include <rk3328_def.h>
+
+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
+static struct rk3328_sleep_ddr_data ddr_data;
+static __sramdata struct rk3328_sleep_sram_data sram_data;
+
+static uint32_t cpu_warm_boot_addr;
+
+#pragma weak rk3328_pmic_suspend
+#pragma weak rk3328_pmic_resume
+
+static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
+{
+	uint32_t pd_reg, apm_reg;
+
+	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
+	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
+			       BIT(core_pm_en);
+
+	if (pd_reg && !apm_reg)
+		return core_pwr_pd;
+	else if (!pd_reg && apm_reg)
+		return core_pwr_wfi;
+
+	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
+	while (1)
+		;
+}
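+/*
+ * Reading of the two registers checked above: the PWRDN_CON bit set with
+ * automatic power management disabled means the core was powered down
+ * through its power domain (core_pwr_pd), while the apm enable bit set with
+ * the power-domain bit clear means the core idles in wfi under automatic
+ * power management (core_pwr_wfi); any other combination is treated as an
+ * error.
+ */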
+
+static int cpus_power_domain_on(uint32_t cpu_id)
+{
+	uint32_t cpu_pd, cfg_info;
+
+	cpu_pd = PD_CPU0 + cpu_id;
+	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
+
+	if (cfg_info == core_pwr_pd) {
+		/* disable apm cfg */
+		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+			      CORES_PM_DISABLE);
+
+		/* if the core is already on, power it off first */
+		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+				      CORES_PM_DISABLE);
+			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+		}
+		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
+	} else {
+		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
+			return -EINVAL;
+		}
+
+		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+			      BIT(core_pm_sft_wakeup_en));
+	}
+
+	return 0;
+}
+
+static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
+{
+	uint32_t cpu_pd, core_pm_value;
+
+	cpu_pd = PD_CPU0 + cpu_id;
+	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
+		return 0;
+
+	if (pd_cfg == core_pwr_pd) {
+		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
+			return -EINVAL;
+		/* disable apm cfg */
+		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+			      CORES_PM_DISABLE);
+		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+	} else {
+		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
+		if (pd_cfg == core_pwr_wfi_int)
+			core_pm_value |= BIT(core_pm_int_wakeup_en);
+
+		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+			      core_pm_value);
+	}
+
+	return 0;
+}
+
+static void nonboot_cpus_off(void)
+{
+	uint32_t boot_cpu, cpu;
+
+	/* turn off non-boot cpus */
+	boot_cpu = plat_my_core_pos();
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+		if (cpu == boot_cpu)
+			continue;
+		cpus_power_domain_off(cpu, core_pwr_pd);
+	}
+}
+
+void sram_save(void)
+{
+	/* TODO: support the sdram save for rk3328 SoCs */
+}
+
+void sram_restore(void)
+{
+	/* TODO: support the sdram restore for rk3328 SoCs */
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpu_id] == 0);
+	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
+	cpuson_entry_point[cpu_id] = entrypoint;
+	dsb();
+
+	cpus_power_domain_on(cpu_id);
+
+	return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi);
+
+	return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpu_id] == 0);
+	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
+	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
+	dsb();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
+
+	return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);
+
+	return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);
+
+	return 0;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
+	dsb();
+
+	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
+	dsb();
+	/*
+	 * The HW may need some time to reset the system,
+	 * so we do not want the core to execute any further code.
+	 */
+	while (1)
+		;
+}
+
+/*
+ * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
+ * If the PMIC is configured to power off in response to the sleep pin,
+ * driving the pin high powers the PMIC off.
+ */
+void __dead2 rockchip_soc_system_off(void)
+{
+	uint32_t val;
+
+	/* gpio config */
+	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
+	val &= ~GPIO2_D2_GPIO_MODE;
+	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);
+
+	/* config output */
+	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
+	val |= GPIO2_D2;
+	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);
+
+	/* config output high level */
+	val = mmio_read_32(GPIO2_BASE);
+	val |= GPIO2_D2;
+	mmio_write_32(GPIO2_BASE, val);
+	dsb();
+
+	while (1)
+		;
+}
+
+static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
+	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
+	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
+	0xf001, 0x27c0, 0x04D9, 0x03ff, 0x0000,
+	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0003, 0x0008
+};
+
+static void clks_gating_suspend(uint32_t *ungt_msk)
+{
+	int i;
+
+	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
+		ddr_data.clk_ungt_save[i] =
+			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
+			      ((~ungt_msk[i]) << 16) | 0xffff);
+	}
+}
+
+static void clks_gating_resume(void)
+{
+	int i;
+
+	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
+			      ddr_data.clk_ungt_save[i] | 0xffff0000);
+}
+
+static inline void pm_pll_wait_lock(uint32_t pll_id)
+{
+	uint32_t delay = PLL_LOCKED_TIMEOUT;
+
+	while (delay > 0) {
+		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
+		    PLL_IS_LOCKED)
+			break;
+		delay--;
+	}
+	if (delay == 0)
+		ERROR("lock-pll: %d\n", pll_id);
+}
+
+static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
+{
+	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+		      BITS_WITH_WMASK(1, 1, 15));
+	if (pd)
+		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+			      BITS_WITH_WMASK(1, 1, 14));
+	else
+		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+			      BITS_WITH_WMASK(0, 1, 14));
+}
+
+static __sramfunc void dpll_suspend(void)
+{
+	int i;
+
+	/* slow mode */
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));
+
+	/* save pll con */
+	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
+		sram_data.dpll_con_save[i] =
+				mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
+	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+		      BITS_WITH_WMASK(1, 1, 15));
+	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+		      BITS_WITH_WMASK(1, 1, 14));
+}
+
+static __sramfunc void dpll_resume(void)
+{
+	uint32_t delay = PLL_LOCKED_TIMEOUT;
+
+	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+		      BITS_WITH_WMASK(1, 1, 15));
+	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+		      BITS_WITH_WMASK(0, 1, 14));
+	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+		      sram_data.dpll_con_save[1] | 0xc0000000);
+
+	dsb();
+
+	while (delay > 0) {
+		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
+				 PLL_IS_LOCKED)
+			break;
+		delay--;
+	}
+	if (delay == 0)
+		while (1)
+			;
+
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
+		      PLL_NORM_MODE(DPLL_ID));
+}
+
+static inline void pll_suspend(uint32_t pll_id)
+{
+	int i;
+
+	/* slow mode */
+	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));
+
+	/* save pll con */
+	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
+		ddr_data.cru_plls_con_save[pll_id][i] =
+				mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));
+
+	/* powerdown pll */
+	pll_pwr_dwn(pll_id, pmu_pd_off);
+}
+
+static inline void pll_resume(uint32_t pll_id)
+{
+	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);
+
+	pm_pll_wait_lock(pll_id);
+
+	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
+		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
+			      PLL_NORM_MODE(pll_id));
+}
+
+static void pm_plls_suspend(void)
+{
+	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
+	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
+	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
+	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
+	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
+	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
+	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
+	pll_suspend(NPLL_ID);
+	pll_suspend(CPLL_ID);
+	pll_suspend(GPLL_ID);
+	pll_suspend(APLL_ID);
+
+	/* core */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
+		      BITS_WITH_WMASK(0, 0x1f, 0));
+
+	/* pclk_dbg */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
+		      BITS_WITH_WMASK(0, 0xf, 0));
+
+	/* crypto */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
+		      BITS_WITH_WMASK(0, 0x1f, 0));
+
+	/* pwm0 */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
+		      BITS_WITH_WMASK(0, 0x7f, 8));
+
+	/* uart2 from 24M */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
+		      BITS_WITH_WMASK(2, 0x3, 8));
+
+	/* clk_rtc32k */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
+		      BITS_WITH_WMASK(767, 0x3fff, 0) |
+		      BITS_WITH_WMASK(2, 0x3, 14));
+}
+
+static void pm_plls_resume(void)
+{
+	/* clk_rtc32k */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
+		      ddr_data.clk_sel38 |
+		      BITS_WMSK(0x3fff, 0) |
+		      BITS_WMSK(0x3, 14));
+
+	/* uart2 */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
+		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));
+
+	/* pwm0 */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
+		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));
+
+	/* crypto */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
+		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));
+
+	/* pclk_dbg */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
+		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));
+
+	/* core */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
+		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));
+
+	pll_pwr_dwn(APLL_ID, pmu_pd_on);
+	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
+	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
+	pll_pwr_dwn(NPLL_ID, pmu_pd_on);
+
+	pll_resume(APLL_ID);
+	pll_resume(GPLL_ID);
+	pll_resume(CPLL_ID);
+	pll_resume(NPLL_ID);
+}
+
+#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)
+
+static __sramfunc void sram_udelay(uint32_t us)
+{
+	uint64_t pct_orig, pct_now;
+	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;
+
+	isb();
+	pct_orig = read_cntpct_el0();
+
+	do {
+		isb();
+		pct_now = read_cntpct_el0();
+	} while ((pct_now - pct_orig) <= to_wait);
+}
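+/*
+ * Example of the tick arithmetic, assuming a 24 MHz system counter
+ * (SYS_COUNTER_FREQ_IN_TICKS == 24000000): ARCH_TIMER_TICKS_PER_US is 24,
+ * so sram_udelay(100) spins until CNTPCT_EL0 has advanced by at least
+ * 2400 ticks.
+ */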
+
+/*
+ * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
+ * If the PMIC is configured to enter sleep mode in response to the sleep
+ * pin, driving the pin high puts the PMIC to sleep.
+ */
+__sramfunc void rk3328_pmic_suspend(void)
+{
+	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
+	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
+	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
+	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
+	mmio_write_32(GPIO2_BASE + 4,
+		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
+	mmio_write_32(GPIO2_BASE,
+		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
+}
+
+__sramfunc void rk3328_pmic_resume(void)
+{
+	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
+	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
+	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
+		      sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
+	/* Restoring the voltage takes quite some time */
+	sram_udelay(100);
+}
+
+static __sramfunc void ddr_suspend(void)
+{
+	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
+						 DDR_PCTL2_PWRCTL);
+	sram_data.pd_sr_idle_save &= SELFREF_EN;
+
+	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
+	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
+					      DDRGRF_SOC_CON(0));
+	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));
+
+	/*
+	 * Override csysreq from ddrc and
+	 * send valid csysreq signal to PMU,
+	 * csysreq is controlled by ddrc only
+	 */
+
+	/* in self-refresh */
+	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
+	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
+	       (0x03 << 12)) !=  (0x02 << 12))
+		;
+	/* ddr retention */
+	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
+
+	/* ddr gating */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
+		      BITS_WITH_WMASK(0x7, 0x7, 4));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
+		      BITS_WITH_WMASK(1, 1, 4));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
+		      BITS_WITH_WMASK(0x3, 0x3, 0));
+
+	dpll_suspend();
+}
+
+__sramfunc void dmc_restore(void)
+{
+	dpll_resume();
+
+	/* ddr gating */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
+		      BITS_WITH_WMASK(0, 0x7, 4));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
+		      BITS_WITH_WMASK(0, 1, 4));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+		      BITS_WITH_WMASK(0, 0x1ff, 1));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
+		      BITS_WITH_WMASK(0, 0x3, 0));
+
+	/* ddr de_retention */
+	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
+	/* exit self-refresh */
+	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
+	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
+		(0x03 << 12)) !=  (0x00 << 12))
+		;
+
+	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
+	if (sram_data.pd_sr_idle_save)
+		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
+				SELFREF_EN);
+}
+
+static __sramfunc void sram_dbg_uart_suspend(void)
+{
+	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
+	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
+}
+
+__sramfunc void sram_dbg_uart_resume(void)
+{
+	/* restore uart clk and reset fifo */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
+	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
+	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
+}
+
+static __sramfunc void sram_soc_enter_lp(void)
+{
+	uint32_t apm_value;
+
+	apm_value = BIT(core_pm_en) |
+		    BIT(core_pm_dis_int) |
+		    BIT(core_pm_int_wakeup_en);
+	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);
+
+	dsb();
+	isb();
+err_loop:
+	wfi();
+	/*
+	 * The SoC will enter low power mode and
+	 * should not return here.
+	 */
+	goto err_loop;
+}
+
+__sramfunc void sram_suspend(void)
+{
+	/* disable mmu and icache */
+	tlbialle3();
+	disable_mmu_icache_el3();
+
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      ((uintptr_t)&pmu_cpuson_entrypoint >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+
+	/* ddr self-refresh and gating phy */
+	ddr_suspend();
+
+	rk3328_pmic_suspend();
+
+	sram_dbg_uart_suspend();
+
+	sram_soc_enter_lp();
+}
+
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+{
+	sram_suspend();
+
+	/* should never reach here */
+	psci_power_down_wfi();
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	clks_gating_suspend(clk_ungt_msk);
+
+	pm_plls_suspend();
+
+	return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	pm_plls_resume();
+
+	clks_gating_resume();
+
+	plat_rockchip_gic_cpuif_enable();
+
+	return 0;
+}
+
+void rockchip_plat_mmu_el3(void)
+{
+	/* TODO: support the el3 for rk3328 SoCs */
+}
+
+void plat_rockchip_pmu_init(void)
+{
+	uint32_t cpu;
+
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+		cpuson_flags[cpu] = 0;
+
+	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
+
+	/* the warm booting address of cpus */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+
+	nonboot_cpus_off();
+
+	INFO("%s: pd status 0x%x\n",
+	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
+}
diff --git a/plat/rockchip/rk3328/drivers/pmu/pmu.h b/plat/rockchip/rk3328/drivers/pmu/pmu.h
new file mode 100644
index 0000000..9d2819a
--- /dev/null
+++ b/plat/rockchip/rk3328/drivers/pmu/pmu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_H__
+#define __PMU_H__
+
+#include <soc.h>
+
+struct rk3328_sleep_ddr_data {
+	uint32_t pmu_debug_enable;
+	uint32_t debug_iomux_save;
+	uint32_t pmic_sleep_save;
+	uint32_t pmu_wakeup_conf0;
+	uint32_t pmu_pwrmd_com;
+	uint32_t cru_mode_save;
+	uint32_t clk_sel0, clk_sel1, clk_sel18,
+		 clk_sel20, clk_sel24, clk_sel38;
+	uint32_t clk_ungt_save[CRU_CLKGATE_NUMS];
+	uint32_t cru_plls_con_save[MAX_PLL][CRU_PLL_CON_NUMS];
+};
+
+struct rk3328_sleep_sram_data {
+	uint32_t pmic_sleep_save;
+	uint32_t pmic_sleep_gpio_save[2];
+	uint32_t ddr_grf_con0;
+	uint32_t dpll_con_save[CRU_PLL_CON_NUMS];
+	uint32_t pd_sr_idle_save;
+	uint32_t uart2_ier;
+};
+
+/*****************************************************************************
+ * The ways of controlling the cores' power domains
+ *****************************************************************************/
+enum cores_pm_ctr_mode {
+	core_pwr_pd = 0,
+	core_pwr_wfi = 1,
+	core_pwr_wfi_int = 2
+};
+
+enum pmu_cores_pm_by_wfi {
+	core_pm_en = 0,
+	core_pm_int_wakeup_en,
+	core_pm_dis_int,
+	core_pm_sft_wakeup_en
+};
+
+extern void *pmu_cpuson_entrypoint_start;
+extern void *pmu_cpuson_entrypoint_end;
+extern uint64_t cpuson_entry_point[PLATFORM_CORE_COUNT];
+extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
+
+#define CORES_PM_DISABLE	0x0
+
+/*****************************************************************************
+ * pmu control registers
+ *****************************************************************************/
+#define PMU_WAKEUP_CFG0		0x00
+#define PMU_PWRDN_CON		0x0c
+#define PMU_PWRDN_ST		0x10
+#define PMU_PWRMD_COM		0x18
+#define PMU_SFT_CON		0x1c
+#define PMU_INT_CON		0x20
+#define PMU_INT_ST		0x24
+#define PMU_POWER_ST		0x44
+#define PMU_CPUAPM_CON(n)	(0x80 + (n) * 4)
+#define PMU_SYS_REG(n)		(0xa0 + (n) * 4)
+
+#define CHECK_CPU_WFIE_BASE		(GRF_BASE + GRF_CPU_STATUS(1))
+
+enum pmu_core_pwrst_shift {
+	clst_cpu_wfe = 0,
+	clst_cpu_wfi = 4,
+};
+
+#define clstl_cpu_wfe (clst_cpu_wfe)
+#define clstb_cpu_wfe (clst_cpu_wfe)
+
+enum pmu_pd_id {
+	PD_CPU0 = 0,
+	PD_CPU1,
+	PD_CPU2,
+	PD_CPU3,
+};
+
+enum pmu_power_mode_common {
+	pmu_mode_en = 0,
+	sref_enter_en,
+	global_int_disable_cfg,
+	cpu0_pd_en,
+	wait_wakeup_begin_cfg = 4,
+	l2_flush_en,
+	l2_idle_en,
+	ddrio_ret_de_req,
+	ddrio_ret_en = 8,
+};
+
+enum pmu_sft_con {
+	upctl_c_sysreq_cfg = 0,
+	l2flushreq_req,
+	ddr_io_ret_cfg,
+	pmu_sft_ret_cfg,
+};
+
+#define CKECK_WFE_MSK		0x1
+#define CKECK_WFI_MSK		0x10
+#define CKECK_WFEI_MSK		0x11
+
+#define PD_CTR_LOOP		500
+#define CHK_CPU_LOOP		500
+#define MAX_WAIT_CONUT		1000
+
+#define WAKEUP_INT_CLUSTER_EN	0x1
+#define PMIC_SLEEP_REG		0x34
+
+#define PLL_IS_NORM_MODE(mode, pll_id)	\
+		((mode & (PLL_NORM_MODE(pll_id)) & 0xffff) != 0)
+
+#define CTLR_ENABLE_G1_BIT	BIT(1)
+#define UART_FIFO_EMPTY		BIT(6)
+
+#define UART_IER		0x04
+#define UART_FCR		0x08
+#define UART_LSR		0x14
+
+#define UART_INT_DISABLE	0x00
+#define UART_FIFO_RESET		0x07
+
+#endif /* __PMU_H__ */
diff --git a/plat/rockchip/rk3328/drivers/soc/soc.c b/plat/rockchip/rk3328/drivers/soc/soc.c
new file mode 100644
index 0000000..ce344d6
--- /dev/null
+++ b/plat/rockchip/rk3328/drivers/soc/soc.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <ddr_parameter.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <rk3328_def.h>
+#include <soc.h>
+
+/* Table of regions to map using the MMU. */
+const mmap_region_t plat_rk_mmap[] = {
+	MAP_REGION_FLAT(UART2_BASE, UART2_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GPIO0_BASE, GPIO0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GPIO1_BASE, GPIO1_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GPIO2_BASE, GPIO2_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GPIO3_BASE, GPIO3_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(FIREWALL_DDR_BASE, FIREWALL_DDR_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(FIREWALL_CFG_BASE, FIREWALL_CFG_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SHARE_MEM_BASE, SHARE_MEM_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_GRF_BASE, DDR_GRF_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_UPCTL_BASE, DDR_UPCTL_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PWM_BASE, PWM_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_PARAM_BASE, DDR_PARAM_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(EFUSE8_BASE, EFUSE8_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(EFUSE32_BASE, EFUSE32_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVER_MSCH_BASE, SERVER_MSCH_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_MONITOR_BASE, DDR_MONITOR_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(VOP_BASE, VOP_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+
+	{ 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	PLATFORM_SYSTEM_COUNT,
+	/* No of children for the root node */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+};
+
+void secure_timer_init(void)
+{
+	mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT0, 0xffffffff);
+	mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT1, 0xffffffff);
+	/* auto reload & enable the timer */
+	mmio_write_32(STIMER_CHN_BASE(1) + TIMER_CONTROL_REG, TIMER_EN);
+}
+
+void sgrf_init(void)
+{
+	uint32_t i, val;
+	struct param_ddr_usage usg;
+
+	/* general secure regions */
+	usg = ddr_region_usage_parse(DDR_PARAM_BASE,
+				     PLAT_MAX_DDR_CAPACITY_MB);
+	for (i = 0; i < usg.s_nr; i++) {
+		/* enable secure */
+		val = mmio_read_32(FIREWALL_DDR_BASE +
+			      FIREWALL_DDR_FW_DDR_CON_REG);
+		val |= BIT(7 - i);
+		mmio_write_32(FIREWALL_DDR_BASE +
+			      FIREWALL_DDR_FW_DDR_CON_REG, val);
+		/* map top and base */
+		mmio_write_32(FIREWALL_DDR_BASE +
+			      FIREWALL_DDR_FW_DDR_RGN(7 - i),
+			      RG_MAP_SECURE(usg.s_top[i], usg.s_base[i]));
+	}
+
+	/* set ddr rgn0_top and rgn0_base to 0 */
+	mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_FW_DDR_RGN(0), 0x0);
+
+	/* set all slave ip to non-secure, except stimer */
+	mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(0),
+		      SGRF_SLV_S_ALL_NS);
+	mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(1),
+		      SGRF_SLV_S_ALL_NS);
+	mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(2),
+		      SGRF_SLV_S_ALL_NS | STIMER_S);
+	mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(3),
+		      SGRF_SLV_S_ALL_NS);
+
+	/* set all master ip to non-secure */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), 0xf0000000);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), SGRF_MST_S_ALL_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(4), SGRF_MST_S_ALL_NS);
+
+	/* set DMAC to non-secure */
+	mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(3), DMA_IRQ_BOOT_NS);
+	mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(4), DMA_PERI_CH_NS_15_0);
+	mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_PERI_CH_NS_19_16);
+	mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_MANAGER_BOOT_NS);
+
+	/* soft reset dma before use */
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_REQ);
+	udelay(5);
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_RLS);
+}
+
+void plat_rockchip_soc_init(void)
+{
+	secure_timer_init();
+	sgrf_init();
+
+	NOTICE("BL31: Rockchip release version: v%d.%d\n",
+	       MAJOR_VERSION, MINOR_VERSION);
+}
diff --git a/plat/rockchip/rk3328/drivers/soc/soc.h b/plat/rockchip/rk3328/drivers/soc/soc.h
new file mode 100644
index 0000000..2c04ae2
--- /dev/null
+++ b/plat/rockchip/rk3328/drivers/soc/soc.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+/******************************* stimer ***************************************/
+#define TIMER_LOADE_COUNT0	0x00
+#define TIMER_LOADE_COUNT1	0x04
+#define TIMER_CURRENT_VALUE0	0x08
+#define TIMER_CURRENT_VALUE1	0x0C
+#define TIMER_CONTROL_REG	0x10
+#define TIMER_INTSTATUS		0x18
+#define TIMER_EN		0x1
+
+extern const unsigned char rockchip_power_domain_tree_desc[];
+
+/**************************** read/write **************************************/
+#ifndef BITS_WMSK
+#define BITS_WMSK(msk, shift)	((msk) << ((shift) + REG_MSK_SHIFT))
+#endif
+
+/**************************** cru *********************************************/
+enum plls_id {
+	APLL_ID = 0,
+	DPLL_ID,
+	CPLL_ID,
+	GPLL_ID,
+	REVERVE,
+	NPLL_ID,
+	MAX_PLL,
+};
+
+#define CRU_CRU_MODE		0x0080
+#define CRU_CRU_MISC		0x0084
+#define CRU_GLB_SRST_FST	0x009c
+#define CRU_GLB_SRST_FST_VALUE	0xfdb9
+#define PLL_CONS(id, i)		(0x020 * (id) + ((i) * 4))
+#define CRU_CLKSEL_CON(i)	(0x100 + ((i) * 4))
+#define CRU_CLKSEL_NUMS		53
+#define CRU_CLKGATE_CON(i)	(0x200 + ((i) * 4))
+#define CRU_CLKGATE_NUMS	29
+#define CRU_SOFTRSTS_CON(n)	(0x300 + ((n) * 4))
+#define CRU_SOFTRSTS_NUMS	12
+#define CRU_PLL_CON_NUMS	5
+
+/* PLLn_CON1 */
+#define PLL_IS_LOCKED		BIT(10)
+/* PLLn_CON0 */
+#define PLL_BYPASS		BITS_WITH_WMASK(1, 0x1, 15)
+#define PLL_NO_BYPASS		BITS_WITH_WMASK(0, 0x1, 15)
+/* CRU_MODE */
+#define PLL_SLOW_MODE(id)	(((id) == NPLL_ID) ?		\
+				BITS_WITH_WMASK(0, 0x1, 1) :	\
+				BITS_WITH_WMASK(0, 0x1, ((id) * 4)))
+#define PLL_NORM_MODE(id)	(((id) == NPLL_ID) ?		\
+				BITS_WITH_WMASK(1, 0x1, 1) :	\
+				BITS_WITH_WMASK(1, 0x1, ((id) * 4)))
+
+#define CRU_GATEID_CONS(ID)	(0x200 + ((ID) / 16) * 4)
+#define CRU_CONS_GATEID(i)	(16 * (i))
+#define GATE_ID(reg, bit)	(((reg) * 16) + (bit))
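+/*
+ * For illustration: GATE_ID(7, 4) == 116, and CRU_GATEID_CONS(116) ==
+ * 0x200 + (116 / 16) * 4 == 0x21c, which is the same offset as
+ * CRU_CLKGATE_CON(7), so a gate id encodes both its register and bit.
+ */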
+
+#define PLL_LOCKED_TIMEOUT 600000U
+
+#define STIMER_CHN_BASE(n)	(STIME_BASE + 0x20 * (n))
+/************************** config regs ***************************************/
+#define FIREWALL_CFG_FW_SYS_CON(n)	(0x000 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_RGN(n)	(0x000 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_MST(n)	(0x020 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_CON_REG	(0x040)
+#define GRF_SOC_CON(n)			(0x400 + (n) * 4)
+#define GRF_SOC_STATUS(n)		(0x480 + (n) * 4)
+#define GRF_CPU_STATUS(n)		(0x520 + (n) * 4)
+#define GRF_OS_REG(n)			(0x5c8 + (n) * 4)
+#define DDRGRF_SOC_CON(n)		(0x000 + (n) * 4)
+#define DDRGRF_SOC_STATUS(n)		(0x100 + (n) * 4)
+#define SGRF_SOC_CON(n)			(0x000 + (n) * 4)
+#define SGRF_DMAC_CON(n)		(0x100 + (n) * 4)
+#define SGRF_HDCP_KEY_CON(n)		(0x280 + (n) * 4)
+
+#define DDR_PCTL2_PWRCTL		0x30
+/************************** regs func *****************************************/
+#define STIMER_S			BIT(23)
+#define SGRF_SLV_S_ALL_NS		0x0
+#define SGRF_MST_S_ALL_NS		0xffffffff
+#define DMA_IRQ_BOOT_NS			0xffffffff
+#define DMA_MANAGER_BOOT_NS		0x80008000
+#define DMA_PERI_CH_NS_15_0		0xffffffff
+#define DMA_PERI_CH_NS_19_16		0x000f000f
+#define DMA_SOFTRST_REQ			0x01000100
+#define DMA_SOFTRST_RLS			0x01000000
+
+#define SELFREF_EN			BIT(0)
+/************************** cpu ***********************************************/
+#define CPU_BOOT_ADDR_WMASK		0xffff0000
+#define CPU_BOOT_ADDR_ALIGN		16
+
+/************************** ddr secure region *********************************/
+#define PLAT_MAX_DDR_CAPACITY_MB	4096
+#define RG_MAP_SECURE(top, base)	((((top) - 1) << 16) | (base))
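+/*
+ * Encoding example (illustrative; MiB granularity is an assumption based on
+ * PLAT_MAX_DDR_CAPACITY_MB above): RG_MAP_SECURE(2, 0) evaluates to
+ * 0x00010000, i.e. "top - 1" in the upper half-word and the base in the
+ * lower half-word.
+ */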
+
+/************************** gpio2_d2 ******************************************/
+#define SWPORTA_DR		0x00
+#define SWPORTA_DDR		0x04
+#define GPIO2_D2		BIT(26)
+#define GPIO2_D2_GPIO_MODE	0x30
+#define GRF_GPIO2D_IOMUX	0x34
+
+#endif /* __SOC_H__ */
diff --git a/plat/rockchip/rk3328/include/plat.ld.S b/plat/rockchip/rk3328/include/plat.ld.S
new file mode 100644
index 0000000..b3559b2
--- /dev/null
+++ b/plat/rockchip/rk3328/include/plat.ld.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ROCKCHIP_PLAT_LD_S__
+#define __ROCKCHIP_PLAT_LD_S__
+
+MEMORY {
+    PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE
+}
+
+SECTIONS
+{
+	. = PMUSRAM_BASE;
+
+	/*
+	 * pmu_cpuson_entrypoint must be 64KB-aligned when
+	 * resuming, so place it at the start of pmusram.
+	 */
+	.text_pmusram : {
+		ASSERT(. == ALIGN(64 * 1024),
+			".pmusram.entry requires 64K alignment.");
+		*(.pmusram.entry)
+		__bl31_pmusram_text_start = .;
+		*(.pmusram.text)
+		*(.pmusram.rodata)
+		__bl31_pmusram_text_end = .;
+		__bl31_pmusram_data_start = .;
+		*(.pmusram.data)
+		__bl31_pmusram_data_end = .;
+
+	} >PMUSRAM
+}
+
+#endif /* __ROCKCHIP_PLAT_LD_S__ */
diff --git a/plat/rockchip/rk3328/include/platform_def.h b/plat/rockchip/rk3328/include/platform_def.h
new file mode 100644
index 0000000..39d3c21
--- /dev/null
+++ b/plat/rockchip/rk3328/include/platform_def.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <rk3328_def.h>
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL1)
+#define PLATFORM_STACK_SIZE 0x440
+#elif defined(IMAGE_BL2)
+#define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL32)
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_MAX_AFFLVL		MPIDR_AFFLVL2
+#define PLATFORM_SYSTEM_COUNT		1
+#define PLATFORM_CLUSTER_COUNT		1
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	0
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT +	\
+					 PLATFORM_CLUSTER0_CORE_COUNT)
+
+#define PLATFORM_NUM_AFFS		(PLATFORM_SYSTEM_COUNT +	\
+					 PLATFORM_CLUSTER_COUNT +	\
+					 PLATFORM_CORE_COUNT)
+
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+
+#define PLAT_RK_CLST_TO_CPUID_SHIFT	6
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE		1
+
+/*
+ * This macro defines the deepest power down state possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE		2
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* TF text, ro, rw, Size: 512KB */
+#define TZRAM_BASE		(0x0)
+#define TZRAM_SIZE		(0x80000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted RAM
+ */
+#define BL31_BASE		(TZRAM_BASE + 0x10000)
+#define BL31_LIMIT		(TZRAM_BASE + TZRAM_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE		(1ull << 32)
+#define MAX_XLAT_TABLES		9
+#define MAX_MMAP_REGIONS	33
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT	6
+#define CACHE_WRITEBACK_GRANULE	(1 << CACHE_WRITEBACK_SHIFT)
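+/*
+ * For example, with CACHE_WRITEBACK_SHIFT = 6 the granule is 1 << 6 = 64
+ * bytes, so a mailbox declared as
+ *	uint64_t mbox __aligned(CACHE_WRITEBACK_GRANULE);
+ * can never share a cache line with a neighbouring mailbox.
+ */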
+
+/*
+ * Define the GICD, GICC and GICR base addresses
+ */
+#define PLAT_RK_GICD_BASE	RK3328_GICD_BASE
+#define PLAT_RK_GICC_BASE	RK3328_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_RK_G1S_IRQS	RK_G1S_IRQS
+
+#define PLAT_RK_UART_BASE	RK3328_UART2_BASE
+#define PLAT_RK_UART_CLOCK	RK3328_UART_CLOCK
+#define PLAT_RK_UART_BAUDRATE	RK3328_BAUDRATE
+
+#define PLAT_RK_PRIMARY_CPU	0x0
+
+#define PSRAM_DO_DDR_RESUME	0
+#define PSRAM_CHECK_WAKEUP_CPU	0
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/rockchip/rk3328/platform.mk b/plat/rockchip/rk3328/platform.mk
new file mode 100644
index 0000000..8863fb4
--- /dev/null
+++ b/plat/rockchip/rk3328/platform.mk
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RK_PLAT			:=	plat/rockchip
+RK_PLAT_SOC		:=	${RK_PLAT}/${PLAT}
+RK_PLAT_COMMON		:=	${RK_PLAT}/common
+
+PLAT_INCLUDES		:=	-Idrivers/arm/gic/common/			\
+				-Idrivers/arm/gic/v2/			\
+				-Iinclude/plat/common/				\
+				-I${RK_PLAT_COMMON}/                            \
+				-I${RK_PLAT_COMMON}/include/                    \
+				-I${RK_PLAT_COMMON}/pmusram                     \
+				-I${RK_PLAT_COMMON}/drivers/pmu/                \
+				-I${RK_PLAT_COMMON}/drivers/parameter/		\
+				-I${RK_PLAT_SOC}/				\
+				-I${RK_PLAT_SOC}/drivers/pmu/			\
+				-I${RK_PLAT_SOC}/drivers/soc/			\
+				-I${RK_PLAT_SOC}/include/
+
+RK_GIC_SOURCES		:=	drivers/arm/gic/common/gic_common.c		\
+				drivers/arm/gic/v2/gicv2_main.c			\
+				drivers/arm/gic/v2/gicv2_helpers.c		\
+				plat/common/plat_gicv2.c			\
+				${RK_PLAT}/common/rockchip_gicv2.c
+
+PLAT_BL_COMMON_SOURCES	:=	lib/aarch64/xlat_tables.c			\
+				plat/common/aarch64/plat_psci_common.c
+
+BL31_SOURCES		+=	${RK_GIC_SOURCES}				\
+				drivers/arm/cci/cci.c				\
+				drivers/console/console.S			\
+				drivers/ti/uart/16550_console.S			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				lib/cpus/aarch64/aem_generic.S			\
+				lib/cpus/aarch64/cortex_a53.S			\
+				${RK_PLAT_COMMON}/drivers/parameter/ddr_parameter.c	\
+				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
+				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
+				${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S	\
+				${RK_PLAT_COMMON}/plat_pm.c			\
+				${RK_PLAT_COMMON}/plat_topology.c		\
+				${RK_PLAT_COMMON}/aarch64/platform_common.c	\
+				${RK_PLAT_SOC}/drivers/pmu/pmu.c		\
+				${RK_PLAT_SOC}/drivers/soc/soc.c
+
+ENABLE_PLAT_COMPAT	:=	0
+
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+$(eval $(call add_define,PLAT_SKIP_OPTEE_S_EL1_INT_REGISTER))
diff --git a/plat/rockchip/rk3328/rk3328_def.h b/plat/rockchip/rk3328/rk3328_def.h
new file mode 100644
index 0000000..062c9cc
--- /dev/null
+++ b/plat/rockchip/rk3328/rk3328_def.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_DEF_H__
+#define __PLAT_DEF_H__
+
+#define MAJOR_VERSION		(1)
+#define MINOR_VERSION		(2)
+
+#define SIZE_K(n)		((n) * 1024)
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define RK_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+#define UART2_BASE		0xff130000
+#define UART2_SIZE		SIZE_K(64)
+
+#define PMU_BASE		0xff140000
+#define PMU_SIZE		SIZE_K(64)
+
+#define SGRF_BASE		0xff0d0000
+#define SGRF_SIZE		SIZE_K(64)
+
+#define CRU_BASE		0xff440000
+#define CRU_SIZE		SIZE_K(64)
+
+#define GRF_BASE		0xff100000
+#define GRF_SIZE		SIZE_K(64)
+
+#define GPIO0_BASE		0xff210000
+#define GPIO0_SIZE		SIZE_K(32)
+
+#define GPIO1_BASE		0xff220000
+#define GPIO1_SIZE		SIZE_K(32)
+
+#define GPIO2_BASE		0xff230000
+#define GPIO2_SIZE		SIZE_K(64)
+
+#define GPIO3_BASE		0xff240000
+#define GPIO3_SIZE		SIZE_K(64)
+
+#define STIME_BASE		0xff1d0000
+#define STIME_SIZE		SIZE_K(64)
+
+#define INTMEM_BASE		0xff090000
+#define INTMEM_SIZE		SIZE_K(32)
+
+#define SRAM_LDS_BASE		(INTMEM_BASE + SIZE_K(4))
+#define SRAM_LDS_SIZE		(INTMEM_SIZE - SIZE_K(4))
+
+#define PMUSRAM_BASE		INTMEM_BASE
+#define PMUSRAM_SIZE		SIZE_K(4)
+#define PMUSRAM_RSIZE		SIZE_K(4)
+
+#define VOP_BASE		0xff370000
+#define VOP_SIZE		SIZE_K(16)
+
+#define DDR_PHY_BASE		0xff400000
+#define DDR_PHY_SIZE		SIZE_K(4)
+
+#define SERVER_MSCH_BASE	0xff720000
+#define SERVER_MSCH_SIZE	SIZE_K(4)
+
+#define DDR_UPCTL_BASE		0xff780000
+#define DDR_UPCTL_SIZE		SIZE_K(12)
+
+#define DDR_MONITOR_BASE	0xff790000
+#define DDR_MONITOR_SIZE	SIZE_K(4)
+
+#define FIREWALL_DDR_BASE	0xff7c0000
+#define FIREWALL_DDR_SIZE	SIZE_K(64)
+
+#define FIREWALL_CFG_BASE	0xff7d0000
+#define FIREWALL_CFG_SIZE	SIZE_K(64)
+
+#define GIC400_BASE		0xff810000
+#define GIC400_SIZE		SIZE_K(64)
+
+#define DDR_GRF_BASE		0xff798000
+#define DDR_GRF_SIZE		SIZE_K(16)
+
+#define PWM_BASE		0xff1b0000
+#define PWM_SIZE		SIZE_K(64)
+
+#define DDR_PARAM_BASE		0x02000000
+#define DDR_PARAM_SIZE		SIZE_K(4)
+
+#define EFUSE8_BASE		0xff260000
+#define EFUSE8_SIZE		SIZE_K(4)
+
+#define EFUSE32_BASE		0xff0b0000
+#define EFUSE32_SIZE		SIZE_K(4)
+
+/**************************************************************************
+ * UART related constants
+ **************************************************************************/
+#define RK3328_UART2_BASE	UART2_BASE
+#define RK3328_BAUDRATE	1500000
+#define RK3328_UART_CLOCK	24000000
+
+/******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	24000000U
+#define SYS_COUNTER_FREQ_IN_MHZ		24
+
+/******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base rk_platform compatible GIC memory map */
+#define RK3328_GICD_BASE		(GIC400_BASE + 0x1000)
+#define RK3328_GICC_BASE		(GIC400_BASE + 0x2000)
+#define RK3328_GICR_BASE		0	/* no GICR in GIC-400 */
+
+/******************************************************************************
+ * sgi, ppi
+ ******************************************************************************/
+#define RK_IRQ_SEC_PHY_TIMER	29
+
+#define RK_IRQ_SEC_SGI_0	8
+#define RK_IRQ_SEC_SGI_1	9
+#define RK_IRQ_SEC_SGI_2	10
+#define RK_IRQ_SEC_SGI_3	11
+#define RK_IRQ_SEC_SGI_4	12
+#define RK_IRQ_SEC_SGI_5	13
+#define RK_IRQ_SEC_SGI_6	14
+#define RK_IRQ_SEC_SGI_7	15
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define RK_G1S_IRQS		RK_IRQ_SEC_PHY_TIMER, RK_IRQ_SEC_SGI_6
+
+#define SHARE_MEM_BASE          0x100000	/* [1MB, 1MB+60K] */
+#define SHARE_MEM_PAGE_NUM      15
+#define SHARE_MEM_SIZE          SIZE_K(SHARE_MEM_PAGE_NUM * 4)
+
+#endif /* __PLAT_DEF_H__ */
diff --git a/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c
new file mode 100644
index 0000000..1c33763
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <ddr_rk3368.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <pmu.h>
+#include <rk3368_def.h>
+#include <soc.h>
+#include <stdint.h>
+#include <string.h>
+
+/* GRF_SOC_STATUS0 */
+#define DPLL_LOCK		(0x1 << 2)
+
+/* GRF_DDRC0_CON0 */
+#define GRF_DDR_16BIT_EN	(((0x1 << 3) << 16) | (0x1 << 3))
+#define GRF_DDR_32BIT_EN	(((0x1 << 3) << 16) | (0x0 << 3))
+#define GRF_MOBILE_DDR_EN	(((0x1 << 4) << 16) | (0x1 << 4))
+#define GRF_MOBILE_DDR_DISB	(((0x1 << 4) << 16) | (0x0 << 4))
+#define GRF_DDR3_EN		(((0x1 << 2) << 16) | (0x1 << 2))
+#define GRF_LPDDR2_3_EN		(((0x1 << 2) << 16) | (0x0 << 2))
+
+/* PMUGRF_SOC_CON0 */
+#define ddrphy_bufferen_io_en(n)	((0x1 << (9 + 16)) | (n << 9))
+#define ddrphy_bufferen_core_en(n)	((0x1 << (8 + 16)) | (n << 8))
+
+struct PCTRL_TIMING_TAG {
+	uint32_t ddrfreq;
+	uint32_t TOGCNT1U;
+	uint32_t TINIT;
+	uint32_t TRSTH;
+	uint32_t TOGCNT100N;
+	uint32_t TREFI;
+	uint32_t TMRD;
+	uint32_t TRFC;
+	uint32_t TRP;
+	uint32_t TRTW;
+	uint32_t TAL;
+	uint32_t TCL;
+	uint32_t TCWL;
+	uint32_t TRAS;
+	uint32_t TRC;
+	uint32_t TRCD;
+	uint32_t TRRD;
+	uint32_t TRTP;
+	uint32_t TWR;
+	uint32_t TWTR;
+	uint32_t TEXSR;
+	uint32_t TXP;
+	uint32_t TXPDLL;
+	uint32_t TZQCS;
+	uint32_t TZQCSI;
+	uint32_t TDQS;
+	uint32_t TCKSRE;
+	uint32_t TCKSRX;
+	uint32_t TCKE;
+	uint32_t TMOD;
+	uint32_t TRSTL;
+	uint32_t TZQCL;
+	uint32_t TMRR;
+	uint32_t TCKESR;
+	uint32_t TDPD;
+	uint32_t TREFI_MEM_DDR3;
+};
+
+struct MSCH_SAVE_REG_TAG {
+	uint32_t ddrconf;
+	uint32_t ddrtiming;
+	uint32_t ddrmode;
+	uint32_t readlatency;
+	uint32_t activate;
+	uint32_t devtodev;
+};
+
+/* registers that must be saved/restored across ddr suspend */
+struct PCTL_SAVE_REG_TAG {
+	uint32_t SCFG;
+	uint32_t CMDTSTATEN;
+	uint32_t MCFG1;
+	uint32_t MCFG;
+	uint32_t PPCFG;
+	struct PCTRL_TIMING_TAG pctl_timing;
+	/* DFI Control Registers */
+	uint32_t DFITCTRLDELAY;
+	uint32_t DFIODTCFG;
+	uint32_t DFIODTCFG1;
+	uint32_t DFIODTRANKMAP;
+	/* DFI Write Data Registers */
+	uint32_t DFITPHYWRDATA;
+	uint32_t DFITPHYWRLAT;
+	uint32_t DFITPHYWRDATALAT;
+	/* DFI Read Data Registers */
+	uint32_t DFITRDDATAEN;
+	uint32_t DFITPHYRDLAT;
+	/* DFI Update Registers */
+	uint32_t DFITPHYUPDTYPE0;
+	uint32_t DFITPHYUPDTYPE1;
+	uint32_t DFITPHYUPDTYPE2;
+	uint32_t DFITPHYUPDTYPE3;
+	uint32_t DFITCTRLUPDMIN;
+	uint32_t DFITCTRLUPDMAX;
+	uint32_t DFITCTRLUPDDLY;
+	uint32_t DFIUPDCFG;
+	uint32_t DFITREFMSKI;
+	uint32_t DFITCTRLUPDI;
+	/* DFI Status Registers */
+	uint32_t DFISTCFG0;
+	uint32_t DFISTCFG1;
+	uint32_t DFITDRAMCLKEN;
+	uint32_t DFITDRAMCLKDIS;
+	uint32_t DFISTCFG2;
+	/* DFI Low Power Register */
+	uint32_t DFILPCFG0;
+};
+
+struct DDRPHY_SAVE_REG_TAG {
+	uint32_t PHY_REG0;
+	uint32_t PHY_REG1;
+	uint32_t PHY_REGB;
+	uint32_t PHY_REGC;
+	uint32_t PHY_REG11;
+	uint32_t PHY_REG13;
+	uint32_t PHY_REG14;
+	uint32_t PHY_REG16;
+	uint32_t PHY_REG20;
+	uint32_t PHY_REG21;
+	uint32_t PHY_REG26;
+	uint32_t PHY_REG27;
+	uint32_t PHY_REG28;
+	uint32_t PHY_REG30;
+	uint32_t PHY_REG31;
+	uint32_t PHY_REG36;
+	uint32_t PHY_REG37;
+	uint32_t PHY_REG38;
+	uint32_t PHY_REG40;
+	uint32_t PHY_REG41;
+	uint32_t PHY_REG46;
+	uint32_t PHY_REG47;
+	uint32_t PHY_REG48;
+	uint32_t PHY_REG50;
+	uint32_t PHY_REG51;
+	uint32_t PHY_REG56;
+	uint32_t PHY_REG57;
+	uint32_t PHY_REG58;
+	uint32_t PHY_REGDLL;
+	uint32_t PHY_REGEC;
+	uint32_t PHY_REGED;
+	uint32_t PHY_REGEE;
+	uint32_t PHY_REGEF;
+	uint32_t PHY_REGFB;
+	uint32_t PHY_REGFC;
+	uint32_t PHY_REGFD;
+	uint32_t PHY_REGFE;
+};
+
+struct BACKUP_REG_TAG {
+	uint32_t tag;
+	uint32_t pctladdr;
+	struct PCTL_SAVE_REG_TAG pctl;
+	uint32_t phyaddr;
+	struct DDRPHY_SAVE_REG_TAG phy;
+	uint32_t nocaddr;
+	struct MSCH_SAVE_REG_TAG noc;
+	uint32_t pllselect;
+	uint32_t phypllockaddr;
+	uint32_t phyplllockmask;
+	uint32_t phyplllockval;
+	uint32_t pllpdstat;
+	uint32_t dpllmodeaddr;
+	uint32_t dpllslowmode;
+	uint32_t dpllnormalmode;
+	uint32_t dpllresetaddr;
+	uint32_t dpllreset;
+	uint32_t dplldereset;
+	uint32_t dpllconaddr;
+	uint32_t dpllcon[4];
+	uint32_t dplllockaddr;
+	uint32_t dplllockmask;
+	uint32_t dplllockval;
+	uint32_t ddrpllsrcdivaddr;
+	uint32_t ddrpllsrcdiv;
+	uint32_t retendisaddr;
+	uint32_t retendisval;
+	uint32_t grfregaddr;
+	uint32_t grfddrcreg;
+	uint32_t crupctlphysoftrstaddr;
+	uint32_t cruresetpctlphy;
+	uint32_t cruderesetphy;
+	uint32_t cruderesetpctlphy;
+	uint32_t physoftrstaddr;
+	uint32_t endtag;
+};
+
+static uint32_t ddr_get_phy_pll_freq(void)
+{
+	uint32_t ret = 0;
+	uint32_t fb_div, pre_div;
+
+	fb_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
+	fb_div |= (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED) & 0x1) << 8;
+
+	pre_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) & 0xff;
+	ret = 2 * 24 * fb_div / (4 * pre_div);
+
+	return ret;
+}
+
+static void ddr_copy(uint32_t *pdest, uint32_t *psrc, uint32_t words)
+{
+	uint32_t i;
+
+	for (i = 0; i < words; i++)
+		pdest[i] = psrc[i];
+}
+
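+/*
+ * Derive the DPLL NR/NF/NO settings that reproduce the frequency currently
+ * programmed in the ddr phy pll; the formula below assumes a 24MHz
+ * reference clock.
+ */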
+static void ddr_get_dpll_cfg(uint32_t *p)
+{
+	uint32_t nmhz, NO, NF, NR;
+
+	nmhz = ddr_get_phy_pll_freq();
+	if (nmhz <= 150)
+		NO = 6;
+	else if (nmhz <= 250)
+		NO = 4;
+	else if (nmhz <= 500)
+		NO = 2;
+	else
+		NO = 1;
+
+	NR = 1;
+	NF = 2 * nmhz * NR * NO / 24;
+
+	p[0] = SET_NR(NR) | SET_NO(NO);
+	p[1] = SET_NF(NF);
+	p[2] = SET_NB(NF / 2);
+}
+
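+/*
+ * Snapshot the ddr pctl, phy and msch(noc) registers, together with the
+ * dpll and soft-reset settings, into the backup area at base_addr so the
+ * resume blob can reprogram them after a system suspend.
+ */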
+void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr)
+{
+	struct BACKUP_REG_TAG *p_ddr_reg = (struct BACKUP_REG_TAG *)base_addr;
+	struct PCTL_SAVE_REG_TAG *pctl_tim = &p_ddr_reg->pctl;
+
+	p_ddr_reg->tag = 0x56313031;
+	p_ddr_reg->pctladdr = DDR_PCTL_BASE;
+	p_ddr_reg->phyaddr = DDR_PHY_BASE;
+	p_ddr_reg->nocaddr = SERVICE_BUS_BASE;
+
+	/* PCTLR */
+	ddr_copy((uint32_t *)&pctl_tim->pctl_timing.TOGCNT1U,
+		 (uint32_t *)(DDR_PCTL_BASE + DDR_PCTL_TOGCNT1U), 35);
+	pctl_tim->pctl_timing.TREFI |= DDR_UPD_REF_ENABLE;
+	pctl_tim->SCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_SCFG);
+	pctl_tim->CMDTSTATEN = mmio_read_32(DDR_PCTL_BASE +
+					    DDR_PCTL_CMDTSTATEN);
+	pctl_tim->MCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG1);
+	pctl_tim->MCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG);
+	pctl_tim->PPCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_PPCFG);
+	pctl_tim->pctl_timing.ddrfreq = mmio_read_32(DDR_PCTL_BASE +
+						     DDR_PCTL_TOGCNT1U * 2);
+	pctl_tim->DFITCTRLDELAY = mmio_read_32(DDR_PCTL_BASE +
+					       DDR_PCTL_DFITCTRLDELAY);
+	pctl_tim->DFIODTCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIODTCFG);
+	pctl_tim->DFIODTCFG1 = mmio_read_32(DDR_PCTL_BASE +
+					    DDR_PCTL_DFIODTCFG1);
+	pctl_tim->DFIODTRANKMAP = mmio_read_32(DDR_PCTL_BASE +
+					       DDR_PCTL_DFIODTRANKMAP);
+	pctl_tim->DFITPHYWRDATA = mmio_read_32(DDR_PCTL_BASE +
+					       DDR_PCTL_DFITPHYWRDATA);
+	pctl_tim->DFITPHYWRLAT = mmio_read_32(DDR_PCTL_BASE +
+					      DDR_PCTL_DFITPHYWRLAT);
+	pctl_tim->DFITPHYWRDATALAT = mmio_read_32(DDR_PCTL_BASE +
+						  DDR_PCTL_DFITPHYWRDATALAT);
+	pctl_tim->DFITRDDATAEN = mmio_read_32(DDR_PCTL_BASE +
+					      DDR_PCTL_DFITRDDATAEN);
+	pctl_tim->DFITPHYRDLAT = mmio_read_32(DDR_PCTL_BASE +
+					      DDR_PCTL_DFITPHYRDLAT);
+	pctl_tim->DFITPHYUPDTYPE0 = mmio_read_32(DDR_PCTL_BASE +
+						 DDR_PCTL_DFITPHYUPDTYPE0);
+	pctl_tim->DFITPHYUPDTYPE1 = mmio_read_32(DDR_PCTL_BASE +
+						 DDR_PCTL_DFITPHYUPDTYPE1);
+	pctl_tim->DFITPHYUPDTYPE2 = mmio_read_32(DDR_PCTL_BASE +
+						 DDR_PCTL_DFITPHYUPDTYPE2);
+	pctl_tim->DFITPHYUPDTYPE3 = mmio_read_32(DDR_PCTL_BASE +
+						 DDR_PCTL_DFITPHYUPDTYPE3);
+	pctl_tim->DFITCTRLUPDMIN = mmio_read_32(DDR_PCTL_BASE +
+						DDR_PCTL_DFITCTRLUPDMIN);
+	pctl_tim->DFITCTRLUPDMAX = mmio_read_32(DDR_PCTL_BASE +
+						DDR_PCTL_DFITCTRLUPDMAX);
+	pctl_tim->DFITCTRLUPDDLY = mmio_read_32(DDR_PCTL_BASE +
+						DDR_PCTL_DFITCTRLUPDDLY);
+
+	pctl_tim->DFIUPDCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIUPDCFG);
+	pctl_tim->DFITREFMSKI = mmio_read_32(DDR_PCTL_BASE +
+					     DDR_PCTL_DFITREFMSKI);
+	pctl_tim->DFITCTRLUPDI = mmio_read_32(DDR_PCTL_BASE +
+					      DDR_PCTL_DFITCTRLUPDI);
+	pctl_tim->DFISTCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG0);
+	pctl_tim->DFISTCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG1);
+	pctl_tim->DFITDRAMCLKEN = mmio_read_32(DDR_PCTL_BASE +
+					       DDR_PCTL_DFITDRAMCLKEN);
+	pctl_tim->DFITDRAMCLKDIS = mmio_read_32(DDR_PCTL_BASE +
+						DDR_PCTL_DFITDRAMCLKDIS);
+	pctl_tim->DFISTCFG2 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG2);
+	pctl_tim->DFILPCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFILPCFG0);
+
+	/* PHY */
+	p_ddr_reg->phy.PHY_REG0 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG0);
+	p_ddr_reg->phy.PHY_REG1 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG1);
+	p_ddr_reg->phy.PHY_REGB = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGB);
+	p_ddr_reg->phy.PHY_REGC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGC);
+	p_ddr_reg->phy.PHY_REG11 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG11);
+	p_ddr_reg->phy.PHY_REG13 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG13);
+	p_ddr_reg->phy.PHY_REG14 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG14);
+	p_ddr_reg->phy.PHY_REG16 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG16);
+	p_ddr_reg->phy.PHY_REG20 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG20);
+	p_ddr_reg->phy.PHY_REG21 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG21);
+	p_ddr_reg->phy.PHY_REG26 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG26);
+	p_ddr_reg->phy.PHY_REG27 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG27);
+	p_ddr_reg->phy.PHY_REG28 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG28);
+	p_ddr_reg->phy.PHY_REG30 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG30);
+	p_ddr_reg->phy.PHY_REG31 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG31);
+	p_ddr_reg->phy.PHY_REG36 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG36);
+	p_ddr_reg->phy.PHY_REG37 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG37);
+	p_ddr_reg->phy.PHY_REG38 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG38);
+	p_ddr_reg->phy.PHY_REG40 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG40);
+	p_ddr_reg->phy.PHY_REG41 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG41);
+	p_ddr_reg->phy.PHY_REG46 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG46);
+	p_ddr_reg->phy.PHY_REG47 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG47);
+	p_ddr_reg->phy.PHY_REG48 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG48);
+	p_ddr_reg->phy.PHY_REG50 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG50);
+	p_ddr_reg->phy.PHY_REG51 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG51);
+	p_ddr_reg->phy.PHY_REG56 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG56);
+	p_ddr_reg->phy.PHY_REG57 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG57);
+	p_ddr_reg->phy.PHY_REG58 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG58);
+	p_ddr_reg->phy.PHY_REGDLL = mmio_read_32(DDR_PHY_BASE +
+						 DDR_PHY_REGDLL);
+	p_ddr_reg->phy.PHY_REGEC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
+	p_ddr_reg->phy.PHY_REGED = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED);
+	p_ddr_reg->phy.PHY_REGEE = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE);
+	p_ddr_reg->phy.PHY_REGEF = 0;
+
+	if (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG2) & 0x2) {
+		p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REG2C);
+		p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REG3C);
+		p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REG4C);
+		p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REG5C);
+	} else {
+		p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REGFB);
+		p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REGFC);
+		p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REGFD);
+		p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
+							DDR_PHY_REGFE);
+	}
+
+	/* NOC */
+	p_ddr_reg->noc.ddrconf = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRCONF);
+	p_ddr_reg->noc.ddrtiming = mmio_read_32(SERVICE_BUS_BASE +
+						MSCH_DDRTIMING);
+	p_ddr_reg->noc.ddrmode = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRMODE);
+	p_ddr_reg->noc.readlatency = mmio_read_32(SERVICE_BUS_BASE +
+						  MSCH_READLATENCY);
+	p_ddr_reg->noc.activate = mmio_read_32(SERVICE_BUS_BASE +
+					       MSCH_ACTIVATE);
+	p_ddr_reg->noc.devtodev = mmio_read_32(SERVICE_BUS_BASE +
+					       MSCH_DEVTODEV);
+
+	p_ddr_reg->pllselect = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) * 0x1;
+	p_ddr_reg->phypllockaddr = GRF_BASE + GRF_SOC_STATUS0;
+	p_ddr_reg->phyplllockmask = GRF_DDRPHY_LOCK;
+	p_ddr_reg->phyplllockval = 0;
+
+	/* PLLPD */
+	p_ddr_reg->pllpdstat = pllpdstat;
+	/* DPLL */
+	p_ddr_reg->dpllmodeaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
+	/* slow mode and power on */
+	p_ddr_reg->dpllslowmode = DPLL_WORK_SLOW_MODE | DPLL_POWER_DOWN;
+	p_ddr_reg->dpllnormalmode = DPLL_WORK_NORMAL_MODE;
+	p_ddr_reg->dpllresetaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
+	p_ddr_reg->dpllreset = DPLL_RESET_CONTROL_NORMAL;
+	p_ddr_reg->dplldereset = DPLL_RESET_CONTROL_RESET;
+	p_ddr_reg->dpllconaddr = CRU_BASE + PLL_CONS(DPLL_ID, 0);
+
+	if (p_ddr_reg->pllselect == 0) {
+		p_ddr_reg->dpllcon[0] = (mmio_read_32(CRU_BASE +
+						      PLL_CONS(DPLL_ID, 0))
+							& 0xffff) |
+					(0xFFFF << 16);
+		p_ddr_reg->dpllcon[1] = (mmio_read_32(CRU_BASE +
+						      PLL_CONS(DPLL_ID, 1))
+							& 0xffff);
+		p_ddr_reg->dpllcon[2] = (mmio_read_32(CRU_BASE +
+						      PLL_CONS(DPLL_ID, 2))
+							& 0xffff);
+		p_ddr_reg->dpllcon[3] = (mmio_read_32(CRU_BASE +
+						      PLL_CONS(DPLL_ID, 3))
+							& 0xffff) |
+					(0xFFFF << 16);
+	} else {
+		ddr_get_dpll_cfg(&p_ddr_reg->dpllcon[0]);
+	}
+
+	p_ddr_reg->pllselect = 0;
+	p_ddr_reg->dplllockaddr = CRU_BASE + PLL_CONS(DPLL_ID, 1);
+	p_ddr_reg->dplllockmask = DPLL_STATUS_LOCK;
+	p_ddr_reg->dplllockval = DPLL_STATUS_LOCK;
+
+	/* SET_DDR_PLL_SRC */
+	p_ddr_reg->ddrpllsrcdivaddr = CRU_BASE + CRU_CLKSELS_CON(13);
+	p_ddr_reg->ddrpllsrcdiv = (mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(13))
+					& DDR_PLL_SRC_MASK)
+					| (DDR_PLL_SRC_MASK << 16);
+	p_ddr_reg->retendisaddr = PMU_BASE + PMU_PWRMD_COM;
+	p_ddr_reg->retendisval = PD_PERI_PWRDN_ENABLE;
+	p_ddr_reg->grfregaddr = GRF_BASE + GRF_DDRC0_CON0;
+	p_ddr_reg->grfddrcreg = (mmio_read_32(GRF_BASE + GRF_DDRC0_CON0) &
+					      DDR_PLL_SRC_MASK) |
+				 (DDR_PLL_SRC_MASK << 16);
+
+	/* pctl phy soft reset */
+	p_ddr_reg->crupctlphysoftrstaddr = CRU_BASE + CRU_SOFTRSTS_CON(10);
+	p_ddr_reg->cruresetpctlphy = DDRCTRL0_PSRSTN_REQ(1) |
+				     DDRCTRL0_SRSTN_REQ(1) |
+				     DDRPHY0_PSRSTN_REQ(1) |
+				     DDRPHY0_SRSTN_REQ(1);
+	p_ddr_reg->cruderesetphy = DDRCTRL0_PSRSTN_REQ(1) |
+				   DDRCTRL0_SRSTN_REQ(1) |
+				   DDRPHY0_PSRSTN_REQ(0) |
+				   DDRPHY0_SRSTN_REQ(0);
+
+	p_ddr_reg->cruderesetpctlphy = DDRCTRL0_PSRSTN_REQ(0) |
+				       DDRCTRL0_SRSTN_REQ(0) |
+				       DDRPHY0_PSRSTN_REQ(0) |
+				       DDRPHY0_SRSTN_REQ(0);
+
+	p_ddr_reg->physoftrstaddr = DDR_PHY_BASE + DDR_PHY_REG0;
+
+	p_ddr_reg->endtag = 0xFFFFFFFF;
+}
+
+/*
+ * "rk3368_ddr_reg_resume_V1.05.bin" is an executable binary, generated with
+ * ARM DS-5, that resumes the ddr controller. When the soc wakes up from
+ * system suspend, the ddr must be resumed and the resume code has to run
+ * from sram. Since there is no way to place this code in PMUSRAM when
+ * linking the bl31 .o files, we include "rk3368_ddr_reg_resume_V1.05.bin",
+ * whose code is position-independent and can therefore be loaded anywhere
+ * and run.
+ */
+static __aligned(4) unsigned int ddr_reg_resume[] = {
+	#include "rk3368_ddr_reg_resume_V1.05.bin"
+};
+
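+/*
+ * Expected usage (a sketch; the exact suspend wiring is assumed): copy
+ * ddr_get_resume_code_size() bytes from ddr_get_resume_code_base() into
+ * on-chip sram, reserve ddr_get_resume_data_size() bytes for the register
+ * backup and fill them via ddr_reg_save() before entering suspend.
+ */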
+uint32_t ddr_get_resume_code_size(void)
+{
+	return sizeof(ddr_reg_resume);
+}
+
+uint32_t ddr_get_resume_data_size(void)
+{
+	return sizeof(struct BACKUP_REG_TAG);
+}
+
+uint32_t *ddr_get_resume_code_base(void)
+{
+	return (unsigned int *)ddr_reg_resume;
+}
diff --git a/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h
new file mode 100644
index 0000000..057d961
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DDR_RK3368_H__
+#define __DDR_RK3368_H__
+
+#define DDR_PCTL_SCFG		0x0
+#define DDR_PCTL_SCTL		0x4
+#define DDR_PCTL_STAT		0x8
+#define DDR_PCTL_INTRSTAT	0xc
+
+#define DDR_PCTL_MCMD		0x40
+#define DDR_PCTL_POWCTL		0x44
+#define DDR_PCTL_POWSTAT	0x48
+#define DDR_PCTL_CMDTSTAT	0x4c
+#define DDR_PCTL_CMDTSTATEN	0x50
+#define DDR_PCTL_MRRCFG0	0x60
+#define DDR_PCTL_MRRSTAT0	0x64
+#define DDR_PCTL_MRRSTAT1	0x68
+#define DDR_PCTL_MCFG1		0x7c
+#define DDR_PCTL_MCFG		0x80
+#define DDR_PCTL_PPCFG		0x84
+#define DDR_PCTL_MSTAT		0x88
+#define DDR_PCTL_LPDDR2ZQCFG	0x8c
+#define DDR_PCTL_DTUPDES		0x94
+#define DDR_PCTL_DTUNA			0x98
+#define DDR_PCTL_DTUNE			0x9c
+#define DDR_PCTL_DTUPRD0		0xa0
+#define DDR_PCTL_DTUPRD1		0xa4
+#define DDR_PCTL_DTUPRD2		0xa8
+#define DDR_PCTL_DTUPRD3		0xac
+#define DDR_PCTL_DTUAWDT		0xb0
+#define DDR_PCTL_TOGCNT1U		0xc0
+#define DDR_PCTL_TINIT			0xc4
+#define DDR_PCTL_TRSTH			0xc8
+#define DDR_PCTL_TOGCNT100N		0xcc
+#define DDR_PCTL_TREFI			0xd0
+#define DDR_PCTL_TMRD			0xd4
+#define DDR_PCTL_TRFC			0xd8
+#define DDR_PCTL_TRP			0xdc
+#define DDR_PCTL_TRTW			0xe0
+#define DDR_PCTL_TAL			0xe4
+#define DDR_PCTL_TCL			0xe8
+#define DDR_PCTL_TCWL			0xec
+#define DDR_PCTL_TRAS			0xf0
+#define DDR_PCTL_TRC			0xf4
+#define DDR_PCTL_TRCD			0xf8
+#define DDR_PCTL_TRRD			0xfc
+#define DDR_PCTL_TRTP			0x100
+#define DDR_PCTL_TWR			0x104
+#define DDR_PCTL_TWTR			0x108
+#define DDR_PCTL_TEXSR			0x10c
+#define DDR_PCTL_TXP			0x110
+#define DDR_PCTL_TXPDLL			0x114
+#define DDR_PCTL_TZQCS			0x118
+#define DDR_PCTL_TZQCSI			0x11c
+#define DDR_PCTL_TDQS			0x120
+#define DDR_PCTL_TCKSRE			0x124
+#define DDR_PCTL_TCKSRX			0x128
+#define DDR_PCTL_TCKE			0x12c
+#define DDR_PCTL_TMOD			0x130
+#define DDR_PCTL_TRSTL			0x134
+#define DDR_PCTL_TZQCL			0x138
+#define DDR_PCTL_TMRR			0x13c
+#define DDR_PCTL_TCKESR			0x140
+#define DDR_PCTL_TDPD			0x144
+#define DDR_PCTL_TREFI_MEM_DDR3	0x148
+#define DDR_PCTL_ECCCFG			0x180
+#define DDR_PCTL_ECCTST			0x184
+#define DDR_PCTL_ECCCLR			0x188
+#define DDR_PCTL_ECCLOG			0x18c
+#define DDR_PCTL_DTUWACTL		0x200
+#define DDR_PCTL_DTURACTL		0x204
+#define DDR_PCTL_DTUCFG			0x208
+#define DDR_PCTL_DTUECTL		0x20c
+#define DDR_PCTL_DTUWD0			0x210
+#define DDR_PCTL_DTUWD1			0x214
+#define DDR_PCTL_DTUWD2			0x218
+#define DDR_PCTL_DTUWD3			0x21c
+#define DDR_PCTL_DTUWDM			0x220
+#define DDR_PCTL_DTURD0			0x224
+#define DDR_PCTL_DTURD1			0x228
+#define DDR_PCTL_DTURD2			0x22c
+#define DDR_PCTL_DTURD3			0x230
+#define DDR_PCTL_DTULFSRWD		0x234
+#define DDR_PCTL_DTULFSRRD		0x238
+#define DDR_PCTL_DTUEAF			0x23c
+#define DDR_PCTL_DFITCTRLDELAY	0x240
+#define DDR_PCTL_DFIODTCFG		0x244
+#define DDR_PCTL_DFIODTCFG1		0x248
+#define DDR_PCTL_DFIODTRANKMAP		0x24c
+#define DDR_PCTL_DFITPHYWRDATA		0x250
+#define DDR_PCTL_DFITPHYWRLAT		0x254
+#define DDR_PCTL_DFITPHYWRDATALAT	0x258
+#define DDR_PCTL_DFITRDDATAEN		0x260
+#define DDR_PCTL_DFITPHYRDLAT		0x264
+#define DDR_PCTL_DFITPHYUPDTYPE0	0x270
+#define DDR_PCTL_DFITPHYUPDTYPE1	0x274
+#define DDR_PCTL_DFITPHYUPDTYPE2	0x278
+#define DDR_PCTL_DFITPHYUPDTYPE3	0x27c
+#define DDR_PCTL_DFITCTRLUPDMIN		0x280
+#define DDR_PCTL_DFITCTRLUPDMAX		0x284
+#define DDR_PCTL_DFITCTRLUPDDLY		0x288
+#define DDR_PCTL_DFIUPDCFG			0x290
+#define DDR_PCTL_DFITREFMSKI		0x294
+#define DDR_PCTL_DFITCTRLUPDI		0x298
+#define DDR_PCTL_DFITRCFG0			0x2ac
+#define DDR_PCTL_DFITRSTAT0			0x2b0
+#define DDR_PCTL_DFITRWRLVLEN		0x2b4
+#define DDR_PCTL_DFITRRDLVLEN		0x2b8
+#define DDR_PCTL_DFITRRDLVLGATEEN	0x2bc
+#define DDR_PCTL_DFISTSTAT0			0x2c0
+#define DDR_PCTL_DFISTCFG0			0x2c4
+#define DDR_PCTL_DFISTCFG1			0x2c8
+#define DDR_PCTL_DFITDRAMCLKEN		0x2d0
+#define DDR_PCTL_DFITDRAMCLKDIS		0x2d4
+#define DDR_PCTL_DFISTCFG2			0x2d8
+#define DDR_PCTL_DFISTPARCLR		0x2dc
+#define DDR_PCTL_DFISTPARLOG		0x2e0
+#define DDR_PCTL_DFILPCFG0			0x2f0
+#define DDR_PCTL_DFITRWRLVLRESP0	0x300
+#define DDR_PCTL_DFITRWRLVLRESP1	0x304
+#define DDR_PCTL_DFITRWRLVLRESP2	0x308
+#define DDR_PCTL_DFITRRDLVLRESP0	0x30c
+#define DDR_PCTL_DFITRRDLVLRESP1	0x310
+#define DDR_PCTL_DFITRRDLVLRESP2	0x314
+#define DDR_PCTL_DFITRWRLVLDELAY0	0x318
+#define DDR_PCTL_DFITRWRLVLDELAY1	0x31c
+#define DDR_PCTL_DFITRWRLVLDELAY2	0x320
+#define DDR_PCTL_DFITRRDLVLDELAY0	0x324
+#define DDR_PCTL_DFITRRDLVLDELAY1	0x328
+#define DDR_PCTL_DFITRRDLVLDELAY2	0x32c
+#define DDR_PCTL_DFITRRDLVLGATEDELAY0	0x330
+#define DDR_PCTL_DFITRRDLVLGATEDELAY1	0x334
+#define DDR_PCTL_DFITRRDLVLGATEDELAY2	0x338
+#define DDR_PCTL_DFITRCMD			0x33c
+#define DDR_PCTL_IPVR				0x3f8
+#define DDR_PCTL_IPTR				0x3fc
+
+/* DDR PHY REG */
+#define DDR_PHY_REG0		0x0
+#define DDR_PHY_REG1		0x4
+#define DDR_PHY_REG2		0x8
+#define DDR_PHY_REG3		0xc
+#define DDR_PHY_REG4		0x10
+#define DDR_PHY_REG5		0x14
+#define DDR_PHY_REG6		0x18
+#define DDR_PHY_REGB		0x2c
+#define DDR_PHY_REGC		0x30
+#define DDR_PHY_REG11		0x44
+#define DDR_PHY_REG12		0x48
+#define DDR_PHY_REG13		0x4c
+#define DDR_PHY_REG14		0x50
+#define DDR_PHY_REG16		0x58
+#define DDR_PHY_REG20		0x80
+#define DDR_PHY_REG21		0x84
+#define DDR_PHY_REG26		0x98
+#define DDR_PHY_REG27		0x9c
+#define DDR_PHY_REG28		0xa0
+#define DDR_PHY_REG2C		0xb0
+#define DDR_PHY_REG30		0xc0
+#define DDR_PHY_REG31		0xc4
+#define DDR_PHY_REG36		0xd8
+#define DDR_PHY_REG37		0xdc
+#define DDR_PHY_REG38		0xe0
+#define DDR_PHY_REG3C		0xf0
+#define DDR_PHY_REG40		0x100
+#define DDR_PHY_REG41		0x104
+#define DDR_PHY_REG46		0x118
+#define DDR_PHY_REG47		0x11c
+#define DDR_PHY_REG48		0x120
+#define DDR_PHY_REG4C		0x130
+#define DDR_PHY_REG50		0x140
+#define DDR_PHY_REG51		0x144
+#define DDR_PHY_REG56		0x158
+#define DDR_PHY_REG57		0x15c
+#define DDR_PHY_REG58		0x160
+#define DDR_PHY_REG5C		0x170
+#define DDR_PHY_REGDLL		0x290
+#define DDR_PHY_REGEC		0x3b0
+#define DDR_PHY_REGED		0x3b4
+#define DDR_PHY_REGEE		0x3b8
+#define DDR_PHY_REGEF		0x3bc
+#define DDR_PHY_REGF0		0x3c0
+#define DDR_PHY_REGF1		0x3c4
+#define DDR_PHY_REGF2		0x3c8
+#define DDR_PHY_REGFA		0x3e8
+#define DDR_PHY_REGFB		0x3ec
+#define DDR_PHY_REGFC		0x3f0
+#define DDR_PHY_REGFD		0x3f4
+#define DDR_PHY_REGFE		0x3f8
+#define DDR_PHY_REGFF		0x3fc
+
+/* MSCH REG define */
+#define MSCH_COREID			0x0
+#define MSCH_DDRCONF		0x8
+#define MSCH_DDRTIMING		0xc
+#define MSCH_DDRMODE		0x10
+#define MSCH_READLATENCY	0x14
+#define MSCH_ACTIVATE		0x38
+#define MSCH_DEVTODEV		0x3c
+
+#define SET_NR(n)      ((0x3f << (8 + 16)) | ((n - 1) << 8))
+#define SET_NO(n)      ((0xf << (0 + 16)) | ((n - 1) << 0))
+#define SET_NF(n)      ((n - 1) & 0x1fff)
+#define SET_NB(n)      ((n - 1) & 0xfff)
+#define PLLMODE(n)     ((0x3 << (8 + 16)) | (n << 8))
+
+/* GRF REG define */
+#define GRF_SOC_STATUS0		0x480
+#define GRF_DDRPHY_LOCK		(0x1 << 15)
+#define GRF_DDRC0_CON0		0x600
+
+/* CRU softreset ddr pctl, phy */
+#define DDRMSCH0_SRSTN_REQ(n)  (((0x1 << 10) << 16) | (n << 10))
+#define DDRCTRL0_PSRSTN_REQ(n) (((0x1 << 3) << 16) | (n << 3))
+#define DDRCTRL0_SRSTN_REQ(n)  (((0x1 << 2) << 16) | (n << 2))
+#define DDRPHY0_PSRSTN_REQ(n)  (((0x1 << 1) << 16) | (n << 1))
+#define DDRPHY0_SRSTN_REQ(n)   (((0x1 << 0) << 16) | (n << 0))
+
+/* CRU_DPLL_CON2 */
+#define DPLL_STATUS_LOCK		(1 << 31)
+
+/* CRU_DPLL_CON3 */
+#define DPLL_POWER_DOWN			((0x1 << (1 + 16)) | (0 << 1))
+#define DPLL_WORK_NORMAL_MODE		((0x3 << (8 + 16)) | (0 << 8))
+#define DPLL_WORK_SLOW_MODE		((0x3 << (8 + 16)) | (1 << 8))
+#define DPLL_RESET_CONTROL_NORMAL	((0x1 << (5 + 16)) | (0x0 << 5))
+#define DPLL_RESET_CONTROL_RESET	((0x1 << (5 + 16)) | (0x1 << 5))
+
+/* PMU_PWRDN_CON */
+#define PD_PERI_PWRDN_ENABLE		(1 << 13)
+
+#define DDR_PLL_SRC_MASK		0x13
+
+/* DDR_PCTL_TREFI */
+#define DDR_UPD_REF_ENABLE		(0x1 << 31)
+
+uint32_t ddr_get_resume_code_size(void);
+uint32_t ddr_get_resume_data_size(void);
+uint32_t *ddr_get_resume_code_base(void);
+void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr);
+
+#endif
diff --git a/plat/rockchip/rk3368/drivers/ddr/rk3368_ddr_reg_resume_V1.05.bin b/plat/rockchip/rk3368/drivers/ddr/rk3368_ddr_reg_resume_V1.05.bin
new file mode 100644
index 0000000..cecd694
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/ddr/rk3368_ddr_reg_resume_V1.05.bin
@@ -0,0 +1,461 @@
+	0x14000088,
+	0xd10043ff,
+	0x5283ffe1,
+	0x52824902,
+	0x1b020400,
+	0x530d7c00,
+	0xb9000fe0,
+	0xb9400fe0,
+	0x340000a0,
+	0xb9400fe0,
+	0x51000401,
+	0xb9000fe1,
+	0x35ffffa0,
+	0x910043ff,
+	0xd65f03c0,
+	0x340000e2,
+	0xb9400023,
+	0xb9000003,
+	0x91001021,
+	0x91001000,
+	0x51000442,
+	0x35ffff62,
+	0xd65f03c0,
+	0xd10043ff,
+	0xb9400801,
+	0x12000821,
+	0xb9000fe1,
+	0xb9400fe1,
+	0x7100043f,
+	0x54000320,
+	0x52800021,
+	0x52800082,
+	0xb9400fe3,
+	0x34000143,
+	0x71000c7f,
+	0x54000100,
+	0x7100147f,
+	0x54000161,
+	0xb9000402,
+	0xb9400803,
+	0x12000863,
+	0x71000c7f,
+	0x54ffffa1,
+	0xb9000401,
+	0xb9400803,
+	0x12000863,
+	0x7100047f,
+	0x54ffffa1,
+	0xb9400803,
+	0x12000863,
+	0xb9000fe3,
+	0xb9400fe3,
+	0x7100047f,
+	0x54fffd61,
+	0x910043ff,
+	0xd65f03c0,
+	0xd10043ff,
+	0xb9400801,
+	0x12000821,
+	0xb9000fe1,
+	0xb9400fe1,
+	0x7100143f,
+	0x54000400,
+	0x52800021,
+	0x52800042,
+	0x52800063,
+	0xb9400fe4,
+	0x340000c4,
+	0x7100049f,
+	0x54000120,
+	0x71000c9f,
+	0x54000180,
+	0x14000010,
+	0xb9000401,
+	0xb9400804,
+	0x12000884,
+	0x7100049f,
+	0x54ffffa1,
+	0xb9000402,
+	0xb9400804,
+	0x12000884,
+	0x71000c9f,
+	0x54ffffa1,
+	0xb9000403,
+	0xb9400804,
+	0x12000884,
+	0x7100149f,
+	0x54ffffa1,
+	0xb9400804,
+	0x12000884,
+	0xb9000fe4,
+	0xb9400fe4,
+	0x7100149f,
+	0x54fffca1,
+	0x910043ff,
+	0xd65f03c0,
+	0xd10043ff,
+	0xb9400801,
+	0x12000821,
+	0xb9000fe1,
+	0xb9400fe1,
+	0x71000c3f,
+	0x54000400,
+	0x52800021,
+	0x52800042,
+	0x52800083,
+	0xb9400fe4,
+	0x34000164,
+	0x7100049f,
+	0x540001c0,
+	0x7100149f,
+	0x54000221,
+	0xb9000403,
+	0xb9400804,
+	0x12000884,
+	0x71000c9f,
+	0x54ffffa1,
+	0x1400000b,
+	0xb9000401,
+	0xb9400804,
+	0x12000884,
+	0x7100049f,
+	0x54ffffa1,
+	0xb9000402,
+	0xb9400804,
+	0x12000884,
+	0x71000c9f,
+	0x54ffffa1,
+	0xb9400804,
+	0x12000884,
+	0xb9000fe4,
+	0xb9400fe4,
+	0x71000c9f,
+	0x54fffca1,
+	0x910043ff,
+	0xd65f03c0,
+	0xd10103ff,
+	0xa9037bfd,
+	0x9100c3fd,
+	0xa9025ff6,
+	0xa90157f4,
+	0xf90007f3,
+	0xaa0003f3,
+	0xb9400674,
+	0xb9411276,
+	0xb941c660,
+	0xb941aa75,
+	0x7100041f,
+	0x54000261,
+	0xb9418e60,
+	0x321f0000,
+	0xb903b6c0,
+	0xb9418a60,
+	0xb903b2c0,
+	0xb9419260,
+	0xb903bac0,
+	0xb9418e60,
+	0x121e7800,
+	0xb903b6c0,
+	0xb941ca60,
+	0xb941ce61,
+	0xb941d262,
+	0xb9400003,
+	0xa030023,
+	0x6b22407f,
+	0x54ffffa0,
+	0x1400003b,
+	0xb941d660,
+	0x7100041f,
+	0x54000701,
+	0xb941da60,
+	0x3100041f,
+	0x54000080,
+	0xb941de61,
+	0x53007c00,
+	0xb9000001,
+	0xb941e660,
+	0x3100041f,
+	0x54000080,
+	0xb941ea61,
+	0x53007c00,
+	0xb9000001,
+	0xb941f260,
+	0x3100041f,
+	0x54000120,
+	0xaa1f03e1,
+	0x53007c00,
+	0x9107d262,
+	0xb8616843,
+	0xb8216803,
+	0x91001021,
+	0xf100203f,
+	0x54ffff81,
+	0x52800020,
+	0x97ffff3f,
+	0xb941e660,
+	0x3100041f,
+	0x54000080,
+	0xb941ee61,
+	0x53007c00,
+	0xb9000001,
+	0x52800020,
+	0x97ffff37,
+	0xb9420660,
+	0x3100041f,
+	0x54000100,
+	0xb9420a61,
+	0xb9420e62,
+	0x53007c00,
+	0xb9400003,
+	0xa030023,
+	0x6b22407f,
+	0x54ffffa1,
+	0xb9421260,
+	0x3100041f,
+	0x54000080,
+	0xb9421661,
+	0x53007c00,
+	0xb9000001,
+	0xb941da60,
+	0x3100041f,
+	0x54000080,
+	0xb941e261,
+	0x53007c00,
+	0xb9000001,
+	0xb9419660,
+	0xb903bec0,
+	0xb9422a60,
+	0x34000400,
+	0xb9422e61,
+	0x53007c17,
+	0xb90002e1,
+	0x52800140,
+	0x97ffff18,
+	0xb9423260,
+	0xb90002e0,
+	0x52800140,
+	0x97ffff14,
+	0xb9423660,
+	0xb90002e0,
+	0x52800140,
+	0x97ffff10,
+	0xb9423a60,
+	0x34000220,
+	0x53007c17,
+	0xb94002e0,
+	0x121c7400,
+	0xb90002e0,
+	0x52800020,
+	0x97ffff08,
+	0xb94002e0,
+	0x321e0000,
+	0xb90002e0,
+	0x528000a0,
+	0x97ffff03,
+	0xb94002e0,
+	0x321d0000,
+	0xb90002e0,
+	0x52800020,
+	0x97fffefe,
+	0xb9412a60,
+	0xb9004ec0,
+	0xb9412e60,
+	0xb90052c0,
+	0xb9413e60,
+	0xb9009ac0,
+	0xb9414260,
+	0xb9009ec0,
+	0xb9415260,
+	0xb900dac0,
+	0xb9415660,
+	0xb900dec0,
+	0xb9416660,
+	0xb9011ac0,
+	0xb9416a60,
+	0xb9011ec0,
+	0xb9417a60,
+	0xb9015ac0,
+	0xb9417e60,
+	0xb9015ec0,
+	0xb9418660,
+	0xb90292c0,
+	0xb9414660,
+	0xb900a2c0,
+	0xb9415a60,
+	0xb900e2c0,
+	0xb9416e60,
+	0xb90122c0,
+	0xb9418260,
+	0xb90162c0,
+	0xb9411660,
+	0xb90002c0,
+	0xb9411a60,
+	0xb90006c0,
+	0xb9411e60,
+	0xb9002ec0,
+	0xb9412260,
+	0xb90032c0,
+	0xb9412660,
+	0xb90046c0,
+	0xb9413260,
+	0xb9005ac0,
+	0xb9413660,
+	0xb90082c0,
+	0xb9413a60,
+	0xb90086c0,
+	0xb9414a60,
+	0xb900c2c0,
+	0xb9414e60,
+	0xb900c6c0,
+	0xb9415e60,
+	0xb90102c0,
+	0xb9416260,
+	0xb90106c0,
+	0xb9417260,
+	0xb90142c0,
+	0xb9417660,
+	0xb90146c0,
+	0x52800040,
+	0xb9000ac0,
+	0xb9411261,
+	0xb9419a60,
+	0xb900b020,
+	0xb9419a60,
+	0xb900b420,
+	0xb9419e60,
+	0xb900f020,
+	0xb9419e60,
+	0xb900f420,
+	0xb941a260,
+	0xb9013020,
+	0xb941a260,
+	0xb9013420,
+	0xb941a660,
+	0xb9017020,
+	0xb941a662,
+	0xaa1f03e0,
+	0xb9017422,
+	0x91008261,
+	0xb8606822,
+	0x8b000283,
+	0xb900c062,
+	0x91001000,
+	0xf102301f,
+	0x54ffff61,
+	0xb9400a60,
+	0xb9000280,
+	0xb9400e60,
+	0xb9005280,
+	0xb9401260,
+	0xb9007e80,
+	0xb9401660,
+	0xb9008280,
+	0xb9401a60,
+	0xb9008680,
+	0xb940ae60,
+	0xb9024280,
+	0xb940b260,
+	0xb9024680,
+	0xb940b660,
+	0xb9024a80,
+	0xb940ba60,
+	0xb9024e80,
+	0xb940be60,
+	0xb9025280,
+	0xb940c260,
+	0xb9025680,
+	0xb940c660,
+	0xb9025a80,
+	0xb940ca60,
+	0xb9026280,
+	0xb940ce60,
+	0xb9026680,
+	0xb940d260,
+	0xb9027280,
+	0xb940d660,
+	0xb9027680,
+	0xb940da60,
+	0xb9027a80,
+	0xb940de60,
+	0xb9027e80,
+	0xb940e260,
+	0xb9028280,
+	0xb940e660,
+	0xb9028680,
+	0xb940ea60,
+	0xb9028a80,
+	0xb940ee60,
+	0xb9029280,
+	0xb940f260,
+	0xb9029680,
+	0xb940f660,
+	0xb9029a80,
+	0xb940fa60,
+	0xb902c680,
+	0xb940fe60,
+	0xb902ca80,
+	0xb9410260,
+	0xb902d280,
+	0xb9410660,
+	0xb902d680,
+	0xb9410a60,
+	0xb902da80,
+	0xb9410e60,
+	0xb902f280,
+	0xb9422260,
+	0x3100041f,
+	0x540000c0,
+	0xb9422661,
+	0x53007c00,
+	0xb9000001,
+	0x52800020,
+	0x97fffe65,
+	0x52800020,
+	0xb9004680,
+	0xb9404a80,
+	0x3607ffe0,
+	0xb941ae60,
+	0xb9000aa0,
+	0xb941b260,
+	0xb9000ea0,
+	0xb941b660,
+	0xb90012a0,
+	0xb941ba60,
+	0xb90016a0,
+	0xb941be60,
+	0xb9003aa0,
+	0xb941c260,
+	0xb9003ea0,
+	0xb9422260,
+	0x3100041f,
+	0x54000080,
+	0x53007c00,
+	0x320083e1,
+	0xb9000001,
+	0xaa1403e0,
+	0x97fffe84,
+	0xb9421a60,
+	0x3100041f,
+	0x54000100,
+	0x53007c00,
+	0xb9421e61,
+	0xb9400002,
+	0x2a010041,
+	0xb9000001,
+	0x52800020,
+	0x97fffe43,
+	0xaa1403e0,
+	0x97fffea0,
+	0xb9422260,
+	0x3100041f,
+	0x54000080,
+	0x53007c00,
+	0x52a00021,
+	0xb9000001,
+	0xf94007f3,
+	0xa94157f4,
+	0xa9425ff6,
+	0xa9437bfd,
+	0x910103ff,
+	0xd65f03c0,
diff --git a/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S
new file mode 100644
index 0000000..399f61c
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+.macro	func_rockchip_clst_warmboot
+	/* Nothing to do for rk3368 */
+.endm
+
+.macro rockchip_clst_warmboot_data
+	/* Nothing to do for rk3368 */
+.endm
diff --git a/plat/rockchip/rk3368/drivers/pmu/pmu.c b/plat/rockchip/rk3368/drivers/pmu/pmu.c
new file mode 100644
index 0000000..cb323e6
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/pmu/pmu.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <ddr_rk3368.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+#include <pmu_com.h>
+#include <rk3368_def.h>
+#include <soc.h>
+
+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
+static uint32_t cpu_warm_boot_addr;
+
+void rk3368_flash_l2_b(void)
+{
+	uint32_t wait_cnt = 0;
+
+	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
+	dsb();
+
+	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
+		& BIT(clst_b_l2_flsh_done))) {
+		wait_cnt++;
+		if (!(wait_cnt % MAX_WAIT_CONUT))
+			WARN("%s:reg %x,wait\n", __func__,
+			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
+	}
+
+	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
+}
+
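+/*
+ * Request (idle = 1) or release (idle = 0) bus idle for one of the
+ * pmu-managed interconnect ports and spin until the pmu reports the
+ * matching idle/ack status.
+ */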
+static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
+{
+	uint32_t mask = BIT(req);
+	uint32_t idle_mask = 0;
+	uint32_t idle_target = 0;
+	uint32_t val;
+	uint32_t wait_cnt = 0;
+
+	switch (req) {
+	case bus_ide_req_clst_l:
+		idle_mask = BIT(pmu_idle_ack_cluster_l);
+		idle_target = (idle << pmu_idle_ack_cluster_l);
+		break;
+
+	case bus_ide_req_clst_b:
+		idle_mask = BIT(pmu_idle_ack_cluster_b);
+		idle_target = (idle << pmu_idle_ack_cluster_b);
+		break;
+
+	case bus_ide_req_cxcs:
+		idle_mask = BIT(pmu_idle_ack_cxcs);
+		idle_target = ((!idle) << pmu_idle_ack_cxcs);
+		break;
+
+	case bus_ide_req_cci400:
+		idle_mask = BIT(pmu_idle_ack_cci400);
+		idle_target = ((!idle) << pmu_idle_ack_cci400);
+		break;
+
+	case bus_ide_req_gpu:
+		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
+		idle_target = (idle << pmu_idle_ack_gpu) |
+			      (idle << pmu_idle_gpu);
+		break;
+
+	case bus_ide_req_core:
+		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
+		idle_target = (idle << pmu_idle_ack_core) |
+			      (idle << pmu_idle_core);
+		break;
+
+	case bus_ide_req_bus:
+		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
+		idle_target = (idle << pmu_idle_ack_bus) |
+			      (idle << pmu_idle_bus);
+		break;
+	case bus_ide_req_dma:
+		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
+		idle_target = (idle << pmu_idle_ack_dma) |
+			      (idle << pmu_idle_dma);
+		break;
+
+	case bus_ide_req_peri:
+		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
+		idle_target = (idle << pmu_idle_ack_peri) |
+			      (idle << pmu_idle_peri);
+		break;
+
+	case bus_ide_req_video:
+		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
+		idle_target = (idle << pmu_idle_ack_video) |
+			      (idle << pmu_idle_video);
+		break;
+
+	case bus_ide_req_vio:
+		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
+		idle_target = (idle << pmu_idle_ack_vio) |
+			      (idle << pmu_idle_vio);
+		break;
+
+	case bus_ide_req_alive:
+		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
+		idle_target = (idle << pmu_idle_ack_alive) |
+			      (idle << pmu_idle_alive);
+		break;
+
+	case bus_ide_req_pmu:
+		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
+		idle_target = (idle << pmu_idle_ack_pmu) |
+			      (idle << pmu_idle_pmu);
+		break;
+
+	case bus_ide_req_msch:
+		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
+		idle_target = (idle << pmu_idle_ack_msch) |
+			      (idle << pmu_idle_msch);
+		break;
+
+	case bus_ide_req_cci:
+		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
+		idle_target = (idle << pmu_idle_ack_cci) |
+			      (idle << pmu_idle_cci);
+		break;
+
+	default:
+		ERROR("%s: Unsupported the idle request\n", __func__);
+		break;
+	}
+
+	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
+	if (idle)
+		val |=	mask;
+	else
+		val &= ~mask;
+
+	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
+
+	while ((mmio_read_32(PMU_BASE +
+	       PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
+		wait_cnt++;
+		if (!(wait_cnt % MAX_WAIT_CONUT))
+			WARN("%s:st=%x(%x)\n", __func__,
+			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
+			     idle_mask);
+	}
+
+	return 0;
+}
+
+void pmu_scu_b_pwrup(void)
+{
+	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
+	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
+}
+
+static void pmu_scu_b_pwrdn(void)
+{
+	uint32_t wait_cnt = 0;
+
+	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
+	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
+		ERROR("%s: not all cpus is off\n", __func__);
+		return;
+	}
+
+	rk3368_flash_l2_b();
+
+	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
+
+	while (!(mmio_read_32(PMU_BASE +
+	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
+		wait_cnt++;
+		if (!(wait_cnt % MAX_WAIT_CONUT))
+			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
+			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
+	}
+	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
+}
+
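+/*
+ * Program the pmu power-mode and wakeup controls for system suspend: power
+ * down cpu0 and the little-cluster scu, flush/idle the L2, enter ddr
+ * self-refresh, enable the cluster wakeup sources and load the
+ * stabilisation counters (in 24MHz cycles).
+ */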
+static void pmu_sleep_mode_config(void)
+{
+	uint32_t pwrmd_core, pwrmd_com;
+
+	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
+		     BIT(pmu_mdcr_scu_l_pd) |
+		     BIT(pmu_mdcr_l2_flush) |
+		     BIT(pmu_mdcr_l2_idle) |
+		     BIT(pmu_mdcr_clr_clst_l) |
+		     BIT(pmu_mdcr_clr_core) |
+		     BIT(pmu_mdcr_clr_cci) |
+		     BIT(pmu_mdcr_core_pd);
+
+	pwrmd_com = BIT(pmu_mode_en) |
+		    BIT(pmu_mode_sref_enter) |
+		    BIT(pmu_mode_pwr_off);
+
+	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
+	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
+	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);
+
+	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
+	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
+	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
+	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
+	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
+	dsb();
+}
+
+static void pmu_set_sleep_mode(void)
+{
+	pmu_sleep_mode_config();
+	soc_sleep_config();
+	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
+	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
+	pmu_scu_b_pwrdn();
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      ((uintptr_t)&pmu_cpuson_entrypoint >>
+			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
+		      ((uintptr_t)&pmu_cpuson_entrypoint >>
+			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
+}
+
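+/*
+ * Switch the power domain of one cpu in the given cluster to pd_state; when
+ * powering a cpu down, first check that it has reached wfi/wfe.
+ */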
+static int cpus_id_power_domain(uint32_t cluster,
+				uint32_t cpu,
+				uint32_t pd_state,
+				uint32_t wfie_msk)
+{
+	uint32_t pd;
+	uint64_t mpidr;
+
+	if (cluster)
+		pd = PD_CPUB0 + cpu;
+	else
+		pd = PD_CPUL0 + cpu;
+
+	if (pmu_power_domain_st(pd) == pd_state)
+		return 0;
+
+	if (pd_state == pmu_pd_off) {
+		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
+		if (check_cpu_wfie(mpidr, wfie_msk))
+			return -EINVAL;
+	}
+
+	return pmu_power_domain_ctr(pd, pd_state);
+}
+
+static void nonboot_cpus_off(void)
+{
+	uint32_t boot_cpu, boot_cluster, cpu;
+
+	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
+	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
+
+	/* turn off non-boot cpus */
+	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
+		if (!boot_cluster && (cpu == boot_cpu))
+			continue;
+		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
+	}
+
+	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
+		if (boot_cluster && (cpu == boot_cpu))
+			continue;
+		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
+	}
+}
+
+void sram_save(void)
+{
+	/* TODO: support the sram save for rk3368 SoCs */
+}
+
+void sram_restore(void)
+{
+	/* TODO: support the sram restore for rk3368 SoCs */
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+	uint32_t cpu, cluster;
+	uint32_t cpuon_id;
+
+	cpu = MPIDR_AFFLVL0_VAL(mpidr);
+	cluster = MPIDR_AFFLVL1_VAL(mpidr);
+
+	/* Make sure the cpu is off before powering it up */
+	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);
+
+	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
+	assert(cpuon_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpuon_id] == 0);
+	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
+	cpuson_entry_point[cpuon_id] = entrypoint;
+
+	/* Switch boot addr to pmusram */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
+		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+	dsb();
+
+	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);
+
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
+		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+
+	return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
+		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+	pm_plls_resume();
+	pmu_scu_b_pwrup();
+
+	return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	nonboot_cpus_off();
+	pmu_set_sleep_mode();
+
+	return 0;
+}
+
+void rockchip_plat_mmu_el3(void)
+{
+	/* TODO: support the el3 for rk3368 SoCs */
+}
+
+void plat_rockchip_pmu_init(void)
+{
+	uint32_t cpu;
+
+	/* the boot address register takes a 32-bit value, so store the warm boot entry in 32 bits */
+	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
+
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+		cpuson_flags[cpu] = 0;
+
+	nonboot_cpus_off();
+	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
+	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
+}
diff --git a/plat/rockchip/rk3368/drivers/pmu/pmu.h b/plat/rockchip/rk3368/drivers/pmu/pmu.h
new file mode 100644
index 0000000..4c02107
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/pmu/pmu.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_H__
+#define __PMU_H__
+
+/* Allocate sp region in pmusram */
+#define PSRAM_SP_SIZE		0x80
+#define PSRAM_SP_BOTTOM		(PSRAM_SP_TOP - PSRAM_SP_SIZE)
+
+/*****************************************************************************
+ * pmu con,reg
+ *****************************************************************************/
+#define PMU_WKUP_CFG0		0x0
+#define PMU_WKUP_CFG1		0x4
+#define PMU_WKUP_CFG2		0x8
+#define PMU_TIMEOUT_CNT		0x7c
+#define PMU_PWRDN_CON		0xc
+#define PMU_PWRDN_ST		0x10
+#define PMU_CORE_PWR_ST		0x38
+
+#define PMU_PWRMD_CORE		0x14
+#define PMU_PWRMD_COM		0x18
+#define PMU_SFT_CON			0x1c
+#define PMU_BUS_IDE_REQ		0x3c
+#define PMU_BUS_IDE_ST		0x40
+#define PMU_OSC_CNT			0x48
+#define PMU_PLLLOCK_CNT		0x4c
+#define PMU_PLLRST_CNT		0x50
+#define PMU_STABLE_CNT		0x54
+#define PMU_DDRIO_PWR_CNT	0x58
+#define PMU_WKUPRST_CNT		0x5c
+
+enum pmu_powermode_core {
+	pmu_mdcr_global_int_dis = 0,
+	pmu_mdcr_core_src_gt,
+	pmu_mdcr_clr_cci,
+	pmu_mdcr_cpu0_pd,
+	pmu_mdcr_clr_clst_l = 4,
+	pmu_mdcr_clr_core,
+	pmu_mdcr_scu_l_pd,
+	pmu_mdcr_core_pd,
+	pmu_mdcr_l2_idle = 8,
+	pmu_mdcr_l2_flush
+};
+
+/*
+ * the shift of bits for cores status
+ */
+enum pmu_core_pwrst_shift {
+	clstl_cpu_wfe = 2,
+	clstl_cpu_wfi = 6,
+	clstb_cpu_wfe = 12,
+	clstb_cpu_wfi = 16
+};
+
+enum pmu_pdid {
+	PD_CPUL0 = 0,
+	PD_CPUL1,
+	PD_CPUL2,
+	PD_CPUL3,
+	PD_SCUL,
+	PD_CPUB0 = 5,
+	PD_CPUB1,
+	PD_CPUB2,
+	PD_CPUB3,
+	PD_SCUB = 9,
+	PD_PERI = 13,
+	PD_VIDEO,
+	PD_VIO,
+	PD_GPU0,
+	PD_GPU1,
+	PD_END
+};
+
+enum pmu_bus_ide {
+	bus_ide_req_clst_l = 0,
+	bus_ide_req_clst_b,
+	bus_ide_req_gpu,
+	bus_ide_req_core,
+	bus_ide_req_bus = 4,
+	bus_ide_req_dma,
+	bus_ide_req_peri,
+	bus_ide_req_video,
+	bus_ide_req_vio = 8,
+	bus_ide_req_res0,
+	bus_ide_req_cxcs,
+	bus_ide_req_alive,
+	bus_ide_req_pmu = 12,
+	bus_ide_req_msch,
+	bus_ide_req_cci,
+	bus_ide_req_cci400 = 15,
+	bus_ide_req_end
+};
+
+enum pmu_powermode_common {
+	pmu_mode_en = 0,
+	pmu_mode_res0,
+	pmu_mode_bus_pd,
+	pmu_mode_wkup_rst,
+	pmu_mode_pll_pd = 4,
+	pmu_mode_pwr_off,
+	pmu_mode_pmu_use_if,
+	pmu_mode_pmu_alive_use_if,
+	pmu_mode_osc_dis = 8,
+	pmu_mode_input_clamp,
+	pmu_mode_sref_enter,
+	pmu_mode_ddrc_gt,
+	pmu_mode_ddrio_ret = 12,
+	pmu_mode_ddrio_ret_deq,
+	pmu_mode_clr_pmu,
+	pmu_mode_clr_alive,
+	pmu_mode_clr_bus = 16,
+	pmu_mode_clr_dma,
+	pmu_mode_clr_msch,
+	pmu_mode_clr_peri,
+	pmu_mode_clr_video = 20,
+	pmu_mode_clr_vio,
+	pmu_mode_clr_gpu,
+	pmu_mode_clr_mcu,
+	pmu_mode_clr_cxcs = 24,
+	pmu_mode_clr_cci400,
+	pmu_mode_res1,
+	pmu_mode_res2,
+	pmu_mode_res3 = 28,
+	pmu_mode_mclst
+};
+
+enum pmu_core_power_st {
+	clst_l_cpu_wfe = 2,
+	clst_l_cpu_wfi = 6,
+	clst_b_l2_flsh_done = 10,
+	clst_b_l2_wfi = 11,
+	clst_b_cpu_wfe = 12,
+	clst_b_cpu_wfi = 16,
+	mcu_sleeping = 20,
+};
+
+enum pmu_sft_con {
+	pmu_sft_acinactm_clst_b = 5,
+	pmu_sft_l2flsh_clst_b,
+	pmu_sft_glbl_int_dis_b = 9,
+	pmu_sft_ddrio_ret_cfg = 11,
+};
+
+enum pmu_wkup_cfg2 {
+	pmu_cluster_l_wkup_en = 0,
+	pmu_cluster_b_wkup_en,
+	pmu_gpio_wkup_en,
+	pmu_sdio_wkup_en,
+	pmu_sdmmc_wkup_en,
+	pmu_sim_wkup_en,
+	pmu_timer_wkup_en,
+	pmu_usbdev_wkup_en,
+	pmu_sft_wkup_en,
+	pmu_wdt_mcu_wkup_en,
+	pmu_timeout_wkup_en,
+};
+
+enum pmu_bus_idle_st {
+	pmu_idle_ack_cluster_l = 0,
+	pmu_idle_ack_cluster_b,
+	pmu_idle_ack_gpu,
+	pmu_idle_ack_core,
+	pmu_idle_ack_bus,
+	pmu_idle_ack_dma,
+	pmu_idle_ack_peri,
+	pmu_idle_ack_video,
+	pmu_idle_ack_vio,
+	pmu_idle_ack_cci = 10,
+	pmu_idle_ack_msch,
+	pmu_idle_ack_alive,
+	pmu_idle_ack_pmu,
+	pmu_idle_ack_cxcs,
+	pmu_idle_ack_cci400,
+	pmu_inactive_cluster_l,
+	pmu_inactive_cluster_b,
+	pmu_idle_gpu,
+	pmu_idle_core,
+	pmu_idle_bus,
+	pmu_idle_dma,
+	pmu_idle_peri,
+	pmu_idle_video,
+	pmu_idle_vio,
+	pmu_idle_cci = 26,
+	pmu_idle_msch,
+	pmu_idle_alive,
+	pmu_idle_pmu,
+	pmu_active_cxcs,
+	pmu_active_cci,
+};
+
+#define PM_PWRDM_CPUSB_MSK (0xf << 5)
+
+#define CKECK_WFE_MSK		0x1
+#define CKECK_WFI_MSK		0x10
+#define CKECK_WFEI_MSK		0x11
+
+#define PD_CTR_LOOP		500
+#define CHK_CPU_LOOP		500
+
+#define MAX_WAIT_CONUT 1000
+
+#endif /* __PMU_H__ */
diff --git a/plat/rockchip/rk3368/drivers/soc/soc.c b/plat/rockchip/rk3368/drivers/soc/soc.c
new file mode 100644
index 0000000..d6979a8
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/soc/soc.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <rk3368_def.h>
+#include <soc.h>
+
+static uint32_t plls_con[END_PLL_ID][4];
+
+/* Table of regions to map using the MMU. */
+const mmap_region_t plat_rk_mmap[] = {
+	MAP_REGION_FLAT(CCI400_BASE, CCI400_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(UART_DBG_BASE, UART_DBG_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_PCTL_BASE, DDR_PCTL_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVICE_BUS_BASE, SERVICE_BUS_SISE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	{ 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	PLATFORM_SYSTEM_COUNT,
+	/* No of children for the root node */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* No of children for the second cluster node */
+	PLATFORM_CLUSTER1_CORE_COUNT
+};
+
+void secure_timer_init(void)
+{
+	mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT0, 0xffffffff);
+	mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT1, 0xffffffff);
+
+	/* auto reload & enable the timer */
+	mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN);
+}
+
+void sgrf_init(void)
+{
+	/* set all configurable IP blocks to non-secure */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SOC_CON_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), SGRF_SOC_CON7_BITS);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), SGRF_SOC_CON_NS);
+
+	/* switch the secure DMA controllers to non-secure */
+	mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(0), SGRF_BUSDMAC_CON0_NS);
+	mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(1), SGRF_BUSDMAC_CON1_NS);
+	dsb();
+
+	/* rst dma1 */
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1),
+		      RST_DMA1_MSK | (RST_DMA1_MSK << 16));
+	/* rst dma2 */
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4),
+		      RST_DMA2_MSK | (RST_DMA2_MSK << 16));
+
+	dsb();
+
+	/* release dma1 rst */
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), (RST_DMA1_MSK << 16));
+	/* release dma2 rst */
+	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), (RST_DMA2_MSK << 16));
+}
+
+void plat_rockchip_soc_init(void)
+{
+	secure_timer_init();
+	sgrf_init();
+}
+
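+/*
+ * Read-modify-write helper: replace the (mask << shift) field of a register
+ * and skip the MMIO write when the value is already up to date.
+ */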
+void regs_updata_bits(uintptr_t addr, uint32_t val,
+		      uint32_t mask, uint32_t shift)
+{
+	uint32_t tmp, orig;
+
+	orig = mmio_read_32(addr);
+
+	tmp = orig & ~(mask << shift);
+	tmp |= (val & mask) << shift;
+
+	if (tmp != orig)
+		mmio_write_32(addr, tmp);
+	dsb();
+}
+
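+/*
+ * Save the four PLL_CON registers of a PLL, then switch it to slow mode and
+ * enable bypass before entering suspend.
+ */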
+static void plls_suspend(uint32_t pll_id)
+{
+	plls_con[pll_id][0] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 0));
+	plls_con[pll_id][1] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 1));
+	plls_con[pll_id][2] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 2));
+	plls_con[pll_id][3] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 3));
+
+	mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_SLOW_BITS);
+	mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_BYPASS);
+}
+
+static void pm_plls_suspend(void)
+{
+	plls_suspend(NPLL_ID);
+	plls_suspend(CPLL_ID);
+	plls_suspend(GPLL_ID);
+	plls_suspend(ABPLL_ID);
+	plls_suspend(ALPLL_ID);
+}
+
+static inline void plls_resume(void)
+{
+	mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
+		      plls_con[ABPLL_ID][3] | PLL_BYPASS_W_MSK);
+	mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
+		      plls_con[ALPLL_ID][3] | PLL_BYPASS_W_MSK);
+	mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
+		      plls_con[GPLL_ID][3] | PLL_BYPASS_W_MSK);
+	mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
+		      plls_con[CPLL_ID][3] | PLL_BYPASS_W_MSK);
+	mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
+		      plls_con[NPLL_ID][3] | PLL_BYPASS_W_MSK);
+}
+
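+/*
+ * The CRU gate registers take a 16-bit write-enable mask in their upper
+ * half, so writing 0xffff0000 updates every gate bit in a register.
+ */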
+void soc_sleep_config(void)
+{
+	int i = 0;
+
+	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
+		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);
+	pm_plls_suspend();
+
+	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
+		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);
+}
+
+void pm_plls_resume(void)
+{
+	plls_resume();
+
+	mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
+		      plls_con[ABPLL_ID][3] | PLLS_MODE_WMASK);
+	mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
+		      plls_con[ALPLL_ID][3] | PLLS_MODE_WMASK);
+	mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
+		      plls_con[GPLL_ID][3] | PLLS_MODE_WMASK);
+	mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
+		      plls_con[CPLL_ID][3] | PLLS_MODE_WMASK);
+	mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
+		      plls_con[NPLL_ID][3] | PLLS_MODE_WMASK);
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	uint32_t temp_val;
+
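+	/* Force all PLLs into slow mode before triggering the global reset */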
+	mmio_write_32(CRU_BASE + PLL_CONS((GPLL_ID), 3), PLL_SLOW_BITS);
+	mmio_write_32(CRU_BASE + PLL_CONS((CPLL_ID), 3), PLL_SLOW_BITS);
+	mmio_write_32(CRU_BASE + PLL_CONS((NPLL_ID), 3), PLL_SLOW_BITS);
+	mmio_write_32(CRU_BASE + PLL_CONS((ABPLL_ID), 3), PLL_SLOW_BITS);
+	mmio_write_32(CRU_BASE + PLL_CONS((ALPLL_ID), 3), PLL_SLOW_BITS);
+
+	temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON) |
+		   PMU_RST_BY_SECOND_SFT;
+
+	mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val);
+	mmio_write_32(CRU_BASE + CRU_GLB_SRST_SND, 0xeca8);
+
+	/*
+	 * The HW needs some time to reset the system, so keep
+	 * the core from executing any further code.
+	 */
+	while (1)
+	;
+}
diff --git a/plat/rockchip/rk3368/drivers/soc/soc.h b/plat/rockchip/rk3368/drivers/soc/soc.h
new file mode 100644
index 0000000..38d7332
--- /dev/null
+++ b/plat/rockchip/rk3368/drivers/soc/soc.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+enum plls_id {
+	ABPLL_ID = 0,
+	ALPLL_ID,
+	DPLL_ID,
+	CPLL_ID,
+	GPLL_ID,
+	NPLL_ID,
+	END_PLL_ID,
+};
+
+/*****************************************************************************
+ * secure timer
+ *****************************************************************************/
+#define TIMER_LOADE_COUNT0	0x00
+#define TIMER_LOADE_COUNT1	0x04
+#define TIMER_CURRENT_VALUE0	0x08
+#define TIMER_CURRENT_VALUE1	0x0C
+#define TIMER_CONTROL_REG	0x10
+#define TIMER_INTSTATUS		0x18
+
+#define TIMER_EN		0x1
+
+#define STIMER1_BASE		(STIME_BASE + 0x20)
+
+#define CYCL_24M_CNT_US(us)	(24 * us)
+#define CYCL_24M_CNT_MS(ms)	(ms * CYCL_24M_CNT_US(1000))
+
+/*****************************************************************************
+ * sgrf reg, offset
+ *****************************************************************************/
+#define SGRF_SOC_CON(n)		(0x0 + (n) * 4)
+#define SGRF_BUSDMAC_CON(n)	(0x100 + (n) * 4)
+
+#define SGRF_SOC_CON_NS		0xffff0000
+
+/*****************************************************************************
+ * con6[2]: pmusram is secure.
+ * con6[6]: stimer is secure.
+ *****************************************************************************/
+#define PMUSRAM_S_SHIFT		2
+#define PMUSRAM_S		1
+#define STIMER_S_SHIFT		6
+#define STIMER_S		1
+#define SGRF_SOC_CON7_BITS	((0xffff << 16) | \
+				 (PMUSRAM_S << PMUSRAM_S_SHIFT) | \
+				 (STIMER_S << STIMER_S_SHIFT))
+
+#define SGRF_BUSDMAC_CON0_NS	0xfffcfff8
+#define SGRF_BUSDMAC_CON1_NS	0xffff0fff
+
+/*
+ * sgrf_soc_con1~2, mask and offset
+ */
+#define CPU_BOOT_ADDR_WMASK	0xffff0000
+#define CPU_BOOT_ADDR_ALIGN	16
+
+/*****************************************************************************
+ * cru reg, offset
+ *****************************************************************************/
+#define CRU_SOFTRST_CON		0x300
+#define CRU_SOFTRSTS_CON(n)	(CRU_SOFTRST_CON + ((n) * 4))
+#define CRU_SOFTRSTS_CON_CNT	15
+
+#define SOFTRST_DMA1		0x40004
+#define SOFTRST_DMA2		0x10001
+
+#define RST_DMA1_MSK		0x4
+#define RST_DMA2_MSK		0x0
+
+#define CRU_CLKSEL_CON		0x100
+#define CRU_CLKSELS_CON(i)	(CRU_CLKSEL_CON + ((i) * 4))
+#define CRU_CLKSEL_CON_CNT	56
+
+#define CRU_CLKGATE_CON		0x200
+#define CRU_CLKGATES_CON(i)	(CRU_CLKGATE_CON + ((i) * 4))
+#define CRU_CLKGATES_CON_CNT	25
+
+#define CRU_GLB_SRST_FST	0x280
+#define CRU_GLB_SRST_SND	0x284
+#define CRU_GLB_RST_CON		0x388
+
+#define CRU_CONS_GATEID(i)	(16 * (i))
+#define GATE_ID(reg, bit)	((reg * 16) + bit)
+
+#define PMU_RST_BY_SECOND_SFT	(BIT(1) << 2)
+#define PMU_RST_NOT_BY_SFT	(BIT(1) << 2)
+
+/***************************************************************************
+ * pll
+ ***************************************************************************/
+#define PLL_PWR_DN_MSK		(0x1 << 1)
+#define PLL_PWR_DN		REG_WMSK_BITS(1, 1, 0x1)
+#define PLL_PWR_ON		REG_WMSK_BITS(0, 1, 0x1)
+#define PLL_RESET		REG_WMSK_BITS(1, 5, 0x1)
+#define PLL_RESET_RESUME	REG_WMSK_BITS(0, 5, 0x1)
+#define PLL_BYPASS_MSK		(0x1 << 0)
+#define PLL_BYPASS_W_MSK	(PLL_BYPASS_MSK << 16)
+#define PLL_BYPASS		REG_WMSK_BITS(1, 0, 0x1)
+#define PLL_NO_BYPASS		REG_WMSK_BITS(0, 0, 0x1)
+#define PLL_MODE_SHIFT		8
+#define PLL_MODE_MSK		0x3
+#define PLLS_MODE_WMASK		(PLL_MODE_MSK << (16 + PLL_MODE_SHIFT))
+#define PLL_SLOW		0x0
+#define PLL_NORM		0x1
+#define PLL_DEEP		0x2
+#define PLL_SLOW_BITS		REG_WMSK_BITS(PLL_SLOW, 8, 0x3)
+#define PLL_NORM_BITS		REG_WMSK_BITS(PLL_NORM, 8, 0x3)
+#define PLL_DEEP_BITS		REG_WMSK_BITS(PLL_DEEP, 8, 0x3)
+
+#define PLL_CONS(id, i)		((id) * 0x10 + ((i) * 4))
+
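+/*
+ * Helpers for registers that carry a write-enable mask in their upper
+ * 16 bits: REG_WMSK_BITS(bits, shift, msk) enables the masked field at
+ * 'shift' and writes 'bits' into it with a single 32-bit store.
+ */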
+#define REG_W_MSK(bits_shift, msk) \
+		((msk) << ((bits_shift) + 16))
+#define REG_VAL_CLRBITS(val, bits_shift, msk) \
+		(val & (~(msk << bits_shift)))
+#define REG_SET_BITS(bits, bits_shift, msk) \
+		(((bits) & (msk)) << (bits_shift))
+#define REG_WMSK_BITS(bits, bits_shift, msk) \
+		(REG_W_MSK(bits_shift, msk) | \
+		REG_SET_BITS(bits, bits_shift, msk))
+
+#define regs_updata_bit_set(addr, shift) \
+		regs_updata_bits((addr), 0x1, 0x1, (shift))
+#define regs_updata_bit_clr(addr, shift) \
+		regs_updata_bits((addr), 0x0, 0x1, (shift))
+
+void regs_updata_bits(uintptr_t addr, uint32_t val,
+		      uint32_t mask, uint32_t shift);
+void soc_sleep_config(void);
+void pm_plls_resume(void);
+
+#endif /* __SOC_H__ */
diff --git a/plat/rockchip/rk3368/include/plat.ld.S b/plat/rockchip/rk3368/include/plat.ld.S
new file mode 100644
index 0000000..b3559b2
--- /dev/null
+++ b/plat/rockchip/rk3368/include/plat.ld.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ROCKCHIP_PLAT_LD_S__
+#define __ROCKCHIP_PLAT_LD_S__
+
+MEMORY {
+    PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE
+}
+
+SECTIONS
+{
+	. = PMUSRAM_BASE;
+
+	/*
+	 * pmu_cpuson_entrypoint must be 64KB aligned when it is
+	 * used on resume, so place it at the start of pmusram.
+	 */
+	.text_pmusram : {
+		ASSERT(. == ALIGN(64 * 1024),
+			".pmusram.entry must be 64K aligned.");
+		*(.pmusram.entry)
+		__bl31_pmusram_text_start = .;
+		*(.pmusram.text)
+		*(.pmusram.rodata)
+		__bl31_pmusram_text_end = .;
+		__bl31_pmusram_data_start = .;
+		*(.pmusram.data)
+		__bl31_pmusram_data_end = .;
+
+	} >PMUSRAM
+}
+
+#endif /* __ROCKCHIP_PLAT_LD_S__ */
diff --git a/plat/rockchip/rk3368/include/plat_sip_calls.h b/plat/rockchip/rk3368/include/plat_sip_calls.h
new file mode 100644
index 0000000..ead187e
--- /dev/null
+++ b/plat/rockchip/rk3368/include/plat_sip_calls.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+#define RK_PLAT_SIP_NUM_CALLS	0
+
+#endif /* __PLAT_SIP_CALLS_H__ */
diff --git a/plat/rockchip/rk3368/include/platform_def.h b/plat/rockchip/rk3368/include/platform_def.h
new file mode 100644
index 0000000..07b91e2
--- /dev/null
+++ b/plat/rockchip/rk3368/include/platform_def.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <rk3368_def.h>
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL1)
+#define PLATFORM_STACK_SIZE 0x440
+#elif defined(IMAGE_BL2)
+#define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL32)
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_MAX_AFFLVL		MPIDR_AFFLVL2
+#define PLATFORM_SYSTEM_COUNT		1
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	4
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT +	\
+					 PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
+#define PLATFORM_NUM_AFFS		(PLATFORM_SYSTEM_COUNT +	\
+					 PLATFORM_CLUSTER_COUNT +	\
+					 PLATFORM_CORE_COUNT)
+
+#define PLAT_RK_CLST_TO_CPUID_SHIFT	8
+
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE		1
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE		2
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* TF text, ro, rw. Size: 512KB */
+#define TZRAM_BASE		(0x0)
+#define TZRAM_SIZE		(0x80000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted RAM
+ */
+#define BL31_BASE		(TZRAM_BASE + 0x10000)
+#define BL31_LIMIT		(TZRAM_BASE + TZRAM_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE		(1ull << 32)
+#define MAX_XLAT_TABLES		8
+#define MAX_MMAP_REGIONS	16
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT	6
+#define CACHE_WRITEBACK_GRANULE	(1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Define GICD and GICC and GICR base
+ */
+#define PLAT_RK_GICD_BASE	RK3368_GICD_BASE
+#define PLAT_RK_GICC_BASE	RK3368_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_RK_G1S_IRQS	RK_G1S_IRQS
+
+#define PLAT_RK_UART_BASE	RK3368_UART2_BASE
+#define PLAT_RK_UART_CLOCK	RK3368_UART_CLOCK
+#define PLAT_RK_UART_BAUDRATE	RK3368_BAUDRATE
+
+#define PLAT_RK_CCI_BASE	CCI400_BASE
+
+#define PLAT_RK_PRIMARY_CPU	0x0
+
+#define PSRAM_DO_DDR_RESUME	0
+#define PSRAM_CHECK_WAKEUP_CPU	0
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/rockchip/rk3368/plat_sip_calls.c b/plat/rockchip/rk3368/plat_sip_calls.c
new file mode 100644
index 0000000..7383d2f
--- /dev/null
+++ b/plat/rockchip/rk3368/plat_sip_calls.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <plat_sip_calls.h>
+#include <rockchip_sip_svc.h>
+#include <runtime_svc.h>
+
+uint64_t rockchip_plat_sip_handler(uint32_t smc_fid,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	switch (smc_fid) {
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
new file mode 100644
index 0000000..f6960cf
--- /dev/null
+++ b/plat/rockchip/rk3368/platform.mk
@@ -0,0 +1,53 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RK_PLAT			:=	plat/rockchip
+RK_PLAT_SOC		:=	${RK_PLAT}/${PLAT}
+RK_PLAT_COMMON		:=	${RK_PLAT}/common
+
+PLAT_INCLUDES		:=	-I${RK_PLAT_COMMON}/				\
+				-I${RK_PLAT_COMMON}/include/			\
+				-I${RK_PLAT_COMMON}/pmusram			\
+				-I${RK_PLAT_COMMON}/drivers/pmu/			\
+				-I${RK_PLAT_SOC}/				\
+				-I${RK_PLAT_SOC}/drivers/pmu/			\
+				-I${RK_PLAT_SOC}/drivers/soc/			\
+				-I${RK_PLAT_SOC}/drivers/ddr/			\
+				-I${RK_PLAT_SOC}/include/
+
+RK_GIC_SOURCES         :=	drivers/arm/gic/common/gic_common.c		\
+				drivers/arm/gic/v2/gicv2_main.c			\
+				drivers/arm/gic/v2/gicv2_helpers.c		\
+				plat/common/plat_gicv2.c			\
+				${RK_PLAT}/common/rockchip_gicv2.c
+
+PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
+				lib/xlat_tables/aarch64/xlat_tables.c		\
+				plat/common/plat_psci_common.c
+
+BL31_SOURCES		+=	${RK_GIC_SOURCES}				\
+				drivers/arm/cci/cci.c				\
+				drivers/console/aarch64/console.S		\
+				drivers/ti/uart/aarch64/16550_console.S		\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				lib/cpus/aarch64/cortex_a53.S			\
+				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
+				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
+				${RK_PLAT_COMMON}/params_setup.c                \
+				${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S		\
+				${RK_PLAT_COMMON}/plat_pm.c			\
+				${RK_PLAT_COMMON}/plat_topology.c		\
+				${RK_PLAT_COMMON}/aarch64/platform_common.c	\
+				${RK_PLAT_COMMON}/rockchip_sip_svc.c		\
+				${RK_PLAT_SOC}/plat_sip_calls.c			\
+				${RK_PLAT_SOC}/drivers/pmu/pmu.c		\
+				${RK_PLAT_SOC}/drivers/soc/soc.c		\
+				${RK_PLAT_SOC}/drivers/ddr/ddr_rk3368.c		\
+
+ENABLE_PLAT_COMPAT	:=	0
+
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
diff --git a/plat/rockchip/rk3368/rk3368_def.h b/plat/rockchip/rk3368/rk3368_def.h
new file mode 100644
index 0000000..7cb82da
--- /dev/null
+++ b/plat/rockchip/rk3368/rk3368_def.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_DEF_H__
+#define __PLAT_DEF_H__
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define RK_BL31_PLAT_PARAM_VAL	0x0f1e2d3c4b5a6978ULL
+
+#define CCI400_BASE		0xffb90000
+#define CCI400_SIZE		0x10000
+
+#define GIC400_BASE		0xffb70000
+#define GIC400_SIZE		0x10000
+
+#define STIME_BASE		0xff830000
+#define STIME_SIZE		0x10000
+
+#define CRU_BASE		0xff760000
+#define CRU_SIZE		0x10000
+
+#define GRF_BASE		0xff770000
+#define GRF_SIZE		0x10000
+
+#define SGRF_BASE		0xff740000
+#define SGRF_SIZE		0x10000
+
+#define PMU_BASE		0xff730000
+#define PMU_GRF_BASE		0xff738000
+#define PMU_SIZE		0x10000
+
+#define RK_INTMEM_BASE		0xff8c0000
+#define RK_INTMEM_SIZE		0x10000
+
+#define UART_DBG_BASE		0xff690000
+#define UART_DBG_SIZE		0x10000
+
+#define CRU_BASE		0xff760000
+
+#define PMUSRAM_BASE            0xff720000
+#define PMUSRAM_SIZE            0x10000
+#define PMUSRAM_RSIZE           0x1000
+
+#define DDR_PCTL_BASE		0xff610000
+#define DDR_PCTL_SIZE		0x10000
+
+#define DDR_PHY_BASE		0xff620000
+#define DDR_PHY_SIZE		0x10000
+
+#define SERVICE_BUS_BASE	0xffac0000
+#define SERVICE_BUS_SISE	0x50000
+
+#define COLD_BOOT_BASE		0xffff0000
+/**************************************************************************
+ * UART related constants
+ **************************************************************************/
+#define RK3368_UART2_BASE	UART_DBG_BASE
+#define RK3368_BAUDRATE		115200
+#define RK3368_UART_CLOCK	24000000
+
+/******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	24000000
+
+/******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base rk_platform compatible GIC memory map */
+#define RK3368_GICD_BASE		(GIC400_BASE + 0x1000)
+#define RK3368_GICC_BASE		(GIC400_BASE + 0x2000)
+#define RK3368_GICR_BASE		0	/* no GICR in GIC-400 */
+
+/*****************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX	3
+#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX	4
+
+/******************************************************************************
+ * sgi, ppi
+ ******************************************************************************/
+#define RK_IRQ_SEC_PHY_TIMER	29
+
+#define RK_IRQ_SEC_SGI_0	8
+#define RK_IRQ_SEC_SGI_1	9
+#define RK_IRQ_SEC_SGI_2	10
+#define RK_IRQ_SEC_SGI_3	11
+#define RK_IRQ_SEC_SGI_4	12
+#define RK_IRQ_SEC_SGI_5	13
+#define RK_IRQ_SEC_SGI_6	14
+#define RK_IRQ_SEC_SGI_7	15
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define RK_G1S_IRQS		(RK_IRQ_SEC_PHY_TIMER)
+
+#endif /* __PLAT_DEF_H__ */
diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.c b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c
new file mode 100644
index 0000000..d0e5986
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cdn_dp.h>
+#include <smcc.h>
+#include <stdlib.h>
+#include <string.h>
+
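+/*
+ * Embed the pre-built HDCP firmware blob (HDCPFW) as the body of
+ * hdcp_handler().
+ */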
+__asm__(
+	".pushsection .text.hdcp_handler, \"ax\", %progbits\n"
+	".global hdcp_handler\n"
+	".balign 4\n"
+	"hdcp_handler:\n"
+	".incbin \"" __XSTRING(HDCPFW) "\"\n"
+	".type hdcp_handler, %function\n"
+	".size hdcp_handler, .- hdcp_handler\n"
+	".popsection\n"
+);
+
+static uint64_t *hdcp_key_pdata;
+static struct cdn_dp_hdcp_key_1x key;
+
+int hdcp_handler(struct cdn_dp_hdcp_key_1x *key);
+
+uint64_t dp_hdcp_ctrl(uint64_t type)
+{
+	switch (type) {
+	case HDCP_KEY_DATA_START_TRANSFER:
+		memset(&key, 0x00, sizeof(key));
+		hdcp_key_pdata = (uint64_t *)&key;
+		return 0;
+	case HDCP_KEY_DATA_START_DECRYPT:
+		if (hdcp_key_pdata == (uint64_t *)(&key + 1))
+			return hdcp_handler(&key);
+		else
+			return PSCI_E_INVALID_PARAMS;
+	default:
+		return SMC_UNK;
+	}
+}
+
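+/*
+ * Store six 64-bit words of HDCP key material per call, bounds-checking the
+ * write pointer against the key buffer.
+ */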
+uint64_t dp_hdcp_store_key(uint64_t x1,
+			   uint64_t x2,
+			   uint64_t x3,
+			   uint64_t x4,
+			   uint64_t x5,
+			   uint64_t x6)
+{
+	if (hdcp_key_pdata < (uint64_t *)&key ||
+		hdcp_key_pdata + 6 > (uint64_t *)(&key + 1))
+		return PSCI_E_INVALID_PARAMS;
+
+	hdcp_key_pdata[0] = x1;
+	hdcp_key_pdata[1] = x2;
+	hdcp_key_pdata[2] = x3;
+	hdcp_key_pdata[3] = x4;
+	hdcp_key_pdata[4] = x5;
+	hdcp_key_pdata[5] = x6;
+	hdcp_key_pdata += 6;
+
+	return 0;
+}
diff --git a/plat/rockchip/rk3399/drivers/dp/cdn_dp.h b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h
new file mode 100644
index 0000000..3b9a108
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dp/cdn_dp.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_ROCKCHIP_RK3399_DP_H__
+#define __SOC_ROCKCHIP_RK3399_DP_H__
+#include <plat_private.h>
+
+enum {
+	CDN_DP_HDCP_1X_KSV_LEN = 5,
+	CDN_DP_HDCP_KSV_LEN = 8,
+	CDN_DP_HDCP_RESERVED_LEN = 10,
+	CDN_DP_HDCP_UID_LEN = 16,
+	CDN_DP_HDCP_SHA_LEN = 20,
+	CDN_DP_HDCP_DPK_LEN = 280,
+	CDN_DP_HDCP_1X_KEYS_LEN	= 285,
+	CDN_DP_HDCP_KEY_LEN = 326,
+};
+
+struct cdn_dp_hdcp_key_1x {
+	uint8_t ksv[CDN_DP_HDCP_KSV_LEN];
+	uint8_t device_key[CDN_DP_HDCP_DPK_LEN];
+	uint8_t sha1[CDN_DP_HDCP_SHA_LEN];
+	uint8_t uid[CDN_DP_HDCP_UID_LEN];
+	uint16_t seed;
+	uint8_t reserved[CDN_DP_HDCP_RESERVED_LEN];
+};
+
+#define HDCP_KEY_DATA_START_TRANSFER	0
+#define HDCP_KEY_DATA_START_DECRYPT	1
+#define HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE	((6 * 64) / 8)
+
+/* Check that cdn_dp_hdcp_key_1x is a multiple of the 6 x 64-bit store size */
+CASSERT(!(sizeof(struct cdn_dp_hdcp_key_1x) % HDCP_KEY_1X_STORE_DATA_ALIGN_SIZE), \
+	assert_hdcp_key_1x_store_data_align_size_mismatch);
+
+uint64_t dp_hdcp_ctrl(uint64_t type);
+
+uint64_t dp_hdcp_store_key(uint64_t x1,
+			   uint64_t x2,
+			   uint64_t x3,
+			   uint64_t x4,
+			   uint64_t x5,
+			   uint64_t x6);
+#endif
diff --git a/plat/rockchip/rk3399/drivers/dp/hdcp.bin b/plat/rockchip/rk3399/drivers/dp/hdcp.bin
new file mode 100644
index 0000000..28db923
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dp/hdcp.bin
Binary files differ
diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.c b/plat/rockchip/rk3399/drivers/dram/dfs.c
new file mode 100644
index 0000000..d629e4b
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dfs.c
@@ -0,0 +1,2112 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <m0_ctl.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include "dfs.h"
+#include "dram.h"
+#include "dram_spec_timing.h"
+#include "pmu.h"
+#include "soc.h"
+#include "string.h"
+
+#define ENPER_CS_TRAINING_FREQ	(666)
+#define TDFI_LAT_THRESHOLD_FREQ	(928)
+#define PHY_DLL_BYPASS_FREQ	(260)
+
+static const struct pll_div dpll_rates_table[] = {
+
+	/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2 */
+	{.mhz = 928, .refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1},
+	{.mhz = 800, .refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1},
+	{.mhz = 732, .refdiv = 1, .fbdiv = 61, .postdiv1 = 2, .postdiv2 = 1},
+	{.mhz = 666, .refdiv = 1, .fbdiv = 111, .postdiv1 = 4, .postdiv2 = 1},
+	{.mhz = 600, .refdiv = 1, .fbdiv = 50, .postdiv1 = 2, .postdiv2 = 1},
+	{.mhz = 528, .refdiv = 1, .fbdiv = 66, .postdiv1 = 3, .postdiv2 = 1},
+	{.mhz = 400, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 1},
+	{.mhz = 300, .refdiv = 1, .fbdiv = 50, .postdiv1 = 4, .postdiv2 = 1},
+	{.mhz = 200, .refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 2},
+};
+
+struct rk3399_dram_status {
+	uint32_t current_index;
+	uint32_t index_freq[2];
+	uint32_t boot_freq;
+	uint32_t low_power_stat;
+	struct timing_related_config timing_config;
+	struct drv_odt_lp_config drv_odt_lp_cfg;
+};
+
+struct rk3399_saved_status {
+	uint32_t freq;
+	uint32_t low_power_stat;
+	uint32_t odt;
+};
+
+static struct rk3399_dram_status rk3399_dram_status;
+static struct rk3399_saved_status rk3399_suspend_status;
+static uint32_t wrdqs_delay_val[2][2][4];
+static uint32_t rddqs_delay_ps;
+
+static struct rk3399_sdram_default_config ddr3_default_config = {
+	.bl = 8,
+	.ap = 0,
+	.burst_ref_cnt = 1,
+	.zqcsi = 0
+};
+
+static struct rk3399_sdram_default_config lpddr3_default_config = {
+	.bl = 8,
+	.ap = 0,
+	.burst_ref_cnt = 1,
+	.zqcsi = 0
+};
+
+static struct rk3399_sdram_default_config lpddr4_default_config = {
+	.bl = 16,
+	.ap = 0,
+	.caodt = 240,
+	.burst_ref_cnt = 1,
+	.zqcsi = 0
+};
+
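+/*
+ * Return the per-die capacity of the given chip select, derived from the
+ * channel's row/column/bank and bus-width configuration.
+ */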
+static uint32_t get_cs_die_capability(struct rk3399_sdram_params *sdram_config,
+		uint8_t channel, uint8_t cs)
+{
+	struct rk3399_sdram_channel *ch = &sdram_config->ch[channel];
+	uint32_t bandwidth;
+	uint32_t die_bandwidth;
+	uint32_t die;
+	uint32_t cs_cap;
+	uint32_t row;
+
+	row = cs == 0 ? ch->cs0_row : ch->cs1_row;
+	bandwidth = 8 * (1 << ch->bw);
+	die_bandwidth = 8 * (1 << ch->dbw);
+	die = bandwidth / die_bandwidth;
+	cs_cap = (1 << (row + ((1 << ch->bk) / 4 + 1) + ch->col +
+		  (bandwidth / 16)));
+	if (ch->row_3_4)
+		cs_cap = cs_cap * 3 / 4;
+
+	return (cs_cap / die);
+}
+
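+/*
+ * Decode the DRAM-side drive strength and ODT settings from the mode
+ * register values latched in the controller (CTL_REG 133/138/139).
+ */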
+static void get_dram_drv_odt_val(uint32_t dram_type,
+				struct drv_odt_lp_config *drv_config)
+{
+	uint32_t tmp;
+	uint32_t mr1_val, mr3_val, mr11_val;
+
+	switch (dram_type) {
+	case DDR3:
+		mr1_val = (mmio_read_32(CTL_REG(0, 133)) >> 16) & 0xffff;
+		tmp = ((mr1_val >> 1) & 1) | ((mr1_val >> 4) & 1);
+		if (tmp)
+			drv_config->dram_side_drv = 34;
+		else
+			drv_config->dram_side_drv = 40;
+		tmp = ((mr1_val >> 2) & 1) | ((mr1_val >> 5) & 1) |
+		      ((mr1_val >> 7) & 1);
+		if (tmp == 0)
+			drv_config->dram_side_dq_odt = 0;
+		else if (tmp == 1)
+			drv_config->dram_side_dq_odt = 60;
+		else if (tmp == 3)
+			drv_config->dram_side_dq_odt = 40;
+		else
+			drv_config->dram_side_dq_odt = 120;
+		break;
+	case LPDDR3:
+		mr3_val = mmio_read_32(CTL_REG(0, 138)) & 0xf;
+		mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0x3;
+		if (mr3_val == 0xb)
+			drv_config->dram_side_drv = 3448;
+		else if (mr3_val == 0xa)
+			drv_config->dram_side_drv = 4048;
+		else if (mr3_val == 0x9)
+			drv_config->dram_side_drv = 3440;
+		else if (mr3_val == 0x4)
+			drv_config->dram_side_drv = 60;
+		else if (mr3_val == 0x3)
+			drv_config->dram_side_drv = 48;
+		else if (mr3_val == 0x2)
+			drv_config->dram_side_drv = 40;
+		else
+			drv_config->dram_side_drv = 34;
+
+		if (mr11_val == 1)
+			drv_config->dram_side_dq_odt = 60;
+		else if (mr11_val == 2)
+			drv_config->dram_side_dq_odt = 120;
+		else if (mr11_val == 0)
+			drv_config->dram_side_dq_odt = 0;
+		else
+			drv_config->dram_side_dq_odt = 240;
+		break;
+	case LPDDR4:
+	default:
+		mr3_val = (mmio_read_32(CTL_REG(0, 138)) >> 3) & 0x7;
+		mr11_val = (mmio_read_32(CTL_REG(0, 139)) >> 24) & 0xff;
+
+		if ((mr3_val == 0) || (mr3_val == 7))
+			drv_config->dram_side_drv = 40;
+		else
+			drv_config->dram_side_drv = 240 / mr3_val;
+
+		tmp = mr11_val & 0x7;
+		if ((tmp == 7) || (tmp == 0))
+			drv_config->dram_side_dq_odt = 0;
+		else
+			drv_config->dram_side_dq_odt = 240 / tmp;
+
+		tmp = (mr11_val >> 4) & 0x7;
+		if ((tmp == 7) || (tmp == 0))
+			drv_config->dram_side_ca_odt = 0;
+		else
+			drv_config->dram_side_ca_odt = 240 / tmp;
+		break;
+	}
+}
+
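+/*
+ * Populate the timing_related_config from the detected SDRAM parameters and
+ * the drive/ODT settings read back from the controller.
+ */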
+static void sdram_timing_cfg_init(struct timing_related_config *ptiming_config,
+				  struct rk3399_sdram_params *sdram_params,
+				  struct drv_odt_lp_config *drv_config)
+{
+	uint32_t i, j;
+
+	for (i = 0; i < sdram_params->num_channels; i++) {
+		ptiming_config->dram_info[i].speed_rate = DDR3_DEFAULT;
+		ptiming_config->dram_info[i].cs_cnt = sdram_params->ch[i].rank;
+		for (j = 0; j < sdram_params->ch[i].rank; j++) {
+			ptiming_config->dram_info[i].per_die_capability[j] =
+			    get_cs_die_capability(sdram_params, i, j);
+		}
+	}
+	ptiming_config->dram_type = sdram_params->dramtype;
+	ptiming_config->ch_cnt = sdram_params->num_channels;
+	switch (sdram_params->dramtype) {
+	case DDR3:
+		ptiming_config->bl = ddr3_default_config.bl;
+		ptiming_config->ap = ddr3_default_config.ap;
+		break;
+	case LPDDR3:
+		ptiming_config->bl = lpddr3_default_config.bl;
+		ptiming_config->ap = lpddr3_default_config.ap;
+		break;
+	case LPDDR4:
+		ptiming_config->bl = lpddr4_default_config.bl;
+		ptiming_config->ap = lpddr4_default_config.ap;
+		ptiming_config->rdbi = 0;
+		ptiming_config->wdbi = 0;
+		break;
+	}
+	ptiming_config->dramds = drv_config->dram_side_drv;
+	ptiming_config->dramodt = drv_config->dram_side_dq_odt;
+	ptiming_config->caodt = drv_config->dram_side_ca_odt;
+	ptiming_config->odt = (mmio_read_32(PHY_REG(0, 5)) >> 16) & 0x1;
+}
+
+struct lat_adj_pair {
+	uint32_t cl;
+	uint32_t rdlat_adj;
+	uint32_t cwl;
+	uint32_t wrlat_adj;
+};
+
+const struct lat_adj_pair ddr3_lat_adj[] = {
+	{6, 5, 5, 4},
+	{8, 7, 6, 5},
+	{10, 9, 7, 6},
+	{11, 9, 8, 7},
+	{13, 0xb, 9, 8},
+	{14, 0xb, 0xa, 9}
+};
+
+const struct lat_adj_pair lpddr3_lat_adj[] = {
+	{3, 2, 1, 0},
+	{6, 5, 3, 2},
+	{8, 7, 4, 3},
+	{9, 8, 5, 4},
+	{10, 9, 6, 5},
+	{11, 9, 6, 5},
+	{12, 0xa, 6, 5},
+	{14, 0xc, 8, 7},
+	{16, 0xd, 8, 7}
+};
+
+const struct lat_adj_pair lpddr4_lat_adj[] = {
+	{6, 5, 4, 2},
+	{10, 9, 6, 4},
+	{14, 0xc, 8, 6},
+	{20, 0x11, 0xa, 8},
+	{24, 0x15, 0xc, 0xa},
+	{28, 0x18, 0xe, 0xc},
+	{32, 0x1b, 0x10, 0xe},
+	{36, 0x1e, 0x12, 0x10}
+};
+
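+/* Look up the read latency adjustment for a CL value; 0xff if not found */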
+static uint32_t get_rdlat_adj(uint32_t dram_type, uint32_t cl)
+{
+	const struct lat_adj_pair *p;
+	uint32_t cnt;
+	uint32_t i;
+
+	if (dram_type == DDR3) {
+		p = ddr3_lat_adj;
+		cnt = ARRAY_SIZE(ddr3_lat_adj);
+	} else if (dram_type == LPDDR3) {
+		p = lpddr3_lat_adj;
+		cnt = ARRAY_SIZE(lpddr3_lat_adj);
+	} else {
+		p = lpddr4_lat_adj;
+		cnt = ARRAY_SIZE(lpddr4_lat_adj);
+	}
+
+	for (i = 0; i < cnt; i++) {
+		if (cl == p[i].cl)
+			return p[i].rdlat_adj;
+	}
+	/* fail */
+	return 0xff;
+}
+
+static uint32_t get_wrlat_adj(uint32_t dram_type, uint32_t cwl)
+{
+	const struct lat_adj_pair *p;
+	uint32_t cnt;
+	uint32_t i;
+
+	if (dram_type == DDR3) {
+		p = ddr3_lat_adj;
+		cnt = ARRAY_SIZE(ddr3_lat_adj);
+	} else if (dram_type == LPDDR3) {
+		p = lpddr3_lat_adj;
+		cnt = ARRAY_SIZE(lpddr3_lat_adj);
+	} else {
+		p = lpddr4_lat_adj;
+		cnt = ARRAY_SIZE(lpddr4_lat_adj);
+	}
+
+	for (i = 0; i < cnt; i++) {
+		if (cwl == p[i].cwl)
+			return p[i].wrlat_adj;
+	}
+	/* fail */
+	return 0xff;
+}
+
+#define PI_REGS_DIMM_SUPPORT	(0)
+#define PI_ADD_LATENCY	(0)
+#define PI_DOUBLEFREEK	(1)
+
+#define PI_PAD_DELAY_PS_VALUE	(1000)
+#define PI_IE_ENABLE_VALUE	(3000)
+#define PI_TSEL_ENABLE_VALUE	(700)
+
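+/*
+ * Convert the IE/tsel enable delays (in ps) into DFI clock cycles and derive
+ * the PI read latency adjustment from the CAS latency.
+ */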
+static uint32_t get_pi_rdlat_adj(struct dram_timing_t *pdram_timing)
+{
+	/* [DLLSUBTYPE2] == "STD_DENALI_HS" */
+	uint32_t rdlat, delay_adder, ie_enable, hs_offset, tsel_adder,
+	    extra_adder, tsel_enable;
+
+	ie_enable = PI_IE_ENABLE_VALUE;
+	tsel_enable = PI_TSEL_ENABLE_VALUE;
+
+	rdlat = pdram_timing->cl + PI_ADD_LATENCY;
+	delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+	if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+		delay_adder++;
+	hs_offset = 0;
+	tsel_adder = 0;
+	extra_adder = 0;
+	/* rdlat = rdlat - (PREAMBLE_SUPPORT & 0x1); */
+	tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz);
+	if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0)
+		tsel_adder++;
+	delay_adder = delay_adder - 1;
+	if (tsel_adder > delay_adder)
+		extra_adder = tsel_adder - delay_adder;
+	else
+		extra_adder = 0;
+	if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+		hs_offset = 2;
+	else
+		hs_offset = 1;
+
+	if (delay_adder > (rdlat - 1 - hs_offset)) {
+		rdlat = rdlat - tsel_adder;
+	} else {
+		if ((rdlat - delay_adder) < 2)
+			rdlat = 2;
+		else
+			rdlat = rdlat - delay_adder - extra_adder;
+	}
+
+	return rdlat;
+}
+
+static uint32_t get_pi_wrlat(struct dram_timing_t *pdram_timing,
+			     struct timing_related_config *timing_config)
+{
+	uint32_t tmp;
+
+	if (timing_config->dram_type == LPDDR3) {
+		tmp = pdram_timing->cl;
+		if (tmp >= 14)
+			tmp = 8;
+		else if (tmp >= 10)
+			tmp = 6;
+		else if (tmp == 9)
+			tmp = 5;
+		else if (tmp == 8)
+			tmp = 4;
+		else if (tmp == 6)
+			tmp = 3;
+		else
+			tmp = 1;
+	} else {
+		tmp = 1;
+	}
+
+	return tmp;
+}
+
+static uint32_t get_pi_wrlat_adj(struct dram_timing_t *pdram_timing,
+				 struct timing_related_config *timing_config)
+{
+	return get_pi_wrlat(pdram_timing, timing_config) + PI_ADD_LATENCY - 1;
+}
+
+static uint32_t get_pi_tdfi_phy_rdlat(struct dram_timing_t *pdram_timing,
+			struct timing_related_config *timing_config)
+{
+	/* [DLLSUBTYPE2] == "STD_DENALI_HS" */
+	uint32_t cas_lat, delay_adder, ie_enable, hs_offset, ie_delay_adder;
+	uint32_t mem_delay_ps, round_trip_ps;
+	uint32_t phy_internal_delay, lpddr_adder, dfi_adder, rdlat_delay;
+
+	ie_enable = PI_IE_ENABLE_VALUE;
+
+	delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+	if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+		delay_adder++;
+	delay_adder = delay_adder - 1;
+	if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+		hs_offset = 2;
+	else
+		hs_offset = 1;
+
+	cas_lat = pdram_timing->cl + PI_ADD_LATENCY;
+
+	if (delay_adder > (cas_lat - 1 - hs_offset)) {
+		ie_delay_adder = 0;
+	} else {
+		ie_delay_adder = ie_enable / (1000000 / pdram_timing->mhz);
+		if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+			ie_delay_adder++;
+	}
+
+	if (timing_config->dram_type == DDR3) {
+		mem_delay_ps = 0;
+	} else if (timing_config->dram_type == LPDDR4) {
+		mem_delay_ps = 3600;
+	} else if (timing_config->dram_type == LPDDR3) {
+		mem_delay_ps = 5500;
+	} else {
+		NOTICE("get_pi_tdfi_phy_rdlat: unsupported dram type\n");
+		return 0;
+	}
+	round_trip_ps = 1100 + 500 + mem_delay_ps + 500 + 600;
+	delay_adder = round_trip_ps / (1000000 / pdram_timing->mhz);
+	if ((round_trip_ps % (1000000 / pdram_timing->mhz)) != 0)
+		delay_adder++;
+
+	phy_internal_delay = 5 + 2 + 4;
+	lpddr_adder = mem_delay_ps / (1000000 / pdram_timing->mhz);
+	if ((mem_delay_ps % (1000000 / pdram_timing->mhz)) != 0)
+		lpddr_adder++;
+	dfi_adder = 0;
+	phy_internal_delay = phy_internal_delay + 2;
+	rdlat_delay = delay_adder + phy_internal_delay +
+	    ie_delay_adder + lpddr_adder + dfi_adder;
+
+	rdlat_delay = rdlat_delay + 2;
+	return rdlat_delay;
+}
+
+static uint32_t get_pi_todtoff_min(struct dram_timing_t *pdram_timing,
+				   struct timing_related_config *timing_config)
+{
+	uint32_t tmp, todtoff_min_ps;
+
+	if (timing_config->dram_type == LPDDR3)
+		todtoff_min_ps = 2500;
+	else if (timing_config->dram_type == LPDDR4)
+		todtoff_min_ps = 1500;
+	else
+		todtoff_min_ps = 0;
+	/* todtoff_min */
+	tmp = todtoff_min_ps / (1000000 / pdram_timing->mhz);
+	if ((todtoff_min_ps % (1000000 / pdram_timing->mhz)) != 0)
+		tmp++;
+	return tmp;
+}
+
+static uint32_t get_pi_todtoff_max(struct dram_timing_t *pdram_timing,
+				   struct timing_related_config *timing_config)
+{
+	uint32_t tmp, todtoff_max_ps;
+
+	if ((timing_config->dram_type == LPDDR4)
+	    || (timing_config->dram_type == LPDDR3))
+		todtoff_max_ps = 3500;
+	else
+		todtoff_max_ps = 0;
+
+	/* todtoff_max */
+	tmp = todtoff_max_ps / (1000000 / pdram_timing->mhz);
+	if ((todtoff_max_ps % (1000000 / pdram_timing->mhz)) != 0)
+		tmp++;
+	return tmp;
+}
+
+static void gen_rk3399_ctl_params_f0(struct timing_related_config
+				     *timing_config,
+				     struct dram_timing_t *pdram_timing)
+{
+	uint32_t i;
+	uint32_t tmp, tmp1;
+
+	for (i = 0; i < timing_config->ch_cnt; i++) {
+		if (timing_config->dram_type == DDR3) {
+			tmp = ((700000 + 10) * timing_config->freq +
+				999) / 1000;
+			tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) +
+			    pdram_timing->tmod + pdram_timing->tzqinit;
+			mmio_write_32(CTL_REG(i, 5), tmp);
+
+			mmio_clrsetbits_32(CTL_REG(i, 22), 0xffff,
+					   pdram_timing->tdllk);
+
+			mmio_write_32(CTL_REG(i, 32),
+				      (pdram_timing->tmod << 8) |
+				       pdram_timing->tmrd);
+
+			mmio_clrsetbits_32(CTL_REG(i, 59), 0xffff << 16,
+					   (pdram_timing->txsr -
+					    pdram_timing->trcd) << 16);
+		} else if (timing_config->dram_type == LPDDR4) {
+			mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1 +
+						     pdram_timing->tinit3);
+			mmio_write_32(CTL_REG(i, 32),
+				      (pdram_timing->tmrd << 8) |
+				      pdram_timing->tmrd);
+			mmio_clrsetbits_32(CTL_REG(i, 59), 0xffff << 16,
+					   pdram_timing->txsr << 16);
+		} else {
+			mmio_write_32(CTL_REG(i, 5), pdram_timing->tinit1);
+			mmio_write_32(CTL_REG(i, 7), pdram_timing->tinit4);
+			mmio_write_32(CTL_REG(i, 32),
+				      (pdram_timing->tmrd << 8) |
+				      pdram_timing->tmrd);
+			mmio_clrsetbits_32(CTL_REG(i, 59), 0xffff << 16,
+					   pdram_timing->txsr << 16);
+		}
+		mmio_write_32(CTL_REG(i, 6), pdram_timing->tinit3);
+		mmio_write_32(CTL_REG(i, 8), pdram_timing->tinit5);
+		mmio_clrsetbits_32(CTL_REG(i, 23), (0x7f << 16),
+				   ((pdram_timing->cl * 2) << 16));
+		mmio_clrsetbits_32(CTL_REG(i, 23), (0x1f << 24),
+				   (pdram_timing->cwl << 24));
+		mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f, pdram_timing->al);
+		mmio_clrsetbits_32(CTL_REG(i, 26), 0xffff << 16,
+				   (pdram_timing->trc << 24) |
+				   (pdram_timing->trrd << 16));
+		mmio_write_32(CTL_REG(i, 27),
+			      (pdram_timing->tfaw << 24) |
+			      (pdram_timing->trppb << 16) |
+			      (pdram_timing->twtr << 8) |
+			      pdram_timing->tras_min);
+
+		mmio_clrsetbits_32(CTL_REG(i, 31), 0xff << 24,
+				   max(4, pdram_timing->trtp) << 24);
+		mmio_write_32(CTL_REG(i, 33), (pdram_timing->tcke << 24) |
+					      pdram_timing->tras_max);
+		mmio_clrsetbits_32(CTL_REG(i, 34), 0xff,
+				   max(1, pdram_timing->tckesr));
+		mmio_clrsetbits_32(CTL_REG(i, 39),
+				   (0x3f << 16) | (0xff << 8),
+				   (pdram_timing->twr << 16) |
+				   (pdram_timing->trcd << 8));
+		mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 16,
+				   pdram_timing->tmrz << 16);
+		tmp = pdram_timing->tdal ? pdram_timing->tdal :
+		      (pdram_timing->twr + pdram_timing->trp);
+		mmio_clrsetbits_32(CTL_REG(i, 44), 0xff, tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 45), 0xff, pdram_timing->trp);
+		mmio_write_32(CTL_REG(i, 48),
+			      ((pdram_timing->trefi - 8) << 16) |
+			      pdram_timing->trfc);
+		mmio_clrsetbits_32(CTL_REG(i, 52), 0xffff, pdram_timing->txp);
+		mmio_clrsetbits_32(CTL_REG(i, 53), 0xffff << 16,
+				   pdram_timing->txpdll << 16);
+		mmio_clrsetbits_32(CTL_REG(i, 55), 0xf << 24,
+				   pdram_timing->tcscke << 24);
+		mmio_clrsetbits_32(CTL_REG(i, 55), 0xff, pdram_timing->tmrri);
+		mmio_write_32(CTL_REG(i, 56),
+			      (pdram_timing->tzqcke << 24) |
+			      (pdram_timing->tmrwckel << 16) |
+			      (pdram_timing->tckehcs << 8) |
+			      pdram_timing->tckelcs);
+		mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff, pdram_timing->txsnr);
+		mmio_clrsetbits_32(CTL_REG(i, 62), 0xffff << 16,
+				   (pdram_timing->tckehcmd << 24) |
+				   (pdram_timing->tckelcmd << 16));
+		mmio_write_32(CTL_REG(i, 63),
+			      (pdram_timing->tckelpd << 24) |
+			      (pdram_timing->tescke << 16) |
+			      (pdram_timing->tsr << 8) |
+			      pdram_timing->tckckel);
+		mmio_clrsetbits_32(CTL_REG(i, 64), 0xfff,
+				   (pdram_timing->tcmdcke << 8) |
+				   pdram_timing->tcsckeh);
+		mmio_clrsetbits_32(CTL_REG(i, 92), 0xffff << 8,
+				   (pdram_timing->tcksrx << 16) |
+				   (pdram_timing->tcksre << 8));
+		mmio_clrsetbits_32(CTL_REG(i, 108), 0x1 << 24,
+				   (timing_config->dllbp << 24));
+		mmio_clrsetbits_32(CTL_REG(i, 122), 0x3ff << 16,
+				   (pdram_timing->tvrcg_enable << 16));
+		mmio_write_32(CTL_REG(i, 123), (pdram_timing->tfc_long << 16) |
+					       pdram_timing->tvrcg_disable);
+		mmio_write_32(CTL_REG(i, 124),
+			      (pdram_timing->tvref_long << 16) |
+			      (pdram_timing->tckfspx << 8) |
+			      pdram_timing->tckfspe);
+		mmio_write_32(CTL_REG(i, 133), (pdram_timing->mr[1] << 16) |
+					       pdram_timing->mr[0]);
+		mmio_clrsetbits_32(CTL_REG(i, 134), 0xffff,
+				   pdram_timing->mr[2]);
+		mmio_clrsetbits_32(CTL_REG(i, 138), 0xffff,
+				   pdram_timing->mr[3]);
+		mmio_clrsetbits_32(CTL_REG(i, 139), 0xff << 24,
+				   pdram_timing->mr11 << 24);
+		mmio_write_32(CTL_REG(i, 147),
+			      (pdram_timing->mr[1] << 16) |
+			      pdram_timing->mr[0]);
+		mmio_clrsetbits_32(CTL_REG(i, 148), 0xffff,
+				   pdram_timing->mr[2]);
+		mmio_clrsetbits_32(CTL_REG(i, 152), 0xffff,
+				   pdram_timing->mr[3]);
+		mmio_clrsetbits_32(CTL_REG(i, 153), 0xff << 24,
+				   pdram_timing->mr11 << 24);
+		if (timing_config->dram_type == LPDDR4) {
+			mmio_clrsetbits_32(CTL_REG(i, 140), 0xffff << 16,
+					   pdram_timing->mr12 << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 142), 0xffff << 16,
+					   pdram_timing->mr14 << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 145), 0xffff << 16,
+					   pdram_timing->mr22 << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 154), 0xffff << 16,
+					   pdram_timing->mr12 << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 156), 0xffff << 16,
+					   pdram_timing->mr14 << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 159), 0xffff << 16,
+					   pdram_timing->mr22 << 16);
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 179), 0xfff << 8,
+				   pdram_timing->tzqinit << 8);
+		mmio_write_32(CTL_REG(i, 180), (pdram_timing->tzqcs << 16) |
+					       (pdram_timing->tzqinit / 2));
+		mmio_write_32(CTL_REG(i, 181), (pdram_timing->tzqlat << 16) |
+					       pdram_timing->tzqcal);
+		mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 8,
+				   pdram_timing->todton << 8);
+
+		if (timing_config->odt) {
+			mmio_setbits_32(CTL_REG(i, 213), 1 << 16);
+			if (timing_config->freq < 400)
+				tmp = 4 << 24;
+			else
+				tmp = 8 << 24;
+		} else {
+			mmio_clrbits_32(CTL_REG(i, 213), 1 << 16);
+			tmp = 2 << 24;
+		}
+
+		mmio_clrsetbits_32(CTL_REG(i, 216), 0x1f << 24, tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 221), (0x3 << 16) | (0xf << 8),
+				   (pdram_timing->tdqsck << 16) |
+				   (pdram_timing->tdqsck_max << 8));
+		tmp =
+		    (get_wrlat_adj(timing_config->dram_type, pdram_timing->cwl)
+		     << 8) | get_rdlat_adj(timing_config->dram_type,
+					   pdram_timing->cl);
+		mmio_clrsetbits_32(CTL_REG(i, 284), 0xffff, tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 82), 0xffff << 16,
+				   (4 * pdram_timing->trefi) << 16);
+
+		mmio_clrsetbits_32(CTL_REG(i, 83), 0xffff,
+				   (2 * pdram_timing->trefi) & 0xffff);
+
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			tmp = get_pi_wrlat(pdram_timing, timing_config);
+			tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+			tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+		} else {
+			tmp = 0;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 16,
+				   (tmp & 0x3f) << 16);
+
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			/* min_rl_preamble = cl+TDQSCK_MIN -1 */
+			tmp = pdram_timing->cl +
+			    get_pi_todtoff_min(pdram_timing, timing_config) - 1;
+			/* todtoff_max */
+			tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+			tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+		} else {
+			tmp = pdram_timing->cl - pdram_timing->cwl;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 8,
+				   (tmp & 0x3f) << 8);
+
+		mmio_clrsetbits_32(CTL_REG(i, 275), 0xff << 16,
+				   (get_pi_tdfi_phy_rdlat(pdram_timing,
+							  timing_config) &
+				    0xff) << 16);
+
+		mmio_clrsetbits_32(CTL_REG(i, 277), 0xffff,
+				   (2 * pdram_timing->trefi) & 0xffff);
+
+		mmio_clrsetbits_32(CTL_REG(i, 282), 0xffff,
+				   (2 * pdram_timing->trefi) & 0xffff);
+
+		mmio_write_32(CTL_REG(i, 283), 20 * pdram_timing->trefi);
+
+		/* CTL_308 TDFI_CALVL_CAPTURE_F0:RW:16:10 */
+		tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+		if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+		mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff << 16, tmp << 16);
+
+		/* CTL_308 TDFI_CALVL_CC_F0:RW:0:10 */
+		tmp = tmp + 18;
+		mmio_clrsetbits_32(CTL_REG(i, 308), 0x3ff, tmp);
+
+		/* CTL_314 TDFI_WRCSLAT_F0:RW:8:8 */
+		tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config);
+		if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) {
+			if (tmp1 == 0)
+				tmp = 0;
+			else if (tmp1 < 5)
+				tmp = tmp1 - 1;
+			else
+				tmp = tmp1 - 5;
+		} else {
+			tmp = tmp1 - 2;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 8, tmp << 8);
+
+		/* CTL_314 TDFI_RDCSLAT_F0:RW:0:8 */
+		if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) &&
+		    (pdram_timing->cl >= 5))
+			tmp = pdram_timing->cl - 5;
+		else
+			tmp = pdram_timing->cl - 2;
+		mmio_clrsetbits_32(CTL_REG(i, 314), 0xff, tmp);
+	}
+}
+
+static void gen_rk3399_ctl_params_f1(struct timing_related_config
+				     *timing_config,
+				     struct dram_timing_t *pdram_timing)
+{
+	uint32_t i;
+	uint32_t tmp, tmp1;
+
+	for (i = 0; i < timing_config->ch_cnt; i++) {
+		if (timing_config->dram_type == DDR3) {
+			tmp =
+			    ((700000 + 10) * timing_config->freq + 999) / 1000;
+			tmp += pdram_timing->txsnr + (pdram_timing->tmrd * 3) +
+			       pdram_timing->tmod + pdram_timing->tzqinit;
+			mmio_write_32(CTL_REG(i, 9), tmp);
+			mmio_clrsetbits_32(CTL_REG(i, 22), 0xffff << 16,
+					   pdram_timing->tdllk << 16);
+			mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+					   (pdram_timing->tmod << 24) |
+					   (pdram_timing->tmrd << 16) |
+					   (pdram_timing->trtp << 8));
+			mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff << 16,
+					   (pdram_timing->txsr -
+					    pdram_timing->trcd) << 16);
+		} else if (timing_config->dram_type == LPDDR4) {
+			mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1 +
+						     pdram_timing->tinit3);
+			mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+					   (pdram_timing->tmrd << 24) |
+					   (pdram_timing->tmrd << 16) |
+					   (pdram_timing->trtp << 8));
+			mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff << 16,
+					   pdram_timing->txsr << 16);
+		} else {
+			mmio_write_32(CTL_REG(i, 9), pdram_timing->tinit1);
+			mmio_write_32(CTL_REG(i, 11), pdram_timing->tinit4);
+			mmio_clrsetbits_32(CTL_REG(i, 34), 0xffffff00,
+					   (pdram_timing->tmrd << 24) |
+					   (pdram_timing->tmrd << 16) |
+					   (pdram_timing->trtp << 8));
+			mmio_clrsetbits_32(CTL_REG(i, 60), 0xffff << 16,
+					   pdram_timing->txsr << 16);
+		}
+		mmio_write_32(CTL_REG(i, 10), pdram_timing->tinit3);
+		mmio_write_32(CTL_REG(i, 12), pdram_timing->tinit5);
+		mmio_clrsetbits_32(CTL_REG(i, 24), (0x7f << 8),
+				   ((pdram_timing->cl * 2) << 8));
+		mmio_clrsetbits_32(CTL_REG(i, 24), (0x1f << 16),
+				   (pdram_timing->cwl << 16));
+		mmio_clrsetbits_32(CTL_REG(i, 24), 0x3f << 24,
+				   pdram_timing->al << 24);
+		mmio_clrsetbits_32(CTL_REG(i, 28), 0xffffff00,
+				   (pdram_timing->tras_min << 24) |
+				   (pdram_timing->trc << 16) |
+				   (pdram_timing->trrd << 8));
+		mmio_clrsetbits_32(CTL_REG(i, 29), 0xffffff,
+				   (pdram_timing->tfaw << 16) |
+				   (pdram_timing->trppb << 8) |
+				   pdram_timing->twtr);
+		mmio_write_32(CTL_REG(i, 35), (pdram_timing->tcke << 24) |
+					      pdram_timing->tras_max);
+		mmio_clrsetbits_32(CTL_REG(i, 36), 0xff,
+				   max(1, pdram_timing->tckesr));
+		mmio_clrsetbits_32(CTL_REG(i, 39), (0xff << 24),
+				   (pdram_timing->trcd << 24));
+		mmio_clrsetbits_32(CTL_REG(i, 40), 0x3f, pdram_timing->twr);
+		mmio_clrsetbits_32(CTL_REG(i, 42), 0x1f << 24,
+				   pdram_timing->tmrz << 24);
+		tmp = pdram_timing->tdal ? pdram_timing->tdal :
+		      (pdram_timing->twr + pdram_timing->trp);
+		mmio_clrsetbits_32(CTL_REG(i, 44), 0xff << 8, tmp << 8);
+		mmio_clrsetbits_32(CTL_REG(i, 45), 0xff << 8,
+				   pdram_timing->trp << 8);
+		mmio_write_32(CTL_REG(i, 49),
+			      ((pdram_timing->trefi - 8) << 16) |
+			      pdram_timing->trfc);
+		mmio_clrsetbits_32(CTL_REG(i, 52), 0xffff << 16,
+				   pdram_timing->txp << 16);
+		mmio_clrsetbits_32(CTL_REG(i, 54), 0xffff,
+				   pdram_timing->txpdll);
+		mmio_clrsetbits_32(CTL_REG(i, 55), 0xff << 8,
+				   pdram_timing->tmrri << 8);
+		mmio_write_32(CTL_REG(i, 57), (pdram_timing->tmrwckel << 24) |
+					      (pdram_timing->tckehcs << 16) |
+					      (pdram_timing->tckelcs << 8) |
+					      pdram_timing->tcscke);
+		mmio_clrsetbits_32(CTL_REG(i, 58), 0xf, pdram_timing->tzqcke);
+		mmio_clrsetbits_32(CTL_REG(i, 61), 0xffff, pdram_timing->txsnr);
+		mmio_clrsetbits_32(CTL_REG(i, 64), 0xffff << 16,
+				   (pdram_timing->tckehcmd << 24) |
+				   (pdram_timing->tckelcmd << 16));
+		mmio_write_32(CTL_REG(i, 65), (pdram_timing->tckelpd << 24) |
+					      (pdram_timing->tescke << 16) |
+					      (pdram_timing->tsr << 8) |
+					      pdram_timing->tckckel);
+		mmio_clrsetbits_32(CTL_REG(i, 66), 0xfff,
+				   (pdram_timing->tcmdcke << 8) |
+				   pdram_timing->tcsckeh);
+		mmio_clrsetbits_32(CTL_REG(i, 92), (0xff << 24),
+				   (pdram_timing->tcksre << 24));
+		mmio_clrsetbits_32(CTL_REG(i, 93), 0xff,
+				   pdram_timing->tcksrx);
+		mmio_clrsetbits_32(CTL_REG(i, 108), (0x1 << 25),
+				   (timing_config->dllbp << 25));
+		mmio_write_32(CTL_REG(i, 125),
+			      (pdram_timing->tvrcg_disable << 16) |
+			      pdram_timing->tvrcg_enable);
+		mmio_write_32(CTL_REG(i, 126), (pdram_timing->tckfspx << 24) |
+					       (pdram_timing->tckfspe << 16) |
+					       pdram_timing->tfc_long);
+		mmio_clrsetbits_32(CTL_REG(i, 127), 0xffff,
+				   pdram_timing->tvref_long);
+		mmio_clrsetbits_32(CTL_REG(i, 134), 0xffff << 16,
+				   pdram_timing->mr[0] << 16);
+		mmio_write_32(CTL_REG(i, 135), (pdram_timing->mr[2] << 16) |
+					       pdram_timing->mr[1]);
+		mmio_clrsetbits_32(CTL_REG(i, 138), 0xffff << 16,
+				   pdram_timing->mr[3] << 16);
+		mmio_clrsetbits_32(CTL_REG(i, 140), 0xff, pdram_timing->mr11);
+		mmio_clrsetbits_32(CTL_REG(i, 148), 0xffff << 16,
+				   pdram_timing->mr[0] << 16);
+		mmio_write_32(CTL_REG(i, 149), (pdram_timing->mr[2] << 16) |
+					       pdram_timing->mr[1]);
+		mmio_clrsetbits_32(CTL_REG(i, 152), 0xffff << 16,
+				   pdram_timing->mr[3] << 16);
+		mmio_clrsetbits_32(CTL_REG(i, 154), 0xff, pdram_timing->mr11);
+		if (timing_config->dram_type == LPDDR4) {
+			mmio_clrsetbits_32(CTL_REG(i, 141), 0xffff,
+					   pdram_timing->mr12);
+			mmio_clrsetbits_32(CTL_REG(i, 143), 0xffff,
+					   pdram_timing->mr14);
+			mmio_clrsetbits_32(CTL_REG(i, 146), 0xffff,
+					   pdram_timing->mr22);
+			mmio_clrsetbits_32(CTL_REG(i, 155), 0xffff,
+					   pdram_timing->mr12);
+			mmio_clrsetbits_32(CTL_REG(i, 157), 0xffff,
+					   pdram_timing->mr14);
+			mmio_clrsetbits_32(CTL_REG(i, 160), 0xffff,
+					   pdram_timing->mr22);
+		}
+		mmio_write_32(CTL_REG(i, 182),
+			      ((pdram_timing->tzqinit / 2) << 16) |
+			      pdram_timing->tzqinit);
+		mmio_write_32(CTL_REG(i, 183), (pdram_timing->tzqcal << 16) |
+					       pdram_timing->tzqcs);
+		mmio_clrsetbits_32(CTL_REG(i, 184), 0x3f, pdram_timing->tzqlat);
+		mmio_clrsetbits_32(CTL_REG(i, 188), 0xfff,
+				   pdram_timing->tzqreset);
+		mmio_clrsetbits_32(CTL_REG(i, 212), 0xff << 16,
+				   pdram_timing->todton << 16);
+
+		if (timing_config->odt) {
+			mmio_setbits_32(CTL_REG(i, 213), (1 << 24));
+			if (timing_config->freq < 400)
+				tmp = 4 << 24;
+			else
+				tmp = 8 << 24;
+		} else {
+			mmio_clrbits_32(CTL_REG(i, 213), (1 << 24));
+			tmp = 2 << 24;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 217), 0x1f << 24, tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 221), 0xf << 24,
+				   (pdram_timing->tdqsck_max << 24));
+		mmio_clrsetbits_32(CTL_REG(i, 222), 0x3, pdram_timing->tdqsck);
+		mmio_clrsetbits_32(CTL_REG(i, 291), 0xffff,
+				   (get_wrlat_adj(timing_config->dram_type,
+						  pdram_timing->cwl) << 8) |
+				   get_rdlat_adj(timing_config->dram_type,
+						 pdram_timing->cl));
+
+		mmio_clrsetbits_32(CTL_REG(i, 84), 0xffff,
+				   (4 * pdram_timing->trefi) & 0xffff);
+
+		mmio_clrsetbits_32(CTL_REG(i, 84), 0xffff << 16,
+				   ((2 * pdram_timing->trefi) & 0xffff) << 16);
+
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			tmp = get_pi_wrlat(pdram_timing, timing_config);
+			tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+			tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+		} else {
+			tmp = 0;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 214), 0x3f << 24,
+				   (tmp & 0x3f) << 24);
+
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			/* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+			tmp = pdram_timing->cl +
+			      get_pi_todtoff_min(pdram_timing, timing_config);
+			tmp--;
+			/* todtoff_max */
+			tmp1 = get_pi_todtoff_max(pdram_timing, timing_config);
+			tmp = (tmp > tmp1) ? (tmp - tmp1) : 0;
+		} else {
+			tmp = pdram_timing->cl - pdram_timing->cwl;
+		}
+		mmio_clrsetbits_32(CTL_REG(i, 215), 0x3f << 16,
+				   (tmp & 0x3f) << 16);
+
+		mmio_clrsetbits_32(CTL_REG(i, 275), 0xff << 24,
+				   (get_pi_tdfi_phy_rdlat(pdram_timing,
+							  timing_config) &
+				    0xff) << 24);
+
+		mmio_clrsetbits_32(CTL_REG(i, 284), 0xffff << 16,
+				   ((2 * pdram_timing->trefi) & 0xffff) << 16);
+
+		mmio_clrsetbits_32(CTL_REG(i, 289), 0xffff,
+				   (2 * pdram_timing->trefi) & 0xffff);
+
+		mmio_write_32(CTL_REG(i, 290), 20 * pdram_timing->trefi);
+
+		/* CTL_309 TDFI_CALVL_CAPTURE_F1:RW:16:10 */
+		tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+		if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+		mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff << 16, tmp << 16);
+
+		/* CTL_309 TDFI_CALVL_CC_F1:RW:0:10 */
+		tmp = tmp + 18;
+		mmio_clrsetbits_32(CTL_REG(i, 309), 0x3ff, tmp);
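+		/*
+		 * Worked example for the two CTL_309 fields above (numbers
+		 * are illustrative only): at 800 MHz, tCK = 1250 ps, so for
+		 * the 20 ns window tmp1 = 20000/1250 + 1 = 17 with no
+		 * remainder, giving TDFI_CALVL_CAPTURE_F1 = 9 + 5 = 14 and
+		 * TDFI_CALVL_CC_F1 = 14 + 18 = 32.
+		 */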
+
+		/* CTL_314 TDFI_WRCSLAT_F1:RW:24:8 */
+		tmp1 = get_pi_wrlat_adj(pdram_timing, timing_config);
+		if (timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) {
+			if (tmp1 == 0)
+				tmp = 0;
+			else if (tmp1 < 5)
+				tmp = tmp1 - 1;
+			else
+				tmp = tmp1 - 5;
+		} else {
+			tmp = tmp1 - 2;
+		}
+
+		mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 24, tmp << 24);
+
+		/* CTL_314 TDFI_RDCSLAT_F1:RW:16:8 */
+		if ((timing_config->freq <= TDFI_LAT_THRESHOLD_FREQ) &&
+		    (pdram_timing->cl >= 5))
+			tmp = pdram_timing->cl - 5;
+		else
+			tmp = pdram_timing->cl - 2;
+		mmio_clrsetbits_32(CTL_REG(i, 314), 0xff << 16, tmp << 16);
+	}
+}
+
+static void gen_rk3399_enable_training(uint32_t ch_cnt, uint32_t nmhz)
+{
+	uint32_t i, tmp;
+
+	if (nmhz <= PHY_DLL_BYPASS_FREQ)
+		tmp = 0;
+	else
+		tmp = 1;
+
+	for (i = 0; i < ch_cnt; i++) {
+		mmio_clrsetbits_32(CTL_REG(i, 305), 1 << 16, tmp << 16);
+		mmio_clrsetbits_32(CTL_REG(i, 71), 1, tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 70), 1 << 8, 1 << 8);
+	}
+}
+
+static void gen_rk3399_disable_training(uint32_t ch_cnt)
+{
+	uint32_t i;
+
+	for (i = 0; i < ch_cnt; i++) {
+		mmio_clrbits_32(CTL_REG(i, 305), 1 << 16);
+		mmio_clrbits_32(CTL_REG(i, 71), 1);
+		mmio_clrbits_32(CTL_REG(i, 70), 1 << 8);
+	}
+}
+
+static void gen_rk3399_ctl_params(struct timing_related_config *timing_config,
+				  struct dram_timing_t *pdram_timing,
+				  uint32_t fn)
+{
+	if (fn == 0)
+		gen_rk3399_ctl_params_f0(timing_config, pdram_timing);
+	else
+		gen_rk3399_ctl_params_f1(timing_config, pdram_timing);
+}
+
+static void gen_rk3399_pi_params_f0(struct timing_related_config *timing_config,
+				    struct dram_timing_t *pdram_timing)
+{
+	uint32_t tmp, tmp1, tmp2;
+	uint32_t i;
+
+	for (i = 0; i < timing_config->ch_cnt; i++) {
+		/* PI_02 PI_TDFI_PHYMSTR_MAX_F0:RW:0:32 */
+		tmp = 4 * pdram_timing->trefi;
+		mmio_write_32(PI_REG(i, 2), tmp);
+		/* PI_03 PI_TDFI_PHYMSTR_RESP_F0:RW:0:16 */
+		tmp = 2 * pdram_timing->trefi;
+		mmio_clrsetbits_32(PI_REG(i, 3), 0xffff, tmp);
+		/* PI_07 PI_TDFI_PHYUPD_RESP_F0:RW:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 7), 0xffff << 16, tmp << 16);
+
+		/* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F0:RW:0:8 */
+		if (timing_config->dram_type == LPDDR4)
+			tmp = 2;
+		else
+			tmp = 0;
+		tmp = (pdram_timing->bl / 2) + 4 +
+		      (get_pi_rdlat_adj(pdram_timing) - 2) + tmp +
+		      get_pi_tdfi_phy_rdlat(pdram_timing, timing_config);
+		mmio_clrsetbits_32(PI_REG(i, 42), 0xff, tmp);
+		/* PI_43 PI_WRLAT_F0:RW:0:5 */
+		if (timing_config->dram_type == LPDDR3) {
+			tmp = get_pi_wrlat(pdram_timing, timing_config);
+			mmio_clrsetbits_32(PI_REG(i, 43), 0x1f, tmp);
+		}
+		/* PI_43 PI_ADDITIVE_LAT_F0:RW:8:6 */
+		mmio_clrsetbits_32(PI_REG(i, 43), 0x3f << 8,
+				   PI_ADD_LATENCY << 8);
+
+		/* PI_43 PI_CASLAT_LIN_F0:RW:16:7 */
+		mmio_clrsetbits_32(PI_REG(i, 43), 0x7f << 16,
+				   (pdram_timing->cl * 2) << 16);
+		/* PI_46 PI_TREF_F0:RW:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 46), 0xffff << 16,
+				   pdram_timing->trefi << 16);
+		/* PI_46 PI_TRFC_F0:RW:0:10 */
+		mmio_clrsetbits_32(PI_REG(i, 46), 0x3ff, pdram_timing->trfc);
+		/* PI_66 PI_TODTL_2CMD_F0:RW:24:8 */
+		if (timing_config->dram_type == LPDDR3) {
+			tmp = get_pi_todtoff_max(pdram_timing, timing_config);
+			mmio_clrsetbits_32(PI_REG(i, 66), 0xff << 24,
+					   tmp << 24);
+		}
+		/* PI_72 PI_WR_TO_ODTH_F0:RW:16:6 */
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			tmp1 = get_pi_wrlat(pdram_timing, timing_config);
+			tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+			if (tmp1 > tmp2)
+				tmp = tmp1 - tmp2;
+			else
+				tmp = 0;
+		} else if (timing_config->dram_type == DDR3) {
+			tmp = 0;
+		}
+		mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 16, tmp << 16);
+		/* PI_73 PI_RD_TO_ODTH_F0:RW:8:6 */
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			/* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+			tmp1 = pdram_timing->cl;
+			tmp1 += get_pi_todtoff_min(pdram_timing, timing_config);
+			tmp1--;
+			/* todtoff_max */
+			tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+			if (tmp1 > tmp2)
+				tmp = tmp1 - tmp2;
+			else
+				tmp = 0;
+		} else if (timing_config->dram_type == DDR3) {
+			tmp = pdram_timing->cl - pdram_timing->cwl;
+		}
+		mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 8, tmp << 8);
+		/* PI_89 PI_RDLAT_ADJ_F0:RW:16:8 */
+		tmp = get_pi_rdlat_adj(pdram_timing);
+		mmio_clrsetbits_32(PI_REG(i, 89), 0xff << 16, tmp << 16);
+		/* PI_90 PI_WRLAT_ADJ_F0:RW:16:8 */
+		tmp = get_pi_wrlat_adj(pdram_timing, timing_config);
+		mmio_clrsetbits_32(PI_REG(i, 90), 0xff << 16, tmp << 16);
+		/* PI_91 PI_TDFI_WRCSLAT_F0:RW:16:8 */
+		tmp1 = tmp;
+		if (tmp1 == 0)
+			tmp = 0;
+		else if (tmp1 < 5)
+			tmp = tmp1 - 1;
+		else
+			tmp = tmp1 - 5;
+		mmio_clrsetbits_32(PI_REG(i, 91), 0xff << 16, tmp << 16);
+		/* PI_95 PI_TDFI_CALVL_CAPTURE_F0:RW:16:10 */
+		tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+		if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+		mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff << 16, tmp << 16);
+		/* PI_95 PI_TDFI_CALVL_CC_F0:RW:0:10 */
+		mmio_clrsetbits_32(PI_REG(i, 95), 0x3ff, tmp + 18);
+		/* PI_102 PI_TMRZ_F0:RW:8:5 */
+		mmio_clrsetbits_32(PI_REG(i, 102), 0x1f << 8,
+				   pdram_timing->tmrz << 8);
+		/* PI_111 PI_TDFI_CALVL_STROBE_F0:RW:8:4 */
+		tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz);
+		if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		/* pi_tdfi_calvl_strobe=tds_train+5 */
+		tmp = tmp1 + 5;
+		mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 8, tmp << 8);
+		/* PI_116 PI_TCKEHDQS_F0:RW:16:6 */
+		tmp = 10000 / (1000000 / pdram_timing->mhz);
+		if ((10000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp++;
+		if (pdram_timing->mhz <= 100)
+			tmp = tmp + 1;
+		else
+			tmp = tmp + 8;
+		mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 16, tmp << 16);
+		/* PI_125 PI_MR1_DATA_F0_0:RW+:8:16 */
+		mmio_clrsetbits_32(PI_REG(i, 125), 0xffff << 8,
+				   pdram_timing->mr[1] << 8);
+		/* PI_133 PI_MR1_DATA_F0_1:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 133), 0xffff, pdram_timing->mr[1]);
+		/* PI_140 PI_MR1_DATA_F0_2:RW+:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 140), 0xffff << 16,
+				   pdram_timing->mr[1] << 16);
+		/* PI_148 PI_MR1_DATA_F0_3:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 148), 0xffff, pdram_timing->mr[1]);
+		/* PI_126 PI_MR2_DATA_F0_0:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 126), 0xffff, pdram_timing->mr[2]);
+		/* PI_133 PI_MR2_DATA_F0_1:RW+:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 133), 0xffff << 16,
+				   pdram_timing->mr[2] << 16);
+		/* PI_141 PI_MR2_DATA_F0_2:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 141), 0xffff, pdram_timing->mr[2]);
+		/* PI_148 PI_MR2_DATA_F0_3:RW+:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 148), 0xffff << 16,
+				   pdram_timing->mr[2] << 16);
+		/* PI_156 PI_TFC_F0:RW:0:10 */
+		mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff,
+				   pdram_timing->tfc_long);
+		/* PI_158 PI_TWR_F0:RW:24:6 */
+		mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 24,
+				   pdram_timing->twr << 24);
+		/* PI_158 PI_TWTR_F0:RW:16:6 */
+		mmio_clrsetbits_32(PI_REG(i, 158), 0x3f << 16,
+				   pdram_timing->twtr << 16);
+		/* PI_158 PI_TRCD_F0:RW:8:8 */
+		mmio_clrsetbits_32(PI_REG(i, 158), 0xff << 8,
+				   pdram_timing->trcd << 8);
+		/* PI_158 PI_TRP_F0:RW:0:8 */
+		mmio_clrsetbits_32(PI_REG(i, 158), 0xff, pdram_timing->trp);
+		/* PI_157 PI_TRTP_F0:RW:24:8 */
+		mmio_clrsetbits_32(PI_REG(i, 157), 0xff << 24,
+				   pdram_timing->trtp << 24);
+		/* PI_159 PI_TRAS_MIN_F0:RW:24:8 */
+		mmio_clrsetbits_32(PI_REG(i, 159), 0xff << 24,
+				   pdram_timing->tras_min << 24);
+		/* PI_159 PI_TRAS_MAX_F0:RW:0:17 */
+		tmp = pdram_timing->tras_max * 99 / 100;
+		mmio_clrsetbits_32(PI_REG(i, 159), 0x1ffff, tmp);
+		/* PI_160 PI_TMRD_F0:RW:16:6 */
+		mmio_clrsetbits_32(PI_REG(i, 160), 0x3f << 16,
+				   pdram_timing->tmrd << 16);
+		/* PI_160 PI_TDQSCK_MAX_F0:RW:0:4 */
+		mmio_clrsetbits_32(PI_REG(i, 160), 0xf,
+				   pdram_timing->tdqsck_max);
+		/* PI_187 PI_TDFI_CTRLUPD_MAX_F0:RW:8:16 */
+		mmio_clrsetbits_32(PI_REG(i, 187), 0xffff << 8,
+				   (2 * pdram_timing->trefi) << 8);
+		/* PI_188 PI_TDFI_CTRLUPD_INTERVAL_F0:RW:0:32 */
+		mmio_clrsetbits_32(PI_REG(i, 188), 0xffffffff,
+				   20 * pdram_timing->trefi);
+	}
+}
+
+static void gen_rk3399_pi_params_f1(struct timing_related_config *timing_config,
+				    struct dram_timing_t *pdram_timing)
+{
+	uint32_t tmp, tmp1, tmp2;
+	uint32_t i;
+
+	for (i = 0; i < timing_config->ch_cnt; i++) {
+		/* PI_04 PI_TDFI_PHYMSTR_MAX_F1:RW:0:32 */
+		tmp = 4 * pdram_timing->trefi;
+		mmio_write_32(PI_REG(i, 4), tmp);
+		/* PI_05 PI_TDFI_PHYMSTR_RESP_F1:RW:0:16 */
+		tmp = 2 * pdram_timing->trefi;
+		mmio_clrsetbits_32(PI_REG(i, 5), 0xffff, tmp);
+		/* PI_12 PI_TDFI_PHYUPD_RESP_F1:RW:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 12), 0xffff, tmp);
+
+		/* PI_42 PI_TDELAY_RDWR_2_BUS_IDLE_F1:RW:8:8 */
+		if (timing_config->dram_type == LPDDR4)
+			tmp = 2;
+		else
+			tmp = 0;
+		tmp = (pdram_timing->bl / 2) + 4 +
+		      (get_pi_rdlat_adj(pdram_timing) - 2) + tmp +
+		      get_pi_tdfi_phy_rdlat(pdram_timing, timing_config);
+		mmio_clrsetbits_32(PI_REG(i, 42), 0xff << 8, tmp << 8);
+		/* PI_43 PI_WRLAT_F1:RW:24:5 */
+		if (timing_config->dram_type == LPDDR3) {
+			tmp = get_pi_wrlat(pdram_timing, timing_config);
+			mmio_clrsetbits_32(PI_REG(i, 43), 0x1f << 24,
+					   tmp << 24);
+		}
+		/* PI_44 PI_ADDITIVE_LAT_F1:RW:0:6 */
+		mmio_clrsetbits_32(PI_REG(i, 44), 0x3f, PI_ADD_LATENCY);
+		/* PI_44 PI_CASLAT_LIN_F1:RW:8:7:=0x18 */
+		mmio_clrsetbits_32(PI_REG(i, 44), 0x7f << 8,
+				   (pdram_timing->cl * 2) << 8);
+		/* PI_47 PI_TREF_F1:RW:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 47), 0xffff << 16,
+				   pdram_timing->trefi << 16);
+		/* PI_47 PI_TRFC_F1:RW:0:10 */
+		mmio_clrsetbits_32(PI_REG(i, 47), 0x3ff, pdram_timing->trfc);
+		/* PI_67 PI_TODTL_2CMD_F1:RW:8:8 */
+		if (timing_config->dram_type == LPDDR3) {
+			tmp = get_pi_todtoff_max(pdram_timing, timing_config);
+			mmio_clrsetbits_32(PI_REG(i, 67), 0xff << 8, tmp << 8);
+		}
+		/* PI_72 PI_WR_TO_ODTH_F1:RW:24:6 */
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			tmp1 = get_pi_wrlat(pdram_timing, timing_config);
+			tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+			if (tmp1 > tmp2)
+				tmp = tmp1 - tmp2;
+			else
+				tmp = 0;
+		} else if (timing_config->dram_type == DDR3) {
+			tmp = 0;
+		}
+		mmio_clrsetbits_32(PI_REG(i, 72), 0x3f << 24, tmp << 24);
+		/* PI_73 PI_RD_TO_ODTH_F1:RW:16:6 */
+		if ((timing_config->dram_type == LPDDR3) ||
+		    (timing_config->dram_type == LPDDR4)) {
+			/* min_rl_preamble = cl + TDQSCK_MIN - 1 */
+			tmp1 = pdram_timing->cl +
+			       get_pi_todtoff_min(pdram_timing, timing_config);
+			tmp1--;
+			/* todtoff_max */
+			tmp2 = get_pi_todtoff_max(pdram_timing, timing_config);
+			if (tmp1 > tmp2)
+				tmp = tmp1 - tmp2;
+			else
+				tmp = 0;
+		} else if (timing_config->dram_type == DDR3)
+			tmp = pdram_timing->cl - pdram_timing->cwl;
+
+		mmio_clrsetbits_32(PI_REG(i, 73), 0x3f << 16, tmp << 16);
+		/* PI_89 PI_RDLAT_ADJ_F1:RW:24:8 */
+		tmp = get_pi_rdlat_adj(pdram_timing);
+		mmio_clrsetbits_32(PI_REG(i, 89), 0xff << 24, tmp << 24);
+		/* PI_90 PI_WRLAT_ADJ_F1:RW:24:8 */
+		tmp = get_pi_wrlat_adj(pdram_timing, timing_config);
+		mmio_clrsetbits_32(PI_REG(i, 90), 0xff << 24, tmp << 24);
+		/* PI_91 PI_TDFI_WRCSLAT_F1:RW:24:8 */
+		tmp1 = tmp;
+		if (tmp1 == 0)
+			tmp = 0;
+		else if (tmp1 < 5)
+			tmp = tmp1 - 1;
+		else
+			tmp = tmp1 - 5;
+		mmio_clrsetbits_32(PI_REG(i, 91), 0xff << 24, tmp << 24);
+		/* PI_96 PI_TDFI_CALVL_CAPTURE_F1:RW:16:10 */
+		/* tadr=20ns */
+		tmp1 = 20000 / (1000000 / pdram_timing->mhz) + 1;
+		if ((20000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		tmp = (tmp1 >> 1) + (tmp1 % 2) + 5;
+		mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff << 16, tmp << 16);
+		/* PI_96 PI_TDFI_CALVL_CC_F1:RW:0:10 */
+		tmp = tmp + 18;
+		mmio_clrsetbits_32(PI_REG(i, 96), 0x3ff, tmp);
+		/* PI_103 PI_TMRZ_F1:RW:0:5 */
+		mmio_clrsetbits_32(PI_REG(i, 103), 0x1f, pdram_timing->tmrz);
+		/* PI_111 PI_TDFI_CALVL_STROBE_F1:RW:16:4 */
+		/* tds_train=ceil(2/ns) */
+		tmp1 = 2 * 1000 / (1000000 / pdram_timing->mhz);
+		if ((2 * 1000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp1++;
+		/* pi_tdfi_calvl_strobe=tds_train+5 */
+		tmp = tmp1 + 5;
+		mmio_clrsetbits_32(PI_REG(i, 111), 0xf << 16,
+				   tmp << 16);
+		/* PI_116 PI_TCKEHDQS_F1:RW:24:6 */
+		tmp = 10000 / (1000000 / pdram_timing->mhz);
+		if ((10000 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp++;
+		if (pdram_timing->mhz <= 100)
+			tmp = tmp + 1;
+		else
+			tmp = tmp + 8;
+		mmio_clrsetbits_32(PI_REG(i, 116), 0x3f << 24,
+				   tmp << 24);
+		/* PI_128 PI_MR1_DATA_F1_0:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 128), 0xffff, pdram_timing->mr[1]);
+		/* PI_135 PI_MR1_DATA_F1_1:RW+:8:16 */
+		mmio_clrsetbits_32(PI_REG(i, 135), 0xffff << 8,
+				   pdram_timing->mr[1] << 8);
+		/* PI_143 PI_MR1_DATA_F1_2:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 143), 0xffff, pdram_timing->mr[1]);
+		/* PI_150 PI_MR1_DATA_F1_3:RW+:8:16 */
+		mmio_clrsetbits_32(PI_REG(i, 150), 0xffff << 8,
+				   pdram_timing->mr[1] << 8);
+		/* PI_128 PI_MR2_DATA_F1_0:RW+:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 128), 0xffff << 16,
+				   pdram_timing->mr[2] << 16);
+		/* PI_136 PI_MR2_DATA_F1_1:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 136), 0xffff, pdram_timing->mr[2]);
+		/* PI_143 PI_MR2_DATA_F1_2:RW+:16:16 */
+		mmio_clrsetbits_32(PI_REG(i, 143), 0xffff << 16,
+				   pdram_timing->mr[2] << 16);
+		/* PI_151 PI_MR2_DATA_F1_3:RW+:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 151), 0xffff, pdram_timing->mr[2]);
+		/* PI_156 PI_TFC_F1:RW:16:10 */
+		mmio_clrsetbits_32(PI_REG(i, 156), 0x3ff << 16,
+				   pdram_timing->tfc_long << 16);
+		/* PI_162 PI_TWR_F1:RW:8:6 */
+		mmio_clrsetbits_32(PI_REG(i, 162), 0x3f << 8,
+				   pdram_timing->twr << 8);
+		/* PI_162 PI_TWTR_F1:RW:0:6 */
+		mmio_clrsetbits_32(PI_REG(i, 162), 0x3f, pdram_timing->twtr);
+		/* PI_161 PI_TRCD_F1:RW:24:8 */
+		mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 24,
+				   pdram_timing->trcd << 24);
+		/* PI_161 PI_TRP_F1:RW:16:8 */
+		mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 16,
+				   pdram_timing->trp << 16);
+		/* PI_161 PI_TRTP_F1:RW:8:8 */
+		mmio_clrsetbits_32(PI_REG(i, 161), 0xff << 8,
+				   pdram_timing->trtp << 8);
+		/* PI_163 PI_TRAS_MIN_F1:RW:24:8 */
+		mmio_clrsetbits_32(PI_REG(i, 163), 0xff << 24,
+				   pdram_timing->tras_min << 24);
+		/* PI_163 PI_TRAS_MAX_F1:RW:0:17 */
+		mmio_clrsetbits_32(PI_REG(i, 163), 0x1ffff,
+				   pdram_timing->tras_max * 99 / 100);
+		/* PI_164 PI_TMRD_F1:RW:16:6 */
+		mmio_clrsetbits_32(PI_REG(i, 164), 0x3f << 16,
+				   pdram_timing->tmrd << 16);
+		/* PI_164 PI_TDQSCK_MAX_F1:RW:0:4 */
+		mmio_clrsetbits_32(PI_REG(i, 164), 0xf,
+				   pdram_timing->tdqsck_max);
+		/* PI_189 PI_TDFI_CTRLUPD_MAX_F1:RW:0:16 */
+		mmio_clrsetbits_32(PI_REG(i, 189), 0xffff,
+				   2 * pdram_timing->trefi);
+		/* PI_190 PI_TDFI_CTRLUPD_INTERVAL_F1:RW:0:32 */
+		mmio_clrsetbits_32(PI_REG(i, 190), 0xffffffff,
+				   20 * pdram_timing->trefi);
+	}
+}
+
+static void gen_rk3399_pi_params(struct timing_related_config *timing_config,
+				 struct dram_timing_t *pdram_timing,
+				 uint32_t fn)
+{
+	if (fn == 0)
+		gen_rk3399_pi_params_f0(timing_config, pdram_timing);
+	else
+		gen_rk3399_pi_params_f1(timing_config, pdram_timing);
+}
+
+static void gen_rk3399_set_odt(uint32_t odt_en)
+{
+	uint32_t drv_odt_val;
+	uint32_t i;
+
+	for (i = 0; i < rk3399_dram_status.timing_config.ch_cnt; i++) {
+		drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 16;
+		mmio_clrsetbits_32(PHY_REG(i, 5), 0x7 << 16, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 133), 0x7 << 16, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 261), 0x7 << 16, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 389), 0x7 << 16, drv_odt_val);
+		drv_odt_val = (odt_en | (0 << 1) | (0 << 2)) << 24;
+		mmio_clrsetbits_32(PHY_REG(i, 6), 0x7 << 24, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 134), 0x7 << 24, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 262), 0x7 << 24, drv_odt_val);
+		mmio_clrsetbits_32(PHY_REG(i, 390), 0x7 << 24, drv_odt_val);
+	}
+}
+
+static void gen_rk3399_phy_dll_bypass(uint32_t mhz, uint32_t ch,
+		uint32_t index, uint32_t dram_type)
+{
+	uint32_t sw_master_mode = 0;
+	uint32_t rddqs_gate_delay, rddqs_latency, total_delay;
+	uint32_t i;
+
+	if (dram_type == DDR3)
+		total_delay = PI_PAD_DELAY_PS_VALUE;
+	else if (dram_type == LPDDR3)
+		total_delay = PI_PAD_DELAY_PS_VALUE + 2500;
+	else
+		total_delay = PI_PAD_DELAY_PS_VALUE + 1500;
+	/* total_delay + 0.55tck */
+	total_delay +=  (55 * 10000)/mhz;
+	rddqs_latency = total_delay * mhz / 1000000;
+	total_delay -= rddqs_latency * 1000000 / mhz;
+	rddqs_gate_delay = total_delay * 0x200 * mhz / 1000000;
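+	/*
+	 * Illustrative arithmetic for the split above (the pad delay value
+	 * is an assumption, not taken from this file): with a pad delay of
+	 * 1000 ps, LPDDR3 at 400 MHz gives total_delay = 3500 + 1375 =
+	 * 4875 ps, rddqs_latency = 1 full cycle (2500 ps), and the remaining
+	 * 2375 ps becomes rddqs_gate_delay = 2375 * 512 * 400 / 1000000 =
+	 * 486, i.e. about 0.95 tCK expressed in 1/512-cycle steps.
+	 */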
+	if (mhz <= PHY_DLL_BYPASS_FREQ) {
+		sw_master_mode = 0xc;
+		mmio_setbits_32(PHY_REG(ch, 514), 1);
+		mmio_setbits_32(PHY_REG(ch, 642), 1);
+		mmio_setbits_32(PHY_REG(ch, 770), 1);
+
+		/* setting bypass mode slave delay */
+		for (i = 0; i < 4; i++) {
+			/* wr dq delay = -180deg + (0x60 / 4) * 20ps */
+			mmio_clrsetbits_32(PHY_REG(ch, 1 + 128 * i), 0x7ff << 8,
+					   0x4a0 << 8);
+			/* rd dqs/dq delay = (0x60 / 4) * 20ps */
+			mmio_clrsetbits_32(PHY_REG(ch, 11 + 128 * i), 0x3ff,
+					   0xa0);
+			/* rd rddqs_gate delay */
+			mmio_clrsetbits_32(PHY_REG(ch, 2 + 128 * i), 0x3ff,
+					   rddqs_gate_delay);
+			mmio_clrsetbits_32(PHY_REG(ch, 78 + 128 * i), 0xf,
+					   rddqs_latency);
+		}
+		for (i = 0; i < 3; i++)
+			/* adr delay */
+			mmio_clrsetbits_32(PHY_REG(ch, 513 + 128 * i),
+					   0x7ff << 16, 0x80 << 16);
+
+		if ((mmio_read_32(PHY_REG(ch, 86)) & 0xc00) == 0) {
+			/*
+			 * The previous state was normal mode, so save the
+			 * wrdqs slave delay before entering bypass mode.
+			 */
+			for (i = 0; i < 4; i++) {
+				/* save and clear wr dqs slave delay */
+				wrdqs_delay_val[ch][index][i] = 0x3ff &
+					(mmio_read_32(PHY_REG(ch, 63 + i * 128))
+					>> 16);
+				mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128),
+						   0x03ff << 16, 0 << 16);
+				/*
+				 * In normal mode the command may be delayed
+				 * by one cycle due to write leveling, so in
+				 * bypass mode delay dqs by one cycle as well.
+				 */
+				mmio_clrsetbits_32(PHY_REG(ch, 78 + i * 128),
+						   0x07 << 8, 0x1 << 8);
+			}
+		}
+	} else if (mmio_read_32(PHY_REG(ch, 86)) & 0xc00) {
+		/* previous state was bypass mode: restore the wrlvl delays */
+		for (i = 0; i < 4; i++) {
+			mmio_clrsetbits_32(PHY_REG(ch, 63 + i * 128),
+					   0x03ff << 16,
+					   (wrdqs_delay_val[ch][index][i] &
+					    0x3ff) << 16);
+			/* resume phy_write_path_lat_add */
+			mmio_clrbits_32(PHY_REG(ch, 78 + i * 128), 0x07 << 8);
+		}
+	}
+
+	/* phy_sw_master_mode_X PHY_86/214/342/470 4bits offset_8 */
+	mmio_clrsetbits_32(PHY_REG(ch, 86), 0xf << 8, sw_master_mode << 8);
+	mmio_clrsetbits_32(PHY_REG(ch, 214), 0xf << 8, sw_master_mode << 8);
+	mmio_clrsetbits_32(PHY_REG(ch, 342), 0xf << 8, sw_master_mode << 8);
+	mmio_clrsetbits_32(PHY_REG(ch, 470), 0xf << 8, sw_master_mode << 8);
+
+	/* phy_adrctl_sw_master_mode PHY_547/675/803 4bits offset_16 */
+	mmio_clrsetbits_32(PHY_REG(ch, 547), 0xf << 16, sw_master_mode << 16);
+	mmio_clrsetbits_32(PHY_REG(ch, 675), 0xf << 16, sw_master_mode << 16);
+	mmio_clrsetbits_32(PHY_REG(ch, 803), 0xf << 16, sw_master_mode << 16);
+}
+
+static void gen_rk3399_phy_params(struct timing_related_config *timing_config,
+				  struct drv_odt_lp_config *drv_config,
+				  struct dram_timing_t *pdram_timing,
+				  uint32_t fn)
+{
+	uint32_t tmp, i, div, j;
+	uint32_t mem_delay_ps, pad_delay_ps, total_delay_ps, delay_frac_ps;
+	uint32_t trpre_min_ps, gate_delay_ps, gate_delay_frac_ps;
+	uint32_t ie_enable, tsel_enable, cas_lat, rddata_en_ie_dly, tsel_adder;
+	uint32_t extra_adder, delta, hs_offset;
+
+	for (i = 0; i < timing_config->ch_cnt; i++) {
+
+		pad_delay_ps = PI_PAD_DELAY_PS_VALUE;
+		ie_enable = PI_IE_ENABLE_VALUE;
+		tsel_enable = PI_TSEL_ENABLE_VALUE;
+
+		mmio_clrsetbits_32(PHY_REG(i, 896), (0x3 << 8) | 1, fn << 8);
+
+		/* PHY_LOW_FREQ_SEL */
+		/* DENALI_PHY_913 1bit offset_0 */
+		if (timing_config->freq > 400)
+			mmio_clrbits_32(PHY_REG(i, 913), 1);
+		else
+			mmio_setbits_32(PHY_REG(i, 913), 1);
+
+		/* PHY_RPTR_UPDATE_x */
+		/* DENALI_PHY_87/215/343/471 4bit offset_16 */
+		tmp = 2500 / (1000000 / pdram_timing->mhz) + 3;
+		if ((2500 % (1000000 / pdram_timing->mhz)) != 0)
+			tmp++;
+		mmio_clrsetbits_32(PHY_REG(i, 87), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 215), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 343), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 471), 0xf << 16, tmp << 16);
+
+		/* PHY_PLL_CTRL */
+		/* DENALI_PHY_911 13bits offset_0 */
+		/* PHY_LP4_BOOT_PLL_CTRL */
+		/* DENALI_PHY_919 13bits offset_0 */
+		tmp = (1 << 12) | (2 << 7) | (1 << 1);
+		mmio_clrsetbits_32(PHY_REG(i, 911), 0x1fff, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff, tmp);
+
+		/* PHY_PLL_CTRL_CA */
+		/* DENALI_PHY_911 13bits offset_16 */
+		/* PHY_LP4_BOOT_PLL_CTRL_CA */
+		/* DENALI_PHY_919 13bits offset_16 */
+		tmp = (2 << 7) | (1 << 5) | (1 << 1);
+		mmio_clrsetbits_32(PHY_REG(i, 911), 0x1fff << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 919), 0x1fff << 16, tmp << 16);
+
+		/* PHY_TCKSRE_WAIT */
+		/* DENALI_PHY_922 4bits offset_24 */
+		if (pdram_timing->mhz <= 400)
+			tmp = 1;
+		else if (pdram_timing->mhz <= 800)
+			tmp = 3;
+		else if (pdram_timing->mhz <= 1000)
+			tmp = 4;
+		else
+			tmp = 5;
+		mmio_clrsetbits_32(PHY_REG(i, 922), 0xf << 24, tmp << 24);
+		/* PHY_CAL_CLK_SELECT_0:RW8:3 */
+		div = pdram_timing->mhz / (2 * 20);
+		for (j = 2, tmp = 1; j <= 128; j <<= 1, tmp++) {
+			if (div < j)
+				break;
+		}
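+		/*
+		 * E.g. at 800 MHz: div = 800 / 40 = 20 and the loop above
+		 * exits at j = 32 with tmp = 5, i.e. tmp indexes the smallest
+		 * power of two greater than div.
+		 */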
+		mmio_clrsetbits_32(PHY_REG(i, 947), 0x7 << 8, tmp << 8);
+
+		if (timing_config->dram_type == DDR3) {
+			mem_delay_ps = 0;
+			trpre_min_ps = 1000;
+		} else if (timing_config->dram_type == LPDDR4) {
+			mem_delay_ps = 1500;
+			trpre_min_ps = 900;
+		} else if (timing_config->dram_type == LPDDR3) {
+			mem_delay_ps = 2500;
+			trpre_min_ps = 900;
+		} else {
+			ERROR("gen_rk3399_phy_params:dramtype unsupport\n");
+			return;
+		}
+		total_delay_ps = mem_delay_ps + pad_delay_ps;
+		delay_frac_ps = 1000 * total_delay_ps /
+				(1000000 / pdram_timing->mhz);
+		gate_delay_ps = delay_frac_ps + 1000 - (trpre_min_ps / 2);
+		gate_delay_frac_ps = gate_delay_ps % 1000;
+		tmp = gate_delay_frac_ps * 0x200 / 1000;
+		/* PHY_RDDQS_GATE_SLAVE_DELAY */
+		/* DENALI_PHY_77/205/333/461 10bits offset_16 */
+		mmio_clrsetbits_32(PHY_REG(i, 77), 0x2ff << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 205), 0x2ff << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 333), 0x2ff << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 461), 0x2ff << 16, tmp << 16);
+
+		tmp = gate_delay_ps / 1000;
+		/* PHY_LP4_BOOT_RDDQS_LATENCY_ADJUST */
+		/* DENALI_PHY_10/138/266/394 4bit offset_0 */
+		mmio_clrsetbits_32(PHY_REG(i, 10), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 138), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 266), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 394), 0xf, tmp);
+		/* PHY_GTLVL_LAT_ADJ_START */
+		/* DENALI_PHY_80/208/336/464 4bits offset_16 */
+		tmp = rddqs_delay_ps / (1000000 / pdram_timing->mhz) + 2;
+		mmio_clrsetbits_32(PHY_REG(i, 80), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 208), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 336), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 464), 0xf << 16, tmp << 16);
+
+		cas_lat = pdram_timing->cl + PI_ADD_LATENCY;
+		rddata_en_ie_dly = ie_enable / (1000000 / pdram_timing->mhz);
+		if ((ie_enable % (1000000 / pdram_timing->mhz)) != 0)
+			rddata_en_ie_dly++;
+		rddata_en_ie_dly = rddata_en_ie_dly - 1;
+		tsel_adder = tsel_enable / (1000000 / pdram_timing->mhz);
+		if ((tsel_enable % (1000000 / pdram_timing->mhz)) != 0)
+			tsel_adder++;
+		if (rddata_en_ie_dly > tsel_adder)
+			extra_adder = rddata_en_ie_dly - tsel_adder;
+		else
+			extra_adder = 0;
+		delta = cas_lat - rddata_en_ie_dly;
+		if (PI_REGS_DIMM_SUPPORT && PI_DOUBLEFREEK)
+			hs_offset = 2;
+		else
+			hs_offset = 1;
+		if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset))
+			tmp = 0;
+		else if ((delta == 2) || (delta == 1))
+			tmp = rddata_en_ie_dly - 0 - extra_adder;
+		else
+			tmp = extra_adder;
+		/* PHY_LP4_BOOT_RDDATA_EN_TSEL_DLY */
+		/* DENALI_PHY_9/137/265/393 4bit offset_16 */
+		mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 16, tmp << 16);
+		mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 16, tmp << 16);
+		/* PHY_RDDATA_EN_TSEL_DLY */
+		/* DENALI_PHY_86/214/342/470 4bit offset_0 */
+		mmio_clrsetbits_32(PHY_REG(i, 86), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 214), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 342), 0xf, tmp);
+		mmio_clrsetbits_32(PHY_REG(i, 470), 0xf, tmp);
+
+		if (tsel_adder > rddata_en_ie_dly)
+			extra_adder = tsel_adder - rddata_en_ie_dly;
+		else
+			extra_adder = 0;
+		if (rddata_en_ie_dly > (cas_lat - 1 - hs_offset))
+			tmp = tsel_adder;
+		else
+			tmp = rddata_en_ie_dly - 0 + extra_adder;
+		/* PHY_LP4_BOOT_RDDATA_EN_DLY */
+		/* DENALI_PHY_9/137/265/393 4bit offset_8 */
+		mmio_clrsetbits_32(PHY_REG(i, 9), 0xf << 8, tmp << 8);
+		mmio_clrsetbits_32(PHY_REG(i, 137), 0xf << 8, tmp << 8);
+		mmio_clrsetbits_32(PHY_REG(i, 265), 0xf << 8, tmp << 8);
+		mmio_clrsetbits_32(PHY_REG(i, 393), 0xf << 8, tmp << 8);
+		/* PHY_RDDATA_EN_DLY */
+		/* DENALI_PHY_85/213/341/469 4bit offset_24 */
+		mmio_clrsetbits_32(PHY_REG(i, 85), 0xf << 24, tmp << 24);
+		mmio_clrsetbits_32(PHY_REG(i, 213), 0xf << 24, tmp << 24);
+		mmio_clrsetbits_32(PHY_REG(i, 341), 0xf << 24, tmp << 24);
+		mmio_clrsetbits_32(PHY_REG(i, 469), 0xf << 24, tmp << 24);
+
+		if (pdram_timing->mhz <= ENPER_CS_TRAINING_FREQ) {
+			/*
+			 * Note: Per-CS training is not compatible at speeds
+			 * under 533 MHz. If the PHY is running at a speed
+			 * less than 533 MHz, all phy_per_cs_training_en_X
+			 * parameters must be cleared to 0.
+			 */
+
+			/* DENALI_PHY_84/212/340/468 1bit offset_16 */
+			mmio_clrbits_32(PHY_REG(i, 84), 0x1 << 16);
+			mmio_clrbits_32(PHY_REG(i, 212), 0x1 << 16);
+			mmio_clrbits_32(PHY_REG(i, 340), 0x1 << 16);
+			mmio_clrbits_32(PHY_REG(i, 468), 0x1 << 16);
+		} else {
+			mmio_setbits_32(PHY_REG(i, 84), 0x1 << 16);
+			mmio_setbits_32(PHY_REG(i, 212), 0x1 << 16);
+			mmio_setbits_32(PHY_REG(i, 340), 0x1 << 16);
+			mmio_setbits_32(PHY_REG(i, 468), 0x1 << 16);
+		}
+		gen_rk3399_phy_dll_bypass(pdram_timing->mhz, i, fn,
+					  timing_config->dram_type);
+	}
+}
+
+static int to_get_clk_index(unsigned int mhz)
+{
+	int pll_cnt, i;
+
+	pll_cnt = ARRAY_SIZE(dpll_rates_table);
+
+	/* Assuming dpll_rates_table is sorted in descending order */
+	for (i = 0; i < pll_cnt; i++) {
+		if (mhz >= dpll_rates_table[i].mhz)
+			break;
+	}
+
+	/* if mhz lower than lowest frequency in table, use lowest frequency */
+	if (i == pll_cnt)
+		i = pll_cnt - 1;
+
+	return i;
+}
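+
+/*
+ * Usage sketch (the dpll_rates_table contents below are hypothetical and
+ * only illustrate the lookup): with a descending table of
+ * { 928, 800, 666, 400, 200 } MHz, to_get_clk_index(700) returns the index
+ * of the 666 MHz entry, and any request below 200 MHz is clamped to the
+ * lowest entry.
+ */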
+
+uint32_t ddr_get_rate(void)
+{
+	uint32_t refdiv, postdiv1, fbdiv, postdiv2;
+
+	refdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) & 0x3f;
+	fbdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 0)) & 0xfff;
+	postdiv1 =
+		(mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 8) & 0x7;
+	postdiv2 =
+		(mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1)) >> 12) & 0x7;
+
+	return (24 / refdiv * fbdiv / postdiv1 / postdiv2) * 1000 * 1000;
+}
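+
+/*
+ * Example with hypothetical register values: refdiv = 1, fbdiv = 50,
+ * postdiv1 = 2, postdiv2 = 1 gives 24 * 50 / 2 = 600, i.e. 600000000 Hz.
+ * Note that 24 / refdiv is an integer division, so refdiv values that do
+ * not divide 24 evenly lose precision.
+ */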
+
+/*
+ * return: bit12: channel 1, external self-refresh
+ *         bit11: channel 1, stdby_mode
+ *         bit10: channel 1, self-refresh with controller and memory clock gate
+ *         bit9: channel 1, self-refresh
+ *         bit8: channel 1, power-down
+ *
+ *         bit4: channel 0, external self-refresh
+ *         bit3: channel 0, stdby_mode
+ *         bit2: channel 0, self-refresh with controller and memory clock gate
+ *         bit1: channel 0, self-refresh
+ *         bit0: channel 0, power-down
+ */
+uint32_t exit_low_power(void)
+{
+	uint32_t low_power = 0;
+	uint32_t channel_mask;
+	uint32_t tmp, i;
+
+	channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) &
+			0x3;
+	for (i = 0; i < 2; i++) {
+		if (!(channel_mask & (1 << i)))
+			continue;
+
+		/* exit stdby mode */
+		mmio_write_32(CIC_BASE + CIC_CTRL1,
+			      (1 << (i + 16)) | (0 << i));
+		/* exit external self-refresh */
+		tmp = i ? 12 : 8;
+		low_power |= ((mmio_read_32(PMU_BASE + PMU_SFT_CON) >> tmp) &
+			      0x1) << (4 + 8 * i);
+		mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, 1 << tmp);
+		while (!(mmio_read_32(PMU_BASE + PMU_DDR_SREF_ST) & (1 << i)))
+			;
+		/* exit auto low-power */
+		mmio_clrbits_32(CTL_REG(i, 101), 0x7);
+		/* lp_cmd to exit */
+		if (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) !=
+		    0x40) {
+			while (mmio_read_32(CTL_REG(i, 200)) & 0x1)
+				;
+			mmio_clrsetbits_32(CTL_REG(i, 93), 0xff << 24,
+					   0x69 << 24);
+			while (((mmio_read_32(CTL_REG(i, 100)) >> 24) & 0x7f) !=
+			       0x40)
+				;
+		}
+	}
+	return low_power;
+}
+
+void resume_low_power(uint32_t low_power)
+{
+	uint32_t channel_mask;
+	uint32_t tmp, i, val;
+
+	channel_mask = (mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2)) >> 28) &
+		       0x3;
+	for (i = 0; i < 2; i++) {
+		if (!(channel_mask & (1 << i)))
+			continue;
+
+		/* resume external self-refresh */
+		tmp = i ? 12 : 8;
+		val = (low_power >> (4 + 8 * i)) & 0x1;
+		mmio_setbits_32(PMU_BASE + PMU_SFT_CON, val << tmp);
+		/* resume auto low-power */
+		val = (low_power >> (8 * i)) & 0x7;
+		mmio_setbits_32(CTL_REG(i, 101), val);
+		/* resume stdby mode */
+		val = (low_power >> (3 + 8 * i)) & 0x1;
+		mmio_write_32(CIC_BASE + CIC_CTRL1,
+			      (1 << (i + 16)) | (val << i));
+	}
+}
+
+static void dram_low_power_config(void)
+{
+	uint32_t tmp, i;
+	uint32_t ch_cnt = rk3399_dram_status.timing_config.ch_cnt;
+	uint32_t dram_type = rk3399_dram_status.timing_config.dram_type;
+
+	if (dram_type == DDR3)
+		tmp = (2 << 16) | (0x7 << 8);
+	else
+		tmp = (3 << 16) | (0x7 << 8);
+
+	for (i = 0; i < ch_cnt; i++)
+		mmio_clrsetbits_32(CTL_REG(i, 101), 0x70f0f, tmp);
+
+	/* standby idle */
+	mmio_write_32(CIC_BASE + CIC_CG_WAIT_TH, 0x640008);
+
+	if (ch_cnt == 2) {
+		mmio_write_32(GRF_BASE + GRF_DDRC1_CON1,
+			      (((0x1<<4) | (0x1<<5) | (0x1<<6) |
+				(0x1<<7)) << 16) |
+			      ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7)));
+		mmio_write_32(CIC_BASE + CIC_CTRL1, 0x002a0028);
+	}
+
+	mmio_write_32(GRF_BASE + GRF_DDRC0_CON1,
+		      (((0x1<<4) | (0x1<<5) | (0x1<<6) | (0x1<<7)) << 16) |
+		      ((0x1<<4) | (0x0<<5) | (0x1<<6) | (0x1<<7)));
+	mmio_write_32(CIC_BASE + CIC_CTRL1, 0x00150014);
+}
+
+void dram_dfs_init(void)
+{
+	uint32_t trefi0, trefi1, boot_freq;
+	uint32_t rddqs_adjust, rddqs_slave;
+
+	/* get the sdram config from the OS registers */
+	get_dram_drv_odt_val(sdram_config.dramtype,
+			     &rk3399_dram_status.drv_odt_lp_cfg);
+	sdram_timing_cfg_init(&rk3399_dram_status.timing_config,
+			      &sdram_config,
+			      &rk3399_dram_status.drv_odt_lp_cfg);
+
+	trefi0 = ((mmio_read_32(CTL_REG(0, 48)) >> 16) & 0xffff) + 8;
+	trefi1 = ((mmio_read_32(CTL_REG(0, 49)) >> 16) & 0xffff) + 8;
+
+	rk3399_dram_status.index_freq[0] = trefi0 * 10 / 39;
+	rk3399_dram_status.index_freq[1] = trefi1 * 10 / 39;
+	rk3399_dram_status.current_index =
+		(mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3;
+	if (rk3399_dram_status.timing_config.dram_type == DDR3) {
+		rk3399_dram_status.index_freq[0] /= 2;
+		rk3399_dram_status.index_freq[1] /= 2;
+	}
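+	/*
+	 * The back-calculation above assumes the controller was programmed
+	 * with a 3.9 us average refresh interval for LPDDR (7.8 us for DDR3,
+	 * hence the extra divide by 2), so MHz ~= trefi_cycles * 10 / 39.
+	 * E.g. a stored field of 3112 gives 3120 cycles -> 800 MHz for
+	 * LPDDR, or 400 MHz for DDR3.
+	 */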
+	boot_freq =
+		rk3399_dram_status.index_freq[rk3399_dram_status.current_index];
+	boot_freq = dpll_rates_table[to_get_clk_index(boot_freq)].mhz;
+	rk3399_dram_status.boot_freq = boot_freq;
+	rk3399_dram_status.index_freq[rk3399_dram_status.current_index] =
+		boot_freq;
+	rk3399_dram_status.index_freq[(rk3399_dram_status.current_index + 1) &
+				      0x1] = 0;
+	rk3399_dram_status.low_power_stat = 0;
+	/*
+	 * The following registers decide whether the NOC stalls an access
+	 * request or returns an error when the NOC is being idled. When
+	 * doing DDR frequency scaling in M0 or DCF we need the NOC to stall
+	 * the access request; if it returned an error instead, the CPU could
+	 * take a data abort while the DDR frequency is changing. These
+	 * registers do not need to be set every time, so they are
+	 * initialized once here in dram_dfs_init().
+	 */
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(0), 0xffffffff);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(1), 0xffffffff);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(2), 0xffffffff);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(3), 0xffffffff);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(4), 0x70007000);
+
+	/* Disable multicast */
+	mmio_clrbits_32(PHY_REG(0, 896), 1);
+	mmio_clrbits_32(PHY_REG(1, 896), 1);
+	dram_low_power_config();
+
+	/*
+	 * If the boot frequency is not running in DLL bypass mode,
+	 * rddqs_delay_ps can be derived from the gate training result.
+	 */
+	if (((mmio_read_32(PHY_REG(0, 86)) >> 8) & 0xf) != 0xc) {
+
+		/*
+		 * Select the PHY frequency set matching current_index so
+		 * the gate training result can be read back from the
+		 * registers.
+		 */
+		mmio_clrsetbits_32(PHY_REG(0, 896), 0x3 << 8,
+				   rk3399_dram_status.current_index << 8);
+		rddqs_slave = (mmio_read_32(PHY_REG(0, 77)) >> 16) & 0x3ff;
+		rddqs_slave = rddqs_slave * 1000000 / boot_freq / 512;
+
+		rddqs_adjust = mmio_read_32(PHY_REG(0, 78)) & 0xf;
+		rddqs_adjust = rddqs_adjust * 1000000 / boot_freq;
+		rddqs_delay_ps = rddqs_slave + rddqs_adjust -
+				(1000000 / boot_freq / 2);
+	} else {
+		rddqs_delay_ps = 3500;
+	}
+}
+
+/*
+ * arg0: bit0-7: sr_idle; bit8-15:sr_mc_gate_idle; bit16-31: standby idle
+ * arg1: bit0-11: pd_idle; bit 16-27: srpd_lite_idle
+ * arg2: bit0: if odt en
+ */
+uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2)
+{
+	struct drv_odt_lp_config *lp_cfg = &rk3399_dram_status.drv_odt_lp_cfg;
+	uint32_t *low_power = &rk3399_dram_status.low_power_stat;
+	uint32_t dram_type, ch_count, pd_tmp, sr_tmp, i;
+
+	dram_type = rk3399_dram_status.timing_config.dram_type;
+	ch_count = rk3399_dram_status.timing_config.ch_cnt;
+
+	lp_cfg->sr_idle = arg0 & 0xff;
+	lp_cfg->sr_mc_gate_idle = (arg0 >> 8) & 0xff;
+	lp_cfg->standby_idle = (arg0 >> 16) & 0xffff;
+	lp_cfg->pd_idle = arg1 & 0xfff;
+	lp_cfg->srpd_lite_idle = (arg1 >> 16) & 0xfff;
+
+	rk3399_dram_status.timing_config.odt = arg2 & 0x1;
+
+	exit_low_power();
+
+	*low_power = 0;
+
+	/* pd_idle en */
+	if (lp_cfg->pd_idle)
+		*low_power |= ((1 << 0) | (1 << 8));
+	/* sr_idle en srpd_lite_idle */
+	if (lp_cfg->sr_idle | lp_cfg->srpd_lite_idle)
+		*low_power |= ((1 << 1) | (1 << 9));
+	/* sr_mc_gate_idle */
+	if (lp_cfg->sr_mc_gate_idle)
+		*low_power |= ((1 << 2) | (1 << 10));
+	/* standbyidle */
+	if (lp_cfg->standby_idle) {
+		if (rk3399_dram_status.timing_config.ch_cnt == 2)
+			*low_power |= ((1 << 3) | (1 << 11));
+		else
+			*low_power |= (1 << 3);
+	}
+
+	pd_tmp = arg1;
+	if (dram_type != LPDDR4)
+		pd_tmp = arg1 & 0xfff;
+	sr_tmp = arg0 & 0xffff;
+	for (i = 0; i < ch_count; i++) {
+		mmio_write_32(CTL_REG(i, 102), pd_tmp);
+		mmio_clrsetbits_32(CTL_REG(i, 103), 0xffff, sr_tmp);
+	}
+	mmio_write_32(CIC_BASE + CIC_IDLE_TH, (arg0 >> 16) & 0xffff);
+
+	return 0;
+}
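+
+/*
+ * Call sketch with arbitrary example values, packed per the comment above:
+ * sr_idle = 0x20, sr_mc_gate_idle = 0x40, standby_idle = 0x1000,
+ * pd_idle = 0x40, srpd_lite_idle = 0x80, ODT enabled:
+ *
+ *	dram_set_odt_pd((0x1000 << 16) | (0x40 << 8) | 0x20,
+ *			(0x80 << 16) | 0x40, 1);
+ */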
+
+static void m0_configure_ddr(struct pll_div pll_div, uint32_t ddr_index)
+{
+	/* set PARAM to M0_FUNC_DRAM */
+	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_DRAM);
+
+	mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON0, FBDIV(pll_div.fbdiv));
+	mmio_write_32(M0_PARAM_ADDR + PARAM_DPLL_CON1,
+		      POSTDIV2(pll_div.postdiv2) | POSTDIV1(pll_div.postdiv1) |
+		      REFDIV(pll_div.refdiv));
+
+	mmio_write_32(M0_PARAM_ADDR + PARAM_DRAM_FREQ, pll_div.mhz);
+
+	mmio_write_32(M0_PARAM_ADDR + PARAM_FREQ_SELECT, ddr_index << 4);
+	dmbst();
+}
+
+static uint32_t prepare_ddr_timing(uint32_t mhz)
+{
+	uint32_t index;
+	struct dram_timing_t dram_timing;
+
+	rk3399_dram_status.timing_config.freq = mhz;
+
+	if (mhz < 300)
+		rk3399_dram_status.timing_config.dllbp = 1;
+	else
+		rk3399_dram_status.timing_config.dllbp = 0;
+
+	if (rk3399_dram_status.timing_config.odt == 1)
+		gen_rk3399_set_odt(1);
+
+	index = (rk3399_dram_status.current_index + 1) & 0x1;
+
+	/*
+	 * Check whether gate training timings are available for the
+	 * target frequency.
+	 */
+	dram_get_parameter(&rk3399_dram_status.timing_config, &dram_timing);
+	gen_rk3399_ctl_params(&rk3399_dram_status.timing_config,
+			      &dram_timing, index);
+	gen_rk3399_pi_params(&rk3399_dram_status.timing_config,
+			     &dram_timing, index);
+	gen_rk3399_phy_params(&rk3399_dram_status.timing_config,
+			      &rk3399_dram_status.drv_odt_lp_cfg,
+			      &dram_timing, index);
+	rk3399_dram_status.index_freq[index] = mhz;
+
+	return index;
+}
+
+uint32_t ddr_set_rate(uint32_t hz)
+{
+	uint32_t low_power, index, ddr_index;
+	uint32_t mhz = hz / (1000 * 1000);
+
+	if (mhz ==
+	    rk3399_dram_status.index_freq[rk3399_dram_status.current_index])
+		return mhz;
+
+	index = to_get_clk_index(mhz);
+	mhz = dpll_rates_table[index].mhz;
+
+	ddr_index = prepare_ddr_timing(mhz);
+	gen_rk3399_enable_training(rk3399_dram_status.timing_config.ch_cnt,
+				   mhz);
+	if (ddr_index > 1)
+		goto out;
+
+	/*
+	 * Make sure the clock is enabled. The M0 clocks should be on all
+	 * the time during S0.
+	 */
+	m0_configure_ddr(dpll_rates_table[index], ddr_index);
+	m0_start();
+	m0_wait_done();
+	m0_stop();
+
+	if (rk3399_dram_status.timing_config.odt == 0)
+		gen_rk3399_set_odt(0);
+
+	rk3399_dram_status.current_index = ddr_index;
+	low_power = rk3399_dram_status.low_power_stat;
+	resume_low_power(low_power);
+out:
+	gen_rk3399_disable_training(rk3399_dram_status.timing_config.ch_cnt);
+	return mhz;
+}
+
+uint32_t ddr_round_rate(uint32_t hz)
+{
+	int index;
+	uint32_t mhz = hz / (1000 * 1000);
+
+	index = to_get_clk_index(mhz);
+
+	return dpll_rates_table[index].mhz * 1000 * 1000;
+}
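+
+/*
+ * Illustrative usage: a caller can query the rate that will actually be
+ * applied and then request it, e.g.
+ *	ddr_set_rate(ddr_round_rate(800 * 1000 * 1000));
+ */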
+
+void ddr_prepare_for_sys_suspend(void)
+{
+	uint32_t mhz =
+		rk3399_dram_status.index_freq[rk3399_dram_status.current_index];
+
+	/*
+	 * If we're not currently at the boot (assumed highest) frequency, we
+	 * need to change frequencies to configure our current index.
+	 */
+	rk3399_suspend_status.freq = mhz;
+	exit_low_power();
+	rk3399_suspend_status.low_power_stat =
+		rk3399_dram_status.low_power_stat;
+	rk3399_suspend_status.odt = rk3399_dram_status.timing_config.odt;
+	rk3399_dram_status.low_power_stat = 0;
+	rk3399_dram_status.timing_config.odt = 1;
+	if (mhz != rk3399_dram_status.boot_freq)
+		ddr_set_rate(rk3399_dram_status.boot_freq * 1000 * 1000);
+
+	/*
+	 * This will configure the other index to be the same frequency as the
+	 * current one. We retrain both indices on resume, so both have to be
+	 * setup for the same frequency.
+	 */
+	prepare_ddr_timing(rk3399_dram_status.boot_freq);
+}
+
+void ddr_prepare_for_sys_resume(void)
+{
+	/* Disable multicast */
+	mmio_clrbits_32(PHY_REG(0, 896), 1);
+	mmio_clrbits_32(PHY_REG(1, 896), 1);
+
+	/* The suspend code changes the current index, so reset it now. */
+	rk3399_dram_status.current_index =
+		(mmio_read_32(CTL_REG(0, 111)) >> 16) & 0x3;
+	rk3399_dram_status.low_power_stat =
+		rk3399_suspend_status.low_power_stat;
+	rk3399_dram_status.timing_config.odt = rk3399_suspend_status.odt;
+
+	/*
+	 * Set the saved frequency from suspend if it's different than the
+	 * current frequency.
+	 */
+	if (rk3399_suspend_status.freq !=
+	    rk3399_dram_status.index_freq[rk3399_dram_status.current_index]) {
+		ddr_set_rate(rk3399_suspend_status.freq * 1000 * 1000);
+		return;
+	}
+
+	gen_rk3399_set_odt(rk3399_dram_status.timing_config.odt);
+	resume_low_power(rk3399_dram_status.low_power_stat);
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/dfs.h b/plat/rockchip/rk3399/drivers/dram/dfs.h
new file mode 100644
index 0000000..679216c
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dfs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_ROCKCHIP_RK3399_DFS_H__
+#define __SOC_ROCKCHIP_RK3399_DFS_H__
+
+struct rk3399_sdram_default_config {
+	unsigned char bl;
+	/* 1:auto precharge, 0:never auto precharge */
+	unsigned char ap;
+	/* dram driver strength */
+	unsigned char dramds;
+	/* dram ODT, not used when odt = 0 */
+	unsigned char dramodt;
+	/* CA ODT, not used when odt = 0;
+	 * only used by LPDDR4
+	 */
+	unsigned char caodt;
+	unsigned char burst_ref_cnt;
+	/* zqcs interval, in seconds */
+	unsigned char zqcsi;
+};
+
+struct drv_odt_lp_config {
+	uint32_t pd_idle;
+	uint32_t sr_idle;
+	uint32_t sr_mc_gate_idle;
+	uint32_t srpd_lite_idle;
+	uint32_t standby_idle;
+	uint32_t odt_en;
+
+	uint32_t dram_side_drv;
+	uint32_t dram_side_dq_odt;
+	uint32_t dram_side_ca_odt;
+};
+
+uint32_t ddr_set_rate(uint32_t hz);
+uint32_t ddr_round_rate(uint32_t hz);
+uint32_t ddr_get_rate(void);
+uint32_t dram_set_odt_pd(uint32_t arg0, uint32_t arg1, uint32_t arg2);
+void dram_dfs_init(void);
+void ddr_prepare_for_sys_suspend(void);
+void ddr_prepare_for_sys_resume(void);
+
+#endif
diff --git a/plat/rockchip/rk3399/drivers/dram/dram.c b/plat/rockchip/rk3399/drivers/dram/dram.c
new file mode 100644
index 0000000..42b6294
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <dram.h>
+#include <plat_private.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+__pmusramdata struct rk3399_sdram_params sdram_config;
+
+void dram_init(void)
+{
+	uint32_t os_reg2_val, i;
+
+	os_reg2_val = mmio_read_32(PMUGRF_BASE + PMUGRF_OSREG(2));
+	sdram_config.dramtype = SYS_REG_DEC_DDRTYPE(os_reg2_val);
+	sdram_config.num_channels = SYS_REG_DEC_NUM_CH(os_reg2_val);
+	sdram_config.stride = (mmio_read_32(SGRF_BASE + SGRF_SOC_CON3_7(4)) >>
+				10) & 0x1f;
+
+	for (i = 0; i < 2; i++) {
+		struct rk3399_sdram_channel *ch = &sdram_config.ch[i];
+		struct rk3399_msch_timings *noc = &ch->noc_timings;
+
+		if (!(SYS_REG_DEC_CHINFO(os_reg2_val, i)))
+			continue;
+
+		ch->rank = SYS_REG_DEC_RANK(os_reg2_val, i);
+		ch->col = SYS_REG_DEC_COL(os_reg2_val, i);
+		ch->bk = SYS_REG_DEC_BK(os_reg2_val, i);
+		ch->bw = SYS_REG_DEC_BW(os_reg2_val, i);
+		ch->dbw = SYS_REG_DEC_DBW(os_reg2_val, i);
+		ch->row_3_4 = SYS_REG_DEC_ROW_3_4(os_reg2_val, i);
+		ch->cs0_row = SYS_REG_DEC_CS0_ROW(os_reg2_val, i);
+		ch->cs1_row = SYS_REG_DEC_CS1_ROW(os_reg2_val, i);
+		ch->ddrconfig = mmio_read_32(MSCH_BASE(i) + MSCH_DEVICECONF);
+
+		noc->ddrtiminga0.d32 = mmio_read_32(MSCH_BASE(i) +
+				MSCH_DDRTIMINGA0);
+		noc->ddrtimingb0.d32 = mmio_read_32(MSCH_BASE(i) +
+				MSCH_DDRTIMINGB0);
+		noc->ddrtimingc0.d32 = mmio_read_32(MSCH_BASE(i) +
+				MSCH_DDRTIMINGC0);
+		noc->devtodev0.d32 = mmio_read_32(MSCH_BASE(i) +
+				MSCH_DEVTODEV0);
+		noc->ddrmode.d32 = mmio_read_32(MSCH_BASE(i) + MSCH_DDRMODE);
+		noc->agingx0 = mmio_read_32(MSCH_BASE(i) + MSCH_AGINGX0);
+	}
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/dram.h b/plat/rockchip/rk3399/drivers/dram/dram.h
new file mode 100644
index 0000000..0780fc3
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_ROCKCHIP_RK3399_DRAM_H__
+#define __SOC_ROCKCHIP_RK3399_DRAM_H__
+
+#include <dram_regs.h>
+#include <plat_private.h>
+#include <stdint.h>
+
+enum {
+	DDR3 = 3,
+	LPDDR2 = 5,
+	LPDDR3 = 6,
+	LPDDR4 = 7,
+	UNUSED = 0xff
+};
+
+struct rk3399_ddr_pctl_regs {
+	uint32_t denali_ctl[CTL_REG_NUM];
+};
+
+struct rk3399_ddr_publ_regs {
+	/*
+	 * PHY registers from 0 to 90 for slice1.
+	 * These are used to restore slice1-4 on resume.
+	 */
+	uint32_t phy0[91];
+	/*
+	 * PHY registers from 512 to 895.
+	 * Only registers 0-37 of each 128 register range are used.
+	 */
+	uint32_t phy512[3][38];
+	uint32_t phy896[63];
+};
+
+struct rk3399_ddr_pi_regs {
+	uint32_t denali_pi[PI_REG_NUM];
+};
+union noc_ddrtiminga0 {
+	uint32_t d32;
+	struct {
+		unsigned acttoact : 6;
+		unsigned reserved0 : 2;
+		unsigned rdtomiss : 6;
+		unsigned reserved1 : 2;
+		unsigned wrtomiss : 6;
+		unsigned reserved2 : 2;
+		unsigned readlatency : 8;
+	} b;
+};
+
+union noc_ddrtimingb0 {
+	uint32_t d32;
+	struct {
+		unsigned rdtowr : 5;
+		unsigned reserved0 : 3;
+		unsigned wrtord : 5;
+		unsigned reserved1 : 3;
+		unsigned rrd : 4;
+		unsigned reserved2 : 4;
+		unsigned faw : 6;
+		unsigned reserved3 : 2;
+	} b;
+};
+
+union noc_ddrtimingc0 {
+	uint32_t d32;
+	struct {
+		unsigned burstpenalty : 4;
+		unsigned reserved0 : 4;
+		unsigned wrtomwr : 6;
+		unsigned reserved1 : 18;
+	} b;
+};
+
+union noc_devtodev0 {
+	uint32_t d32;
+	struct {
+		unsigned busrdtord : 3;
+		unsigned reserved0 : 1;
+		unsigned busrdtowr : 3;
+		unsigned reserved1 : 1;
+		unsigned buswrtord : 3;
+		unsigned reserved2 : 1;
+		unsigned buswrtowr : 3;
+		unsigned reserved3 : 17;
+	} b;
+};
+
+union noc_ddrmode {
+	uint32_t d32;
+	struct {
+		unsigned autoprecharge : 1;
+		unsigned bypassfiltering : 1;
+		unsigned fawbank : 1;
+		unsigned burstsize : 2;
+		unsigned mwrsize : 2;
+		unsigned reserved2 : 1;
+		unsigned forceorder : 8;
+		unsigned forceorderstate : 8;
+		unsigned reserved3 : 8;
+	} b;
+};
+
+struct rk3399_msch_timings {
+	union noc_ddrtiminga0 ddrtiminga0;
+	union noc_ddrtimingb0 ddrtimingb0;
+	union noc_ddrtimingc0 ddrtimingc0;
+	union noc_devtodev0 devtodev0;
+	union noc_ddrmode ddrmode;
+	uint32_t agingx0;
+};
+
+struct rk3399_sdram_channel {
+	unsigned char rank;
+	/* col = 0, means this channel is invalid */
+	unsigned char col;
+	/* 3:8bank, 2:4bank */
+	unsigned char bk;
+	/* channel buswidth, 2:32bit, 1:16bit, 0:8bit */
+	unsigned char bw;
+	/* die buswidth, 2:32bit, 1:16bit, 0:8bit */
+	unsigned char dbw;
+	/* row_3_4 = 1: 6Gb or 12Gb die
+	 * row_3_4 = 0: normal die, power of 2
+	 */
+	unsigned char row_3_4;
+	unsigned char cs0_row;
+	unsigned char cs1_row;
+	uint32_t ddrconfig;
+	struct rk3399_msch_timings noc_timings;
+};
+
+struct rk3399_sdram_params {
+	struct rk3399_sdram_channel ch[2];
+	uint32_t ddr_freq;
+	unsigned char dramtype;
+	unsigned char num_channels;
+	unsigned char stride;
+	unsigned char odt;
+	struct rk3399_ddr_pctl_regs pctl_regs;
+	struct rk3399_ddr_pi_regs pi_regs;
+	struct rk3399_ddr_publ_regs phy_regs;
+	uint32_t rx_cal_dqs[2][4];
+};
+
+extern __sramdata struct rk3399_sdram_params sdram_config;
+
+void dram_init(void);
+
+#endif
diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c
new file mode 100644
index 0000000..2e196b5
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.c
@@ -0,0 +1,1318 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <dram.h>
+#include <stdint.h>
+#include <string.h>
+#include <utils.h>
+#include "dram_spec_timing.h"
+
+static const uint8_t ddr3_cl_cwl[][7] = {
+	/*
+	 * speed 0~330 331 ~ 400 401 ~ 533 534~666 667~800 801~933 934~1066
+	 * tCK>3 2.5~3 1.875~2.5 1.5~1.875 1.25~1.5 1.07~1.25 0.938~1.07
+	 * cl<<4, cwl  cl<<4, cwl  cl<<4, cwl
+	 */
+	/* DDR3_800D (5-5-5) */
+	{((5 << 4) | 5), ((5 << 4) | 5), 0, 0, 0, 0, 0},
+	/* DDR3_800E (6-6-6) */
+	{((5 << 4) | 5), ((6 << 4) | 5), 0, 0, 0, 0, 0},
+	/* DDR3_1066E (6-6-6) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), 0, 0, 0, 0},
+	/* DDR3_1066F (7-7-7) */
+	{((5 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), 0, 0, 0, 0},
+	/* DDR3_1066G (8-8-8) */
+	{((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), 0, 0, 0, 0},
+	/* total_delay + 0.55 tCK */
+	total_delay += (55 * 10000) / mhz;
+	 0, 0, 0},
+	/* DDR3_1333G (8-8-8) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7),
+	 0, 0, 0},
+	/* DDR3_1333H (9-9-9) */
+	{((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((9 << 4) | 7),
+	 0, 0, 0},
+	/* DDR3_1333J (10-10-10) */
+	{((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+	 0, 0, 0},
+	/* DDR3_1600G (8-8-8) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7),
+	 ((8 << 4) | 8), 0, 0},
+	/* DDR3_1600H (9-9-9) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+	 ((9 << 4) | 8), 0, 0},
+	/* DDR3_1600J (10-10-10) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+	 ((10 << 4) | 8), 0, 0},
+	/* DDR3_1600K (11-11-11) */
+	{((5 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+	 ((11 << 4) | 8), 0, 0},
+	/* DDR3_1866J (10-10-10) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+	 ((9 << 4) | 8), ((11 << 4) | 9), 0},
+	/* DDR3_1866K (11-11-11) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((8 << 4) | 7),
+	 ((10 << 4) | 8), ((11 << 4) | 9), 0},
+	/* DDR3_1866L (12-12-12) */
+	{((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+	 ((11 << 4) | 8), ((12 << 4) | 9), 0},
+	/* DDR3_1866M (13-13-13) */
+	{((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+	 ((11 << 4) | 8), ((13 << 4) | 9), 0},
+	/* DDR3_2133K (11-11-11) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((7 << 4) | 7),
+	 ((9 << 4) | 8), ((10 << 4) | 9), ((11 << 4) | 10)},
+	/* DDR3_2133L (12-12-12) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((6 << 4) | 6), ((8 << 4) | 7),
+	 ((9 << 4) | 8), ((11 << 4) | 9), ((12 << 4) | 10)},
+	/* DDR3_2133M (13-13-13) */
+	{((5 << 4) | 5), ((5 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+	 ((10 << 4) | 8), ((12 << 4) | 9), ((13 << 4) | 10)},
+	/* DDR3_2133N (14-14-14) */
+	{((6 << 4) | 5), ((6 << 4) | 5), ((7 << 4) | 6), ((9 << 4) | 7),
+	 ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)},
+	/* DDR3_DEFAULT */
+	{((6 << 4) | 5), ((6 << 4) | 5), ((8 << 4) | 6), ((10 << 4) | 7),
+	 ((11 << 4) | 8), ((13 << 4) | 9), ((14 << 4) | 10)}
+};
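+
+/*
+ * Each table entry packs CL in the high nibble and CWL in the low nibble,
+ * e.g. ((11 << 4) | 8) = 0xb8 decodes to CL = 11, CWL = 8, matching the
+ * (x >> 4) & 0xf and x & 0xf unpacking in ddr3_get_parameter() below.
+ */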
+
+static const uint16_t ddr3_trc_tfaw[] = {
+	/* tRC      tFAW */
+	((50 << 8) | 50),	/* DDR3_800D (5-5-5) */
+	((53 << 8) | 50),	/* DDR3_800E (6-6-6) */
+
+	((49 << 8) | 50),	/* DDR3_1066E (6-6-6) */
+	((51 << 8) | 50),	/* DDR3_1066F (7-7-7) */
+	((53 << 8) | 50),	/* DDR3_1066G (8-8-8) */
+
+	((47 << 8) | 45),	/* DDR3_1333F (7-7-7) */
+	((48 << 8) | 45),	/* DDR3_1333G (8-8-8) */
+	((50 << 8) | 45),	/* DDR3_1333H (9-9-9) */
+	((51 << 8) | 45),	/* DDR3_1333J (10-10-10) */
+
+	((45 << 8) | 40),	/* DDR3_1600G (8-8-8) */
+	((47 << 8) | 40),	/* DDR3_1600H (9-9-9)*/
+	((48 << 8) | 40),	/* DDR3_1600J (10-10-10) */
+	((49 << 8) | 40),	/* DDR3_1600K (11-11-11) */
+
+	((45 << 8) | 35),	/* DDR3_1866J (10-10-10) */
+	((46 << 8) | 35),	/* DDR3_1866K (11-11-11) */
+	((47 << 8) | 35),	/* DDR3_1866L (12-12-12) */
+	((48 << 8) | 35),	/* DDR3_1866M (13-13-13) */
+
+	((44 << 8) | 35),	/* DDR3_2133K (11-11-11) */
+	((45 << 8) | 35),	/* DDR3_2133L (12-12-12) */
+	((46 << 8) | 35),	/* DDR3_2133M (13-13-13) */
+	((47 << 8) | 35),	/* DDR3_2133N (14-14-14) */
+
+	((53 << 8) | 50)	/* DDR3_DEFAULT */
+};
+
+static uint32_t get_max_speed_rate(struct timing_related_config *timing_config)
+{
+	if (timing_config->ch_cnt > 1)
+		return max(timing_config->dram_info[0].speed_rate,
+					timing_config->dram_info[1].speed_rate);
+	else
+		return timing_config->dram_info[0].speed_rate;
+}
+
+static uint32_t
+get_max_die_capability(struct timing_related_config *timing_config)
+{
+	uint32_t die_cap = 0;
+	uint32_t cs, ch;
+
+	for (ch = 0; ch < timing_config->ch_cnt; ch++) {
+		for (cs = 0; cs < timing_config->dram_info[ch].cs_cnt; cs++) {
+			die_cap = max(die_cap,
+				      timing_config->
+				      dram_info[ch].per_die_capability[cs]);
+		}
+	}
+	return die_cap;
+}
+
+/* tRSTL, 100ns */
+#define DDR3_TRSTL		(100)
+/* trsth, 500us */
+#define DDR3_TRSTH		(500000)
+/* trefi, 7.8us */
+#define DDR3_TREFI_7_8_US	(7800)
+/* tWR, 15ns */
+#define DDR3_TWR		(15)
+/* tRTP, max(4 tCK,7.5ns) */
+#define DDR3_TRTP		(7)
+/* tRRD = max(4nCK, 10ns) */
+#define DDR3_TRRD		(10)
+/* tCK */
+#define DDR3_TCCD		(4)
+/* tWTR, max(4 tCK, 7.5ns) */
+#define DDR3_TWTR		(7)
+/* tCK */
+#define DDR3_TRTW		(0)
+/* tRAS, 37.5ns(400MHz) 37.5ns(533MHz) */
+#define DDR3_TRAS		(37)
+/* ns */
+#define DDR3_TRFC_512MBIT	(90)
+/* ns */
+#define DDR3_TRFC_1GBIT		(110)
+/* ns */
+#define DDR3_TRFC_2GBIT		(160)
+/* ns */
+#define DDR3_TRFC_4GBIT		(300)
+/* ns */
+#define DDR3_TRFC_8GBIT		(350)
+
+/* pd and sr */
+#define DDR3_TXP		(7) /* tXP, max(3 tCK, 7.5ns)( < 933MHz) */
+#define DDR3_TXPDLL		(24) /* tXPDLL, max(10 tCK, 24ns) */
+#define DDR3_TDLLK		(512) /* tXSR, tDLLK=512 tCK */
+#define DDR3_TCKE_400MHZ	(7) /* tCKE, max(3 tCK,7.5ns)(400MHz) */
+#define DDR3_TCKE_533MHZ	(6) /* tCKE, max(3 tCK,5.625ns)(533MHz) */
+#define DDR3_TCKSRE		(10) /* tCKSRE/tCKSRX, max(5 tCK, 10ns) */
+
+/*mode register timing*/
+#define DDR3_TMOD		(15) /* tMOD, max(12 tCK,15ns) */
+#define DDR3_TMRD		(4) /* tMRD, 4 tCK */
+
+/* ZQ */
+#define DDR3_TZQINIT		(640) /* tZQinit, max(512 tCK, 640ns) */
+#define DDR3_TZQCS		(80) /* tZQCS, max(64 tCK, 80ns) */
+#define DDR3_TZQOPER		(320) /* tZQoper, max(256 tCK, 320ns) */
+
+/* Write leveling */
+#define DDR3_TWLMRD		(40) /* tCK */
+#define DDR3_TWLO		(9) /* max 7.5ns */
+#define DDR3_TWLDQSEN		(25) /* tCK */
+
+/*
+ * Description: based on the input "timing_config", calculate all
+ *		DDR3 spec timings into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ */
+static void ddr3_get_parameter(struct timing_related_config *timing_config,
+			       struct dram_timing_t *pdram_timing)
+{
+	uint32_t nmhz = timing_config->freq;
+	uint32_t ddr_speed_bin = get_max_speed_rate(timing_config);
+	uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+	uint32_t tmp;
+
+	zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+	pdram_timing->mhz = nmhz;
+	pdram_timing->al = 0;
+	pdram_timing->bl = timing_config->bl;
+	if (nmhz <= 330)
+		tmp = 0;
+	else if (nmhz <= 400)
+		tmp = 1;
+	else if (nmhz <= 533)
+		tmp = 2;
+	else if (nmhz <= 666)
+		tmp = 3;
+	else if (nmhz <= 800)
+		tmp = 4;
+	else if (nmhz <= 933)
+		tmp = 5;
+	else
+		tmp = 6;
+
+	/* when the DLL is bypassed, cl = cwl = 6 */
+	if (nmhz < 300) {
+		pdram_timing->cl = 6;
+		pdram_timing->cwl = 6;
+	} else {
+		pdram_timing->cl = (ddr3_cl_cwl[ddr_speed_bin][tmp] >> 4) & 0xf;
+		pdram_timing->cwl = ddr3_cl_cwl[ddr_speed_bin][tmp] & 0xf;
+	}
+
+	switch (timing_config->dramds) {
+	case 40:
+		tmp = DDR3_DS_40;
+		break;
+	case 34:
+	default:
+		tmp = DDR3_DS_34;
+		break;
+	}
+
+	if (timing_config->odt)
+		switch (timing_config->dramodt) {
+		case 60:
+			pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_60;
+			break;
+		case 40:
+			pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_40;
+			break;
+		case 120:
+			pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_120;
+			break;
+		case 0:
+		default:
+			pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS;
+			break;
+		}
+	else
+		pdram_timing->mr[1] = tmp | DDR3_RTT_NOM_DIS;
+
+	pdram_timing->mr[2] = DDR3_MR2_CWL(pdram_timing->cwl);
+	pdram_timing->mr[3] = 0;
+
+	pdram_timing->trstl = ((DDR3_TRSTL * nmhz + 999) / 1000);
+	pdram_timing->trsth = ((DDR3_TRSTH * nmhz + 999) / 1000);
+	/* tREFI, average periodic refresh interval, 7.8us */
+	pdram_timing->trefi = ((DDR3_TREFI_7_8_US * nmhz + 999) / 1000);
+	/* base timing */
+	pdram_timing->trcd = pdram_timing->cl;
+	pdram_timing->trp = pdram_timing->cl;
+	pdram_timing->trppb = pdram_timing->cl;
+	tmp = ((DDR3_TWR * nmhz + 999) / 1000);
+	pdram_timing->twr = tmp;
+	pdram_timing->tdal = tmp + pdram_timing->trp;
+	if (tmp < 9) {
+		tmp = tmp - 4;
+	} else {
+		tmp += (tmp & 0x1) ? 1 : 0;
+		tmp = tmp >> 1;
+	}
+	if (pdram_timing->bl == 4)
+		pdram_timing->mr[0] = DDR3_BC4
+				| DDR3_CL(pdram_timing->cl)
+				| DDR3_WR(tmp);
+	else
+		pdram_timing->mr[0] = DDR3_BL8
+				| DDR3_CL(pdram_timing->cl)
+				| DDR3_WR(tmp);
+	tmp = ((DDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->trtp = max(4, tmp);
+	pdram_timing->trc =
+		(((ddr3_trc_tfaw[ddr_speed_bin] >> 8) * nmhz + 999) / 1000);
+	tmp = ((DDR3_TRRD * nmhz + 999) / 1000);
+	pdram_timing->trrd = max(4, tmp);
+	pdram_timing->tccd = DDR3_TCCD;
+	tmp = ((DDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->twtr = max(4, tmp);
+	pdram_timing->trtw = DDR3_TRTW;
+	pdram_timing->tras_max = 9 * pdram_timing->trefi;
+	pdram_timing->tras_min = ((DDR3_TRAS * nmhz + (nmhz >> 1) + 999)
+		/ 1000);
+	pdram_timing->tfaw =
+		(((ddr3_trc_tfaw[ddr_speed_bin] & 0x0ff) * nmhz + 999)
+						/ 1000);
+	/* tRFC, 90ns(512Mb),110ns(1Gb),160ns(2Gb),300ns(4Gb),350ns(8Gb) */
+	if (ddr_capability_per_die <= 0x4000000)
+		tmp = DDR3_TRFC_512MBIT;
+	else if (ddr_capability_per_die <= 0x8000000)
+		tmp = DDR3_TRFC_1GBIT;
+	else if (ddr_capability_per_die <= 0x10000000)
+		tmp = DDR3_TRFC_2GBIT;
+	else if (ddr_capability_per_die <= 0x20000000)
+		tmp = DDR3_TRFC_4GBIT;
+	else
+		tmp = DDR3_TRFC_8GBIT;
+	pdram_timing->trfc = (tmp * nmhz + 999) / 1000;
+	pdram_timing->txsnr = max(5, (((tmp + 10) * nmhz + 999) / 1000));
+	pdram_timing->tdqsck_max = 0;
+	/*pd and sr*/
+	pdram_timing->txsr = DDR3_TDLLK;
+	tmp = ((DDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->txp = max(3, tmp);
+	tmp = ((DDR3_TXPDLL * nmhz + 999) / 1000);
+	pdram_timing->txpdll = max(10, tmp);
+	pdram_timing->tdllk = DDR3_TDLLK;
+	if (nmhz >= 533)
+		tmp = ((DDR3_TCKE_533MHZ * nmhz + 999) / 1000);
+	else
+		tmp = ((DDR3_TCKE_400MHZ * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->tcke = max(3, tmp);
+	pdram_timing->tckesr = (pdram_timing->tcke + 1);
+	tmp = ((DDR3_TCKSRE * nmhz + 999) / 1000);
+	pdram_timing->tcksre = max(5, tmp);
+	pdram_timing->tcksrx = max(5, tmp);
+	/*mode register timing*/
+	tmp = ((DDR3_TMOD * nmhz + 999) / 1000);
+	pdram_timing->tmod = max(12, tmp);
+	pdram_timing->tmrd = DDR3_TMRD;
+	pdram_timing->tmrr = 0;
+	/*ODT*/
+	pdram_timing->todton = pdram_timing->cwl - 2;
+	/*ZQ*/
+	tmp = ((DDR3_TZQINIT * nmhz + 999) / 1000);
+	pdram_timing->tzqinit = max(512, tmp);
+	tmp = ((DDR3_TZQCS * nmhz + 999) / 1000);
+	pdram_timing->tzqcs = max(64, tmp);
+	tmp = ((DDR3_TZQOPER * nmhz + 999) / 1000);
+	pdram_timing->tzqoper = max(256, tmp);
+	/* write leveling */
+	pdram_timing->twlmrd = DDR3_TWLMRD;
+	pdram_timing->twldqsen = DDR3_TWLDQSEN;
+	pdram_timing->twlo = ((DDR3_TWLO * nmhz + (nmhz >> 1) + 999) / 1000);
+}
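+/*
+ * Note on the conversions above (an illustration, not part of the original
+ * code): "(t_ns * nmhz + 999) / 1000" is a ceiling nanosecond-to-clock-cycle
+ * conversion, and the extra "(nmhz >> 1)" term restores the 0.5ns that the
+ * integer #define drops. For example at 666MHz:
+ *
+ *	twr  = (DDR3_TWR * 666 + 999) / 1000;		  // 15ns = 9.99 tCK -> 10
+ *	trtp = (DDR3_TRTP * 666 + (666 >> 1) + 999) / 1000; // 7.5ns = 4.995 tCK -> 5
+ */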
+
+#define LPDDR2_TINIT1		(100) /* ns */
+#define LPDDR2_TINIT2		(5) /* tCK */
+#define LPDDR2_TINIT3		(200000) /* 200us */
+#define LPDDR2_TINIT4		(1000) /* 1us */
+#define LPDDR2_TINIT5		(10000) /* 10us */
+#define LPDDR2_TRSTL		(0) /* tCK */
+#define LPDDR2_TRSTH		(500000) /* 500us */
+#define LPDDR2_TREFI_3_9_US	(3900) /* 3.9us */
+#define LPDDR2_TREFI_7_8_US	(7800) /* 7.8us */
+
+/* base timing */
+#define LPDDR2_TRCD		(24) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR2_TRP_PB		(18) /* tRPpb,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR2_TRP_AB_8_BANK	(21) /* tRPab,18ns(Fast)21ns(Typ)27ns(Slow) */
+#define LPDDR2_TWR		(15) /* tWR, max(3tCK,15ns) */
+#define LPDDR2_TRTP		(7) /* tRTP, max(2tCK, 7.5ns) */
+#define LPDDR2_TRRD		(10) /* tRRD, max(2tCK,10ns) */
+#define LPDDR2_TCCD		(2) /* tCK */
+#define LPDDR2_TWTR_GREAT_200MHZ	(7) /* ns */
+#define LPDDR2_TWTR_LITTLE_200MHZ	(10) /* ns */
+#define LPDDR2_TRTW		(0) /* tCK */
+#define LPDDR2_TRAS_MAX		(70000) /* 70us */
+#define LPDDR2_TRAS		(42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR2_TFAW_GREAT_200MHZ	(50) /* max(8tCK,50ns) */
+#define LPDDR2_TFAW_LITTLE_200MHZ	(60) /* max(8tCK,60ns) */
+#define LPDDR2_TRFC_8GBIT	(210) /* ns */
+#define LPDDR2_TRFC_4GBIT	(130) /* ns */
+#define LPDDR2_TDQSCK_MIN	(2) /* tDQSCKmin, 2.5ns */
+#define LPDDR2_TDQSCK_MAX	(5) /* tDQSCKmax, 5.5ns */
+
+/*pd and sr*/
+#define LPDDR2_TXP		(7) /* tXP, max(2tCK,7.5ns) */
+#define LPDDR2_TXPDLL		(0)
+#define LPDDR2_TDLLK		(0) /* tCK */
+#define LPDDR2_TCKE		(3) /* tCK */
+#define LPDDR2_TCKESR		(15) /* tCKESR, max(3tCK,15ns) */
+#define LPDDR2_TCKSRE		(1) /* tCK */
+#define LPDDR2_TCKSRX		(2) /* tCK */
+
+/*mode register timing*/
+#define LPDDR2_TMOD		(0)
+#define LPDDR2_TMRD		(5) /* tMRD, (=tMRW), 5 tCK */
+#define LPDDR2_TMRR		(2) /* tCK */
+
+/*ZQ*/
+#define LPDDR2_TZQINIT		(1000) /* ns */
+#define LPDDR2_TZQCS		(90) /* tZQCS, max(6tCK,90ns) */
+#define LPDDR2_TZQCL		(360) /* tZQCL, max(6tCK,360ns) */
+#define LPDDR2_TZQRESET		(50) /* ZQreset, max(3tCK,50ns) */
+
+/*
+ * Description: based on the input "timing_config", calculate all
+ *		LPDDR2 spec timings into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ */
+static void lpddr2_get_parameter(struct timing_related_config *timing_config,
+				 struct dram_timing_t *pdram_timing)
+{
+	uint32_t nmhz = timing_config->freq;
+	uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+	uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp;
+
+	zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+	pdram_timing->mhz = nmhz;
+	pdram_timing->al = 0;
+	pdram_timing->bl = timing_config->bl;
+
+	/*
+	 *      1066  933  800  667  533  400  333
+	 * RL,     8    7    6    5    4    3    3
+	 * WL,     4    4    3    2    2    1    1
+	 */
+	if (nmhz <= 266) {
+		pdram_timing->cl = 4;
+		pdram_timing->cwl = 2;
+		pdram_timing->mr[2] = LPDDR2_RL4_WL2;
+	} else if (nmhz <= 333) {
+		pdram_timing->cl = 5;
+		pdram_timing->cwl = 2;
+		pdram_timing->mr[2] = LPDDR2_RL5_WL2;
+	} else if (nmhz <= 400) {
+		pdram_timing->cl = 6;
+		pdram_timing->cwl = 3;
+		pdram_timing->mr[2] = LPDDR2_RL6_WL3;
+	} else if (nmhz <= 466) {
+		pdram_timing->cl = 7;
+		pdram_timing->cwl = 4;
+		pdram_timing->mr[2] = LPDDR2_RL7_WL4;
+	} else {
+		pdram_timing->cl = 8;
+		pdram_timing->cwl = 4;
+		pdram_timing->mr[2] = LPDDR2_RL8_WL4;
+	}
+	switch (timing_config->dramds) {
+	case 120:
+		pdram_timing->mr[3] = LPDDR2_DS_120;
+		break;
+	case 80:
+		pdram_timing->mr[3] = LPDDR2_DS_80;
+		break;
+	case 60:
+		pdram_timing->mr[3] = LPDDR2_DS_60;
+		break;
+	case 48:
+		pdram_timing->mr[3] = LPDDR2_DS_48;
+		break;
+	case 40:
+		pdram_timing->mr[3] = LPDDR2_DS_40;
+		break;
+	case 34:
+	default:
+		pdram_timing->mr[3] = LPDDR2_DS_34;
+		break;
+	}
+	pdram_timing->mr[0] = 0;
+
+	pdram_timing->tinit1 = (LPDDR2_TINIT1 * nmhz + 999) / 1000;
+	pdram_timing->tinit2 = LPDDR2_TINIT2;
+	pdram_timing->tinit3 = (LPDDR2_TINIT3 * nmhz + 999) / 1000;
+	pdram_timing->tinit4 = (LPDDR2_TINIT4 * nmhz + 999) / 1000;
+	pdram_timing->tinit5 = (LPDDR2_TINIT5 * nmhz + 999) / 1000;
+	pdram_timing->trstl = LPDDR2_TRSTL;
+	pdram_timing->trsth = (LPDDR2_TRSTH * nmhz + 999) / 1000;
+	/*
+	 * tREFI, average periodic refresh interval,
+	 * 15.6us(<256Mb) 7.8us(256Mb-1Gb) 3.9us(2Gb-8Gb)
+	 */
+	if (ddr_capability_per_die >= 0x10000000)
+		pdram_timing->trefi = (LPDDR2_TREFI_3_9_US * nmhz + 999)
+							/ 1000;
+	else
+		pdram_timing->trefi = (LPDDR2_TREFI_7_8_US * nmhz + 999)
+							/ 1000;
+	/* base timing */
+	tmp = ((LPDDR2_TRCD * nmhz + 999) / 1000);
+	pdram_timing->trcd = max(3, tmp);
+	/*
+	 * tRPpb, max(3tCK, 15ns(Fast) 18ns(Typ) 24ns(Slow),
+	 */
+	trppb_tmp = ((LPDDR2_TRP_PB * nmhz + 999) / 1000);
+	trppb_tmp = max(3, trppb_tmp);
+	pdram_timing->trppb = trppb_tmp;
+	/*
+	 * tRPab, max(3tCK, 4-bank:15ns(Fast) 18ns(Typ) 24ns(Slow),
+	 *	8-bank:18ns(Fast) 21ns(Typ) 27ns(Slow))
+	 */
+	trp_tmp = ((LPDDR2_TRP_AB_8_BANK * nmhz + 999) / 1000);
+	trp_tmp = max(3, trp_tmp);
+	pdram_timing->trp = trp_tmp;
+	twr_tmp = ((LPDDR2_TWR * nmhz + 999) / 1000);
+	twr_tmp = max(3, twr_tmp);
+	pdram_timing->twr = twr_tmp;
+	bl_tmp = (pdram_timing->bl == 16) ? LPDDR2_BL16 :
+			((pdram_timing->bl == 8) ? LPDDR2_BL8 : LPDDR2_BL4);
+	pdram_timing->mr[1] = bl_tmp | LPDDR2_N_WR(twr_tmp);
+	tmp = ((LPDDR2_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->trtp = max(2, tmp);
+	tras_tmp = ((LPDDR2_TRAS * nmhz + 999) / 1000);
+	tras_tmp = max(3, tras_tmp);
+	pdram_timing->tras_min = tras_tmp;
+	pdram_timing->tras_max = ((LPDDR2_TRAS_MAX * nmhz + 999) / 1000);
+	pdram_timing->trc = (tras_tmp + trp_tmp);
+	tmp = ((LPDDR2_TRRD * nmhz + 999) / 1000);
+	pdram_timing->trrd = max(2, tmp);
+	pdram_timing->tccd = LPDDR2_TCCD;
+	/* tWTR, max(2tCK, 7.5ns(533-266MHz)  10ns(200-166MHz)) */
+	if (nmhz > 200)
+		tmp = ((LPDDR2_TWTR_GREAT_200MHZ * nmhz + (nmhz >> 1) +
+			  999) / 1000);
+	else
+		tmp = ((LPDDR2_TWTR_LITTLE_200MHZ * nmhz + 999) / 1000);
+	pdram_timing->twtr = max(2, tmp);
+	pdram_timing->trtw = LPDDR2_TRTW;
+	if (nmhz <= 200)
+		pdram_timing->tfaw = (LPDDR2_TFAW_LITTLE_200MHZ * nmhz + 999)
+							/ 1000;
+	else
+		pdram_timing->tfaw = (LPDDR2_TFAW_GREAT_200MHZ * nmhz + 999)
+							/ 1000;
+	/* tRFC, 90ns(<=512Mb) 130ns(1Gb-4Gb) 210ns(8Gb) */
+	if (ddr_capability_per_die >= 0x40000000) {
+		pdram_timing->trfc =
+			(LPDDR2_TRFC_8GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR2_TRFC_8GBIT + 10) * nmhz + 999) / 1000);
+	} else {
+		pdram_timing->trfc =
+			(LPDDR2_TRFC_4GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR2_TRFC_4GBIT + 10) * nmhz + 999) / 1000);
+	}
+	if (tmp < 2)
+		tmp = 2;
+	pdram_timing->txsr = tmp;
+	pdram_timing->txsnr = tmp;
+	/* tDQSCK is rounded down */
+	pdram_timing->tdqsck = ((LPDDR2_TDQSCK_MIN * nmhz + (nmhz >> 1))
+					/ 1000);
+	pdram_timing->tdqsck_max =
+			((LPDDR2_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999)
+					/ 1000);
+	/* pd and sr */
+	tmp = ((LPDDR2_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->txp = max(2, tmp);
+	pdram_timing->txpdll = LPDDR2_TXPDLL;
+	pdram_timing->tdllk = LPDDR2_TDLLK;
+	pdram_timing->tcke = LPDDR2_TCKE;
+	tmp = ((LPDDR2_TCKESR * nmhz + 999) / 1000);
+	pdram_timing->tckesr = max(3, tmp);
+	pdram_timing->tcksre = LPDDR2_TCKSRE;
+	pdram_timing->tcksrx = LPDDR2_TCKSRX;
+	/* mode register timing */
+	pdram_timing->tmod = LPDDR2_TMOD;
+	pdram_timing->tmrd = LPDDR2_TMRD;
+	pdram_timing->tmrr = LPDDR2_TMRR;
+	/* ZQ */
+	pdram_timing->tzqinit = (LPDDR2_TZQINIT * nmhz + 999) / 1000;
+	tmp = ((LPDDR2_TZQCS * nmhz + 999) / 1000);
+	pdram_timing->tzqcs = max(6, tmp);
+	tmp = ((LPDDR2_TZQCL * nmhz + 999) / 1000);
+	pdram_timing->tzqoper = max(6, tmp);
+	tmp = ((LPDDR2_TZQRESET * nmhz + 999) / 1000);
+	pdram_timing->tzqreset = max(3, tmp);
+}
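+/*
+ * Capacity note (illustrative): judging by the thresholds used here and in
+ * ddr3_get_parameter(), per_die_capability is expressed in bytes, so
+ * 0x10000000 = 256MB = 2Gb (where tREFI drops to 3.9us) and
+ * 0x40000000 = 1GB = 8Gb (where tRFC grows to 210ns).
+ */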
+
+#define LPDDR3_TINIT1		(100) /* ns */
+#define LPDDR3_TINIT2		(5) /* tCK */
+#define LPDDR3_TINIT3		(200000) /* 200us */
+#define LPDDR3_TINIT4		(1000) /* 1us */
+#define LPDDR3_TINIT5		(10000) /* 10us */
+#define LPDDR3_TRSTL		(0)
+#define LPDDR3_TRSTH		(0) /* 500us */
+#define LPDDR3_TREFI_3_9_US	(3900) /* 3.9us */
+
+/* base timing */
+#define LPDDR3_TRCD	(18) /* tRCD,15ns(Fast)18ns(Typ)24ns(Slow) */
+#define LPDDR3_TRP_PB	(18) /* tRPpb, 15ns(Fast) 18ns(Typ) 24ns(Slow) */
+#define LPDDR3_TRP_AB	(21) /* tRPab, 18ns(Fast) 21ns(Typ) 27ns(Slow) */
+#define LPDDR3_TWR	(15) /* tWR, max(4tCK,15ns) */
+#define LPDDR3_TRTP	(7) /* tRTP, max(4tCK, 7.5ns) */
+#define LPDDR3_TRRD	(10) /* tRRD, max(2tCK,10ns) */
+#define LPDDR3_TCCD	(4) /* tCK */
+#define LPDDR3_TWTR	(7) /* tWTR, max(4tCK, 7.5ns) */
+#define LPDDR3_TRTW	(0) /* tCK register min valid value */
+#define LPDDR3_TRAS_MAX	(70000) /* 70us */
+#define LPDDR3_TRAS	(42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR3_TFAW	(50) /* tFAW,max(8tCK, 50ns) */
+#define LPDDR3_TRFC_8GBIT	(210) /* tRFC, 130ns(4Gb) 210ns(>4Gb) */
+#define LPDDR3_TRFC_4GBIT	(130) /* ns */
+#define LPDDR3_TDQSCK_MIN	(2) /* tDQSCKmin,2.5ns */
+#define LPDDR3_TDQSCK_MAX	(5) /* tDQSCKmax,5.5ns */
+
+/* pd and sr */
+#define LPDDR3_TXP	(7) /* tXP, max(3tCK,7.5ns) */
+#define LPDDR3_TXPDLL	(0)
+#define LPDDR3_TCKE	(7) /* tCKE, max(7.5ns, 3tCK) */
+#define LPDDR3_TCKESR	(15) /* tCKESR, max(3tCK,15ns) */
+#define LPDDR3_TCKSRE	(2) /* tCKSRE=tCPDED, 2 tCK */
+#define LPDDR3_TCKSRX	(2) /* tCKSRX, 2 tCK */
+
+/* mode register timing */
+#define LPDDR3_TMOD	(0)
+#define LPDDR3_TMRD	(14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */
+#define LPDDR3_TMRR	(4) /* tMRR, 4 tCK */
+#define LPDDR3_TMRRI	LPDDR3_TRCD
+
+/* ODT */
+#define LPDDR3_TODTON	(3) /* 3.5ns */
+
+/* ZQ */
+#define LPDDR3_TZQINIT	(1000) /* 1us */
+#define LPDDR3_TZQCS	(90) /* tZQCS, 90ns */
+#define LPDDR3_TZQCL	(360) /* 360ns */
+#define LPDDR3_TZQRESET	(50) /* ZQreset, max(3tCK,50ns) */
+/* write leveling */
+#define LPDDR3_TWLMRD	(40) /* ns */
+#define LPDDR3_TWLO	(20) /* ns */
+#define LPDDR3_TWLDQSEN	(25) /* ns */
+/* CA training */
+#define LPDDR3_TCACKEL	(10) /* tCK */
+#define LPDDR3_TCAENT	(10) /* tCK */
+#define LPDDR3_TCAMRD	(20) /* tCK */
+#define LPDDR3_TCACKEH	(10) /* tCK */
+#define LPDDR3_TCAEXT	(10) /* tCK */
+#define LPDDR3_TADR	(20) /* ns */
+#define LPDDR3_TMRZ	(3) /* ns */
+
+/* FSP */
+#define LPDDR3_TFC_LONG	(250) /* ns */
+
+/*
+ * Description: based on the input "timing_config", calculate all
+ *		LPDDR3 spec timings into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ */
+static void lpddr3_get_parameter(struct timing_related_config *timing_config,
+				 struct dram_timing_t *pdram_timing)
+{
+	uint32_t nmhz = timing_config->freq;
+	uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+	uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp, twr_tmp, bl_tmp;
+
+	zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+	pdram_timing->mhz = nmhz;
+	pdram_timing->al = 0;
+	pdram_timing->bl = timing_config->bl;
+
+	/*
+	 * Only support Write Latency Set A here
+	 *     1066 933 800 733 667 600 533 400 166
+	 * RL, 16   14  12  11  10  9   8   6   3
+	 * WL, 8    8   6   6   6   5   4   3   1
+	 */
+	if (nmhz <= 400) {
+		pdram_timing->cl = 6;
+		pdram_timing->cwl = 3;
+		pdram_timing->mr[2] = LPDDR3_RL6_WL3;
+	} else if (nmhz <= 533) {
+		pdram_timing->cl = 8;
+		pdram_timing->cwl = 4;
+		pdram_timing->mr[2] = LPDDR3_RL8_WL4;
+	} else if (nmhz <= 600) {
+		pdram_timing->cl = 9;
+		pdram_timing->cwl = 5;
+		pdram_timing->mr[2] = LPDDR3_RL9_WL5;
+	} else if (nmhz <= 667) {
+		pdram_timing->cl = 10;
+		pdram_timing->cwl = 6;
+		pdram_timing->mr[2] = LPDDR3_RL10_WL6;
+	} else if (nmhz <= 733) {
+		pdram_timing->cl = 11;
+		pdram_timing->cwl = 6;
+		pdram_timing->mr[2] = LPDDR3_RL11_WL6;
+	} else if (nmhz <= 800) {
+		pdram_timing->cl = 12;
+		pdram_timing->cwl = 6;
+		pdram_timing->mr[2] = LPDDR3_RL12_WL6;
+	} else if (nmhz <= 933) {
+		pdram_timing->cl = 14;
+		pdram_timing->cwl = 8;
+		pdram_timing->mr[2] = LPDDR3_RL14_WL8;
+	} else {
+		pdram_timing->cl = 16;
+		pdram_timing->cwl = 8;
+		pdram_timing->mr[2] = LPDDR3_RL16_WL8;
+	}
+	switch (timing_config->dramds) {
+	case 80:
+		pdram_timing->mr[3] = LPDDR3_DS_80;
+		break;
+	case 60:
+		pdram_timing->mr[3] = LPDDR3_DS_60;
+		break;
+	case 48:
+		pdram_timing->mr[3] = LPDDR3_DS_48;
+		break;
+	case 40:
+		pdram_timing->mr[3] = LPDDR3_DS_40;
+		break;
+	case 3440:
+		pdram_timing->mr[3] = LPDDR3_DS_34D_40U;
+		break;
+	case 4048:
+		pdram_timing->mr[3] = LPDDR3_DS_40D_48U;
+		break;
+	case 3448:
+		pdram_timing->mr[3] = LPDDR3_DS_34D_48U;
+		break;
+	case 34:
+	default:
+		pdram_timing->mr[3] = LPDDR3_DS_34;
+		break;
+	}
+	pdram_timing->mr[0] = 0;
+	if (timing_config->odt)
+		switch (timing_config->dramodt) {
+		case 60:
+			pdram_timing->mr11 = LPDDR3_ODT_60;
+			break;
+		case 120:
+			pdram_timing->mr11 = LPDDR3_ODT_120;
+			break;
+		case 240:
+		default:
+			pdram_timing->mr11 = LPDDR3_ODT_240;
+			break;
+		}
+	else
+		pdram_timing->mr11 = LPDDR3_ODT_DIS;
+
+	pdram_timing->tinit1 = (LPDDR3_TINIT1 * nmhz + 999) / 1000;
+	pdram_timing->tinit2 = LPDDR3_TINIT2;
+	pdram_timing->tinit3 = (LPDDR3_TINIT3 * nmhz + 999) / 1000;
+	pdram_timing->tinit4 = (LPDDR3_TINIT4 * nmhz + 999) / 1000;
+	pdram_timing->tinit5 = (LPDDR3_TINIT5 * nmhz + 999) / 1000;
+	pdram_timing->trstl = LPDDR3_TRSTL;
+	pdram_timing->trsth = (LPDDR3_TRSTH * nmhz + 999) / 1000;
+	/* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */
+	pdram_timing->trefi = (LPDDR3_TREFI_3_9_US * nmhz + 999) / 1000;
+	/* base timing */
+	tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000);
+	pdram_timing->trcd = max(3, tmp);
+	trppb_tmp = ((LPDDR3_TRP_PB * nmhz + 999) / 1000);
+	trppb_tmp = max(3, trppb_tmp);
+	pdram_timing->trppb = trppb_tmp;
+	trp_tmp = ((LPDDR3_TRP_AB * nmhz + 999) / 1000);
+	trp_tmp = max(3, trp_tmp);
+	pdram_timing->trp = trp_tmp;
+	twr_tmp = ((LPDDR3_TWR * nmhz + 999) / 1000);
+	twr_tmp = max(4, twr_tmp);
+	pdram_timing->twr = twr_tmp;
+	if (twr_tmp <= 6)
+		twr_tmp = 6;
+	else if (twr_tmp <= 8)
+		twr_tmp = 8;
+	else if (twr_tmp <= 12)
+		twr_tmp = twr_tmp;	/* nWR 9..12 is used as calculated */
+	else if (twr_tmp <= 14)
+		twr_tmp = 14;
+	else
+		twr_tmp = 16;
+	if (twr_tmp > 9)
+		pdram_timing->mr[2] |= (1 << 4); /*enable nWR > 9*/
+	twr_tmp = (twr_tmp > 9) ? (twr_tmp - 10) : (twr_tmp - 2);
+	bl_tmp = LPDDR3_BL8;
+	pdram_timing->mr[1] = bl_tmp | LPDDR3_N_WR(twr_tmp);
+	tmp = ((LPDDR3_TRTP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->trtp = max(4, tmp);
+	tras_tmp = ((LPDDR3_TRAS * nmhz + 999) / 1000);
+	tras_tmp = max(3, tras_tmp);
+	pdram_timing->tras_min = tras_tmp;
+	pdram_timing->trc = (tras_tmp + trp_tmp);
+	tmp = ((LPDDR3_TRRD * nmhz + 999) / 1000);
+	pdram_timing->trrd = max(2, tmp);
+	pdram_timing->tccd = LPDDR3_TCCD;
+	tmp = ((LPDDR3_TWTR * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->twtr = max(4, tmp);
+	pdram_timing->trtw =  ((LPDDR3_TRTW * nmhz + 999) / 1000);
+	pdram_timing->tras_max = ((LPDDR3_TRAS_MAX * nmhz + 999) / 1000);
+	tmp = (LPDDR3_TFAW * nmhz + 999) / 1000;
+	pdram_timing->tfaw = max(8, tmp);
+	if (ddr_capability_per_die > 0x20000000) {
+		pdram_timing->trfc =
+			(LPDDR3_TRFC_8GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR3_TRFC_8GBIT + 10) * nmhz + 999) / 1000);
+	} else {
+		pdram_timing->trfc =
+			(LPDDR3_TRFC_4GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR3_TRFC_4GBIT + 10) * nmhz + 999) / 1000);
+	}
+	pdram_timing->txsr = max(2, tmp);
+	pdram_timing->txsnr = max(2, tmp);
+	/* tDQSCK is rounded down */
+	pdram_timing->tdqsck =
+			((LPDDR3_TDQSCK_MIN * nmhz + (nmhz >> 1))
+					/ 1000);
+	pdram_timing->tdqsck_max =
+			((LPDDR3_TDQSCK_MAX * nmhz + (nmhz >> 1) + 999)
+					/ 1000);
+	/*pd and sr*/
+	tmp = ((LPDDR3_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->txp = max(3, tmp);
+	pdram_timing->txpdll = LPDDR3_TXPDLL;
+	tmp = ((LPDDR3_TCKE * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->tcke = max(3, tmp);
+	tmp = ((LPDDR3_TCKESR * nmhz + 999) / 1000);
+	pdram_timing->tckesr = max(3, tmp);
+	pdram_timing->tcksre = LPDDR3_TCKSRE;
+	pdram_timing->tcksrx = LPDDR3_TCKSRX;
+	/*mode register timing*/
+	pdram_timing->tmod = LPDDR3_TMOD;
+	tmp = ((LPDDR3_TMRD * nmhz + 999) / 1000);
+	pdram_timing->tmrd = max(10, tmp);
+	pdram_timing->tmrr = LPDDR3_TMRR;
+	tmp = ((LPDDR3_TRCD * nmhz + 999) / 1000);
+	pdram_timing->tmrri = max(3, tmp);
+	/* ODT */
+	pdram_timing->todton = (LPDDR3_TODTON * nmhz + (nmhz >> 1) + 999)
+				/ 1000;
+	/* ZQ */
+	pdram_timing->tzqinit = (LPDDR3_TZQINIT * nmhz + 999) / 1000;
+	pdram_timing->tzqcs =
+		((LPDDR3_TZQCS * nmhz + 999) / 1000);
+	pdram_timing->tzqoper =
+		((LPDDR3_TZQCL * nmhz + 999) / 1000);
+	tmp = ((LPDDR3_TZQRESET * nmhz + 999) / 1000);
+	pdram_timing->tzqreset = max(3, tmp);
+	/* write leveling */
+	pdram_timing->twlmrd = (LPDDR3_TWLMRD * nmhz + 999) / 1000;
+	pdram_timing->twlo = (LPDDR3_TWLO * nmhz + 999) / 1000;
+	pdram_timing->twldqsen = (LPDDR3_TWLDQSEN * nmhz + 999) / 1000;
+	/* CA training */
+	pdram_timing->tcackel = LPDDR3_TCACKEL;
+	pdram_timing->tcaent = LPDDR3_TCAENT;
+	pdram_timing->tcamrd = LPDDR3_TCAMRD;
+	pdram_timing->tcackeh = LPDDR3_TCACKEH;
+	pdram_timing->tcaext = LPDDR3_TCAEXT;
+	pdram_timing->tadr = (LPDDR3_TADR * nmhz + 999) / 1000;
+	pdram_timing->tmrz = (LPDDR3_TMRZ * nmhz + 999) / 1000;
+	pdram_timing->tcacd = pdram_timing->tadr + 2;
+
+	/* FSP */
+	pdram_timing->tfc_long = (LPDDR3_TFC_LONG * nmhz + 999) / 1000;
+}
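+/*
+ * Worked example for the nWR/MR1 encoding above (illustrative): at 800MHz,
+ * twr = ceil(15ns * 800MHz) = 12 cycles and the rounding ladder keeps it at
+ * 12; since nWR > 9, bit4 of MR2 (the nWR extension) is set and MR1 becomes
+ * LPDDR3_BL8 | LPDDR3_N_WR(12 - 10), i.e. 0x3 | (2 << 5).
+ */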
+
+#define LPDDR4_TINIT1	(200000) /* 200us */
+#define LPDDR4_TINIT2	(10) /* 10ns */
+#define LPDDR4_TINIT3	(2000000) /* 2ms */
+#define LPDDR4_TINIT4	(5) /* tCK */
+#define LPDDR4_TINIT5	(2000) /* 2us */
+#define LPDDR4_TRSTL		LPDDR4_TINIT1
+#define LPDDR4_TRSTH		LPDDR4_TINIT3
+#define LPDDR4_TREFI_3_9_US	(3900) /* 3.9us */
+
+/* base timing */
+#define LPDDR4_TRCD	(18) /* tRCD, max(18ns,4tCK) */
+#define LPDDR4_TRP_PB	(18) /* tRPpb, max(18ns, 4tCK) */
+#define LPDDR4_TRP_AB	(21) /* tRPab, max(21ns, 4tCK) */
+#define LPDDR4_TRRD	(10) /* tRRD, max(4tCK,10ns) */
+#define LPDDR4_TCCD_BL16	(8) /* tCK */
+#define LPDDR4_TCCD_BL32	(16) /* tCK */
+#define LPDDR4_TWTR	(10) /* tWTR, max(8tCK, 10ns) */
+#define LPDDR4_TRTW	(0) /* tCK register min valid value */
+#define LPDDR4_TRAS_MAX (70000) /* 70us */
+#define LPDDR4_TRAS	(42) /* tRAS, max(3tCK,42ns) */
+#define LPDDR4_TFAW	(40) /* tFAW, min 40ns */
+#define LPDDR4_TRFC_12GBIT	(280) /* tRFC, 280ns(>=12Gb) */
+#define LPDDR4_TRFC_6GBIT	(180) /* 6Gb/8Gb 180ns */
+#define LPDDR4_TRFC_4GBIT	(130) /* 4Gb 130ns */
+#define LPDDR4_TDQSCK_MIN	(1) /* tDQSCKmin,1.5ns */
+#define LPDDR4_TDQSCK_MAX	(3) /* tDQSCKmax,3.5ns */
+#define LPDDR4_TPPD		(4) /* tCK */
+
+/* pd and sr */
+#define LPDDR4_TXP	(7) /* tXP, max(5tCK,7.5ns) */
+#define LPDDR4_TCKE	(7) /* tCKE, max(7.5ns,4 tCK) */
+#define LPDDR4_TESCKE	(1) /* tESCKE, max(1.75ns, 3tCK) */
+#define LPDDR4_TSR	(15) /* tSR, max(15ns, 3tCK) */
+#define LPDDR4_TCMDCKE	(1) /* max(1.75ns, 3tCK) */
+#define LPDDR4_TCSCKE	(1) /* 1.75ns */
+#define LPDDR4_TCKELCS	(5) /* max(5ns, 5tCK) */
+#define LPDDR4_TCSCKEH	(1) /* 1.75ns */
+#define LPDDR4_TCKEHCS	(7) /* max(7.5ns, 5tCK) */
+#define LPDDR4_TMRWCKEL	(14) /* max(14ns, 10tCK) */
+#define LPDDR4_TCKELCMD	(7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKEHCMD	(7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKELPD	(7) /* max(7.5ns, 3tCK) */
+#define LPDDR4_TCKCKEL	(7) /* max(7.5ns, 3tCK) */
+
+/* mode register timing */
+#define LPDDR4_TMRD	(14) /* tMRD, (=tMRW), max(14ns, 10 tCK) */
+#define LPDDR4_TMRR	(8) /* tMRR, 8 tCK */
+
+/* ODT */
+#define LPDDR4_TODTON	(3) /* 3.5ns */
+
+/* ZQ */
+#define LPDDR4_TZQCAL	(1000) /* 1us */
+#define LPDDR4_TZQLAT	(30) /* tZQLAT, max(30ns,8tCK) */
+#define LPDDR4_TZQRESET (50) /* ZQreset, max(3tCK,50ns) */
+#define LPDDR4_TZQCKE	(1) /* tZQCKE, max(1.75ns, 3tCK) */
+
+/* write leveling */
+#define LPDDR4_TWLMRD	(40) /* tCK */
+#define LPDDR4_TWLO	(20) /* ns */
+#define LPDDR4_TWLDQSEN (20) /* tCK */
+
+/* CA training */
+#define LPDDR4_TCAENT	(250) /* ns */
+#define LPDDR4_TADR	(20) /* ns */
+#define LPDDR4_TMRZ	(1) /* 1.5ns */
+#define LPDDR4_TVREF_LONG	(250) /* ns */
+#define LPDDR4_TVREF_SHORT	(100) /* ns */
+
+/* VRCG */
+#define LPDDR4_TVRCG_ENABLE	(200) /* ns */
+#define LPDDR4_TVRCG_DISABLE	(100) /* ns */
+
+/* FSP */
+#define LPDDR4_TFC_LONG		(250) /* ns */
+#define LPDDR4_TCKFSPE		(7) /* max(7.5ns, 4tCK) */
+#define LPDDR4_TCKFSPX		(7) /* max(7.5ns, 4tCK) */
+
+/*
+ * Description: based on the input "timing_config", calculate all
+ *              LPDDR4 spec timings into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ */
+static void lpddr4_get_parameter(struct timing_related_config *timing_config,
+				 struct dram_timing_t *pdram_timing)
+{
+	uint32_t nmhz = timing_config->freq;
+	uint32_t ddr_capability_per_die = get_max_die_capability(timing_config);
+	uint32_t tmp, trp_tmp, trppb_tmp, tras_tmp;
+
+	zeromem((void *)pdram_timing, sizeof(struct dram_timing_t));
+	pdram_timing->mhz = nmhz;
+	pdram_timing->al = 0;
+	pdram_timing->bl = timing_config->bl;
+
+	/*
+	 * Only support Write Latency Set A here
+	 *      2133 1866 1600 1333 1066 800 533 266
+	 *  RL, 36   32   28   24   20   14  10  6
+	 *  WL, 18   16   14   12   10   8   6   4
+	 * nWR, 40   34   30   24   20   16  10  6
+	 * nRTP,16   14   12   10   8    8   8   8
+	 */
+	tmp = (timing_config->bl == 32) ? 1 : 0;
+
+	/*
+	 * we always use WR preamble = 2tCK
+	 * RD preamble = Static
+	 */
+	tmp |= (1 << 2);
+	if (nmhz <= 266) {
+		pdram_timing->cl = 6;
+		pdram_timing->cwl = 4;
+		pdram_timing->twr = 6;
+		pdram_timing->trtp = 8;
+		pdram_timing->mr[2] = LPDDR4_RL6_NRTP8 | LPDDR4_A_WL4;
+	} else if (nmhz <= 533) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 12;
+			pdram_timing->mr[2] = LPDDR4_RL12_NRTP8 | LPDDR4_A_WL6;
+		} else {
+			pdram_timing->cl = 10;
+			pdram_timing->mr[2] = LPDDR4_RL10_NRTP8 | LPDDR4_A_WL6;
+		}
+		pdram_timing->cwl = 6;
+		pdram_timing->twr = 10;
+		pdram_timing->trtp = 8;
+		tmp |= (1 << 4);
+	} else if (nmhz <= 800) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 16;
+			pdram_timing->mr[2] = LPDDR4_RL16_NRTP8 | LPDDR4_A_WL8;
+		} else {
+			pdram_timing->cl = 14;
+			pdram_timing->mr[2] = LPDDR4_RL14_NRTP8 | LPDDR4_A_WL8;
+		}
+		pdram_timing->cwl = 8;
+		pdram_timing->twr = 16;
+		pdram_timing->trtp = 8;
+		tmp |= (2 << 4);
+	} else if (nmhz <= 1066) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 22;
+			pdram_timing->mr[2] = LPDDR4_RL22_NRTP8 | LPDDR4_A_WL10;
+		} else {
+			pdram_timing->cl = 20;
+			pdram_timing->mr[2] = LPDDR4_RL20_NRTP8 | LPDDR4_A_WL10;
+		}
+		pdram_timing->cwl = 10;
+		pdram_timing->twr = 20;
+		pdram_timing->trtp = 8;
+		tmp |= (3 << 4);
+	} else if (nmhz <= 1333) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 28;
+			pdram_timing->mr[2] = LPDDR4_RL28_NRTP10 |
+						LPDDR4_A_WL12;
+		} else {
+			pdram_timing->cl = 24;
+			pdram_timing->mr[2] = LPDDR4_RL24_NRTP10 |
+						LPDDR4_A_WL12;
+		}
+		pdram_timing->cwl = 12;
+		pdram_timing->twr = 24;
+		pdram_timing->trtp = 10;
+		tmp |= (4 << 4);
+	} else if (nmhz <= 1600) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 32;
+			pdram_timing->mr[2] = LPDDR4_RL32_NRTP12 |
+						LPDDR4_A_WL14;
+		} else {
+			pdram_timing->cl = 28;
+			pdram_timing->mr[2] = LPDDR4_RL28_NRTP12 |
+						LPDDR4_A_WL14;
+		}
+		pdram_timing->cwl = 14;
+		pdram_timing->twr = 30;
+		pdram_timing->trtp = 12;
+		tmp |= (5 << 4);
+	} else if (nmhz <= 1866) {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 36;
+			pdram_timing->mr[2] = LPDDR4_RL36_NRTP14 |
+						LPDDR4_A_WL16;
+		} else {
+			pdram_timing->cl = 32;
+			pdram_timing->mr[2] = LPDDR4_RL32_NRTP14 |
+						LPDDR4_A_WL16;
+		}
+		pdram_timing->cwl = 16;
+		pdram_timing->twr = 34;
+		pdram_timing->trtp = 14;
+		tmp |= (6 << 4);
+	} else {
+		if (timing_config->rdbi) {
+			pdram_timing->cl = 40;
+			pdram_timing->mr[2] = LPDDR4_RL40_NRTP16 |
+						LPDDR4_A_WL18;
+		} else {
+			pdram_timing->cl = 36;
+			pdram_timing->mr[2] = LPDDR4_RL36_NRTP16 |
+						LPDDR4_A_WL18;
+		}
+		pdram_timing->cwl = 18;
+		pdram_timing->twr = 40;
+		pdram_timing->trtp = 16;
+		tmp |= (7 << 4);
+	}
+	pdram_timing->mr[1] = tmp;
+	tmp = (timing_config->rdbi ? LPDDR4_DBI_RD_EN : 0) |
+	      (timing_config->wdbi ? LPDDR4_DBI_WR_EN : 0);
+	switch (timing_config->dramds) {
+	case 240:
+		pdram_timing->mr[3] = LPDDR4_PDDS_240 | tmp;
+		break;
+	case 120:
+		pdram_timing->mr[3] = LPDDR4_PDDS_120 | tmp;
+		break;
+	case 80:
+		pdram_timing->mr[3] = LPDDR4_PDDS_80 | tmp;
+		break;
+	case 60:
+		pdram_timing->mr[3] = LPDDR4_PDDS_60 | tmp;
+		break;
+	case 48:
+		pdram_timing->mr[3] = LPDDR4_PDDS_48 | tmp;
+		break;
+	case 40:
+	default:
+		pdram_timing->mr[3] = LPDDR4_PDDS_40 | tmp;
+		break;
+	}
+	pdram_timing->mr[0] = 0;
+	if (timing_config->odt) {
+		switch (timing_config->dramodt) {
+		case 240:
+			tmp = LPDDR4_DQODT_240;
+			break;
+		case 120:
+			tmp = LPDDR4_DQODT_120;
+			break;
+		case 80:
+			tmp = LPDDR4_DQODT_80;
+			break;
+		case 60:
+			tmp = LPDDR4_DQODT_60;
+			break;
+		case 48:
+			tmp = LPDDR4_DQODT_48;
+			break;
+		case 40:
+		default:
+			tmp = LPDDR4_DQODT_40;
+			break;
+		}
+
+		switch (timing_config->caodt) {
+		case 240:
+			pdram_timing->mr11 = LPDDR4_CAODT_240 | tmp;
+			break;
+		case 120:
+			pdram_timing->mr11 = LPDDR4_CAODT_120 | tmp;
+			break;
+		case 80:
+			pdram_timing->mr11 = LPDDR4_CAODT_80 | tmp;
+			break;
+		case 60:
+			pdram_timing->mr11 = LPDDR4_CAODT_60 | tmp;
+			break;
+		case 48:
+			pdram_timing->mr11 = LPDDR4_CAODT_48 | tmp;
+			break;
+		case 40:
+		default:
+			pdram_timing->mr11 = LPDDR4_CAODT_40 | tmp;
+			break;
+		}
+	} else {
+		pdram_timing->mr11 = LPDDR4_CAODT_DIS | tmp;
+	}
+
+	pdram_timing->tinit1 = (LPDDR4_TINIT1 * nmhz + 999) / 1000;
+	pdram_timing->tinit2 = (LPDDR4_TINIT2 * nmhz + 999) / 1000;
+	pdram_timing->tinit3 = (LPDDR4_TINIT3 * nmhz + 999) / 1000;
+	pdram_timing->tinit4 = (LPDDR4_TINIT4 * nmhz + 999) / 1000;
+	pdram_timing->tinit5 = (LPDDR4_TINIT5 * nmhz + 999) / 1000;
+	pdram_timing->trstl = (LPDDR4_TRSTL * nmhz + 999) / 1000;
+	pdram_timing->trsth = (LPDDR4_TRSTH * nmhz + 999) / 1000;
+	/* tREFI, average periodic refresh interval, 3.9us(4Gb-16Gb) */
+	pdram_timing->trefi = (LPDDR4_TREFI_3_9_US * nmhz + 999) / 1000;
+	/* base timing */
+	tmp = ((LPDDR4_TRCD * nmhz + 999) / 1000);
+	pdram_timing->trcd = max(4, tmp);
+	trppb_tmp = ((LPDDR4_TRP_PB * nmhz + 999) / 1000);
+	trppb_tmp = max(4, trppb_tmp);
+	pdram_timing->trppb = trppb_tmp;
+	trp_tmp = ((LPDDR4_TRP_AB * nmhz + 999) / 1000);
+	trp_tmp = max(4, trp_tmp);
+	pdram_timing->trp = trp_tmp;
+	tras_tmp = ((LPDDR4_TRAS * nmhz + 999) / 1000);
+	tras_tmp = max(3, tras_tmp);
+	pdram_timing->tras_min = tras_tmp;
+	pdram_timing->trc = (tras_tmp + trp_tmp);
+	tmp = ((LPDDR4_TRRD * nmhz + 999) / 1000);
+	pdram_timing->trrd = max(4, tmp);
+	if (timing_config->bl == 32)
+		pdram_timing->tccd = LPDDR4_TCCD_BL32;
+	else
+		pdram_timing->tccd = LPDDR4_TCCD_BL16;
+	pdram_timing->tccdmw = 4 * pdram_timing->tccd;
+	tmp = ((LPDDR4_TWTR * nmhz + 999) / 1000);
+	pdram_timing->twtr = max(8, tmp);
+	pdram_timing->trtw =  ((LPDDR4_TRTW * nmhz + 999) / 1000);
+	pdram_timing->tras_max = ((LPDDR4_TRAS_MAX * nmhz + 999) / 1000);
+	pdram_timing->tfaw = (LPDDR4_TFAW * nmhz + 999) / 1000;
+	if (ddr_capability_per_die > 0x60000000) {
+		/* >= 12Gb */
+		pdram_timing->trfc =
+			(LPDDR4_TRFC_12GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR4_TRFC_12GBIT + 7) * nmhz + (nmhz >> 1) +
+				999) / 1000);
+	} else if (ddr_capability_per_die > 0x30000000) {
+		pdram_timing->trfc =
+			(LPDDR4_TRFC_6GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR4_TRFC_6GBIT + 7) * nmhz + (nmhz >> 1) +
+				999) / 1000);
+	} else {
+		pdram_timing->trfc =
+			(LPDDR4_TRFC_4GBIT * nmhz + 999) / 1000;
+		tmp = (((LPDDR4_TRFC_4GBIT + 7) * nmhz + (nmhz >> 1) +
+				999) / 1000);
+	}
+	pdram_timing->txsr = max(2, tmp);
+	pdram_timing->txsnr = max(2, tmp);
+	/* tDQSCK is rounded down */
+	pdram_timing->tdqsck =  ((LPDDR4_TDQSCK_MIN * nmhz +
+				(nmhz >> 1)) / 1000);
+	pdram_timing->tdqsck_max =  ((LPDDR4_TDQSCK_MAX * nmhz +
+				(nmhz >> 1) + 999) / 1000);
+	pdram_timing->tppd = LPDDR4_TPPD;
+	/* pd and sr */
+	tmp = ((LPDDR4_TXP * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->txp = max(5, tmp);
+	tmp = ((LPDDR4_TCKE * nmhz + (nmhz >> 1) + 999) / 1000);
+	pdram_timing->tcke = max(4, tmp);
+	tmp = ((LPDDR4_TESCKE * nmhz +
+		((nmhz * 3) / 4) +
+		999) / 1000);
+	pdram_timing->tescke = max(3, tmp);
+	tmp = ((LPDDR4_TSR * nmhz + 999) / 1000);
+	pdram_timing->tsr = max(3, tmp);
+	tmp = ((LPDDR4_TCMDCKE * nmhz +
+		((nmhz * 3) / 4) +
+		999) / 1000);
+	pdram_timing->tcmdcke = max(3, tmp);
+	pdram_timing->tcscke = ((LPDDR4_TCSCKE * nmhz +
+		((nmhz * 3) / 4) +
+		999) / 1000);
+	tmp = ((LPDDR4_TCKELCS * nmhz + 999) / 1000);
+	pdram_timing->tckelcs = max(5, tmp);
+	pdram_timing->tcsckeh = ((LPDDR4_TCSCKEH * nmhz +
+		((nmhz * 3) / 4) +
+		999) / 1000);
+	tmp = ((LPDDR4_TCKEHCS * nmhz +
+		(nmhz >> 1) + 999) / 1000);
+	pdram_timing->tckehcs = max(5, tmp);
+	tmp = ((LPDDR4_TMRWCKEL * nmhz + 999) / 1000);
+	pdram_timing->tmrwckel = max(10, tmp);
+	tmp = ((LPDDR4_TCKELCMD * nmhz + (nmhz >> 1) +
+		999) / 1000);
+	pdram_timing->tckelcmd = max(3, tmp);
+	tmp = ((LPDDR4_TCKEHCMD * nmhz + (nmhz >> 1) +
+		999) / 1000);
+	pdram_timing->tckehcmd = max(3, tmp);
+	tmp = ((LPDDR4_TCKELPD * nmhz + (nmhz >> 1) +
+		999) / 1000);
+	pdram_timing->tckelpd = max(3, tmp);
+	tmp = ((LPDDR4_TCKCKEL * nmhz + (nmhz >> 1) +
+		999) / 1000);
+	pdram_timing->tckckel = max(3, tmp);
+	/* mode register timing */
+	tmp = ((LPDDR4_TMRD * nmhz + 999) / 1000);
+	pdram_timing->tmrd = max(10, tmp);
+	pdram_timing->tmrr = LPDDR4_TMRR;
+	pdram_timing->tmrri = pdram_timing->trcd + 3;
+	/* ODT */
+	pdram_timing->todton = (LPDDR4_TODTON * nmhz + (nmhz >> 1) + 999)
+				/ 1000;
+	/* ZQ */
+	pdram_timing->tzqcal = (LPDDR4_TZQCAL * nmhz + 999) / 1000;
+	tmp = ((LPDDR4_TZQLAT * nmhz + 999) / 1000);
+	pdram_timing->tzqlat = max(8, tmp);
+	tmp = ((LPDDR4_TZQRESET * nmhz + 999) / 1000);
+	pdram_timing->tzqreset = max(3, tmp);
+	tmp = ((LPDDR4_TZQCKE * nmhz +
+		((nmhz * 3) / 4) +
+		999) / 1000);
+	pdram_timing->tzqcke = max(3, tmp);
+	/* write leveling */
+	pdram_timing->twlmrd = LPDDR4_TWLMRD;
+	pdram_timing->twlo = (LPDDR4_TWLO * nmhz + 999) / 1000;
+	pdram_timing->twldqsen = LPDDR4_TWLDQSEN;
+	/* CA training */
+	pdram_timing->tcaent = (LPDDR4_TCAENT * nmhz + 999) / 1000;
+	pdram_timing->tadr = (LPDDR4_TADR * nmhz + 999) / 1000;
+	pdram_timing->tmrz = (LPDDR4_TMRZ * nmhz + (nmhz >> 1) + 999) / 1000;
+	pdram_timing->tvref_long = (LPDDR4_TVREF_LONG * nmhz + 999) / 1000;
+	pdram_timing->tvref_short = (LPDDR4_TVREF_SHORT * nmhz + 999) / 1000;
+	/* VRCG */
+	pdram_timing->tvrcg_enable = (LPDDR4_TVRCG_ENABLE * nmhz +
+					999) / 1000;
+	pdram_timing->tvrcg_disable = (LPDDR4_TVRCG_DISABLE * nmhz +
+					999) / 1000;
+	/* FSP */
+	pdram_timing->tfc_long = (LPDDR4_TFC_LONG * nmhz + 999) / 1000;
+	tmp = (LPDDR4_TCKFSPE * nmhz + (nmhz >> 1) + 999) / 1000;
+	pdram_timing->tckfspe = max(4, tmp);
+	tmp = (LPDDR4_TCKFSPX * nmhz + (nmhz >> 1) + 999) / 1000;
+	pdram_timing->tckfspx = max(4, tmp);
+}
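+/*
+ * Note on the fractional terms above (illustrative): several LPDDR4 minimums
+ * are x.5ns or x.75ns, so the #define keeps the integer part and the
+ * expression adds the remainder, e.g. tESCKE = max(1.75ns, 3tCK):
+ *
+ *	tmp = (LPDDR4_TESCKE * nmhz + ((nmhz * 3) / 4) + 999) / 1000;
+ *	pdram_timing->tescke = max(3, tmp);
+ *
+ * At 800MHz this gives max(3, ceil(1.4 tCK)) = max(3, 2) = 3 cycles.
+ */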
+
+/*
+ * Description: based on the input "timing_config", calculate the spec
+ *              timings of the corresponding "dram_type" into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ * NOTE: MR ODT is set here and needs to be disabled by the controller
+ */
+void dram_get_parameter(struct timing_related_config *timing_config,
+			struct dram_timing_t *pdram_timing)
+{
+	switch (timing_config->dram_type) {
+	case DDR3:
+		ddr3_get_parameter(timing_config, pdram_timing);
+		break;
+	case LPDDR2:
+		lpddr2_get_parameter(timing_config, pdram_timing);
+		break;
+	case LPDDR3:
+		lpddr3_get_parameter(timing_config, pdram_timing);
+		break;
+	case LPDDR4:
+		lpddr4_get_parameter(timing_config, pdram_timing);
+		break;
+	}
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h
new file mode 100644
index 0000000..30d3aea
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/dram_spec_timing.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _DRAM_SPEC_TIMING_HEAD_
+#define _DRAM_SPEC_TIMING_HEAD_
+#include <stdint.h>
+
+enum ddr3_speed_rate {
+	/* 5-5-5 */
+	DDR3_800D = 0,
+	/* 6-6-6 */
+	DDR3_800E = 1,
+	/* 6-6-6 */
+	DDR3_1066E = 2,
+	/* 7-7-7 */
+	DDR3_1066F = 3,
+	/* 8-8-8 */
+	DDR3_1066G = 4,
+	/* 7-7-7 */
+	DDR3_1333F = 5,
+	/* 8-8-8 */
+	DDR3_1333G = 6,
+	/* 9-9-9 */
+	DDR3_1333H = 7,
+	/* 10-10-10 */
+	DDR3_1333J = 8,
+	/* 8-8-8 */
+	DDR3_1600G = 9,
+	/* 9-9-9 */
+	DDR3_1600H = 10,
+	/* 10-10-10 */
+	DDR3_1600J = 11,
+	/* 11-11-11 */
+	DDR3_1600K = 12,
+	/* 10-10-10 */
+	DDR3_1866J = 13,
+	/* 11-11-11 */
+	DDR3_1866K = 14,
+	/* 12-12-12 */
+	DDR3_1866L = 15,
+	/* 13-13-13 */
+	DDR3_1866M = 16,
+	/* 11-11-11 */
+	DDR3_2133K = 17,
+	/* 12-12-12 */
+	DDR3_2133L = 18,
+	/* 13-13-13 */
+	DDR3_2133M = 19,
+	/* 14-14-14 */
+	DDR3_2133N = 20,
+	DDR3_DEFAULT = 21,
+};
+
+#define max(a, b)  (((a) > (b)) ? (a) : (b))
+#define range(mi, val, ma)  (((ma) > (val)) ? (max(mi, val)) : (ma))
+
+struct dram_timing_t {
+	/* unit MHz */
+	uint32_t mhz;
+	/* some timing unit is us */
+	uint32_t tinit1;
+	uint32_t tinit2;
+	uint32_t tinit3;
+	uint32_t tinit4;
+	uint32_t tinit5;
+	/* reset low, DDR3:200us */
+	uint32_t trstl;
+	/* reset high to CKE high, DDR3:500us  */
+	uint32_t trsth;
+	uint32_t trefi;
+	/* base */
+	uint32_t trcd;
+	/* trp per bank */
+	uint32_t trppb;
+	/* trp all bank */
+	uint32_t trp;
+	uint32_t twr;
+	uint32_t tdal;
+	uint32_t trtp;
+	uint32_t trc;
+	uint32_t trrd;
+	uint32_t tccd;
+	uint32_t twtr;
+	uint32_t trtw;
+	uint32_t tras_max;
+	uint32_t tras_min;
+	uint32_t tfaw;
+	uint32_t trfc;
+	uint32_t tdqsck;
+	uint32_t tdqsck_max;
+	/* pd or sr */
+	uint32_t txsr;
+	uint32_t txsnr;
+	uint32_t txp;
+	uint32_t txpdll;
+	uint32_t tdllk;
+	uint32_t tcke;
+	uint32_t tckesr;
+	uint32_t tcksre;
+	uint32_t tcksrx;
+	uint32_t tdpd;
+	/* mode register timing */
+	uint32_t tmod;
+	uint32_t tmrd;
+	uint32_t tmrr;
+	uint32_t tmrri;
+	/* ODT */
+	uint32_t todton;
+	/* ZQ */
+	uint32_t tzqinit;
+	uint32_t tzqcs;
+	uint32_t tzqoper;
+	uint32_t tzqreset;
+	/* Write Leveling */
+	uint32_t twlmrd;
+	uint32_t twlo;
+	uint32_t twldqsen;
+	/* CA Training */
+	uint32_t tcackel;
+	uint32_t tcaent;
+	uint32_t tcamrd;
+	uint32_t tcackeh;
+	uint32_t tcaext;
+	uint32_t tadr;
+	uint32_t tmrz;
+	uint32_t tcacd;
+	/* mode register */
+	uint32_t mr[4];
+	uint32_t mr11;
+	/* lpddr4 spec */
+	uint32_t mr12;
+	uint32_t mr13;
+	uint32_t mr14;
+	uint32_t mr16;
+	uint32_t mr17;
+	uint32_t mr20;
+	uint32_t mr22;
+	uint32_t tccdmw;
+	uint32_t tppd;
+	uint32_t tescke;
+	uint32_t tsr;
+	uint32_t tcmdcke;
+	uint32_t tcscke;
+	uint32_t tckelcs;
+	uint32_t tcsckeh;
+	uint32_t tckehcs;
+	uint32_t tmrwckel;
+	uint32_t tzqcal;
+	uint32_t tzqlat;
+	uint32_t tzqcke;
+	uint32_t tvref_long;
+	uint32_t tvref_short;
+	uint32_t tvrcg_enable;
+	uint32_t tvrcg_disable;
+	uint32_t tfc_long;
+	uint32_t tckfspe;
+	uint32_t tckfspx;
+	uint32_t tckehcmd;
+	uint32_t tckelcmd;
+	uint32_t tckelpd;
+	uint32_t tckckel;
+	/* other */
+	uint32_t al;
+	uint32_t cl;
+	uint32_t cwl;
+	uint32_t bl;
+};
+
+struct dram_info_t {
+	/* speed_rate is only used for DDR3 */
+	enum ddr3_speed_rate speed_rate;
+	/* 1: use CS0, 2: use CS0 and CS1 */
+	uint32_t cs_cnt;
+	/* give the max per-die capability on each rank/cs */
+	uint32_t per_die_capability[2];
+};
+
+struct timing_related_config {
+	struct dram_info_t dram_info[2];
+	uint32_t dram_type;
+	/* MHz */
+	uint32_t freq;
+	uint32_t ch_cnt;
+	uint32_t bl;
+	/* 1:auto precharge, 0:never auto precharge */
+	uint32_t ap;
+	/*
+	 * 1:dll bypass, 0:dll normal
+	 * dram and controller dll bypass at the same time
+	 */
+	uint32_t dllbp;
+	/* 1:odt enable, 0:odt disable */
+	uint32_t odt;
+	/* 1:enable, 0:disable */
+	uint32_t rdbi;
+	uint32_t wdbi;
+	/* dram driver strength */
+	uint32_t dramds;
+	/* dram ODT, invalid if odt = 0 */
+	uint32_t dramodt;
+	/*
+	 * CA ODT, invalid if odt = 0;
+	 * it is only used by LPDDR4
+	 */
+	uint32_t caodt;
+};
+
+/* mr0 for ddr3 */
+#define DDR3_BL8		(0)
+#define DDR3_BC4_8		(1)
+#define DDR3_BC4		(2)
+#define DDR3_CL(n)		(((((n) - 4) & 0x7) << 4)\
+				| ((((n) - 4) & 0x8) >> 1))
+#define DDR3_WR(n)		(((n) & 0x7) << 9)
+#define DDR3_DLL_RESET		(1 << 8)
+#define DDR3_DLL_DERESET	(0 << 8)
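+/*
+ * Encoding note (illustrative): DDR3_CL() maps CL-4 onto MR0 bits A6:A4 plus
+ * the A2 extension bit for CL >= 12, e.g. DDR3_CL(11) = 0x70 and
+ * DDR3_CL(13) = (1 << 4) | (1 << 2); DDR3_WR() places the nWR code in A11:A9.
+ */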
+
+/* mr1 for ddr3 */
+#define DDR3_DLL_ENABLE		(0)
+#define DDR3_DLL_DISABLE	(1)
+#define DDR3_MR1_AL(n)		(((n) & 0x3) << 3)
+
+#define DDR3_DS_40		(0)
+#define DDR3_DS_34		(1 << 1)
+#define DDR3_RTT_NOM_DIS	(0)
+#define DDR3_RTT_NOM_60		(1 << 2)
+#define DDR3_RTT_NOM_120	(1 << 6)
+#define DDR3_RTT_NOM_40		((1 << 2) | (1 << 6))
+#define DDR3_TDQS		(1 << 11)
+
+/* mr2 for ddr3 */
+#define DDR3_MR2_CWL(n)		((((n) - 5) & 0x7) << 3)
+#define DDR3_RTT_WR_DIS		(0)
+#define DDR3_RTT_WR_60		(1 << 9)
+#define DDR3_RTT_WR_120		(2 << 9)
+
+/*
+ * MR0 (Device Information)
+ * 0:DAI complete, 1:DAI still in progress
+ */
+#define LPDDR2_DAI		(0x1)
+/* 0:S2 or S4 SDRAM, 1:NVM */
+#define LPDDR2_DI		(0x1 << 1)
+/* 0:DNV not supported, 1:DNV supported */
+#define LPDDR2_DNVI		(0x1 << 2)
+#define LPDDR2_RZQI		(0x3 << 3)
+
+/*
+ * 00:RZQ self test not supported,
+ * 01:ZQ-pin may connect to VDDCA or float
+ * 10:ZQ-pin may short to GND.
+ * 11:ZQ-pin self test completed, no error condition detected.
+ */
+
+/* MR1 (Device Feature) */
+#define LPDDR2_BL4		(0x2)
+#define LPDDR2_BL8		(0x3)
+#define LPDDR2_BL16		(0x4)
+#define LPDDR2_N_WR(n)		(((n) - 2) << 5)
+
+/* MR2 (Device Feature 2) */
+#define LPDDR2_RL3_WL1		(0x1)
+#define LPDDR2_RL4_WL2		(0x2)
+#define LPDDR2_RL5_WL2		(0x3)
+#define LPDDR2_RL6_WL3		(0x4)
+#define LPDDR2_RL7_WL4		(0x5)
+#define LPDDR2_RL8_WL4		(0x6)
+
+/* MR3 (IO Configuration 1) */
+#define LPDDR2_DS_34		(0x1)
+#define LPDDR2_DS_40		(0x2)
+#define LPDDR2_DS_48		(0x3)
+#define LPDDR2_DS_60		(0x4)
+#define LPDDR2_DS_80		(0x6)
+/* optional */
+#define LPDDR2_DS_120		(0x7)
+
+/* MR4 (Device Temperature) */
+#define LPDDR2_TREF_MASK	(0x7)
+#define LPDDR2_4_TREF		(0x1)
+#define LPDDR2_2_TREF		(0x2)
+#define LPDDR2_1_TREF		(0x3)
+#define LPDDR2_025_TREF		(0x5)
+#define LPDDR2_025_TREF_DERATE	(0x6)
+
+#define LPDDR2_TUF		(0x1 << 7)
+
+/* MR8 (Basic configuration 4) */
+#define LPDDR2_S4		(0x0)
+#define LPDDR2_S2		(0x1)
+#define LPDDR2_N		(0x2)
+/* Unit:MB */
+#define LPDDR2_DENSITY(mr8)	(8 << (((mr8) >> 2) & 0xf))
+#define LPDDR2_IO_WIDTH(mr8)	(32 >> (((mr8) >> 6) & 0x3))
+
+/* MR10 (Calibration) */
+#define LPDDR2_ZQINIT		(0xff)
+#define LPDDR2_ZQCL		(0xab)
+#define LPDDR2_ZQCS		(0x56)
+#define LPDDR2_ZQRESET		(0xc3)
+
+/* MR16 (PASR Bank Mask), S2 SDRAM Only */
+#define LPDDR2_PASR_FULL	(0x0)
+#define LPDDR2_PASR_1_2		(0x1)
+#define LPDDR2_PASR_1_4		(0x2)
+#define LPDDR2_PASR_1_8		(0x3)
+
+/*
+ * MR0 (Device Information)
+ * 0:DAI complete,
+ * 1:DAI still in progress
+ */
+#define LPDDR3_DAI		(0x1)
+/*
+ * 00:RZQ self test not supported,
+ * 01:ZQ-pin may connect to VDDCA or float
+ * 10:ZQ-pin may short to GND.
+ * 11:ZQ-pin self test completed, no error condition detected.
+ */
+#define LPDDR3_RZQI		(0x3 << 3)
+/*
+ * 0:DRAM does not support WL(Set B),
+ * 1:DRAM supports WL(Set B)
+ */
+#define LPDDR3_WL_SUPOT		(1 << 6)
+/*
+ * 0:DRAM does not support RL=3,nWR=3,WL=1;
+ * 1:DRAM supports RL=3,nWR=3,WL=1 for frequencies <=166
+ */
+#define LPDDR3_RL3_SUPOT	(1 << 7)
+
+/* MR1 (Device Feature) */
+#define LPDDR3_BL8		(0x3)
+#define LPDDR3_N_WR(n)		((n) << 5)
+
+/* MR2 (Device Feature 2), WL Set A,default */
+/* <=166MHz,optional*/
+#define LPDDR3_RL3_WL1		(0x1)
+/* <=400MHz*/
+#define LPDDR3_RL6_WL3		(0x4)
+/* <=533MHz*/
+#define LPDDR3_RL8_WL4		(0x6)
+/* <=600MHz*/
+#define LPDDR3_RL9_WL5		(0x7)
+/* <=667MHz,default*/
+#define LPDDR3_RL10_WL6		(0x8)
+/* <=733MHz*/
+#define LPDDR3_RL11_WL6		(0x9)
+/* <=800MHz*/
+#define LPDDR3_RL12_WL6		(0xa)
+/* <=933MHz*/
+#define LPDDR3_RL14_WL8		(0xc)
+/* <=1066MHz*/
+#define LPDDR3_RL16_WL8		(0xe)
+
+/* WL Set B, optional */
+/* <=667MHz,default*/
+#define LPDDR3_RL10_WL8		(0x8)
+/* <=733MHz*/
+#define LPDDR3_RL11_WL9		(0x9)
+/* <=800MHz*/
+#define LPDDR3_RL12_WL9		(0xa)
+/* <=933MHz*/
+#define LPDDR3_RL14_WL11	(0xc)
+/* <=1066MHz*/
+#define LPDDR3_RL16_WL13	(0xe)
+
+/* 1:enable nWR programming > 9(default)*/
+#define LPDDR3_N_WRE		(1 << 4)
+/* 1:Select WL Set B*/
+#define LPDDR3_WL_S		(1 << 6)
+/* 1:enable*/
+#define LPDDR3_WR_LEVEL		(1 << 7)
+
+/* MR3 (IO Configuration 1) */
+#define LPDDR3_DS_34		(0x1)
+#define LPDDR3_DS_40		(0x2)
+#define LPDDR3_DS_48		(0x3)
+#define LPDDR3_DS_60		(0x4)
+#define LPDDR3_DS_80		(0x6)
+#define LPDDR3_DS_34D_40U	(0x9)
+#define LPDDR3_DS_40D_48U	(0xa)
+#define LPDDR3_DS_34D_48U	(0xb)
+
+/* MR4 (Device Temperature) */
+#define LPDDR3_TREF_MASK	(0x7)
+/* SDRAM Low temperature operating limit exceeded */
+#define LPDDR3_LT_EXED		(0x0)
+#define LPDDR3_4_TREF		(0x1)
+#define LPDDR3_2_TREF		(0x2)
+#define LPDDR3_1_TREF		(0x3)
+#define LPDDR3_05_TREF		(0x4)
+#define LPDDR3_025_TREF		(0x5)
+#define LPDDR3_025_TREF_DERATE	(0x6)
+/* SDRAM High temperature operating limit exceeded */
+#define LPDDR3_HT_EXED		(0x7)
+
+/* 1:value has changed since last read of MR4 */
+#define LPDDR3_TUF		(0x1 << 7)
+
+/* MR8 (Basic configuration 4) */
+#define LPDDR3_S8		(0x3)
+#define LPDDR3_DENSITY(mr8)	(8 << (((mr8) >> 2) & 0xf))
+#define LPDDR3_IO_WIDTH(mr8)	(32 >> (((mr8) >> 6) & 0x3))
+
+/* MR10 (Calibration) */
+#define LPDDR3_ZQINIT		(0xff)
+#define LPDDR3_ZQCL		(0xab)
+#define LPDDR3_ZQCS		(0x56)
+#define LPDDR3_ZQRESET		(0xc3)
+
+/* MR11 (ODT Control) */
+#define LPDDR3_ODT_60		(1)
+#define LPDDR3_ODT_120		(2)
+#define LPDDR3_ODT_240		(3)
+#define LPDDR3_ODT_DIS		(0)
+
+/* MR2 (Device Feature 2) */
+/* RL & nRTP for DBI-RD Disabled */
+#define LPDDR4_RL6_NRTP8	(0x0)
+#define LPDDR4_RL10_NRTP8	(0x1)
+#define LPDDR4_RL14_NRTP8	(0x2)
+#define LPDDR4_RL20_NRTP8	(0x3)
+#define LPDDR4_RL24_NRTP10	(0x4)
+#define LPDDR4_RL28_NRTP12	(0x5)
+#define LPDDR4_RL32_NRTP14	(0x6)
+#define LPDDR4_RL36_NRTP16	(0x7)
+/* RL & nRTP for DBI-RD Enabled */
+#define LPDDR4_RL12_NRTP8	(0x1)
+#define LPDDR4_RL16_NRTP8	(0x2)
+#define LPDDR4_RL22_NRTP8	(0x3)
+#define LPDDR4_RL28_NRTP10	(0x4)
+#define LPDDR4_RL32_NRTP12	(0x5)
+#define LPDDR4_RL36_NRTP14	(0x6)
+#define LPDDR4_RL40_NRTP16	(0x7)
+/* WL Set A,default */
+#define LPDDR4_A_WL4		(0x0)
+#define LPDDR4_A_WL6		(0x1)
+#define LPDDR4_A_WL8		(0x2)
+#define LPDDR4_A_WL10		(0x3)
+#define LPDDR4_A_WL12		(0x4)
+#define LPDDR4_A_WL14		(0x5)
+#define LPDDR4_A_WL16		(0x6)
+#define LPDDR4_A_WL18		(0x7)
+/* WL Set B, optional */
+#define LPDDR4_B_WL4		(0x0 << 3)
+#define LPDDR4_B_WL8		(0x1 << 3)
+#define LPDDR4_B_WL12		(0x2 << 3)
+#define LPDDR4_B_WL18		(0x3 << 3)
+#define LPDDR4_B_WL22		(0x4 << 3)
+#define LPDDR4_B_WL26		(0x5 << 3)
+#define LPDDR4_B_WL30		(0x6 << 3)
+#define LPDDR4_B_WL34		(0x7 << 3)
+/* 1:Select WL Set B*/
+#define LPDDR4_WL_B		(1 << 6)
+/* 1:enable*/
+#define LPDDR4_WR_LEVEL		(1 << 7)
+
+/* MR3 */
+#define LPDDR4_VDDQ_2_5		(0)
+#define LPDDR4_VDDQ_3		(1)
+#define LPDDR4_WRPST_0_5_TCK	(0 << 1)
+#define LPDDR4_WRPST_1_5_TCK	(1 << 1)
+#define LPDDR4_PPR_EN		(1 << 2)
+/* PDDS */
+#define LPDDR4_PDDS_240		(0x1 << 3)
+#define LPDDR4_PDDS_120		(0x2 << 3)
+#define LPDDR4_PDDS_80		(0x3 << 3)
+#define LPDDR4_PDDS_60		(0x4 << 3)
+#define LPDDR4_PDDS_48		(0x5 << 3)
+#define LPDDR4_PDDS_40		(0x6 << 3)
+#define LPDDR4_DBI_RD_EN	(1 << 6)
+#define LPDDR4_DBI_WR_EN	(1 << 7)
+
+/* MR11 (ODT Control) */
+#define LPDDR4_DQODT_240	(1)
+#define LPDDR4_DQODT_120	(2)
+#define LPDDR4_DQODT_80		(3)
+#define LPDDR4_DQODT_60		(4)
+#define LPDDR4_DQODT_48		(5)
+#define LPDDR4_DQODT_40		(6)
+#define LPDDR4_DQODT_DIS	(0)
+#define LPDDR4_CAODT_240	(1 << 4)
+#define LPDDR4_CAODT_120	(2 << 4)
+#define LPDDR4_CAODT_80		(3 << 4)
+#define LPDDR4_CAODT_60		(4 << 4)
+#define LPDDR4_CAODT_48		(5 << 4)
+#define LPDDR4_CAODT_40		(6 << 4)
+#define LPDDR4_CAODT_DIS	(0 << 4)
+
+/*
+ * Description: based on the input "timing_config", calculate the spec
+ *		timings of the corresponding "dram_type" into "pdram_timing".
+ * parameters:
+ *   input: timing_config
+ *   output: pdram_timing
+ * NOTE: MR ODT is set here and needs to be disabled by the controller
+ */
+void dram_get_parameter(struct timing_related_config *timing_config,
+			struct dram_timing_t *pdram_timing);
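+/*
+ * Illustrative usage (a sketch, not part of this driver; the dram_type values
+ * come from the platform dram header): for a single 4Gb LPDDR4 rank on one
+ * channel at 800MHz,
+ *
+ *	struct timing_related_config cfg = {
+ *		.dram_info[0] = {
+ *			.cs_cnt = 1,
+ *			.per_die_capability[0] = 0x20000000, // 4Gb = 512MB
+ *		},
+ *		.dram_type = LPDDR4,
+ *		.freq = 800, // MHz
+ *		.ch_cnt = 1,
+ *		.bl = 16,
+ *		.odt = 1,
+ *		.dramds = 40,
+ *		.dramodt = 60,
+ *		.caodt = 60,
+ *	};
+ *	struct dram_timing_t timing;
+ *
+ *	dram_get_parameter(&cfg, &timing);
+ *
+ * would return, among others, timing.cl = 14 and timing.cwl = 8.
+ */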
+
+#endif /* _DRAM_SPEC_TIMING_HEAD_ */
diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.c b/plat/rockchip/rk3399/drivers/dram/suspend.c
new file mode 100644
index 0000000..f66150a
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/suspend.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <dram.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <pmu_regs.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+#include <suspend.h>
+
+#define PMUGRF_OS_REG0			0x300
+#define PMUGRF_OS_REG1			0x304
+#define PMUGRF_OS_REG2			0x308
+#define PMUGRF_OS_REG3			0x30c
+
+#define CRU_SFTRST_DDR_CTRL(ch, n)	((0x1 << (8 + 16 + (ch) * 4)) | \
+					 ((n) << (8 + (ch) * 4)))
+#define CRU_SFTRST_DDR_PHY(ch, n)	((0x1 << (9 + 16 + (ch) * 4)) | \
+					 ((n) << (9 + (ch) * 4)))
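+/*
+ * Illustrative note: as with the other Rockchip CRU/GRF registers, the upper
+ * 16 bits are a write-enable mask for the lower 16, so e.g.
+ * CRU_SFTRST_DDR_CTRL(0, 1) = (1 << 24) | (1 << 8) asserts only the channel 0
+ * controller soft-reset bit and leaves the other bits unchanged.
+ */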
+
+#define FBDIV_ENC(n)			((n) << 16)
+#define FBDIV_DEC(n)			(((n) >> 16) & 0xfff)
+#define POSTDIV2_ENC(n)			((n) << 12)
+#define POSTDIV2_DEC(n)			(((n) >> 12) & 0x7)
+#define POSTDIV1_ENC(n)			((n) << 8)
+#define POSTDIV1_DEC(n)			(((n) >> 8) & 0x7)
+#define REFDIV_ENC(n)			(n)
+#define REFDIV_DEC(n)			((n) & 0x3f)
+
+/* PMU CRU */
+#define PMUCRU_RSTNHOLD_CON0		0x120
+#define PMUCRU_RSTNHOLD_CON1		0x124
+
+#define PRESET_GPIO0_HOLD(n)		(((n) << 7) | WMSK_BIT(7))
+#define PRESET_GPIO1_HOLD(n)		(((n) << 8) | WMSK_BIT(8))
+
+#define SYS_COUNTER_FREQ_IN_MHZ		(SYS_COUNTER_FREQ_IN_TICKS / 1000000)
+
+__pmusramdata uint32_t dpll_data[PLL_CON_COUNT];
+__pmusramdata uint32_t cru_clksel_con6;
+
+/*
+ * Copy @num registers from @src to @dst
+ */
+static __pmusramfunc void sram_regcpy(uintptr_t dst, uintptr_t src,
+		uint32_t num)
+{
+	while (num--) {
+		mmio_write_32(dst, mmio_read_32(src));
+		dst += sizeof(uint32_t);
+		src += sizeof(uint32_t);
+	}
+}
+
+/*
+ * Copy @num registers from @src to @dst
+ * This is intentionally a copy of the sram_regcpy function. PMUSRAM functions
+ * cannot be called from code running in DRAM.
+ */
+static void dram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
+{
+	while (num--) {
+		mmio_write_32(dst, mmio_read_32(src));
+		dst += sizeof(uint32_t);
+		src += sizeof(uint32_t);
+	}
+}
+
+static __pmusramfunc uint32_t sram_get_timer_value(void)
+{
+	/*
+	 * Generic delay timer implementation expects the timer to be a down
+	 * counter. We apply bitwise NOT operator to the tick values returned
+	 * by read_cntpct_el0() to simulate the down counter.
+	 */
+	return (uint32_t)(~read_cntpct_el0());
+}
+
+static __pmusramfunc void sram_udelay(uint32_t usec)
+{
+	uint32_t start, cnt, delta, delta_us;
+
+	/* counter is decreasing */
+	start = sram_get_timer_value();
+	do {
+		cnt = sram_get_timer_value();
+		if (cnt > start) {
+			delta = UINT32_MAX - cnt;
+			delta += start;
+		} else
+			delta = start - cnt;
+		delta_us = (delta / SYS_COUNTER_FREQ_IN_MHZ);
+	} while (delta_us < usec);
+}
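+/*
+ * Illustrative note: sram_get_timer_value() returns ~CNTPCT, so the value
+ * decreases over time and can wrap around; when cnt > start the code uses
+ * delta = (UINT32_MAX - cnt) + start, e.g. start = 5 and cnt = 0xfffffffe
+ * give delta = 1 + 5 = 6 ticks.
+ */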
+
+static __pmusramfunc void configure_sgrf(void)
+{
+	/*
+	 * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
+	 * IC ECO bug, need to set this register.
+	 *
+	 * SGRF_DDR_RGN_BYPS:
+	 * After the PD_CENTER suspend/resume, the DDR region
+	 * related registers in the SGRF will be reset, so we
+	 * need to re-initialize them.
+	 */
+	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+		      SGRF_DDR_RGN_DPLL_CLK |
+		      SGRF_DDR_RGN_RTC_CLK |
+		      SGRF_DDR_RGN_BYPS);
+}
+
+static __pmusramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
+		uint32_t phy)
+{
+	channel &= 0x1;
+	ctl &= 0x1;
+	phy &= 0x1;
+	mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
+		      CRU_SFTRST_DDR_CTRL(channel, ctl) |
+		      CRU_SFTRST_DDR_PHY(channel, phy));
+}
+
+static __pmusramfunc void phy_pctrl_reset(uint32_t ch)
+{
+	rkclk_ddr_reset(ch, 1, 1);
+	sram_udelay(10);
+	rkclk_ddr_reset(ch, 1, 0);
+	sram_udelay(10);
+	rkclk_ddr_reset(ch, 0, 0);
+	sram_udelay(10);
+}
+
+static __pmusramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
+{
+	uint32_t byte;
+
+	/* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
+	for (byte = 0; byte < 4; byte++)
+		mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 24,
+				   rank << 24);
+}
+
+static __pmusramfunc void select_per_cs_training_index(uint32_t ch,
+		uint32_t rank)
+{
+	/* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
+	if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
+		set_cs_training_index(ch, rank);
+}
+
+static __pmusramfunc void override_write_leveling_value(uint32_t ch)
+{
+	uint32_t byte;
+
+	for (byte = 0; byte < 4; byte++) {
+		/*
+		 * PHY_8/136/264/392
+		 * phy_per_cs_training_multicast_en_X 1bit offset_16
+		 */
+		mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 16,
+				   1 << 16);
+		mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
+				   0xffff << 16,
+				   0x200 << 16);
+	}
+
+	/* CTL_200 ctrlupd_req 1bit offset_8 */
+	mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
+}
+
+static __pmusramfunc int data_training(uint32_t ch,
+		struct rk3399_sdram_params *sdram_params,
+		uint32_t training_flag)
+{
+	uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
+	uint32_t rank = sdram_params->ch[ch].rank;
+	uint32_t rank_mask;
+	uint32_t i, tmp;
+
+	if (sdram_params->dramtype == LPDDR4)
+		rank_mask = (rank == 1) ? 0x5 : 0xf;
+	else
+		rank_mask = (rank == 1) ? 0x1 : 0x3;
+
+	/* PHY_927 PHY_PAD_DQS_DRIVE  RPULL offset_22 */
+	mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));
+
+	if (training_flag == PI_FULL_TRAINING) {
+		if (sdram_params->dramtype == LPDDR4) {
+			training_flag = PI_WRITE_LEVELING |
+					PI_READ_GATE_TRAINING |
+					PI_READ_LEVELING |
+					PI_WDQ_LEVELING;
+		} else if (sdram_params->dramtype == LPDDR3) {
+			training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
+					PI_READ_GATE_TRAINING;
+		} else if (sdram_params->dramtype == DDR3) {
+			training_flag = PI_WRITE_LEVELING |
+					PI_READ_GATE_TRAINING |
+					PI_READ_LEVELING;
+		}
+	}
+
+	/* CA training (supported by LPDDR4 and LPDDR3) */
+	if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
+		for (i = 0; i < 4; i++) {
+			if (!(rank_mask & (1 << i)))
+				continue;
+
+			select_per_cs_training_index(ch, i);
+			/* PI_100 PI_CALVL_EN:RW:8:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);
+
+			/* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 92),
+					   (0x1 << 16) | (0x3 << 24),
+					   (0x1 << 16) | (i << 24));
+			while (1) {
+				/* PI_174 PI_INT_STATUS:RD:8:18 */
+				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+				/*
+				 * check status obs
+				 * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
+				 */
+				obs_0 = mmio_read_32(PHY_REG(ch, 532));
+				obs_1 = mmio_read_32(PHY_REG(ch, 660));
+				obs_2 = mmio_read_32(PHY_REG(ch, 788));
+				if (((obs_0 >> 30) & 0x3) ||
+				    ((obs_1 >> 30) & 0x3) ||
+				    ((obs_2 >> 30) & 0x3))
+					obs_err = 1;
+				if ((((tmp >> 11) & 0x1) == 0x1) &&
+				    (((tmp >> 13) & 0x1) == 0x1) &&
+				    (((tmp >> 5) & 0x1) == 0x0) &&
+				    (obs_err == 0))
+					break;
+				else if ((((tmp >> 5) & 0x1) == 0x1) ||
+					 (obs_err == 1))
+					return -1;
+			}
+			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+		}
+		mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
+	}
+
+	/* write leveling(LPDDR4,LPDDR3,DDR3 support) */
+	if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
+		for (i = 0; i < rank; i++) {
+			select_per_cs_training_index(ch, i);
+			/* PI_60 PI_WRLVL_EN:RW:8:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
+			/* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 59),
+					   (0x1 << 8) | (0x3 << 16),
+					   (0x1 << 8) | (i << 16));
+
+			while (1) {
+				/* PI_174 PI_INT_STATUS:RD:8:18 */
+				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+				/*
+				 * check status obs; on error, leveling may
+				 * never complete. PHY_40/168/296/424
+				 * phy_wrlvl_status_obs_X:0:13
+				 */
+				obs_0 = mmio_read_32(PHY_REG(ch, 40));
+				obs_1 = mmio_read_32(PHY_REG(ch, 168));
+				obs_2 = mmio_read_32(PHY_REG(ch, 296));
+				obs_3 = mmio_read_32(PHY_REG(ch, 424));
+				if (((obs_0 >> 12) & 0x1) ||
+				    ((obs_1 >> 12) & 0x1) ||
+				    ((obs_2 >> 12) & 0x1) ||
+				    ((obs_3 >> 12) & 0x1))
+					obs_err = 1;
+				if ((((tmp >> 10) & 0x1) == 0x1) &&
+				    (((tmp >> 13) & 0x1) == 0x1) &&
+				    (((tmp >> 4) & 0x1) == 0x0) &&
+				    (obs_err == 0))
+					break;
+				else if ((((tmp >> 4) & 0x1) == 0x1) ||
+					 (obs_err == 1))
+					return -1;
+			}
+
+			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+		}
+		override_write_leveling_value(ch);
+		mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
+	}
+
+	/* read gate training(LPDDR4,LPDDR3,DDR3 support) */
+	if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
+		for (i = 0; i < rank; i++) {
+			select_per_cs_training_index(ch, i);
+			/* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
+					   0x2 << 24);
+			/*
+			 * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
+			 * PI_RDLVL_CS:RW:24:2
+			 */
+			mmio_clrsetbits_32(PI_REG(ch, 74),
+					   (0x1 << 16) | (0x3 << 24),
+					   (0x1 << 16) | (i << 24));
+
+			while (1) {
+				/* PI_174 PI_INT_STATUS:RD:8:18 */
+				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+				/*
+				 * check status obs
+				 * PHY_43/171/299/427
+				 *     PHY_GTLVL_STATUS_OBS_x:16:8
+				 */
+				obs_0 = mmio_read_32(PHY_REG(ch, 43));
+				obs_1 = mmio_read_32(PHY_REG(ch, 171));
+				obs_2 = mmio_read_32(PHY_REG(ch, 299));
+				obs_3 = mmio_read_32(PHY_REG(ch, 427));
+				if (((obs_0 >> (16 + 6)) & 0x3) ||
+				    ((obs_1 >> (16 + 6)) & 0x3) ||
+				    ((obs_2 >> (16 + 6)) & 0x3) ||
+				    ((obs_3 >> (16 + 6)) & 0x3))
+					obs_err = 1;
+				if ((((tmp >> 9) & 0x1) == 0x1) &&
+				    (((tmp >> 13) & 0x1) == 0x1) &&
+				    (((tmp >> 3) & 0x1) == 0x0) &&
+				    (obs_err == 0))
+					break;
+				else if ((((tmp >> 3) & 0x1) == 0x1) ||
+					 (obs_err == 1))
+					return -1;
+			}
+			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+		}
+		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
+	}
+
+	/* read leveling(LPDDR4,LPDDR3,DDR3 support) */
+	if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
+		for (i = 0; i < rank; i++) {
+			select_per_cs_training_index(ch, i);
+			/* PI_80 PI_RDLVL_EN:RW:16:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
+					   0x2 << 16);
+			/* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 74),
+					   (0x1 << 8) | (0x3 << 24),
+					   (0x1 << 8) | (i << 24));
+			while (1) {
+				/* PI_174 PI_INT_STATUS:RD:8:18 */
+				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+
+				/*
+				 * make sure status obs not report error bit
+				 * PHY_46/174/302/430
+				 *     phy_rdlvl_status_obs_X:16:8
+				 */
+				if ((((tmp >> 8) & 0x1) == 0x1) &&
+				    (((tmp >> 13) & 0x1) == 0x1) &&
+				    (((tmp >> 2) & 0x1) == 0x0))
+					break;
+				else if (((tmp >> 2) & 0x1) == 0x1)
+					return -1;
+			}
+			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+		}
+		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
+	}
+
+	/* wdq leveling(LPDDR4 support) */
+	if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
+		for (i = 0; i < 4; i++) {
+			if (!(rank_mask & (1 << i)))
+				continue;
+
+			select_per_cs_training_index(ch, i);
+			/*
+			 * disable PI_WDQLVL_VREF_EN before wdq leveling?
+			 * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
+			 */
+			mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
+			/* PI_124 PI_WDQLVL_EN:RW:16:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
+					   0x2 << 16);
+			/* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
+			mmio_clrsetbits_32(PI_REG(ch, 121),
+					   (0x1 << 8) | (0x3 << 16),
+					   (0x1 << 8) | (i << 16));
+			while (1) {
+				/* PI_174 PI_INT_STATUS:RD:8:18 */
+				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
+				if ((((tmp >> 12) & 0x1) == 0x1) &&
+				    (((tmp >> 13) & 0x1) == 0x1) &&
+				    (((tmp >> 6) & 0x1) == 0x0))
+					break;
+				else if (((tmp >> 6) & 0x1) == 0x1)
+					return -1;
+			}
+			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
+			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
+		}
+		mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
+	}
+
+	/* PHY_927 PHY_PAD_DQS_DRIVE  RPULL offset_22 */
+	mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));
+
+	return 0;
+}
+
+static __pmusramfunc void set_ddrconfig(
+		struct rk3399_sdram_params *sdram_params,
+		unsigned char channel, uint32_t ddrconfig)
+{
+	/* only need to set ddrconfig */
+	struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
+	unsigned int cs0_cap = 0;
+	unsigned int cs1_cap = 0;
+
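+	/* cs0 capacity in MB: 2^(cs0_row + col + bk + bw) bytes >> 20 */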
+	cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
+	if (ch->rank > 1)
+		cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
+	if (ch->row_3_4) {
+		cs0_cap = cs0_cap * 3 / 4;
+		cs1_cap = cs1_cap * 3 / 4;
+	}
+
+	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
+		      ddrconfig | (ddrconfig << 6));
+	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
+		      ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
+}
+
+static __pmusramfunc void dram_all_config(
+		struct rk3399_sdram_params *sdram_params)
+{
+	unsigned int i;
+
+	for (i = 0; i < 2; i++) {
+		struct rk3399_sdram_channel *info = &sdram_params->ch[i];
+		struct rk3399_msch_timings *noc = &info->noc_timings;
+
+		if (sdram_params->ch[i].col == 0)
+			continue;
+
+		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
+			      noc->ddrtiminga0.d32);
+		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
+			      noc->ddrtimingb0.d32);
+		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
+			      noc->ddrtimingc0.d32);
+		mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
+			      noc->devtodev0.d32);
+		mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);
+
+		/* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
+		if (sdram_params->ch[i].rank == 1)
+			mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
+	}
+
+	DDR_STRIDE(sdram_params->stride);
+
+	/* reboot hold register set */
+	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+		      CRU_PMU_SGRF_RST_RLS |
+		      PRESET_GPIO0_HOLD(1) |
+		      PRESET_GPIO1_HOLD(1));
+	mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
+}
+
+static __pmusramfunc void pctl_cfg(uint32_t ch,
+		struct rk3399_sdram_params *sdram_params)
+{
+	const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
+	const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
+	const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs;
+	uint32_t tmp, tmp1, tmp2, i;
+
+	/*
+	 * Workaround controller bug:
+	 * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed
+	 */
+	sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
+		    CTL_REG_NUM - 1);
+	mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
+	sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
+		    PI_REG_NUM);
+
+	sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896],
+		    3);
+
+	mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
+				PWRUP_SREFRESH_EXIT);
+
+	/* PHY_DLL_RST_EN */
+	mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
+	dmbst();
+
+	mmio_setbits_32(PI_REG(ch, 0), START);
+	mmio_setbits_32(CTL_REG(ch, 0), START);
+
+	/* wait lock */
+	while (1) {
+		tmp = mmio_read_32(PHY_REG(ch, 920));
+		tmp1 = mmio_read_32(PHY_REG(ch, 921));
+		tmp2 = mmio_read_32(PHY_REG(ch, 922));
+		if ((((tmp >> 16) & 0x1) == 0x1) &&
+		     (((tmp1 >> 16) & 0x1) == 0x1) &&
+		     (((tmp1 >> 0) & 0x1) == 0x1) &&
+		     (((tmp2 >> 0) & 0x1) == 0x1))
+			break;
+		/* if the PLL is bypassed, there is no need to wait for lock */
+		if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
+			break;
+	}
+
+	sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63);
+
+	for (i = 0; i < 4; i++)
+		sram_regcpy(PHY_REG(ch, 128 * i),
+			    (uintptr_t)&phy_regs->phy0[0], 91);
+
+	for (i = 0; i < 3; i++)
+		sram_regcpy(PHY_REG(ch, 512 + 128 * i),
+				(uintptr_t)&phy_regs->phy512[i][0], 38);
+}
+
+static __pmusramfunc int dram_switch_to_next_index(
+		struct rk3399_sdram_params *sdram_params)
+{
+	uint32_t ch, ch_count;
+	uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;
+
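+	/*
+	 * Hand the new frequency-select index to the CIC, then poll the two
+	 * CIC_STATUS0 bits (assumed to be "request accepted" and "done")
+	 * before retraining each channel.
+	 */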
+	mmio_write_32(CIC_BASE + CIC_CTRL0,
+		      (((0x3 << 4) | (1 << 2) | 1) << 16) |
+		      (fn << 4) | (1 << 2) | 1);
+	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
+		;
+
+	mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
+	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
+		;
+
+	ch_count = sdram_params->num_channels;
+
+	/* LPDDR4 f2 can't do training; all training will fail */
+	for (ch = 0; ch < ch_count; ch++) {
+		mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
+				   fn << 8);
+
+		/* data_training failed */
+		if (data_training(ch, sdram_params, PI_FULL_TRAINING))
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Needs to be done for both channels at once in case of a shared reset signal
+ * between channels.
+ */
+static __pmusramfunc int pctl_start(uint32_t channel_mask,
+		struct rk3399_sdram_params *sdram_params)
+{
+	uint32_t count;
+	uint32_t byte;
+
+	mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
+	mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
+
+	/* need de-access IO retention before controller START */
+	if (channel_mask & (1 << 0))
+		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
+	if (channel_mask & (1 << 1))
+		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));
+
+	/* PHY_DLL_RST_EN */
+	if (channel_mask & (1 << 0))
+		mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
+				   0x2 << 24);
+	if (channel_mask & (1 << 1))
+		mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
+				   0x2 << 24);
+
+	/* check ERROR bit */
+	if (channel_mask & (1 << 0)) {
+		count = 0;
+		while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
+			/* CKE is low, loop 10ms */
+			if (count > 100)
+				return -1;
+
+			sram_udelay(100);
+			count++;
+		}
+
+		mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
+
+		/* Restore the PHY_RX_CAL_DQS value */
+		for (byte = 0; byte < 4; byte++)
+			mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
+					   0xfff << 16,
+					   sdram_params->rx_cal_dqs[0][byte]);
+	}
+	if (channel_mask & (1 << 1)) {
+		count = 0;
+		while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
+			/* CKE is low, loop 10ms */
+			if (count > 100)
+				return -1;
+
+			sram_udelay(100);
+			count++;
+		}
+
+		mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
+
+		/* Restore the PHY_RX_CAL_DQS value */
+		for (byte = 0; byte < 4; byte++)
+			mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
+					   0xfff << 16,
+					   sdram_params->rx_cal_dqs[1][byte]);
+	}
+
+	return 0;
+}
+
+__pmusramfunc static void pmusram_restore_pll(int pll_id, uint32_t *src)
+{
+	mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);
+
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);
+
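+	/* wait for the PLL to lock (PLL_CON2 bit 31) */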
+	while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
+		(1 << 31)) == 0x0)
+		;
+}
+
+void dmc_suspend(void)
+{
+	struct rk3399_sdram_params *sdram_params = &sdram_config;
+	struct rk3399_ddr_publ_regs *phy_regs;
+	uint32_t *params_ctl;
+	uint32_t *params_pi;
+	uint32_t refdiv, postdiv2, postdiv1, fbdiv;
+	uint32_t ch, byte, i;
+
+	phy_regs = &sdram_params->phy_regs;
+	params_ctl = sdram_params->pctl_regs.denali_ctl;
+	params_pi = sdram_params->pi_regs.denali_pi;
+
+	/* save dpll register and ddr clock register value to pmusram */
+	cru_clksel_con6 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON6);
+	for (i = 0; i < PLL_CON_COUNT; i++)
+		dpll_data[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, i));
+
+	fbdiv = dpll_data[0] & 0xfff;
+	postdiv2 = POSTDIV2_DEC(dpll_data[1]);
+	postdiv1 = POSTDIV1_DEC(dpll_data[1]);
+	refdiv = REFDIV_DEC(dpll_data[1]);
+
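+	/*
+	 * e.g. fbdiv = 66, refdiv = 1, postdiv1 = 2, postdiv2 = 1
+	 * gives (66 * 24) / (1 * 2 * 1) = 792 MHz
+	 */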
+	sdram_params->ddr_freq = ((fbdiv * 24) /
+				(refdiv * postdiv1 * postdiv2)) * MHz;
+
+	INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
+	sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
+			       0x7) != 0) ? 1 : 0;
+
+	/* copy the registers CTL PI and PHY */
+	dram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);
+
+	/* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */
+	params_ctl[0] &= ~(0x1 << 0);
+
+	dram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
+		    PI_REG_NUM);
+
+	/* mask DENALI_PI_00_DATA.START, only copy here, will trigger later */
+	params_pi[0] &= ~(0x1 << 0);
+
+	dram_regcpy((uintptr_t)&phy_regs->phy0[0],
+			    PHY_REG(0, 0), 91);
+
+	for (i = 0; i < 3; i++)
+		dram_regcpy((uintptr_t)&phy_regs->phy512[i][0],
+			    PHY_REG(0, 512 + 128 * i), 38);
+
+	dram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63);
+
+	for (ch = 0; ch < sdram_params->num_channels; ch++) {
+		for (byte = 0; byte < 4; byte++)
+			sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
+				mmio_read_32(PHY_REG(ch, 57 + byte * 128));
+	}
+
+	/* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
+	phy_regs->phy896[957 - 896] &= ~(0x3 << 24);
+	phy_regs->phy896[957 - 896] |= 1 << 24;
+	phy_regs->phy896[0] |= 1;
+	phy_regs->phy896[0] &= ~(0x3 << 8);
+}
+
+__pmusramfunc void dmc_resume(void)
+{
+	struct rk3399_sdram_params *sdram_params = &sdram_config;
+	uint32_t channel_mask = 0;
+	uint32_t channel;
+
+	sram_secure_timer_init();
+
+	/*
+	 * the DDR clock was switched to ABPLL during suspend;
+	 * switch it back to DPLL here
+	 */
+	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON6,
+			cru_clksel_con6 | REG_SOC_WMSK);
+	pmusram_restore_pll(DPLL_ID, dpll_data);
+
+	configure_sgrf();
+
+retry:
+	for (channel = 0; channel < sdram_params->num_channels; channel++) {
+		phy_pctrl_reset(channel);
+		pctl_cfg(channel, sdram_params);
+	}
+
+	for (channel = 0; channel < 2; channel++) {
+		if (sdram_params->ch[channel].col)
+			channel_mask |= 1 << channel;
+	}
+
+	if (pctl_start(channel_mask, sdram_params) < 0)
+		goto retry;
+
+	for (channel = 0; channel < sdram_params->num_channels; channel++) {
+		/* LPDDR2/LPDDR3 need to wait for DAI to complete, max 10us */
+		if (sdram_params->dramtype == LPDDR3)
+			sram_udelay(10);
+
+		/* If training fails, retry. */
+		if (data_training(channel, sdram_params, PI_FULL_TRAINING))
+			goto retry;
+
+		set_ddrconfig(sdram_params, channel,
+			      sdram_params->ch[channel].ddrconfig);
+	}
+
+	dram_all_config(sdram_params);
+
+	/* Switch to index 1 and prepare for DDR frequency switch. */
+	dram_switch_to_next_index(sdram_params);
+}
diff --git a/plat/rockchip/rk3399/drivers/dram/suspend.h b/plat/rockchip/rk3399/drivers/dram/suspend.h
new file mode 100644
index 0000000..a8a8641
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/dram/suspend.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_ROCKCHIP_RK3399_SUSPEND_H__
+#define __SOC_ROCKCHIP_RK3399_SUSPEND_H__
+#include <dram.h>
+
+#define KHz (1000)
+#define MHz (1000 * KHz)
+#define GHz (1000 * MHz)
+
+#define PI_CA_TRAINING		(1 << 0)
+#define PI_WRITE_LEVELING	(1 << 1)
+#define PI_READ_GATE_TRAINING	(1 << 2)
+#define PI_READ_LEVELING	(1 << 3)
+#define PI_WDQ_LEVELING		(1 << 4)
+#define PI_FULL_TRAINING	(0xff)
+
+void dmc_suspend(void);
+__pmusramfunc void dmc_resume(void);
+
+#endif /* __SOC_ROCKCHIP_RK3399_SUSPEND_H__ */
diff --git a/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c
new file mode 100644
index 0000000..e74c4d9
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/gpio/rk3399_gpio.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <gpio.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <soc.h>
+
+uint32_t gpio_port[] = {
+	GPIO0_BASE,
+	GPIO1_BASE,
+	GPIO2_BASE,
+	GPIO3_BASE,
+	GPIO4_BASE,
+};
+
+struct {
+	uint32_t swporta_dr;
+	uint32_t swporta_ddr;
+	uint32_t inten;
+	uint32_t intmask;
+	uint32_t inttype_level;
+	uint32_t int_polarity;
+	uint32_t debounce;
+	uint32_t ls_sync;
+} store_gpio[3];
+
+static uint32_t store_grf_gpio[(GRF_GPIO2D_HE - GRF_GPIO2A_IOMUX) / 4 + 1];
+
+#define SWPORTA_DR	0x00
+#define SWPORTA_DDR	0x04
+#define INTEN		0x30
+#define INTMASK		0x34
+#define INTTYPE_LEVEL	0x38
+#define INT_POLARITY	0x3c
+#define DEBOUNCE	0x48
+#define LS_SYNC		0x60
+
+#define EXT_PORTA	0x50
+#define PMU_GPIO_PORT0	0
+#define PMU_GPIO_PORT1	1
+#define GPIO_PORT2	2
+#define GPIO_PORT3	3
+#define GPIO_PORT4	4
+
+#define PMU_GRF_GPIO0A_P	0x40
+#define GRF_GPIO2A_P		0xe040
+#define GPIO_P_MASK		0x03
+
+#define GET_GPIO_PORT(pin)	(pin / 32)
+#define GET_GPIO_NUM(pin)	(pin % 32)
+#define GET_GPIO_BANK(pin)	((pin % 32) / 8)
+#define GET_GPIO_ID(pin)	((pin % 32) % 8)
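+/*
+ * e.g. pin 74: port = 74 / 32 = 2, num = 74 % 32 = 10,
+ * bank = 10 / 8 = 1, id = 10 % 8 = 2
+ */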
+
+/* returns old clock state, enables clock, in order to do GPIO access */
+static int gpio_get_clock(uint32_t gpio_number)
+{
+	uint32_t port = GET_GPIO_PORT(gpio_number);
+	uint32_t clock_state = 0;
+
+	assert(port < 5);
+
+	switch (port) {
+	case PMU_GPIO_PORT0:
+		clock_state = (mmio_read_32(PMUCRU_BASE +
+					    CRU_PMU_CLKGATE_CON(1)) >>
+					    PCLK_GPIO0_GATE_SHIFT) & 0x01;
+		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+			      BITS_WITH_WMASK(0, CLK_GATE_MASK,
+					      PCLK_GPIO0_GATE_SHIFT));
+		break;
+	case PMU_GPIO_PORT1:
+		clock_state = (mmio_read_32(PMUCRU_BASE +
+					    CRU_PMU_CLKGATE_CON(1)) >>
+					    PCLK_GPIO1_GATE_SHIFT) & 0x01;
+		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+			      BITS_WITH_WMASK(0, CLK_GATE_MASK,
+					      PCLK_GPIO1_GATE_SHIFT));
+		break;
+	case GPIO_PORT2:
+		clock_state = (mmio_read_32(CRU_BASE +
+					    CRU_CLKGATE_CON(31)) >>
+					    PCLK_GPIO2_GATE_SHIFT) & 0x01;
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(0, CLK_GATE_MASK,
+					      PCLK_GPIO2_GATE_SHIFT));
+		break;
+	case GPIO_PORT3:
+		clock_state = (mmio_read_32(CRU_BASE +
+					    CRU_CLKGATE_CON(31)) >>
+					    PCLK_GPIO3_GATE_SHIFT) & 0x01;
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(0, CLK_GATE_MASK,
+					      PCLK_GPIO3_GATE_SHIFT));
+		break;
+	case GPIO_PORT4:
+		clock_state = (mmio_read_32(CRU_BASE +
+					    CRU_CLKGATE_CON(31)) >>
+					    PCLK_GPIO4_GATE_SHIFT) & 0x01;
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(0, CLK_GATE_MASK,
+					      PCLK_GPIO4_GATE_SHIFT));
+		break;
+	default:
+		break;
+	}
+
+	return clock_state;
+}
+
+/* restores old state of gpio clock */
+void gpio_put_clock(uint32_t gpio_number, uint32_t clock_state)
+{
+	uint32_t port = GET_GPIO_PORT(gpio_number);
+
+	switch (port) {
+	case PMU_GPIO_PORT0:
+		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+			      BITS_WITH_WMASK(clock_state, CLK_GATE_MASK,
+					      PCLK_GPIO0_GATE_SHIFT));
+		break;
+	case PMU_GPIO_PORT1:
+		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATE_CON(1),
+			      BITS_WITH_WMASK(clock_state, CLK_GATE_MASK,
+					      PCLK_GPIO1_GATE_SHIFT));
+		break;
+	case GPIO_PORT2:
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(clock_state, CLK_GATE_MASK,
+					      PCLK_GPIO2_GATE_SHIFT));
+		break;
+	case GPIO_PORT3:
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(clock_state, CLK_GATE_MASK,
+					      PCLK_GPIO3_GATE_SHIFT));
+
+		break;
+	case GPIO_PORT4:
+		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			      BITS_WITH_WMASK(clock_state, CLK_GATE_MASK,
+					      PCLK_GPIO4_GATE_SHIFT));
+		break;
+	default:
+		break;
+	}
+}
+
+static int get_pull(int gpio)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t bank = GET_GPIO_BANK(gpio);
+	uint32_t id = GET_GPIO_ID(gpio);
+	uint32_t val, clock_state;
+
+	assert((port < 5) && (bank < 4));
+
+	clock_state = gpio_get_clock(gpio);
+
+	if (port == PMU_GPIO_PORT0 || port == PMU_GPIO_PORT1) {
+		val = mmio_read_32(PMUGRF_BASE + PMU_GRF_GPIO0A_P +
+				   port * 16 + bank * 4);
+		val = (val >> (id * 2)) & GPIO_P_MASK;
+	} else {
+		val = mmio_read_32(GRF_BASE + GRF_GPIO2A_P +
+				   (port - 2) * 16 + bank * 4);
+		val = (val >> (id * 2)) & GPIO_P_MASK;
+	}
+	gpio_put_clock(gpio, clock_state);
+
+	/*
+	 * in gpio0a, gpio0b, gpio2c, gpio2d,
+	 * 00: Z
+	 * 01: pull down
+	 * 10: Z
+	 * 11: pull up
+	 * different from the other GPIOs, so we need to correct it
+	 */
+	if (((port == 0) && (bank < 2)) || ((port == 2) && (bank > 1))) {
+		if (val == 3)
+			val = GPIO_PULL_UP;
+		else if (val == 1)
+			val = GPIO_PULL_DOWN;
+		else
+			val = 0;
+	}
+
+	return val;
+}
+
+static void set_pull(int gpio, int pull)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t bank = GET_GPIO_BANK(gpio);
+	uint32_t id = GET_GPIO_ID(gpio);
+	uint32_t clock_state;
+
+	assert((port < 5) && (bank < 4));
+
+	clock_state = gpio_get_clock(gpio);
+
+	/*
+	 * in gpio0a, gpio0b, gpio2c, gpio2d,
+	 * 00: Z
+	 * 01: pull down
+	 * 10: Z
+	 * 11: pull up
+	 * different from the other GPIOs, so we need to correct it
+	 */
+	if (((port == 0) && (bank < 2)) || ((port == 2) && (bank > 1))) {
+		if (pull == GPIO_PULL_UP)
+			pull = 3;
+		else if (pull == GPIO_PULL_DOWN)
+			pull = 1;
+		else
+			pull = 0;
+	}
+
+	if (port == PMU_GPIO_PORT0 || port == PMU_GPIO_PORT1) {
+		mmio_write_32(PMUGRF_BASE + PMU_GRF_GPIO0A_P +
+			      port * 16 + bank * 4,
+			      BITS_WITH_WMASK(pull, GPIO_P_MASK, id * 2));
+	} else {
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_P +
+			      (port - 2) * 16 + bank * 4,
+			      BITS_WITH_WMASK(pull, GPIO_P_MASK, id * 2));
+	}
+	gpio_put_clock(gpio, clock_state);
+}
+
+static void set_direction(int gpio, int direction)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	uint32_t clock_state;
+
+	assert((port < 5) && (num < 32));
+
+	clock_state = gpio_get_clock(gpio);
+
+	/*
+	 * in gpio.h
+	 * #define GPIO_DIR_OUT	0
+	 * #define GPIO_DIR_IN	1
+	 * but the rk3399 GPIO direction is 1: output, 0: input,
+	 * so we need to invert the direction value
+	 */
+	mmio_setbits_32(gpio_port[port] + SWPORTA_DDR, !direction << num);
+	gpio_put_clock(gpio, clock_state);
+}
+
+static int get_direction(int gpio)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	int direction, clock_state;
+
+	assert((port < 5) && (num < 32));
+
+	clock_state = gpio_get_clock(gpio);
+
+	/*
+	 * in gpio.h
+	 * #define GPIO_DIR_OUT	0
+	 * #define GPIO_DIR_IN	1
+	 * but the rk3399 GPIO direction is 1: output, 0: input,
+	 * so we need to invert the direction value
+	 */
+	direction = !((mmio_read_32(gpio_port[port] +
+				    SWPORTA_DDR) >> num) & 0x1);
+	gpio_put_clock(gpio, clock_state);
+
+	return direction;
+}
+
+static int get_value(int gpio)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	int value, clock_state;
+
+	assert((port < 5) && (num < 32));
+
+	clock_state = gpio_get_clock(gpio);
+	value = (mmio_read_32(gpio_port[port] + EXT_PORTA) >> num) & 0x1;
+	gpio_put_clock(gpio, clock_state);
+
+	return value;
+}
+
+static void set_value(int gpio, int value)
+{
+	uint32_t port = GET_GPIO_PORT(gpio);
+	uint32_t num = GET_GPIO_NUM(gpio);
+	uint32_t clock_state;
+
+	assert((port < 5) && (num < 32));
+
+	clock_state = gpio_get_clock(gpio);
+	mmio_clrsetbits_32(gpio_port[port] + SWPORTA_DR, 1 << num,
+							 !!value << num);
+	gpio_put_clock(gpio, clock_state);
+}
+
+void plat_rockchip_save_gpio(void)
+{
+	int i;
+	uint32_t cru_gate_save;
+
+	cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+	/*
+	 * when the logic power rail is shut down, we need to save the
+	 * gpio2 ~ gpio4 registers. Enable the gpio2 ~ gpio4 clocks here,
+	 * since they may be gated; we do not care about the gpio0 and
+	 * gpio1 clock gates, since we never gate them.
+	 */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+	/*
+	 * since gpio0 and gpio1 are PMU GPIOs, they keep their values
+	 * when the logic power rail is shut down, so we only need to save
+	 * the gpio2 ~ gpio4 register values
+	 */
+	for (i = 2; i < 5; i++) {
+		store_gpio[i - 2].swporta_dr =
+			mmio_read_32(gpio_port[i] + SWPORTA_DR);
+		store_gpio[i - 2].swporta_ddr =
+			mmio_read_32(gpio_port[i] + SWPORTA_DDR);
+		store_gpio[i - 2].inten =
+			mmio_read_32(gpio_port[i] + INTEN);
+		store_gpio[i - 2].intmask =
+			mmio_read_32(gpio_port[i] + INTMASK);
+		store_gpio[i - 2].inttype_level =
+			mmio_read_32(gpio_port[i] + INTTYPE_LEVEL);
+		store_gpio[i - 2].int_polarity =
+			mmio_read_32(gpio_port[i] + INT_POLARITY);
+		store_gpio[i - 2].debounce =
+			mmio_read_32(gpio_port[i] + DEBOUNCE);
+		store_gpio[i - 2].ls_sync =
+			mmio_read_32(gpio_port[i] + LS_SYNC);
+	}
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			cru_gate_save | REG_SOC_WMSK);
+
+	/*
+	 * gpio0 and gpio1 are in the pmuiomux and keep their values
+	 * when the logic power rail is shut down, so we only need to save
+	 * the gpio2 ~ gpio4 iomux register values
+	 */
+	for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+		store_grf_gpio[i] =
+			mmio_read_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4);
+}
+
+void plat_rockchip_restore_gpio(void)
+{
+	int i;
+	uint32_t cru_gate_save;
+
+	for (i = 0; i < ARRAY_SIZE(store_grf_gpio); i++)
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
+		      REG_SOC_WMSK | store_grf_gpio[i]);
+
+	cru_gate_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31));
+
+	/*
+	 * when the logic power rail was shut down, the gpio2 ~ gpio4
+	 * registers need to be restored. Enable the gpio2 ~ gpio4 clocks
+	 * here, since they may be gated; we do not care about the gpio0
+	 * and gpio1 clock gates, since we never gate them.
+	 */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+	for (i = 2; i < 5; i++) {
+		mmio_write_32(gpio_port[i] + SWPORTA_DR,
+				store_gpio[i - 2].swporta_dr);
+		mmio_write_32(gpio_port[i] + SWPORTA_DDR,
+				store_gpio[i - 2].swporta_ddr);
+		mmio_write_32(gpio_port[i] + INTEN, store_gpio[i - 2].inten);
+		mmio_write_32(gpio_port[i] + INTMASK,
+				store_gpio[i - 2].intmask);
+		mmio_write_32(gpio_port[i] + INTTYPE_LEVEL,
+				store_gpio[i - 2].inttype_level);
+		mmio_write_32(gpio_port[i] + INT_POLARITY,
+				store_gpio[i - 2].int_polarity);
+		mmio_write_32(gpio_port[i] + DEBOUNCE,
+				store_gpio[i - 2].debounce);
+		mmio_write_32(gpio_port[i] + LS_SYNC,
+				store_gpio[i - 2].ls_sync);
+	}
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+			cru_gate_save | REG_SOC_WMSK);
+}
+
+const gpio_ops_t rk3399_gpio_ops = {
+	.get_direction = get_direction,
+	.set_direction = set_direction,
+	.get_value = get_value,
+	.set_value = set_value,
+	.set_pull = set_pull,
+	.get_pull = get_pull,
+};
+
+void plat_rockchip_gpio_init(void)
+{
+	gpio_init(&rk3399_gpio_ops);
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/Makefile b/plat/rockchip/rk3399/drivers/m0/Makefile
new file mode 100644
index 0000000..f6bdbf2
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/Makefile
@@ -0,0 +1,109 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Cross Compile
+M0_CROSS_COMPILE ?= arm-none-eabi-
+
+# Build architecture
+ARCH		:= cortex-m0
+
+# Build platform
+PLAT_M0		?= rk3399m0
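+
+# The parent build is expected to invoke this Makefile with BUILD set to the
+# output directory for the M0 objects; M0_CROSS_COMPILE and PLAT_M0 above can
+# be overridden on the make command line.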
+
+ifeq (${V},0)
+	Q=@
+else
+	Q=
+endif
+export Q
+
+.SUFFIXES:
+
+INCLUDES		+= -Iinclude/ \
+			   -I../../include/shared/
+
+# NOTE: Add C source files here
+C_SOURCES		:= src/startup.c \
+			   src/main.c	\
+			   src/suspend.c \
+			   src/dram.c	\
+			   src/stopwatch.c
+
+# Flags definition
+COMMON_FLAGS		:= -g -mcpu=$(ARCH) -mthumb -Wall -O3 -nostdlib -mfloat-abi=soft
+CFLAGS			:= -ffunction-sections -fdata-sections -fomit-frame-pointer -fno-common
+ASFLAGS			:= -Wa,--gdwarf-2
+LDFLAGS			:= -Wl,--gc-sections -Wl,--build-id=none
+
+# Cross tool
+CC			:= ${M0_CROSS_COMPILE}gcc
+CPP			:= ${M0_CROSS_COMPILE}cpp
+AR			:= ${M0_CROSS_COMPILE}ar
+OC			:= ${M0_CROSS_COMPILE}objcopy
+OD			:= ${M0_CROSS_COMPILE}objdump
+NM			:= ${M0_CROSS_COMPILE}nm
+
+# NOTE: The line continuation '\' is required in the next define otherwise we
+# end up with a line-feed character at the end of the last c filename.
+# Also bear this issue in mind if extending the list of supported filetypes.
+define SOURCES_TO_OBJS
+	$(notdir $(patsubst %.c,%.o,$(filter %.c,$(1)))) \
+	$(notdir $(patsubst %.S,%.o,$(filter %.S,$(1))))
+endef
+
+SOURCES 		:= $(C_SOURCES)
+OBJS 			:= $(addprefix $(BUILD)/,$(call SOURCES_TO_OBJS,$(SOURCES)))
+LINKERFILE		:= $(BUILD)/$(PLAT_M0).ld
+MAPFILE			:= $(BUILD)/$(PLAT_M0).map
+ELF 			:= $(BUILD)/$(PLAT_M0).elf
+BIN 			:= $(BUILD)/$(PLAT_M0).bin
+LINKERFILE_SRC		:= src/$(PLAT_M0).ld.S
+
+# Function definition related compilation
+define MAKE_C
+$(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2))))
+-include $(patsubst %.o,%.d,$(OBJ))
+
+$(OBJ) : $(2)
+	@echo "  CC      $$<"
+	$$(Q)$$(CC) $$(COMMON_FLAGS) $$(CFLAGS) $$(INCLUDES) -MMD -MT $$@ -c $$< -o $$@
+endef
+
+define MAKE_S
+$(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2))))
+
+$(OBJ) : $(2)
+	@echo "  AS      $$<"
+	$$(Q)$$(CC) -x assembler-with-cpp $$(COMMON_FLAGS) $$(ASFLAGS) -c $$< -o $$@
+endef
+
+define MAKE_OBJS
+	$(eval C_OBJS := $(filter %.c,$(2)))
+	$(eval REMAIN := $(filter-out %.c,$(2)))
+	$(eval $(foreach obj,$(C_OBJS),$(call MAKE_C,$(1),$(obj),$(3))))
+
+	$(eval S_OBJS := $(filter %.S,$(REMAIN)))
+	$(eval REMAIN := $(filter-out %.S,$(REMAIN)))
+	$(eval $(foreach obj,$(S_OBJS),$(call MAKE_S,$(1),$(obj),$(3))))
+
+	$(and $(REMAIN),$(error Unexpected source files present: $(REMAIN)))
+endef
+
+.DEFAULT_GOAL := $(BIN)
+
+$(LINKERFILE): $(LINKERFILE_SRC)
+	$(CC) $(COMMON_FLAGS) $(INCLUDES) -P -E -D__LINKER__ -MMD -MF $@.d -MT $@ -o $@ $<
+-include $(LINKERFILE).d
+
+$(ELF) : $(OBJS) $(LINKERFILE)
+	@echo "  LD      $@"
+	$(Q)$(CC) -o $@ $(COMMON_FLAGS) $(LDFLAGS) -Wl,-Map=$(MAPFILE) -Wl,-T$(LINKERFILE) $(OBJS)
+
+$(BIN) : $(ELF)
+	@echo "  BIN     $@"
+	$(Q)$(OC) -O binary $< $@
+
+$(eval $(call MAKE_OBJS,$(BUILD),$(SOURCES),$(1)))
diff --git a/plat/rockchip/rk3399/drivers/m0/include/addressmap.h b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h
new file mode 100644
index 0000000..5635e79
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/include/addressmap.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ROCKCHIP_RK3399_M0_INCLUDE_SHARED_ADDRESSMAP_H__
+#define __ROCKCHIP_RK3399_M0_INCLUDE_SHARED_ADDRESSMAP_H__
+
+#include <addressmap_shared.h>
+
+/* Registers base address for M0 */
+#define MMIO_BASE			0x40000000
+
+#endif /* __ROCKCHIP_RK3399_M0_INCLUDE_SHARED_ADDRESSMAP_H__ */
diff --git a/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h
new file mode 100644
index 0000000..472cbc9
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/include/rk3399_mcu.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __RK3399_MCU_H__
+#define __RK3399_MCU_H__
+
+#include <addressmap.h>
+
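+/*
+ * The M0 firmware is built freestanding (-nostdlib, no <stdint.h>), so the
+ * one fixed-width type this code needs is defined locally.
+ */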
+typedef unsigned int uint32_t;
+
+#define mmio_read_32(c)	({unsigned int __v = \
+				(*(volatile unsigned int *)(c)); __v; })
+#define mmio_write_32(c, v)	((*(volatile unsigned int *)(c)) = (v))
+
+#define mmio_clrbits_32(addr, clear) \
+		mmio_write_32(addr, (mmio_read_32(addr) & ~(clear)))
+#define mmio_setbits_32(addr, set) \
+		mmio_write_32(addr, (mmio_read_32(addr)) | (set))
+#define mmio_clrsetbits_32(addr, clear, set) \
+		mmio_write_32(addr, (mmio_read_32(addr) & ~(clear)) | (set))
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+void handle_suspend(void);
+void handle_dram(void);
+void stopwatch_init_usecs_expire(unsigned int usecs);
+int stopwatch_expired(void);
+void stopwatch_reset(void);
+
+#endif /* __RK3399_MCU_H__ */
diff --git a/plat/rockchip/rk3399/drivers/m0/src/dram.c b/plat/rockchip/rk3399/drivers/m0/src/dram.c
new file mode 100644
index 0000000..c6a9259
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/dram.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <dram_regs.h>
+#include <m0_param.h>
+#include <pmu_bits.h>
+#include <pmu_regs.h>
+#include "misc_regs.h"
+#include "rk3399_mcu.h"
+
+static uint32_t gatedis_con0;
+
+static void idle_port(void)
+{
+	gatedis_con0 = mmio_read_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0);
+	mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, 0x3fffffff);
+
+	mmio_setbits_32(PMU_BASE + PMU_BUS_IDLE_REQ,
+			(1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1));
+	while ((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) &
+		((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0))) !=
+		((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0)))
+		continue;
+}
+
+static void deidle_port(void)
+{
+	mmio_clrbits_32(PMU_BASE + PMU_BUS_IDLE_REQ,
+			(1 << PMU_IDLE_REQ_MSCH0) | (1 << PMU_IDLE_REQ_MSCH1));
+	while (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) &
+	       ((1 << PMU_IDLE_ST_MSCH1) | (1 << PMU_IDLE_ST_MSCH0)))
+		continue;
+
+	/* the document is wrong: PMU_CRU_GATEDIS_CON0 does not need the mask bit set */
+	mmio_write_32(PMUCRU_BASE + PMU_CRU_GATEDIS_CON0, gatedis_con0);
+}
+
+static void ddr_set_pll(void)
+{
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_SLOW_MODE));
+
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(1));
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON0,
+		      mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON0));
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON1,
+		      mmio_read_32(PARAM_ADDR + PARAM_DPLL_CON1));
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_POWER_DOWN(0));
+
+	while ((mmio_read_32(CRU_BASE + CRU_DPLL_CON2) & (1u << 31)) == 0)
+		continue;
+
+	mmio_write_32(CRU_BASE + CRU_DPLL_CON3, PLL_MODE(PLL_NORMAL_MODE));
+}
+
+void handle_dram(void)
+{
+	mmio_setbits_32(PHY_REG(0, 927), (1 << 22));
+	mmio_setbits_32(PHY_REG(1, 927), (1 << 22));
+	idle_port();
+
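+	/*
+	 * Pass the requested frequency select to the CIC and wait for the
+	 * request to be accepted before reprogramming the DPLL.
+	 */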
+	mmio_write_32(CIC_BASE + CIC_CTRL0,
+		      (((0x3 << 4) | (1 << 2) | 1) << 16) |
+		      (1 << 2) | 1 |
+		      mmio_read_32(PARAM_ADDR + PARAM_FREQ_SELECT));
+	while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)) == 0)
+		continue;
+
+	ddr_set_pll();
+	mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
+	while ((mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)) == 0)
+		continue;
+
+	deidle_port();
+	mmio_clrbits_32(PHY_REG(0, 927), (1 << 22));
+	mmio_clrbits_32(PHY_REG(1, 927), (1 << 22));
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/main.c b/plat/rockchip/rk3399/drivers/m0/src/main.c
new file mode 100644
index 0000000..0ed818d
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/main.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <m0_param.h>
+#include "rk3399_mcu.h"
+
+__attribute__((noreturn)) void main(void)
+{
+	switch (mmio_read_32(PARAM_ADDR + PARAM_M0_FUNC)) {
+	case M0_FUNC_SUSPEND:
+		handle_suspend();
+		break;
+	case M0_FUNC_DRAM:
+		handle_dram();
+		break;
+	default:
+		break;
+	}
+
+	mmio_write_32(PARAM_ADDR + PARAM_M0_DONE, M0_DONE_FLAG);
+
+	for (;;)
+		__asm__ volatile ("wfi");
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S
new file mode 100644
index 0000000..bfe054e
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/rk3399m0.ld.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <m0_param.h>
+
+OUTPUT_FORMAT("elf32-littlearm")
+
+SECTIONS {
+	.m0_bin 0 : {
+		KEEP(*(.isr_vector))
+		ASSERT(. == 0xc0, "ISR vector has the wrong size.");
+		ASSERT(. == PARAM_ADDR, "M0 params should go right behind ISR table.");
+		. += PARAM_M0_SIZE;
+		*(.text*)
+		*(.rodata*)
+		*(.data*)
+		*(.bss*)
+		. = ALIGN(8);
+		*(.co_stack*)
+	}
+
+	/DISCARD/ : { *(.comment) *(.note*) }
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/startup.c b/plat/rockchip/rk3399/drivers/m0/src/startup.c
new file mode 100644
index 0000000..68f5b2d
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/startup.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "rk3399_mcu.h"
+
+/* Stack configuration */
+#define STACK_SIZE	0x00000100
+__attribute__ ((section(".co_stack")))
+unsigned long pstack[STACK_SIZE];
+
+/* Macro definition */
+#define WEAK __attribute__ ((weak))
+
+/* System exception vector handler */
+__attribute__ ((used))
+void WEAK reset_handler(void);
+void WEAK nmi_handler(void);
+void WEAK hardware_fault_handler(void);
+void WEAK svc_handler(void);
+void WEAK pend_sv_handler(void);
+void WEAK systick_handler(void);
+
+extern int main(void);
+
+/* Function prototypes */
+static void default_reset_handler(void);
+static void default_handler(void);
+
+/*
+ * The minimal vector table for a Cortex-M0.  Note that the proper constructs
+ * must be placed on this to ensure that it ends up at physical address
+ * 0x00000000.
+ */
+__attribute__ ((used, section(".isr_vector")))
+void (* const g_pfnVectors[])(void) = {
+	/* core Exceptions */
+	(void *)&pstack[STACK_SIZE], /* the initial stack pointer */
+	reset_handler,
+	nmi_handler,
+	hardware_fault_handler,
+	0, 0, 0, 0, 0, 0, 0,
+	svc_handler,
+	0, 0,
+	pend_sv_handler,
+	systick_handler,
+
+	/* external exceptions */
+	0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0
+};
+
+/**
+ * This is the code that gets called when the processor first
+ * starts execution following a reset event. Only the absolutely
+ * necessary setup is performed, after which the application-supplied
+ * main() routine is called.
+ */
+static void default_reset_handler(void)
+{
+	/* call the application's entry point */
+	main();
+}
+
+/**
+ * Provide weak aliases for each Exception handler to the Default_Handler.
+ * As they are weak aliases, any function with the same name will override
+ * this definition.
+ */
+#pragma weak reset_handler = default_reset_handler
+#pragma weak nmi_handler = default_handler
+#pragma weak hardware_fault_handler = default_handler
+#pragma weak svc_handler = default_handler
+#pragma weak pend_sv_handler = default_handler
+#pragma weak systick_handler = default_handler
+
+/**
+ * This is the code that gets called when the processor receives
+ * an unexpected interrupt.  This simply enters an infinite loop,
+ * preserving the system state for examination by a debugger.
+ */
+static void default_handler(void)
+{
+	/* go into an infinite loop. */
+	while (1)
+		;
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c
new file mode 100644
index 0000000..5af8caa
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/stopwatch.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <m0_param.h>
+#include "rk3399_mcu.h"
+
+/* use 24MHz SysTick */
+#define US_TO_CYCLE(US)	(US * 24)
+
+#define SYST_CST	0xe000e010
+/* enable counter */
+#define ENABLE		(1 << 0)
+/* when set, counting down to 0 pends the SysTick exception */
+#define TICKINT		(1 << 1)
+/* core clock used for SysTick */
+#define CLKSOURCE	(1 << 2)
+
+#define COUNTFLAG	(1 << 16)
+#define SYST_RVR	0xe000e014
+#define MAX_VALUE	0xffffff
+#define MAX_USECS	(MAX_VALUE / US_TO_CYCLE(1))
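+/* = 0xffffff / 24, i.e. roughly 699 ms maximum per SysTick period */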
+#define SYST_CVR	0xe000e018
+#define SYST_CALIB	0xe000e01c
+
+unsigned int remaining_usecs;
+
+static inline void stopwatch_set_usecs(void)
+{
+	unsigned int cycle;
+	unsigned int usecs = MIN(MAX_USECS, remaining_usecs);
+
+	remaining_usecs -= usecs;
+	cycle = US_TO_CYCLE(usecs);
+	mmio_write_32(SYST_RVR, cycle);
+	mmio_write_32(SYST_CVR, 0);
+
+	mmio_write_32(SYST_CST, ENABLE | TICKINT | CLKSOURCE);
+}
+
+void stopwatch_init_usecs_expire(unsigned int usecs)
+{
+	/*
+	 * Enter an infinite loop if the stopwatch is in use. This will allow the
+	 * state to be analyzed with a debugger.
+	 */
+	if (mmio_read_32(SYST_CST) & ENABLE)
+		while (1)
+			;
+
+	remaining_usecs = usecs;
+	stopwatch_set_usecs();
+}
+
+int stopwatch_expired(void)
+{
+	int val = mmio_read_32(SYST_CST);
+	if ((val & COUNTFLAG) || !(val & ENABLE)) {
+		if (!remaining_usecs)
+			return 1;
+
+		stopwatch_set_usecs();
+	}
+
+	return 0;
+}
+
+void stopwatch_reset(void)
+{
+	mmio_clrbits_32(SYST_CST, ENABLE);
+	remaining_usecs = 0;
+}
diff --git a/plat/rockchip/rk3399/drivers/m0/src/suspend.c b/plat/rockchip/rk3399/drivers/m0/src/suspend.c
new file mode 100644
index 0000000..af29a11
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/m0/src/suspend.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <pmu_regs.h>
+#include "rk3399_mcu.h"
+
+#define M0_SCR			0xe000ed10  /* System Control Register (SCR) */
+
+#define SCR_SLEEPDEEP_SHIFT	(1 << 2)
+
+void handle_suspend(void)
+{
+	unsigned int status_value;
+
+	while (1) {
+		status_value = mmio_read_32(PMU_BASE + PMU_POWER_ST);
+		if (status_value) {
+			mmio_clrbits_32(PMU_BASE + PMU_PWRMODE_CON, 0x01);
+			return;
+		}
+	}
+
+	/* m0 enter deep sleep mode */
+	mmio_setbits_32(M0_SCR, SCR_SLEEPDEEP_SHIFT);
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c
new file mode 100644
index 0000000..61849e5
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <m0_ctl.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+void m0_init(void)
+{
+	/* secure config for M0 */
+	mmio_write_32(SGRF_BASE + SGRF_PMU_CON(0), WMSK_BIT(7));
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), WMSK_BIT(12));
+
+	/* set the execute address for M0 */
+	mmio_write_32(SGRF_BASE + SGRF_PMU_CON(3),
+		      BITS_WITH_WMASK((M0_BINCODE_BASE >> 12) & 0xffff,
+				      0xffff, 0));
+	mmio_write_32(SGRF_BASE + SGRF_PMU_CON(7),
+		      BITS_WITH_WMASK((M0_BINCODE_BASE >> 28) & 0xf,
+				      0xf, 0));
+
+	/* the document is wrong: PMU_CRU_GATEDIS_CON0 does not need the mask bit set */
+	mmio_setbits_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, 0x02);
+
+	/*
+	 * Switch the parent to xin24M with div == 1.
+	 *
+	 * Most of the PLLs and clocks except the 24MHz OSC are shut down
+	 * during suspend, and 24MHz is enough for the ddrfreq path. To keep
+	 * the handling simple, we just keep the fixed 24MHz clock to supply
+	 * suspend and ddrfreq directly.
+	 */
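+	/* bit 15 selects xin24M as the parent; bits [12:8] clear the divider */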
+	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKSEL_CON0,
+		      BIT_WITH_WMSK(15) | BITS_WITH_WMASK(0x0, 0x1f, 8));
+
+	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2, WMSK_BIT(5));
+}
+
+void m0_start(void)
+{
+	/* enable clocks for M0 */
+	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
+		      BITS_WITH_WMASK(0x0, 0xf, 0));
+
+	/* clear the PARAM_M0_DONE flag, meaning that the M0 will start working */
+	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_DONE, 0);
+	dmbst();
+
+	mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+		      BITS_WITH_WMASK(0x0, 0x4, 0));
+
+	udelay(5);
+	/* start M0 */
+	mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+		      BITS_WITH_WMASK(0x0, 0x20, 0));
+	dmbst();
+}
+
+void m0_stop(void)
+{
+	/* stop M0 */
+	mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
+		      BITS_WITH_WMASK(0x24, 0x24, 0));
+
+	/* disable clocks for M0 */
+	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
+		      BITS_WITH_WMASK(0xf, 0xf, 0));
+}
+
+void m0_wait_done(void)
+{
+	do {
+		/*
+		 * Don't starve the M0 of access to the SRAM: delay before
+		 * reading the PARAM_M0_DONE value again.
+		 */
+		udelay(5);
+		dsb();
+	} while (mmio_read_32(M0_PARAM_ADDR + PARAM_M0_DONE) != M0_DONE_FLAG);
+
+	/*
+	 * Let the M0 settle into WFI before we leave, so that we don't
+	 * reset the M0 at a bad point and cause it problems.
+	 */
+	udelay(10);
+	dsb();
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h
new file mode 100644
index 0000000..b313ec6
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/m0_ctl.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __M0_CTL_H__
+#define __M0_CTL_H__
+
+#include <m0_param.h>
+
+#define M0_BINCODE_BASE 	((uintptr_t)rk3399m0_bin)
+#define M0_PARAM_ADDR		(M0_BINCODE_BASE + PARAM_ADDR)
+
+/* pmu_fw.c */
+extern char rk3399m0_bin[];
+extern char rk3399m0_bin_end[];
+
+extern void m0_init(void);
+extern void m0_start(void);
+extern void m0_stop(void);
+extern void m0_wait_done(void);
+#endif /* __M0_CTL_H__ */
diff --git a/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
new file mode 100644
index 0000000..70fd9bf
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <pmu_regs.h>
+
+	.globl	clst_warmboot_data
+
+	.macro sram_func _name
+	.cfi_sections .debug_frame
+	.section .sram.text, "ax"
+	.type \_name, %function
+	.func \_name
+	.cfi_startproc
+	\_name:
+	.endm
+
+#define CRU_CLKSEL_CON6	0x118
+
+#define DDRCTL0_C_SYSREQ_CFG 0x0100
+#define DDRCTL1_C_SYSREQ_CFG 0x1000
+
+#define DDRC0_SREF_DONE_EXT 0x01
+#define DDRC1_SREF_DONE_EXT 0x04
+
+#define PLL_MODE_SHIFT	(0x8)
+#define PLL_NORMAL_MODE	((0x3 << (PLL_MODE_SHIFT + 16)) | \
+						 (0x1 << PLL_MODE_SHIFT))
+#define MPIDR_CLST_L_BITS 0x0
+	/*
+	 * For different SoCs, if we want to speed up warmboot,
+	 * we need to configure some registers here.
+	 * If the SCU was suspended, we must first switch the related
+	 * clocks from slow (24M) mode back to normal mode.
+	 * X0: MPIDR_EL1 & MPIDR_CLUSTER_MASK
+	 */
+.macro	func_rockchip_clst_warmboot
+	adr	x4, clst_warmboot_data
+	lsr	x5, x0, #6
+	ldr	w3, [x4, x5]
+	str	wzr, [x4, x5]
+	cmp	w3, #PMU_CLST_RET
+	b.ne	clst_warmboot_end
+	ldr	w6, =(PLL_NORMAL_MODE)
+	/*
+	 * core_l offset is CRU_BASE + 0xc,
+	 * core_b offset is CRU_BASE + 0x2c
+	 */
+	ldr	x7, =(CRU_BASE + 0xc)
+	lsr	x2, x0, #3
+	str	w6, [x7, x2]
+clst_warmboot_end:
+.endm
+
+.macro rockchip_clst_warmboot_data
+clst_warmboot_data:
+	.rept	PLATFORM_CLUSTER_COUNT
+	.word	0
+	.endr
+.endm
+
+	/* -----------------------------------------------
+	 * void sram_func_set_ddrctl_pll(uint32_t pll_src)
+	 * Function to switch the PLL source for ddrctrl
+	 * In: x0 - The PLL of the clk_ddrc clock source
+	 * out: None
+	 * Clobber list : x0 - x3, x5, x8 - x10
+	 * -----------------------------------------------
+	 */
+
+	.globl	sram_func_set_ddrctl_pll
+
+sram_func sram_func_set_ddrctl_pll
+	/* backup parameter */
+	mov	x8, x0
+
+	/* disable the MMU at EL3 */
+	mrs 	x9, sctlr_el3
+	bic	x10, x9, #(SCTLR_M_BIT)
+	msr 	sctlr_el3, x10
+	isb
+	dsb 	sy
+
+	/* enable ddrctl0_1 idle request */
+	mov	x5, PMU_BASE
+	ldr	w0, [x5, #PMU_SFT_CON]
+	orr	w0, w0, #DDRCTL0_C_SYSREQ_CFG
+	orr	w0, w0, #DDRCTL1_C_SYSREQ_CFG
+	str	w0, [x5, #PMU_SFT_CON]
+
+check_ddrc0_1_sref_enter:
+	ldr	w1, [x5, #PMU_DDR_SREF_ST]
+	and	w2, w1, #DDRC0_SREF_DONE_EXT
+	and	w3, w1, #DDRC1_SREF_DONE_EXT
+	orr	w2, w2, w3
+	cmp	w2, #(DDRC0_SREF_DONE_EXT | DDRC1_SREF_DONE_EXT)
+	b.eq	check_ddrc0_1_sref_enter
+
+	/*
+	 * select a PLL for ddrctrl:
+	 * x0 = 0: ALPLL
+	 * x0 = 1: ABPLL
+	 * x0 = 2: DPLL
+	 * x0 = 3: GPLL
+	 */
+	mov     x5, CRU_BASE
+	lsl	w0, w8, #4
+	orr	w0, w0, #0x00300000
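+	/* bits [21:20] are the write mask for the PLL select field [5:4] */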
+	str 	w0, [x5, #CRU_CLKSEL_CON6]
+
+	/* disable ddrctl0_1 idle request */
+	mov	x5, PMU_BASE
+	ldr	w0, [x5, #PMU_SFT_CON]
+	bic	w0, w0, #DDRCTL0_C_SYSREQ_CFG
+	bic	w0, w0, #DDRCTL1_C_SYSREQ_CFG
+	str	w0, [x5, #PMU_SFT_CON]
+
+check_ddrc0_1_sref_exit:
+	ldr	w1, [x5, #PMU_DDR_SREF_ST]
+	and	w2, w1, #DDRC0_SREF_DONE_EXT
+	and	w3, w1, #DDRC1_SREF_DONE_EXT
+	orr	w2, w2, w3
+	cmp	w2, #0x0
+	b.eq	check_ddrc0_1_sref_exit
+
+	/* reenable the MMU at EL3 */
+	msr 	sctlr_el3, x9
+	isb
+	dsb 	sy
+
+	ret
+endfunc sram_func_set_ddrctl_pll
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.c b/plat/rockchip/rk3399/drivers/pmu/pmu.c
new file mode 100644
index 0000000..c666c3c
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.c
@@ -0,0 +1,1599 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl31.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <dfs.h>
+#include <errno.h>
+#include <gpio.h>
+#include <m0_ctl.h>
+#include <mmio.h>
+#include <plat_params.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmu.h>
+#include <pmu_com.h>
+#include <pwm.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+#include <string.h>
+#include <suspend.h>
+
+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
+static uint32_t cpu_warm_boot_addr;
+static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];
+static uint32_t store_cru[CRU_SDIO0_CON1 / 4];
+static uint32_t store_usbphy0[7];
+static uint32_t store_usbphy1[7];
+static uint32_t store_grf_io_vsel;
+static uint32_t store_grf_soc_con0;
+static uint32_t store_grf_soc_con1;
+static uint32_t store_grf_soc_con2;
+static uint32_t store_grf_soc_con3;
+static uint32_t store_grf_soc_con4;
+static uint32_t store_grf_soc_con7;
+static uint32_t store_grf_ddrc_con[4];
+static uint32_t store_wdt0[2];
+static uint32_t store_wdt1[2];
+
+/*
+ * There are two ways to power a core on or off:
+ * 1) Switch its power domain on or off via the PMU_PWRDN_CON reg;
+ *    this is core_pwr_pd mode.
+ * 2) Enable core power management in the PMU_CORE_PM_CON reg; then,
+ *    when the core enters wfi, its power domain is powered off
+ *    automatically. This is core_pwr_wfi or core_pwr_wfi_int mode.
+ * We need core_pm_cfg_info to distinguish which method is in use.
+ */
+
+static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
+#if USE_COHERENT_MEM
+__attribute__ ((section("tzfw_coherent_mem")))
+#endif
+; /* coherent */
+
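+/*
+ * Request (state != 0) or release (state == 0) the idle request for the
+ * given bus and wait, for up to MAX_WAIT_COUNT polls, until both the idle
+ * status and ack bits match the request; log the registers if they never do.
+ */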
+static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
+{
+	uint32_t bus_id = BIT(bus);
+	uint32_t bus_req;
+	uint32_t wait_cnt = 0;
+	uint32_t bus_state, bus_ack;
+
+	if (state)
+		bus_req = BIT(bus);
+	else
+		bus_req = 0;
+
+	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);
+
+	do {
+		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
+		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
+		wait_cnt++;
+	} while ((bus_state != bus_req || bus_ack != bus_req) &&
+		 (wait_cnt < MAX_WAIT_COUNT));
+
+	if (bus_state != bus_req || bus_ack != bus_req) {
+		INFO("%s:st=%x(%x)\n", __func__,
+		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
+		     bus_state);
+		INFO("%s:st=%x(%x)\n", __func__,
+		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
+		     bus_ack);
+	}
+}
+
+struct pmu_slpdata_s pmu_slpdata;
+
+static void qos_restore(void)
+{
+	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
+	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
+		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
+	}
+	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
+		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
+	}
+	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
+		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
+		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
+	}
+	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
+	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
+	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
+		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
+	}
+	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
+	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
+	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
+	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
+	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
+		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
+	}
+	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
+	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
+		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
+	}
+	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
+		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
+		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
+	}
+	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
+		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
+		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
+		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
+		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
+		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
+		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
+		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
+	}
+	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
+		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
+	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
+		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
+		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
+	}
+}
+
+static void qos_save(void)
+{
+	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
+	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
+		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
+	}
+	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
+		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
+	}
+	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
+		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
+		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
+	}
+	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
+	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
+	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
+		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
+	}
+	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
+	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
+	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
+	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
+	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
+		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
+	}
+	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
+	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
+		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
+	}
+	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
+		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
+		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
+	}
+	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
+		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
+		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
+		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
+		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
+		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
+		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
+		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
+	}
+	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
+		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
+	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
+		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
+		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
+	}
+}
+
+static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
+{
+	uint32_t state;
+
+	if (pmu_power_domain_st(pd_id) == pd_state)
+		goto out;
+
+	if (pd_state == pmu_pd_on)
+		pmu_power_domain_ctr(pd_id, pd_state);
+
+	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;
+
+	switch (pd_id) {
+	case PD_GPU:
+		pmu_bus_idle_req(BUS_ID_GPU, state);
+		break;
+	case PD_VIO:
+		pmu_bus_idle_req(BUS_ID_VIO, state);
+		break;
+	case PD_ISP0:
+		pmu_bus_idle_req(BUS_ID_ISP0, state);
+		break;
+	case PD_ISP1:
+		pmu_bus_idle_req(BUS_ID_ISP1, state);
+		break;
+	case PD_VO:
+		pmu_bus_idle_req(BUS_ID_VOPB, state);
+		pmu_bus_idle_req(BUS_ID_VOPL, state);
+		break;
+	case PD_HDCP:
+		pmu_bus_idle_req(BUS_ID_HDCP, state);
+		break;
+	case PD_TCPD0:
+		break;
+	case PD_TCPD1:
+		break;
+	case PD_GMAC:
+		pmu_bus_idle_req(BUS_ID_GMAC, state);
+		break;
+	case PD_CCI:
+		pmu_bus_idle_req(BUS_ID_CCIM0, state);
+		pmu_bus_idle_req(BUS_ID_CCIM1, state);
+		break;
+	case PD_SD:
+		pmu_bus_idle_req(BUS_ID_SD, state);
+		break;
+	case PD_EMMC:
+		pmu_bus_idle_req(BUS_ID_EMMC, state);
+		break;
+	case PD_EDP:
+		pmu_bus_idle_req(BUS_ID_EDP, state);
+		break;
+	case PD_SDIOAUDIO:
+		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
+		break;
+	case PD_GIC:
+		pmu_bus_idle_req(BUS_ID_GIC, state);
+		break;
+	case PD_RGA:
+		pmu_bus_idle_req(BUS_ID_RGA, state);
+		break;
+	case PD_VCODEC:
+		pmu_bus_idle_req(BUS_ID_VCODEC, state);
+		break;
+	case PD_VDU:
+		pmu_bus_idle_req(BUS_ID_VDU, state);
+		break;
+	case PD_IEP:
+		pmu_bus_idle_req(BUS_ID_IEP, state);
+		break;
+	case PD_USB3:
+		pmu_bus_idle_req(BUS_ID_USB3, state);
+		break;
+	case PD_PERIHP:
+		pmu_bus_idle_req(BUS_ID_PERIHP, state);
+		break;
+	default:
+		break;
+	}
+
+	if (pd_state == pmu_pd_off)
+		pmu_power_domain_ctr(pd_id, pd_state);
+
+out:
+	return 0;
+}
+
+static uint32_t pmu_powerdomain_state;
+
+static void pmu_power_domains_suspend(void)
+{
+	clk_gate_con_save();
+	clk_gate_con_disable();
+	qos_save();
+	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+	pmu_set_power_domain(PD_GPU, pmu_pd_off);
+	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
+	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
+	pmu_set_power_domain(PD_VO, pmu_pd_off);
+	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
+	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
+	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
+	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
+	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
+	pmu_set_power_domain(PD_EDP, pmu_pd_off);
+	pmu_set_power_domain(PD_IEP, pmu_pd_off);
+	pmu_set_power_domain(PD_RGA, pmu_pd_off);
+	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
+	pmu_set_power_domain(PD_VDU, pmu_pd_off);
+	pmu_set_power_domain(PD_USB3, pmu_pd_off);
+	pmu_set_power_domain(PD_EMMC, pmu_pd_off);
+	pmu_set_power_domain(PD_VIO, pmu_pd_off);
+	pmu_set_power_domain(PD_SD, pmu_pd_off);
+	pmu_set_power_domain(PD_PERIHP, pmu_pd_off);
+	clk_gate_con_restore();
+}
+
+static void pmu_power_domains_resume(void)
+{
+	clk_gate_con_save();
+	clk_gate_con_disable();
+	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
+		pmu_set_power_domain(PD_VDU, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
+		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
+		pmu_set_power_domain(PD_RGA, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
+		pmu_set_power_domain(PD_IEP, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
+		pmu_set_power_domain(PD_EDP, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
+		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
+		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
+		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
+		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
+		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_VO)))
+		pmu_set_power_domain(PD_VO, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
+		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
+		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
+		pmu_set_power_domain(PD_GPU, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_USB3)))
+		pmu_set_power_domain(PD_USB3, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_EMMC)))
+		pmu_set_power_domain(PD_EMMC, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_VIO)))
+		pmu_set_power_domain(PD_VIO, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_SD)))
+		pmu_set_power_domain(PD_SD, pmu_pd_on);
+	if (!(pmu_powerdomain_state & BIT(PD_PERIHP)))
+		pmu_set_power_domain(PD_PERIHP, pmu_pd_on);
+	qos_restore();
+	clk_gate_con_restore();
+}
+
+void rk3399_flush_l2_b(void)
+{
+	uint32_t wait_cnt = 0;
+
+	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
+	dsb();
+
+	/*
+	 * Flushing the big cluster's L2 cache takes ~4 ms by default; give it
+	 * 10 ms for enough margin.
+	 */
+	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
+		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
+		wait_cnt++;
+		udelay(10);
+		if (wait_cnt == 10000 / 10)
+			WARN("L2 cache flush on suspend took longer than 10ms\n");
+	}
+
+	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
+}
+
+static void pmu_scu_b_pwrdn(void)
+{
+	uint32_t wait_cnt = 0;
+
+	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
+	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
+	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
+		ERROR("%s: not all cpus are off\n", __func__);
+		return;
+	}
+
+	rk3399_flush_l2_b();
+
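+	/*
+	 * Assert ACINACTM for the big cluster so that it stops accepting
+	 * coherent traffic, then wait for its L2 to reach WFI standby.
+	 */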
+	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
+
+	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
+		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
+		wait_cnt++;
+		if (wait_cnt >= MAX_WAIT_COUNT)
+			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
+			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
+	}
+}
+
+static void pmu_scu_b_pwrup(void)
+{
+	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
+}
+
+static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
+{
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	return core_pm_cfg_info[cpu_id];
+}
+
+static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
+{
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	core_pm_cfg_info[cpu_id] = value;
+#if !USE_COHERENT_MEM
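+	/*
+	 * Without the coherent memory region this data may be read by an
+	 * observer that is not (yet) cache coherent, so push the update out
+	 * to memory.
+	 */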
+	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
+			   sizeof(uint32_t));
+#endif
+}
+
+static int cpus_power_domain_on(uint32_t cpu_id)
+{
+	uint32_t cfg_info;
+	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
+	/*
+	 * There are two ways to power a core on or off:
+	 * 1) Switch its power domain on or off via the PMU_PWRDN_CON reg.
+	 * 2) Enable core power management in the PMU_CORE_PM_CON reg; then,
+	 *    once the core enters wfi, its power domain will be powered off
+	 *    automatically.
+	 */
+
+	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
+
+	if (cfg_info == core_pwr_pd) {
+		/* disable core_pm cfg */
+		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+			      CORES_PM_DISABLE);
+		/* if the core is already on, power it off first */
+		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
+			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+		}
+
+		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
+	} else {
+		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+			WARN("%s: cpu%d is not off\n", __func__, cpu_id);
+			return -EINVAL;
+		}
+
+		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+			      BIT(core_pm_sft_wakeup_en));
+		dsb();
+	}
+
+	return 0;
+}
+
+static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
+{
+	uint32_t cpu_pd;
+	uint32_t core_pm_value;
+
+	cpu_pd = PD_CPUL0 + cpu_id;
+	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
+		return 0;
+
+	if (pd_cfg == core_pwr_pd) {
+		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
+			return -EINVAL;
+
+		/* disable core_pm cfg */
+		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+			      CORES_PM_DISABLE);
+
+		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
+		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+	} else {
+		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
+
+		core_pm_value = BIT(core_pm_en);
+		if (pd_cfg == core_pwr_wfi_int)
+			core_pm_value |= BIT(core_pm_int_wakeup_en);
+		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+			      core_pm_value);
+		dsb();
+	}
+
+	return 0;
+}
+
+static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+
+	if (lvl_state == PLAT_MAX_OFF_STATE) {
+		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
+			pll_id = ALPLL_ID;
+			clst_st_msk = CLST_L_CPUS_MSK;
+		} else {
+			pll_id = ABPLL_ID;
+			clst_st_msk = CLST_B_CPUS_MSK <<
+				       PLATFORM_CLUSTER0_CORE_COUNT;
+		}
+
+		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));
+
+		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+
+		pmu_st &= clst_st_msk;
+
+		if (pmu_st == clst_st_chk_msk) {
+			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+				      PLL_SLOW_MODE);
+
+			clst_warmboot_data[pll_id] = PMU_CLST_RET;
+
+			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
+			pmu_st &= clst_st_msk;
+			if (pmu_st == clst_st_chk_msk)
+				return;
+			/*
+			 * This means another cpu has come up again, so we
+			 * must restore the config at once.
+			 */
+			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+				      PLL_NOMAL_MODE);
+			clst_warmboot_data[pll_id] = 0;
+		}
+	}
+}
+
+static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+	uint32_t pll_id, pll_st;
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+
+	if (lvl_state == PLAT_MAX_OFF_STATE) {
+		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
+			pll_id = ALPLL_ID;
+		else
+			pll_id = ABPLL_ID;
+
+		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
+				 PLL_MODE_SHIFT;
+
+		if (pll_st != NORMAL_MODE) {
+			WARN("%s: clst (%d) is in error mode (%d)\n",
+			     __func__, pll_id, pll_st);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void nonboot_cpus_off(void)
+{
+	uint32_t boot_cpu, cpu;
+
+	boot_cpu = plat_my_core_pos();
+
+	/* turn off non-boot cpus */
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+		if (cpu == boot_cpu)
+			continue;
+		cpus_power_domain_off(cpu, core_pwr_pd);
+	}
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpu_id] == 0);
+	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
+	cpuson_entry_point[cpu_id] = entrypoint;
+	dsb();
+
+	cpus_power_domain_on(cpu_id);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
+				 plat_local_state_t lvl_state)
+{
+	switch (lvl) {
+	case MPIDR_AFFLVL1:
+		clst_pwr_domain_suspend(lvl_state);
+		break;
+	default:
+		break;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	assert(cpu_id < PLATFORM_CORE_COUNT);
+	assert(cpuson_flags[cpu_id] == 0);
+	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
+	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
+	dsb();
+
+	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
+{
+	switch (lvl) {
+	case MPIDR_AFFLVL1:
+		clst_pwr_domain_suspend(lvl_state);
+		break;
+	default:
+		break;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
+		      CORES_PM_DISABLE);
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
+				       plat_local_state_t lvl_state)
+{
+	switch (lvl) {
+	case MPIDR_AFFLVL1:
+		clst_pwr_domain_resume(lvl_state);
+		break;
+	default:
+		break;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+	uint32_t cpu_id = plat_my_core_pos();
+
+	/* Disable core_pm */
+	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
+
+	return PSCI_E_SUCCESS;
+}
+
+int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
+{
+	switch (lvl) {
+	case MPIDR_AFFLVL1:
+		clst_pwr_domain_resume(lvl_state);
+	default:
+		break;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/**
+ * init_pmu_counts - Init timing counts in the PMU register area
+ *
+ * At various points when we power up or down parts of the system we need
+ * a delay to wait for power / clocks to become stable.  The PMU has counters
+ * to help software do the delay properly.  Basically, it works like this:
+ * - Software sets up counter values
+ * - When software turns on something in the PMU, the counter kicks off
+ * - The hardware sets a bit automatically when the counter has finished and
+ *   software knows that the initialization is done.
+ *
+ * It's software's job to set up these counters.  The hardware power-on default
+ * for these settings is conservative, setting everything to 0x5dc0
+ * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
+ *
+ * Note that some of these counters are only really used at suspend/resume
+ * time (for instance, that's the only time we turn off/on the oscillator) and
+ * others are used during normal runtime (like turning on/off a CPU or GPU) but
+ * it doesn't hurt to init everything at boot.
+ *
+ * Also note that these counters can run off the 32 kHz clock or the 24 MHz
+ * clock.  While the 24 MHz clock can give us more precision, it's not always
+ * available (like when we turn the oscillator off at sleep time).  The
+ * pmu_use_lf bit (lf: low freq) selects the 32 kHz source, but only takes
+ * effect while in power mode.  The current understanding is that the counts
+ * work like this:
+ *    IF (pmu_use_lf == 0) || (power_mode_en == 0)
+ *      use the 24M OSC for counts
+ *    ELSE
+ *      use the 32K OSC for counts
+ *
+ * Notes:
+ * - There is a separate bit for the PMU called PMU_24M_EN_CFG.  At the moment
+ *   we always keep that 0.  This apparently chooses between using the PLL as
+ *   the source for the PMU vs. the 24M clock.  If we ever set it to 1 we
+ *   should consider how it affects these counts (if at all).
+ * - The power_mode_en bit is documented to clear automatically when we leave
+ *   "power mode".  That's why most counts run on the 24M clock; only timings
+ *   used while in "power mode" are on the 32k clock.
+ * - In some cases the kernel may override these counts.
+ *
+ * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are the important counts
+ * in power mode, so we need to ensure that they are available.
+ */
+static void init_pmu_counts(void)
+{
+	/* COUNTS FOR INSIDE POWER MODE */
+
+	/*
+	 * From limited testing, the PMU needs to be stable for >= 2 ms, but go
+	 * overkill and choose 30 ms to match testing on past SoCs.  Also let
+	 * the OSC have 30 ms for stabilization.
+	 */
+	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
+	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));
+
+	/* Unclear what these should be; try 3 ms */
+	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));
+
+	/* Unclear what this should be, but set the default explicitly */
+	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);
+
+	/* COUNTS FOR OUTSIDE POWER MODE */
+
+	/* Put something sorta conservative here until we know better */
+	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
+	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
+	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
+	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));
+
+	/*
+	 * When we enable PMU_CLR_PERILP it shuts down the SRAM, but the M0
+	 * code runs in SRAM and we need it to check whether the cpu has
+	 * entered the FSM state.  So the M0 must finish its code and enter
+	 * WFI before the SRAM is shut down.  Given the FSM order:
+	 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN
+	 * we add a delay on the ST_SCU_L_PWRDN step to guarantee the M0 has
+	 * seen the FSM state and entered WFI before PMU_CLR_PERILP kicks in.
+	 */
+	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
+
+	/*
+	 * Set CPU/GPU to 1 us.
+	 *
+	 * NOTE: Even though ATF doesn't configure the GPU we'll still set up
+	 * counts here.  After all ATF controls all these other bits and also
+	 * chooses which clock these counters use.
+	 */
+	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
+	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
+	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
+	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
+}
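+
+/*
+ * Worked example of the count arithmetic described above (illustrative only;
+ * the exact CYCL_* conversion helpers live in soc.h and are assumed here):
+ *
+ *   0x5dc0 = 24000 ticks
+ *     at a nominal 32 kHz: 24000 / 32000  ~= 750 ms  (the power-on default)
+ *     at 24 MHz:           24000 / 24 MHz  = 1 ms
+ *
+ * So CYCL_32K_CNT_MS(30) requests roughly 30 ms worth of 32 kHz ticks and
+ * CYCL_24M_CNT_US(1) requests 24 ticks of the 24 MHz clock.
+ */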
+
+static uint32_t clk_ddrc_save;
+
+static void sys_slp_config(void)
+{
+	uint32_t slp_mode_cfg = 0;
+
+	/* keep enabling clk_ddrc_bpll_src_en gate for DDRC */
+	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));
+
+	prepare_abpll_for_ddrctrl();
+	sram_func_set_ddrctl_pll(ABPLL_ID);
+
+	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
+	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
+		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
+		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
+		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));
+
+	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
+		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
+		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));
+
+	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
+		       BIT(PMU_INPUT_CLAMP_EN) |
+		       BIT(PMU_POWER_OFF_REQ_CFG) |
+		       BIT(PMU_CPU0_PD_EN) |
+		       BIT(PMU_L2_FLUSH_EN) |
+		       BIT(PMU_L2_IDLE_EN) |
+		       BIT(PMU_SCU_PD_EN) |
+		       BIT(PMU_CCI_PD_EN) |
+		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
+		       BIT(PMU_ALIVE_USE_LF) |
+		       BIT(PMU_SREF0_ENTER_EN) |
+		       BIT(PMU_SREF1_ENTER_EN) |
+		       BIT(PMU_DDRC0_GATING_EN) |
+		       BIT(PMU_DDRC1_GATING_EN) |
+		       BIT(PMU_DDRIO0_RET_EN) |
+		       BIT(PMU_DDRIO0_RET_DE_REQ) |
+		       BIT(PMU_DDRIO1_RET_EN) |
+		       BIT(PMU_DDRIO1_RET_DE_REQ) |
+		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
+		       BIT(PMU_CENTER_PD_EN) |
+		       BIT(PMU_PERILP_PD_EN) |
+		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
+		       BIT(PMU_PLL_PD_EN) |
+		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
+		       BIT(PMU_OSC_DIS) |
+		       BIT(PMU_PMU_USE_LF);
+
+	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
+	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);
+
+	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
+	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
+	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
+}
+
+static void set_hw_idle(uint32_t hw_idle)
+{
+	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static void clr_hw_idle(uint32_t hw_idle)
+{
+	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static uint32_t iomux_status[12];
+static uint32_t pull_mode_status[12];
+static uint32_t gpio_direction[3];
+static uint32_t gpio_2_4_clk_gate;
+
+static void suspend_apio(void)
+{
+	struct apio_info *suspend_apio;
+	int i;
+
+	suspend_apio = plat_get_rockchip_suspend_apio();
+
+	if (!suspend_apio)
+		return;
+
+	/* save gpio2 ~ gpio4 iomux and pull mode */
+	for (i = 0; i < 12; i++) {
+		iomux_status[i] = mmio_read_32(GRF_BASE +
+				GRF_GPIO2A_IOMUX + i * 4);
+		pull_mode_status[i] = mmio_read_32(GRF_BASE +
+				GRF_GPIO2A_P + i * 4);
+	}
+
+	/* store gpio2 ~ gpio4 clock gate state */
+	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
+				PCLK_GPIO2_GATE_SHIFT) & 0x07;
+
+	/* enable gpio2 ~ gpio4 clock gate */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));
+
+	/* save gpio2 ~ gpio4 direction */
+	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
+	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
+	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);
+
+	/* apio1 charge gpio3a0 ~ gpio3c7 */
+	if (suspend_apio->apio1) {
+
+		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
+		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
+		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);
+
+		/* set gpio3a0 ~ gpio3c7 to input */
+		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
+	}
+
+	/* apio2 charge gpio2a0 ~ gpio2b4 */
+	if (suspend_apio->apio2) {
+
+		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);
+
+		/* set gpio2a0 ~ gpio2b4 to input */
+		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
+	}
+
+	/* apio3 charge gpio2c0 ~ gpio2d4 */
+	if (suspend_apio->apio3) {
+
+		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
+		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
+		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);
+
+		/* set gpio2c0 ~ gpio2d4 to input */
+		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
+	}
+
+	/* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
+	if (suspend_apio->apio4) {
+
+		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);
+
+		/* set gpio4c0 ~ gpio4d6 to input */
+		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
+	}
+
+	/* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
+	if (suspend_apio->apio5) {
+		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
+		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
+			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
+
+		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
+		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
+		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);
+
+		/* set gpio3d0 ~ gpio4a7 to input */
+		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
+		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
+	}
+}
+
+static void resume_apio(void)
+{
+	struct apio_info *suspend_apio;
+	int i;
+
+	suspend_apio = plat_get_rockchip_suspend_apio();
+
+	if (!suspend_apio)
+		return;
+
+	for (i = 0; i < 12; i++) {
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
+			      REG_SOC_WMSK | pull_mode_status[i]);
+		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
+			      REG_SOC_WMSK | iomux_status[i]);
+	}
+
+	/* set gpio2 ~ gpio4 direction back to the stored value */
+	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
+	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
+	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);
+
+	/* set gpio2 ~ gpio4 clock gate back to the stored value */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
+		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
+				      PCLK_GPIO2_GATE_SHIFT));
+}
+
+static void suspend_gpio(void)
+{
+	struct gpio_info *suspend_gpio;
+	uint32_t count;
+	int i;
+
+	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
+
+	for (i = 0; i < count; i++) {
+		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
+		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
+		udelay(1);
+	}
+}
+
+static void resume_gpio(void)
+{
+	struct gpio_info *suspend_gpio;
+	uint32_t count;
+	int i;
+
+	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
+
+	for (i = count - 1; i >= 0; i--) {
+		gpio_set_value(suspend_gpio[i].index,
+			       !suspend_gpio[i].polarity);
+		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
+		udelay(1);
+	}
+}
+
+static void m0_configure_suspend(void)
+{
+	/* set PARAM to M0_FUNC_SUSPEND */
+	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
+}
+
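+/*
+ * The sram text/data and the embedded M0 image live in SRAM that is powered
+ * down while suspended (see the PMU_CLR_PERILP note above), so keep a copy
+ * in DRAM here and copy it back in sram_restore() on the resume path.
+ */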
+void sram_save(void)
+{
+	size_t text_size = (char *)&__bl31_sram_text_real_end -
+			   (char *)&__bl31_sram_text_start;
+	size_t data_size = (char *)&__bl31_sram_data_real_end -
+			   (char *)&__bl31_sram_data_start;
+	size_t incbin_size = (char *)&__sram_incbin_real_end -
+			     (char *)&__sram_incbin_start;
+
+	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
+	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
+	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
+	       incbin_size);
+}
+
+void sram_restore(void)
+{
+	size_t text_size = (char *)&__bl31_sram_text_real_end -
+			   (char *)&__bl31_sram_text_start;
+	size_t data_size = (char *)&__bl31_sram_data_real_end -
+			   (char *)&__bl31_sram_data_start;
+	size_t incbin_size = (char *)&__sram_incbin_real_end -
+			     (char *)&__sram_incbin_start;
+
+	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
+	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
+	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
+	       incbin_size);
+}
+
+struct uart_debug {
+	uint32_t uart_dll;
+	uint32_t uart_dlh;
+	uint32_t uart_ier;
+	uint32_t uart_fcr;
+	uint32_t uart_mcr;
+	uint32_t uart_lcr;
+};
+
+#define UART_DLL	0x00
+#define UART_DLH	0x04
+#define UART_IER	0x04
+#define UART_FCR	0x08
+#define UART_LCR	0x0c
+#define UART_MCR	0x10
+#define UARTSRR		0x88
+
+#define UART_RESET	BIT(0)
+#define UARTFCR_FIFOEN	BIT(0)
+#define RCVR_FIFO_RESET	BIT(1)
+#define XMIT_FIFO_RESET	BIT(2)
+#define DIAGNOSTIC_MODE	BIT(4)
+#define UARTLCR_DLAB	BIT(7)
+
+static struct uart_debug uart_save;
+
+void suspend_uart(void)
+{
+	uart_save.uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
+	uart_save.uart_ier = mmio_read_32(PLAT_RK_UART_BASE + UART_IER);
+	uart_save.uart_mcr = mmio_read_32(PLAT_RK_UART_BASE + UART_MCR);
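+	/* LCR[7] (DLAB) must be set to access the divisor latch DLL/DLH */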
+	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR,
+		      uart_save.uart_lcr | UARTLCR_DLAB);
+	uart_save.uart_dll = mmio_read_32(PLAT_RK_UART_BASE + UART_DLL);
+	uart_save.uart_dlh = mmio_read_32(PLAT_RK_UART_BASE + UART_DLH);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
+}
+
+void resume_uart(void)
+{
+	uint32_t uart_lcr;
+
+	mmio_write_32(PLAT_RK_UART_BASE + UARTSRR,
+		      XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);
+
+	uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, DIAGNOSTIC_MODE);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_lcr | UARTLCR_DLAB);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_DLL, uart_save.uart_dll);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_DLH, uart_save.uart_dlh);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_IER, uart_save.uart_ier);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_FCR, UARTFCR_FIFOEN);
+	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, uart_save.uart_mcr);
+}
+
+void save_usbphy(void)
+{
+	store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0);
+	store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
+	store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
+	store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
+	store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
+	store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
+	store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);
+
+	store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
+	store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
+	store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
+	store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
+	store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
+	store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
+	store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
+}
+
+void restore_usbphy(void)
+{
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
+		      REG_SOC_WMSK | store_usbphy0[0]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
+		      REG_SOC_WMSK | store_usbphy0[1]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
+		      REG_SOC_WMSK | store_usbphy0[2]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
+		      REG_SOC_WMSK | store_usbphy0[3]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
+		      REG_SOC_WMSK | store_usbphy0[4]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
+		      REG_SOC_WMSK | store_usbphy0[5]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
+		      REG_SOC_WMSK | store_usbphy0[6]);
+
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
+		      REG_SOC_WMSK | store_usbphy1[0]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
+		      REG_SOC_WMSK | store_usbphy1[1]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
+		      REG_SOC_WMSK | store_usbphy1[2]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
+		      REG_SOC_WMSK | store_usbphy1[3]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
+		      REG_SOC_WMSK | store_usbphy1[4]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
+		      REG_SOC_WMSK | store_usbphy1[5]);
+	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
+		      REG_SOC_WMSK | store_usbphy1[6]);
+}
+
+void grf_register_save(void)
+{
+	int i;
+
+	store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
+	store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
+	store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
+	store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
+	store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
+	store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));
+
+	for (i = 0; i < 4; i++)
+		store_grf_ddrc_con[i] =
+			mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);
+
+	store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
+}
+
+void grf_register_restore(void)
+{
+	int i;
+
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
+		      REG_SOC_WMSK | store_grf_soc_con0);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
+		      REG_SOC_WMSK | store_grf_soc_con1);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
+		      REG_SOC_WMSK | store_grf_soc_con2);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
+		      REG_SOC_WMSK | store_grf_soc_con3);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
+		      REG_SOC_WMSK | store_grf_soc_con4);
+	mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
+		      REG_SOC_WMSK | store_grf_soc_con7);
+
+	for (i = 0; i < 4; i++)
+		mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
+			      REG_SOC_WMSK | store_grf_ddrc_con[i]);
+
+	mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
+}
+
+void cru_register_save(void)
+{
+	int i;
+
+	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
+		store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
+}
+
+void cru_register_restore(void)
+{
+	int i;
+
+	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {
+
+		/*
+		 * Since the DPLL and CRU_CLKSEL_CON6 have already been
+		 * restored in dmc_resume, and the ABPLL will be restored
+		 * later, skip them here.
+		 */
+		if ((i == CRU_CLKSEL_CON6) ||
+		    (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
+		     i <= CRU_PLL_CON(DPLL_ID, 5)))
+			continue;
+
+		if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
+		    (i == CRU_PLL_CON(CPLL_ID, 2)) ||
+		    (i == CRU_PLL_CON(GPLL_ID, 2)) ||
+		    (i == CRU_PLL_CON(NPLL_ID, 2)) ||
+		    (i == CRU_PLL_CON(VPLL_ID, 2)))
+			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
+		/*
+		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
+		 * do not need the high 16-bit write mask
+		 */
+		else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
+			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
+		else
+			mmio_write_32(CRU_BASE + i,
+				      REG_SOC_WMSK | store_cru[i / 4]);
+	}
+}
+
+void wdt_register_save(void)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
+		store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
+	}
+}
+
+void wdt_register_restore(void)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
+		mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
+	}
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+	uint32_t wait_cnt = 0;
+	uint32_t status = 0;
+
+	ddr_prepare_for_sys_suspend();
+	dmc_suspend();
+	pmu_scu_b_pwrdn();
+
+	/* need to save the usbphy regs before shutting down the PERIHP PD */
+	save_usbphy();
+
+	pmu_power_domains_suspend();
+	set_hw_idle(BIT(PMU_CLR_CENTER1) |
+		    BIT(PMU_CLR_ALIVE) |
+		    BIT(PMU_CLR_MSCH0) |
+		    BIT(PMU_CLR_MSCH1) |
+		    BIT(PMU_CLR_CCIM0) |
+		    BIT(PMU_CLR_CCIM1) |
+		    BIT(PMU_CLR_CENTER) |
+		    BIT(PMU_CLR_PERILP) |
+		    BIT(PMU_CLR_PERILPM0) |
+		    BIT(PMU_CLR_GIC));
+	set_pmu_rsthold();
+	sys_slp_config();
+
+	m0_configure_suspend();
+	m0_start();
+
+	pmu_sgrf_rst_hld();
+
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      ((uintptr_t)&pmu_cpuson_entrypoint >>
+			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
+
+	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
+		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
+		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
+	dsb();
+	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
+		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
+		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
+	while ((mmio_read_32(PMU_BASE +
+	       PMU_ADB400_ST) & status) != status) {
+		wait_cnt++;
+		if (wait_cnt >= MAX_WAIT_COUNT) {
+			ERROR("%s: wait for ADB400 ack timed out (%x)\n",
+			      __func__,
+			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
+			panic();
+		}
+	}
+	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));
+
+	secure_watchdog_disable();
+
+	/*
+	 * Disabling the PLLs/PWMs/DVFS happens right before WFI, which is
+	 * the last step in suspend.
+	 */
+	disable_dvfs_plls();
+	disable_pwms();
+	disable_nodvfs_plls();
+
+	suspend_apio();
+	suspend_gpio();
+	suspend_uart();
+	grf_register_save();
+	cru_register_save();
+	wdt_register_save();
+	sram_save();
+	plat_rockchip_save_gpio();
+
+	return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+	uint32_t wait_cnt = 0;
+	uint32_t status = 0;
+
+	plat_rockchip_restore_gpio();
+	wdt_register_restore();
+	cru_register_restore();
+	grf_register_restore();
+	resume_uart();
+	resume_apio();
+	resume_gpio();
+	enable_nodvfs_plls();
+	enable_pwms();
+	/* PWM regulators take time to come up; give 300us to be safe. */
+	udelay(300);
+	enable_dvfs_plls();
+
+	secure_watchdog_enable();
+	secure_sgrf_init();
+	secure_sgrf_ddr_rgn_init();
+
+	/* restore clk_ddrc_bpll_src_en gate */
+	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
+		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));
+
+	/*
+	 * The wakeup status does not clear by itself, so we need to clear it
+	 * manually.  Otherwise we will always see a stale wakeup source the
+	 * next time we query it.
+	 *
+	 * NOTE: If the kernel needs to query this, we might want to stash it
+	 * somewhere.
+	 */
+	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
+	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);
+
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+
+	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
+		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
+		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
+		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
+	dsb();
+	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
+			BIT(PMU_SCU_B_PWRDWN_EN));
+
+	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
+		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
+		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
+		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
+		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
+		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
+		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));
+
+	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
+		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
+		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
+
+	while ((mmio_read_32(PMU_BASE +
+	   PMU_ADB400_ST) & status)) {
+		wait_cnt++;
+		if (wait_cnt >= MAX_WAIT_COUNT) {
+			ERROR("%s: wait for ADB400 ack timed out (%x)\n",
+			      __func__,
+			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
+			panic();
+		}
+	}
+
+	pmu_sgrf_rst_hld_release();
+	pmu_scu_b_pwrup();
+	pmu_power_domains_resume();
+
+	restore_abpll();
+	restore_pmu_rsthold();
+	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
+				BIT(PMU_CLR_ALIVE) |
+				BIT(PMU_CLR_MSCH0) |
+				BIT(PMU_CLR_MSCH1) |
+				BIT(PMU_CLR_CCIM0) |
+				BIT(PMU_CLR_CCIM1) |
+				BIT(PMU_CLR_CENTER) |
+				BIT(PMU_CLR_PERILP) |
+				BIT(PMU_CLR_PERILPM0) |
+				BIT(PMU_CLR_GIC));
+
+	plat_rockchip_gic_cpuif_enable();
+	m0_stop();
+
+	restore_usbphy();
+
+	ddr_prepare_for_sys_resume();
+
+	return 0;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+	struct gpio_info *rst_gpio;
+
+	rst_gpio = plat_get_rockchip_gpio_reset();
+
+	if (rst_gpio) {
+		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
+		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
+	} else {
+		soc_global_soft_reset();
+	}
+
+	while (1)
+		;
+}
+
+void __dead2 rockchip_soc_system_off(void)
+{
+	struct gpio_info *poweroff_gpio;
+
+	poweroff_gpio = plat_get_rockchip_gpio_poweroff();
+
+	if (poweroff_gpio) {
+		/*
+		 * If the tsadc over-temperature pin (GPIO1A6) is used as the
+		 * shutdown gpio, its iomux needs to be switched back to the
+		 * gpio function first.
+		 */
+		if (poweroff_gpio->index == TSADC_INT_PIN) {
+			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
+				      GPIO1A6_IOMUX);
+		}
+		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
+		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
+	} else {
+		WARN("Do nothing when system off\n");
+	}
+
+	while (1)
+		;
+}
+
+void rockchip_plat_mmu_el3(void)
+{
+	size_t sram_size;
+
+	/* sram.text size */
+	sram_size = (char *)&__bl31_sram_text_end -
+		    (char *)&__bl31_sram_text_start;
+	mmap_add_region((unsigned long)&__bl31_sram_text_start,
+			(unsigned long)&__bl31_sram_text_start,
+			sram_size, MT_MEMORY | MT_RO | MT_SECURE);
+
+	/* sram.data size */
+	sram_size = (char *)&__bl31_sram_data_end -
+		    (char *)&__bl31_sram_data_start;
+	mmap_add_region((unsigned long)&__bl31_sram_data_start,
+			(unsigned long)&__bl31_sram_data_start,
+			sram_size, MT_MEMORY | MT_RW | MT_SECURE);
+
+	sram_size = (char *)&__bl31_sram_stack_end -
+		    (char *)&__bl31_sram_stack_start;
+	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
+			(unsigned long)&__bl31_sram_stack_start,
+			sram_size, MT_MEMORY | MT_RW | MT_SECURE);
+
+	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
+	mmap_add_region((unsigned long)&__sram_incbin_start,
+			(unsigned long)&__sram_incbin_start,
+			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
+}
+
+void plat_rockchip_pmu_init(void)
+{
+	uint32_t cpu;
+
+	rockchip_pd_lock_init();
+
+	/* register requires 32bits mode, switch it to 32 bits */
+	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
+
+	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+		cpuson_flags[cpu] = 0;
+
+	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
+		clst_warmboot_data[cpu] = 0;
+
+	/* config cpu's warm boot address */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+		      CPU_BOOT_ADDR_WMASK);
+	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);
+
+	/*
+	 * Enable Schmitt trigger for better 32 kHz input signal, which is
+	 * important for suspend/resume reliability among other things.
+	 */
+	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);
+
+	init_pmu_counts();
+
+	nonboot_cpus_off();
+
+	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
+	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
+}
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu.h b/plat/rockchip/rk3399/drivers/pmu/pmu.h
new file mode 100644
index 0000000..5c0ab4d
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_H__
+#define __PMU_H__
+
+#include <pmu_bits.h>
+#include <pmu_regs.h>
+#include <soc.h>
+
+/* Allocate the sp region in pmusram */
+#define PSRAM_SP_SIZE		0x80
+#define PSRAM_SP_BOTTOM		(PSRAM_SP_TOP - PSRAM_SP_SIZE)
+
+/*****************************************************************************
+ * Common define for per soc pmu.h
+ *****************************************************************************/
+/* The ways of controlling the core power domains */
+enum cores_pm_ctr_mode {
+	core_pwr_pd = 0,
+	core_pwr_wfi = 1,
+	core_pwr_wfi_int = 2
+};
+
+/*****************************************************************************
+ * pmu con,reg
+ *****************************************************************************/
+#define PMU_WKUP_CFG(n)	((n) * 4)
+
+#define PMU_CORE_PM_CON(cpu)		(0xc0 + ((cpu) * 4))
+
+/* the shift of bits for cores status */
+enum pmu_core_pwrst_shift {
+	clstl_cpu_wfe = 2,
+	clstl_cpu_wfi = 6,
+	clstb_cpu_wfe = 12,
+	clstb_cpu_wfi = 16
+};
+
+#define CKECK_WFE_MSK		0x1
+#define CKECK_WFI_MSK		0x10
+#define CKECK_WFEI_MSK		0x11
+
+/* Specific features required  */
+#define AP_PWROFF		0x0a
+
+#define GPIO0A0_SMT_ENABLE	BITS_WITH_WMASK(1, 3, 0)
+#define GPIO1A6_IOMUX		BITS_WITH_WMASK(0, 3, 12)
+
+#define TSADC_INT_PIN		38
+#define CORES_PM_DISABLE	0x0
+
+#define PD_CTR_LOOP		500
+#define CHK_CPU_LOOP		500
+#define MAX_WAIT_COUNT		1000
+
+#define	GRF_SOC_CON4		0x0e210
+
+#define PMUGRF_GPIO0A_SMT	0x0120
+#define PMUGRF_SOC_CON0		0x0180
+
+#define CCI_FORCE_WAKEUP	WMSK_BIT(8)
+#define EXTERNAL_32K		WMSK_BIT(0)
+
+#define PLL_PD_HW		0xff
+#define IOMUX_CLK_32K		0x00030002
+#define NOC_AUTO_ENABLE		0x3fffffff
+
+#define SAVE_QOS(array, NAME) \
+	RK3399_CPU_AXI_SAVE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
+#define RESTORE_QOS(array, NAME) \
+	RK3399_CPU_AXI_RESTORE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
+
+#define RK3399_CPU_AXI_SAVE_QOS(array, base) do { \
+	array[0] = mmio_read_32(base + CPU_AXI_QOS_ID_COREID); \
+	array[1] = mmio_read_32(base + CPU_AXI_QOS_REVISIONID); \
+	array[2] = mmio_read_32(base + CPU_AXI_QOS_PRIORITY); \
+	array[3] = mmio_read_32(base + CPU_AXI_QOS_MODE); \
+	array[4] = mmio_read_32(base + CPU_AXI_QOS_BANDWIDTH); \
+	array[5] = mmio_read_32(base + CPU_AXI_QOS_SATURATION); \
+	array[6] = mmio_read_32(base + CPU_AXI_QOS_EXTCONTROL); \
+} while (0)
+
+#define RK3399_CPU_AXI_RESTORE_QOS(array, base) do { \
+	mmio_write_32(base + CPU_AXI_QOS_ID_COREID, array[0]); \
+	mmio_write_32(base + CPU_AXI_QOS_REVISIONID, array[1]); \
+	mmio_write_32(base + CPU_AXI_QOS_PRIORITY, array[2]); \
+	mmio_write_32(base + CPU_AXI_QOS_MODE, array[3]); \
+	mmio_write_32(base + CPU_AXI_QOS_BANDWIDTH, array[4]); \
+	mmio_write_32(base + CPU_AXI_QOS_SATURATION, array[5]); \
+	mmio_write_32(base + CPU_AXI_QOS_EXTCONTROL, array[6]); \
+} while (0)
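+
+/*
+ * For reference: SAVE_QOS(pmu_slpdata.gpu_qos, GPU) expands to
+ * RK3399_CPU_AXI_SAVE_QOS(pmu_slpdata.gpu_qos, CPU_AXI_GPU_QOS_BASE), i.e.
+ * the per-master *_QOS_BASE constant is selected by token pasting.
+ */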
+
+struct pmu_slpdata_s {
+	uint32_t cci_m0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t cci_m1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t dmac0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t dmac1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t crypto0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t crypto1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t pmu_cm0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t peri_cm1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t gic_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t usb_otg0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t usb_otg1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t usb_host0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t usb_host1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t video_m0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t video_m1_r_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t video_m1_w_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t rga_r_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t rga_w_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t vop_big_r[CPU_AXI_QOS_NUM_REGS];
+	uint32_t vop_big_w[CPU_AXI_QOS_NUM_REGS];
+	uint32_t vop_little[CPU_AXI_QOS_NUM_REGS];
+	uint32_t iep_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t isp1_m0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t isp1_m1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t isp0_m0_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t isp0_m1_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t hdcp_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t perihp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t perilp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t perilpslv_nsp_qos[CPU_AXI_QOS_NUM_REGS];
+	uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
+};
+
+extern uint32_t clst_warmboot_data[PLATFORM_CLUSTER_COUNT];
+
+extern void sram_func_set_ddrctl_pll(uint32_t pll_src);
+
+#endif /* __PMU_H__ */
diff --git a/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c
new file mode 100644
index 0000000..d299116
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pmu/pmu_fw.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* convoluted way to make sure that the define is pasted just the right way */
+#define _INCBIN(file, sym) \
+	__asm__( \
+		".section .sram.incbin\n" \
+		".global " #sym "\n" \
+		".type " #sym ", %object\n" \
+		".align 4\n" \
+		#sym ":\n" \
+		".incbin \"" #file "\"\n" \
+		".size " #sym ", .-" #sym "\n" \
+		".global " #sym "_end\n" \
+		#sym "_end:\n" \
+	)
+
+#define INCBIN(file, sym) _INCBIN(file, sym)
+
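+/*
+ * Embeds the M0 firmware image named by the RK3399M0FW macro (presumably
+ * provided by the build system) into the .sram.incbin section and exports
+ * the rk3399m0_bin / rk3399m0_bin_end symbols around it.
+ */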
+INCBIN(RK3399M0FW, rk3399m0_bin);
diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.c b/plat/rockchip/rk3399/drivers/pwm/pwm.c
new file mode 100644
index 0000000..11c1565
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pwm/pwm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_private.h>
+#include <pmu.h>
+#include <pwm.h>
+#include <soc.h>
+
+#define PWM0_IOMUX_PWM_EN		(1 << 0)
+#define PWM1_IOMUX_PWM_EN		(1 << 1)
+#define PWM2_IOMUX_PWM_EN		(1 << 2)
+#define PWM3_IOMUX_PWM_EN		(1 << 3)
+
+struct pwm_data_s {
+	uint32_t iomux_bitmask;
+	uint32_t enable_bitmask;
+};
+
+static struct pwm_data_s pwm_data;
+
+/*
+ * Disable the PWMs.
+ */
+void disable_pwms(void)
+{
+	uint32_t i, val;
+
+	pwm_data.iomux_bitmask = 0;
+
+	/* Save PWMs pinmux and change PWMs pinmux to GPIOs */
+	val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX);
+	if (((val >> GRF_GPIO4C2_IOMUX_SHIFT) &
+		GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C2_IOMUX_PWM) {
+		pwm_data.iomux_bitmask |= PWM0_IOMUX_PWM_EN;
+		val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+				    GRF_GPIO4C2_IOMUX_SHIFT);
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+	}
+
+	val = mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX);
+	if (((val >> GRF_GPIO4C6_IOMUX_SHIFT) &
+		GRF_IOMUX_2BIT_MASK) == GRF_GPIO4C6_IOMUX_PWM) {
+		pwm_data.iomux_bitmask |= PWM1_IOMUX_PWM_EN;
+		val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+				    GRF_GPIO4C6_IOMUX_SHIFT);
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+	}
+
+	val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX);
+	if (((val >> PMUGRF_GPIO1C3_IOMUX_SHIFT) &
+		GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO1C3_IOMUX_PWM) {
+		pwm_data.iomux_bitmask |= PWM2_IOMUX_PWM_EN;
+		val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+				    PMUGRF_GPIO1C3_IOMUX_SHIFT);
+		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val);
+	}
+
+	val = mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX);
+	if (((val >> PMUGRF_GPIO0A6_IOMUX_SHIFT) &
+		GRF_IOMUX_2BIT_MASK) == PMUGRF_GPIO0A6_IOMUX_PWM) {
+		pwm_data.iomux_bitmask |= PWM3_IOMUX_PWM_EN;
+		val = BITS_WITH_WMASK(GRF_IOMUX_GPIO, GRF_IOMUX_2BIT_MASK,
+				    PMUGRF_GPIO0A6_IOMUX_SHIFT);
+		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val);
+	}
+
+	/* Disable the pwm channel */
+	pwm_data.enable_bitmask = 0;
+	for (i = 0; i < 4; i++) {
+		val = mmio_read_32(PWM_BASE + PWM_CTRL(i));
+		if ((val & PWM_ENABLE) != PWM_ENABLE)
+			continue;
+		pwm_data.enable_bitmask |= (1 << i);
+		mmio_write_32(PWM_BASE + PWM_CTRL(i), val & ~PWM_ENABLE);
+	}
+}
+
+/*
+ * Enable the PWMs.
+ */
+void enable_pwms(void)
+{
+	uint32_t i, val;
+
+	for (i = 0; i < 4; i++) {
+		val = mmio_read_32(PWM_BASE + PWM_CTRL(i));
+		if (!(pwm_data.enable_bitmask & (1 << i)))
+			continue;
+		mmio_write_32(PWM_BASE + PWM_CTRL(i), val | PWM_ENABLE);
+	}
+
+	/* Restore all IOMUXes */
+	if (pwm_data.iomux_bitmask & PWM3_IOMUX_PWM_EN) {
+		val = BITS_WITH_WMASK(PMUGRF_GPIO0A6_IOMUX_PWM,
+				    GRF_IOMUX_2BIT_MASK,
+				    PMUGRF_GPIO0A6_IOMUX_SHIFT);
+		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, val);
+	}
+
+	if (pwm_data.iomux_bitmask & PWM2_IOMUX_PWM_EN) {
+		val = BITS_WITH_WMASK(PMUGRF_GPIO1C3_IOMUX_PWM,
+				    GRF_IOMUX_2BIT_MASK,
+				    PMUGRF_GPIO1C3_IOMUX_SHIFT);
+		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX, val);
+	}
+
+	if (pwm_data.iomux_bitmask & PWM1_IOMUX_PWM_EN) {
+		val = BITS_WITH_WMASK(GRF_GPIO4C6_IOMUX_PWM,
+				    GRF_IOMUX_2BIT_MASK,
+				    GRF_GPIO4C6_IOMUX_SHIFT);
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+	}
+
+	if (pwm_data.iomux_bitmask & PWM0_IOMUX_PWM_EN) {
+		val = BITS_WITH_WMASK(GRF_GPIO4C2_IOMUX_PWM,
+				    GRF_IOMUX_2BIT_MASK,
+				    GRF_GPIO4C2_IOMUX_SHIFT);
+		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, val);
+	}
+}
diff --git a/plat/rockchip/rk3399/drivers/pwm/pwm.h b/plat/rockchip/rk3399/drivers/pwm/pwm.h
new file mode 100644
index 0000000..57cfd1b
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/pwm/pwm.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PWM_H__
+#define __PWM_H__
+
+void disable_pwms(void);
+void enable_pwms(void);
+
+#endif
diff --git a/plat/rockchip/rk3399/drivers/secure/secure.c b/plat/rockchip/rk3399/drivers/secure/secure.c
new file mode 100644
index 0000000..589d833
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/secure/secure.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <plat_private.h>
+#include <secure.h>
+#include <soc.h>
+
+static void sgrf_ddr_rgn_global_bypass(uint32_t bypass)
+{
+	if (bypass)
+		/* set bypass (non-secure regions) for whole ddr regions */
+		mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+			      SGRF_DDR_RGN_BYPS);
+	else
+		/* cancel bypass for whole ddr regions */
+		mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+			      SGRF_DDR_RGN_NO_BYPS);
+}
+
+/**
+ * There are 8 + 1 regions for DDR secure control:
+ * DDR_RGN_0 ~ DDR_RGN_7: each of these DDR_RGNs has a 1MB granularity
+ * DDR_RGN_X - the rest of the memory, excluding DDR_RGN_0 ~ DDR_RGN_7
+ *
+ * DDR_RGN_0 - start address of the RGN0
+ * DDR_RGN_8 - end address of the RGN0
+ * DDR_RGN_1 - start address of the RGN1
+ * DDR_RGN_9 - end address of the RGN1
+ * ...
+ * DDR_RGN_7 - start address of the RGN7
+ * DDR_RGN_15 - end address of the RGN7
+ * DDR_RGN_16 - bits 0 ~ 7 are a bitmap enabling security for RGN0~7,
+ *                0: disable, 1: enable
+ *              bit 8 is the setting for RGNx (the rest of the memory,
+ *                which excludes RGN0~7), 0: disable, 1: enable
+ *              bit 9 is the global secure configuration via bypass,
+ *                0: disable bypass, 1: enable bypass
+ *
+ * @rgn - the DDR region (0 ~ 7) to configure.
+ * @st and @ed are the start and end addresses of the range to secure; they
+ * must be 1MB aligned and are converted to megabyte units (st_mb, ed_mb)
+ * below. When the programmed st_mb == 0 and ed_mb == 0, the address range
+ * 0x0 ~ 0xfffff is secure.
+ *
+ * For example, to make the range [0, 32MB) secure via DDR_RGN0, the hardware
+ * is programmed with rgn == 0, st_mb == 0, ed_mb == 31.
+ */
+static void sgrf_ddr_rgn_config(uint32_t rgn,
+				uintptr_t st, uintptr_t ed)
+{
+	uintptr_t st_mb, ed_mb;
+
+	assert(rgn <= 7);
+	assert(st < ed);
+
+	/* check aligned 1MB */
+	assert(st % SIZE_M(1) == 0);
+	assert(ed % SIZE_M(1) == 0);
+
+	st_mb = st / SIZE_M(1);
+	ed_mb = ed / SIZE_M(1);
+
+	/* set ddr region addr start */
+	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn),
+		      BITS_WITH_WMASK(st_mb, SGRF_DDR_RGN_0_16_WMSK, 0));
+
+	/* set ddr region addr end */
+	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(rgn + 8),
+		      BITS_WITH_WMASK((ed_mb - 1), SGRF_DDR_RGN_0_16_WMSK, 0));
+
+	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
+		      BIT_WITH_WMSK(rgn));
+}
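+
+/*
+ * Illustrative call, mirroring the example in the comment above: securing the
+ * first 32MB of DDR through region 0 would be
+ *
+ *	sgrf_ddr_rgn_config(0, 0x0, SIZE_M(32));
+ *
+ * which programs DDR_RGN_0 = 0 and DDR_RGN_8 = 31, then sets bit 0 of
+ * DDR_RGN_16 to enable security for that region.
+ */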
+
+void secure_watchdog_disable(void)
+{
+	/**
+	 * Disable CA53 and CM0 wdt pclk
+	 * BIT[8]: ca53 wdt pclk, 0: enable 1: disable
+	 * BIT[10]: cm0 wdt pclk, 0: enable 1: disable
+	 */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3),
+		      BIT_WITH_WMSK(PCLK_WDT_CA53_GATE_SHIFT) |
+		      BIT_WITH_WMSK(PCLK_WDT_CM0_GATE_SHIFT));
+}
+
+void secure_watchdog_enable(void)
+{
+	/**
+	 * Enable CA53 and CM0 wdt pclk
+	 * BIT[8]: ca53 wdt pclk, 0: enable 1: disable
+	 * BIT[10]: cm0 wdt pclk, 0: enable 1: disable
+	 */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3),
+		      WMSK_BIT(PCLK_WDT_CA53_GATE_SHIFT) |
+		      WMSK_BIT(PCLK_WDT_CM0_GATE_SHIFT));
+}
+
+__pmusramfunc void sram_secure_timer_init(void)
+{
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff);
+
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+
+	/* auto reload & enable the timer */
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+		      TIMER_EN | TIMER_FMODE);
+}
+
+void secure_timer_init(void)
+{
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT0, 0xffffffff);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_END_COUNT1, 0xffffffff);
+
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_INIT_COUNT0, 0x0);
+
+	/* auto reload & enable the timer */
+	mmio_write_32(STIMER1_CHN_BASE(5) + TIMER_CONTROL_REG,
+		      TIMER_EN | TIMER_FMODE);
+}
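+
+/*
+ * Note (illustrative, not part of the original patch): with both end-count
+ * words set to 0xffffffff and TIMER_FMODE selected (taken here to be the
+ * free-running mode), the secure timer channel used above behaves as a
+ * free-running 64-bit up-counter starting from zero.
+ */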
+
+void secure_sgrf_init(void)
+{
+	/* security config for master */
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5),
+		      REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6),
+		      REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7),
+		      REG_SOC_WMSK | SGRF_SOC_ALLMST_NS);
+
+	/* security config for slave */
+	mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(0),
+		      SGRF_PMU_SLV_S_CFGED |
+		      SGRF_PMU_SLV_CRYPTO1_NS);
+	mmio_write_32(SGRF_BASE + SGRF_PMU_SLV_CON0_1(1),
+		      SGRF_SLV_S_WMSK | SGRF_PMUSRAM_S);
+	mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(0),
+		      SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(1),
+		      SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(2),
+		      SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(3),
+		      SGRF_SLV_S_WMSK | SGRF_SLV_S_ALL_NS);
+	mmio_write_32(SGRF_BASE + SGRF_SLV_SECURE_CON0_4(4),
+		      SGRF_SLV_S_WMSK | SGRF_INTSRAM_S);
+}
+
+void secure_sgrf_ddr_rgn_init(void)
+{
+	sgrf_ddr_rgn_config(0, TZRAM_BASE, TZRAM_SIZE);
+	sgrf_ddr_rgn_global_bypass(0);
+}
diff --git a/plat/rockchip/rk3399/drivers/secure/secure.h b/plat/rockchip/rk3399/drivers/secure/secure.h
new file mode 100644
index 0000000..334805d
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/secure/secure.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_ROCKCHIP_RK3399_DRIVER_SECURE_H__
+#define __PLAT_ROCKCHIP_RK3399_DRIVER_SECURE_H__
+
+/**************************************************
+ * sgrf reg, offset
+ **************************************************/
+#define SGRF_SOC_CON0_1(n)		(0xc000 + (n) * 4)
+#define SGRF_SOC_CON3_7(n)		(0xe00c + ((n) - 3) * 4)
+#define SGRF_SOC_CON8_15(n)		(0x8020 + ((n) - 8) * 4)
+#define SGRF_SOC_CON(n)			((n) < 3 ? SGRF_SOC_CON0_1(n) :\
+						((n) < 8 ? SGRF_SOC_CON3_7(n) :\
+							 SGRF_SOC_CON8_15(n)))
+
+#define SGRF_PMU_SLV_CON0_1(n)		(0xc240 + ((n) - 0) * 4)
+#define SGRF_SLV_SECURE_CON0_4(n)	(0xe3c0 + ((n) - 0) * 4)
+#define SGRF_DDRRGN_CON0_16(n)		((n) * 4)
+#define SGRF_DDRRGN_CON20_34(n)		(0x50 + ((n) - 20) * 4)
+
+/* All masters are non-secure */
+#define SGRF_SOC_ALLMST_NS		0xffff
+
+/* security config for slave */
+#define SGRF_SLV_S_WMSK			0xffff0000
+#define SGRF_SLV_S_ALL_NS		0x0
+
+/* security config for pmu slave ip */
+/* all slaves are non-secure */
+#define SGRF_PMU_SLV_S_NS		BIT_WITH_WMSK(0)
+/* slave secure attributes are configured */
+#define SGRF_PMU_SLV_S_CFGED		WMSK_BIT(0)
+#define SGRF_PMU_SLV_CRYPTO1_NS		WMSK_BIT(1)
+
+#define SGRF_PMUSRAM_S			BIT(8)
+
+#define SGRF_INTSRAM_S			BIT(13)
+
+/* ddr region */
+#define SGRF_DDR_RGN_0_16_WMSK		0x0fff  /* DDR RGN 0~16 size mask */
+
+#define SGRF_DDR_RGN_DPLL_CLK		BIT_WITH_WMSK(15) /* DDR PLL output clock */
+#define SGRF_DDR_RGN_RTC_CLK		BIT_WITH_WMSK(14) /* 32K clock for DDR PLL */
+
+/* security of all DDR RGNs is bypassed */
+#define SGRF_DDR_RGN_BYPS		BIT_WITH_WMSK(9)
+/* security of all DDR RGNs is not bypassed */
+#define SGRF_DDR_RGN_NO_BYPS		WMSK_BIT(9)
+
+/* The master accesses DDR region n with the secure attribute */
+#define SGRF_L_MST_S_DDR_RGN(n)		BIT_WITH_WMSK((n))
+/* bits[16:8] */
+#define SGRF_H_MST_S_DDR_RGN(n)		BIT_WITH_WMSK((n) + 8)
+
+#define SGRF_PMU_CON0			0x0c100
+#define SGRF_PMU_CON(n)			(SGRF_PMU_CON0 + (n) * 4)
+
+/**************************************************
+ * secure timer
+ **************************************************/
+/* channels 0~5 */
+#define STIMER0_CHN_BASE(n)		(STIME_BASE + 0x20 * (n))
+/* channels 6~11 */
+#define STIMER1_CHN_BASE(n)		(STIME_BASE + 0x8000 + 0x20 * (n))
+
+ /* low 32 bits */
+#define TIMER_END_COUNT0		0x00
+ /* high 32 bits */
+#define TIMER_END_COUNT1		0x04
+
+#define TIMER_CURRENT_VALUE0		0x08
+#define TIMER_CURRENT_VALUE1		0x0C
+
+ /* low 32 bits */
+#define TIMER_INIT_COUNT0		0x10
+ /* high 32 bits */
+#define TIMER_INIT_COUNT1		0x14
+
+#define TIMER_INTSTATUS			0x18
+#define TIMER_CONTROL_REG		0x1c
+
+#define TIMER_EN			0x1
+
+#define TIMER_FMODE			(0x0 << 1)
+#define TIMER_RMODE			(0x1 << 1)
+
+/**************************************************
+ * secure WDT
+ **************************************************/
+#define PCLK_WDT_CA53_GATE_SHIFT	8
+#define PCLK_WDT_CM0_GATE_SHIFT		10
+
+/* export secure operating APIs */
+void secure_watchdog_disable(void);
+void secure_watchdog_enable(void);
+void secure_timer_init(void);
+void secure_sgrf_init(void);
+void secure_sgrf_ddr_rgn_init(void);
+__pmusramfunc void sram_secure_timer_init(void);
+
+#endif /* __PLAT_ROCKCHIP_RK3399_DRIVER_SECURE_H__ */
diff --git a/plat/rockchip/rk3399/drivers/soc/soc.c b/plat/rockchip/rk3399/drivers/soc/soc.c
new file mode 100644
index 0000000..7dd0b72
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/soc/soc.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <dfs.h>
+#include <dram.h>
+#include <m0_ctl.h>
+#include <mmio.h>
+#include <plat_private.h>
+#include <platform_def.h>
+#include <rk3399_def.h>
+#include <secure.h>
+#include <soc.h>
+
+/* Table of regions to map using the MMU.  */
+const mmap_region_t plat_rk_mmap[] = {
+	MAP_REGION_FLAT(DEV_RNG0_BASE, DEV_RNG0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_SECURE),
+
+	{ 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+	/* No of root nodes */
+	PLATFORM_SYSTEM_COUNT,
+	/* No of children for the root node */
+	PLATFORM_CLUSTER_COUNT,
+	/* No of children for the first cluster node */
+	PLATFORM_CLUSTER0_CORE_COUNT,
+	/* No of children for the second cluster node */
+	PLATFORM_CLUSTER1_CORE_COUNT
+};
+
+/* sleep data for pll suspend */
+static struct deepsleep_data_s slp_data;
+
+static void set_pll_slow_mode(uint32_t pll_id)
+{
+	if (pll_id == PPLL_ID)
+		mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_SLOW_MODE);
+	else
+		mmio_write_32((CRU_BASE +
+			      CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+}
+
+static void set_pll_normal_mode(uint32_t pll_id)
+{
+	if (pll_id == PPLL_ID)
+		mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3), PLL_NOMAL_MODE);
+	else
+		mmio_write_32(CRU_BASE +
+			      CRU_PLL_CON(pll_id, 3), PLL_NOMAL_MODE);
+}
+
+static void set_pll_bypass(uint32_t pll_id)
+{
+	if (pll_id == PPLL_ID)
+		mmio_write_32(PMUCRU_BASE +
+			      PMUCRU_PPLL_CON(3), PLL_BYPASS_MODE);
+	else
+		mmio_write_32(CRU_BASE +
+			      CRU_PLL_CON(pll_id, 3), PLL_BYPASS_MODE);
+}
+
+static void _pll_suspend(uint32_t pll_id)
+{
+	set_pll_slow_mode(pll_id);
+	set_pll_bypass(pll_id);
+}
+
+/**
+ * disable_dvfs_plls - Suspend the DVFS-related PLLs
+ *
+ * When the center logic is powered off, the DPLL goes down with it, so the
+ * ABPLL must be kept running and switched in to clock the DDR during
+ * suspend. The ABPLL is therefore not shut down here and ABPLL_ID is
+ * excluded from the list below.
+ */
+void disable_dvfs_plls(void)
+{
+	_pll_suspend(CPLL_ID);
+	_pll_suspend(NPLL_ID);
+	_pll_suspend(VPLL_ID);
+	_pll_suspend(GPLL_ID);
+	_pll_suspend(ALPLL_ID);
+}
+
+/**
+ * disable_nodvfs_plls - Suspend the PPLL
+ */
+void disable_nodvfs_plls(void)
+{
+	_pll_suspend(PPLL_ID);
+}
+
+/**
+ * restore_pll - Copy PLL settings from memory to a PLL.
+ *
+ * This will copy PLL settings from an array in memory to the memory mapped
+ * registers for a PLL.
+ *
+ * Note: this handles only the CRU PLLs; the PPLL (in the PMUCRU) is excluded.
+ *
+ * pll_id: One of the values from enum plls_id
+ * src: Pointer to the array of values to restore from
+ */
+static void restore_pll(int pll_id, uint32_t *src)
+{
+	/* Nice to have PLL off while configuring */
+	mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);
+
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);
+
+	/* Do PLL_CON3 since that will enable things */
+	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);
+
+	/* Wait for PLL lock done */
+	while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
+		0x80000000) == 0x0)
+		;
+}
+
+/**
+ * save_pll - Copy PLL settings from a PLL to memory
+ *
+ * This will copy PLL settings from the memory mapped registers for a PLL to
+ * an array in memory.
+ *
+ * Note: this handles only the CRU PLLs; the PPLL (in the PMUCRU) is excluded.
+ *
+ * dst: Pointer to the array of values to save to
+ * pll_id: One of the values from enum plls_id
+ */
+static void save_pll(uint32_t *dst, int pll_id)
+{
+	int i;
+
+	for (i = 0; i < PLL_CON_COUNT; i++)
+		dst[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, i));
+}
+
+/**
+ * prepare_abpll_for_ddrctrl - Copy the DPLL settings to the ABPLL
+ *
+ * This saves the current ABPLL and DPLL settings and then programs the ABPLL
+ * with the DPLL settings, so the ABPLL can take over clocking the DDR
+ * controller during suspend; restore_abpll() restores the original settings.
+ */
+void prepare_abpll_for_ddrctrl(void)
+{
+	save_pll(slp_data.plls_con[ABPLL_ID], ABPLL_ID);
+	save_pll(slp_data.plls_con[DPLL_ID], DPLL_ID);
+
+	restore_pll(ABPLL_ID, slp_data.plls_con[DPLL_ID]);
+}
+
+void restore_abpll(void)
+{
+	restore_pll(ABPLL_ID, slp_data.plls_con[ABPLL_ID]);
+}
+
+void clk_gate_con_save(void)
+{
+	uint32_t i = 0;
+
+	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+		slp_data.pmucru_gate_con[i] =
+			mmio_read_32(PMUCRU_BASE + PMUCRU_GATE_CON(i));
+
+	for (i = 0; i < CRU_GATE_COUNT; i++)
+		slp_data.cru_gate_con[i] =
+			mmio_read_32(CRU_BASE + CRU_GATE_CON(i));
+}
+
+void clk_gate_con_disable(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+		mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i), REG_SOC_WMSK);
+
+	for (i = 0; i < CRU_GATE_COUNT; i++)
+		mmio_write_32(CRU_BASE + CRU_GATE_CON(i), REG_SOC_WMSK);
+}
+
+void clk_gate_con_restore(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
+		mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i),
+			      REG_SOC_WMSK | slp_data.pmucru_gate_con[i]);
+
+	for (i = 0; i < CRU_GATE_COUNT; i++)
+		mmio_write_32(CRU_BASE + CRU_GATE_CON(i),
+			      REG_SOC_WMSK | slp_data.cru_gate_con[i]);
+}
+
+static void set_plls_nobypass(uint32_t pll_id)
+{
+	if (pll_id == PPLL_ID)
+		mmio_write_32(PMUCRU_BASE + PMUCRU_PPLL_CON(3),
+			      PLL_NO_BYPASS_MODE);
+	else
+		mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
+			      PLL_NO_BYPASS_MODE);
+}
+
+static void _pll_resume(uint32_t pll_id)
+{
+	set_plls_nobypass(pll_id);
+	set_pll_normal_mode(pll_id);
+}
+
+void set_pmu_rsthold(void)
+{
+	uint32_t rstnhold_config0;
+	uint32_t rstnhold_config1;
+
+	slp_data.pmucru_rstnhold_con0 = mmio_read_32(PMUCRU_BASE +
+					    PMUCRU_RSTNHOLD_CON0);
+	slp_data.pmucru_rstnhold_con1 = mmio_read_32(PMUCRU_BASE +
+					    PMUCRU_RSTNHOLD_CON1);
+	rstnhold_config0 = BIT_WITH_WMSK(PRESETN_NOC_PMU_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_INTMEM_PMU_HOLD) |
+			  BIT_WITH_WMSK(HRESETN_CM0S_PMU_HOLD) |
+			  BIT_WITH_WMSK(HRESETN_CM0S_NOC_PMU_HOLD) |
+			  BIT_WITH_WMSK(DRESETN_CM0S_PMU_HOLD) |
+			  BIT_WITH_WMSK(POESETN_CM0S_PMU_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_TIMER_PMU_0_1_HOLD) |
+			  BIT_WITH_WMSK(RESETN_TIMER_PMU_0_HOLD) |
+			  BIT_WITH_WMSK(RESETN_TIMER_PMU_1_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_UART_M0_PMU_HOLD) |
+			  BIT_WITH_WMSK(RESETN_UART_M0_PMU_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_WDT_PMU_HOLD);
+	rstnhold_config1 = BIT_WITH_WMSK(PRESETN_RKPWM_PMU_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_PMUGRF_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_SGRF_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_GPIO0_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_GPIO1_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_CRU_PMU_HOLD) |
+			  BIT_WITH_WMSK(PRESETN_PVTM_PMU_HOLD);
+
+	mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0, rstnhold_config0);
+	mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1, rstnhold_config1);
+}
+
+void restore_pmu_rsthold(void)
+{
+	mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON0,
+		      slp_data.pmucru_rstnhold_con0 | REG_SOC_WMSK);
+	mmio_write_32(PMUCRU_BASE + PMUCRU_RSTNHOLD_CON1,
+		      slp_data.pmucru_rstnhold_con1 | REG_SOC_WMSK);
+}
+
+/**
+ * enable_dvfs_plls - Resume the DVFS-related PLLs
+ *
+ * See the comment at disable_dvfs_plls(): the ABPLL is never suspended,
+ * so it does not need to be resumed here either.
+ */
+void enable_dvfs_plls(void)
+{
+	_pll_resume(ALPLL_ID);
+	_pll_resume(GPLL_ID);
+	_pll_resume(VPLL_ID);
+	_pll_resume(NPLL_ID);
+	_pll_resume(CPLL_ID);
+}
+
+/**
+ * enable_nodvfs_plls - Resume the PPLL
+ */
+void enable_nodvfs_plls(void)
+{
+	_pll_resume(PPLL_ID);
+}
+
+void soc_global_soft_reset_init(void)
+{
+	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+		      CRU_PMU_SGRF_RST_RLS);
+
+	mmio_clrbits_32(CRU_BASE + CRU_GLB_RST_CON,
+			CRU_PMU_WDTRST_MSK | CRU_PMU_FIRST_SFTRST_MSK);
+}
+
+void __dead2 soc_global_soft_reset(void)
+{
+	set_pll_slow_mode(VPLL_ID);
+	set_pll_slow_mode(NPLL_ID);
+	set_pll_slow_mode(GPLL_ID);
+	set_pll_slow_mode(CPLL_ID);
+	set_pll_slow_mode(PPLL_ID);
+	set_pll_slow_mode(ABPLL_ID);
+	set_pll_slow_mode(ALPLL_ID);
+
+	dsb();
+
+	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, GLB_SRST_FST_CFG_VAL);
+
+	/*
+	 * The hardware may need some time to actually reset the system,
+	 * so make sure the core does not keep executing code in the
+	 * meantime.
+	 */
+	while (1)
+		;
+}
+
+void plat_rockchip_soc_init(void)
+{
+	secure_timer_init();
+	secure_sgrf_init();
+	secure_sgrf_ddr_rgn_init();
+	soc_global_soft_reset_init();
+	plat_rockchip_gpio_init();
+	m0_init();
+	dram_init();
+	dram_dfs_init();
+}
diff --git a/plat/rockchip/rk3399/drivers/soc/soc.h b/plat/rockchip/rk3399/drivers/soc/soc.h
new file mode 100644
index 0000000..6100d95
--- /dev/null
+++ b/plat/rockchip/rk3399/drivers/soc/soc.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+#include <utils.h>
+
+#define GLB_SRST_FST_CFG_VAL	0xfdb9
+#define GLB_SRST_SND_CFG_VAL	0xeca8
+
+#define PMUCRU_PPLL_CON(n)		((n) * 4)
+#define CRU_PLL_CON(pll_id, n)	((pll_id) * 0x20 + (n) * 4)
+#define PLL_MODE_MSK			0x03
+#define PLL_MODE_SHIFT			0x08
+#define PLL_BYPASS_MSK			0x01
+#define PLL_BYPASS_SHIFT		0x01
+#define PLL_PWRDN_MSK			0x01
+#define PLL_PWRDN_SHIFT			0x0
+#define PLL_BYPASS			BIT(1)
+#define PLL_PWRDN			BIT(0)
+
+#define NO_PLL_BYPASS			(0x00)
+#define NO_PLL_PWRDN			(0x00)
+
+#define FBDIV(n)		((0xfff << 16) | (n))
+#define POSTDIV2(n)		((0x7 << (12 + 16)) | ((n) << 12))
+#define POSTDIV1(n)		((0x7 << (8 + 16)) | ((n) << 8))
+#define REFDIV(n)		((0x3F << 16) | (n))
+#define PLL_LOCK(n)		(((n) >> 31) & 0x1)
+
+#define PLL_SLOW_MODE			BITS_WITH_WMASK(SLOW_MODE,\
+						PLL_MODE_MSK, PLL_MODE_SHIFT)
+
+#define PLL_NOMAL_MODE			BITS_WITH_WMASK(NORMAL_MODE,\
+						PLL_MODE_MSK, PLL_MODE_SHIFT)
+
+#define PLL_BYPASS_MODE			BIT_WITH_WMSK(PLL_BYPASS_SHIFT)
+#define PLL_NO_BYPASS_MODE		WMSK_BIT(PLL_BYPASS_SHIFT)
+
+#define PLL_CON_COUNT			0x06
+#define CRU_CLKSEL_COUNT		108
+#define CRU_CLKSEL_CON(n)		(0x100 + (n) * 4)
+
+#define PMUCRU_CLKSEL_CONUT		0x06
+#define PMUCRU_CLKSEL_OFFSET		0x080
+#define REG_SIZE			0x04
+#define REG_SOC_WMSK			0xffff0000
+#define CLK_GATE_MASK			0x01
+
+#define PMUCRU_GATE_COUNT	0x03
+#define CRU_GATE_COUNT		0x23
+#define PMUCRU_GATE_CON(n)	(0x100 + (n) * 4)
+#define CRU_GATE_CON(n)	(0x300 + (n) * 4)
+
+#define PMUCRU_RSTNHOLD_CON0	0x120
+enum {
+	PRESETN_NOC_PMU_HOLD = 1,
+	PRESETN_INTMEM_PMU_HOLD,
+	HRESETN_CM0S_PMU_HOLD,
+	HRESETN_CM0S_NOC_PMU_HOLD,
+	DRESETN_CM0S_PMU_HOLD,
+	POESETN_CM0S_PMU_HOLD,
+	PRESETN_SPI3_HOLD,
+	RESETN_SPI3_HOLD,
+	PRESETN_TIMER_PMU_0_1_HOLD,
+	RESETN_TIMER_PMU_0_HOLD,
+	RESETN_TIMER_PMU_1_HOLD,
+	PRESETN_UART_M0_PMU_HOLD,
+	RESETN_UART_M0_PMU_HOLD,
+	PRESETN_WDT_PMU_HOLD
+};
+
+#define PMUCRU_RSTNHOLD_CON1	0x124
+enum {
+	PRESETN_I2C0_HOLD,
+	PRESETN_I2C4_HOLD,
+	PRESETN_I2C8_HOLD,
+	PRESETN_MAILBOX_PMU_HOLD,
+	PRESETN_RKPWM_PMU_HOLD,
+	PRESETN_PMUGRF_HOLD,
+	PRESETN_SGRF_HOLD,
+	PRESETN_GPIO0_HOLD,
+	PRESETN_GPIO1_HOLD,
+	PRESETN_CRU_PMU_HOLD,
+	PRESETN_INTR_ARB_HOLD,
+	PRESETN_PVTM_PMU_HOLD,
+	RESETN_I2C0_HOLD,
+	RESETN_I2C4_HOLD,
+	RESETN_I2C8_HOLD
+};
+
+enum plls_id {
+	ALPLL_ID = 0,
+	ABPLL_ID,
+	DPLL_ID,
+	CPLL_ID,
+	GPLL_ID,
+	NPLL_ID,
+	VPLL_ID,
+	PPLL_ID,
+	END_PLL_ID,
+};
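+
+/*
+ * Illustrative example (not part of the original patch): with the layout
+ * above, CRU_PLL_CON(GPLL_ID, 3) resolves to 4 * 0x20 + 3 * 4 = 0x8c, i.e.
+ * the CON3 (mode/bypass) word of the GPLL that set_pll_slow_mode() and
+ * friends operate on.
+ */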
+
+#define CLST_L_CPUS_MSK (0xf)
+#define CLST_B_CPUS_MSK (0x3)
+
+enum pll_work_mode {
+	SLOW_MODE = 0x00,
+	NORMAL_MODE = 0x01,
+	DEEP_SLOW_MODE = 0x02,
+};
+
+enum glb_sft_reset {
+	PMU_RST_BY_FIRST_SFT,
+	PMU_RST_BY_SECOND_SFT = BIT(2),
+	PMU_RST_NOT_BY_SFT = BIT(3),
+};
+
+struct pll_div {
+	uint32_t mhz;
+	uint32_t refdiv;
+	uint32_t fbdiv;
+	uint32_t postdiv1;
+	uint32_t postdiv2;
+	uint32_t frac;
+	uint32_t freq;
+};
+
+struct deepsleep_data_s {
+	uint32_t plls_con[END_PLL_ID][PLL_CON_COUNT];
+	uint32_t cru_gate_con[CRU_GATE_COUNT];
+	uint32_t pmucru_gate_con[PMUCRU_GATE_COUNT];
+	uint32_t pmucru_rstnhold_con0;
+	uint32_t pmucru_rstnhold_con1;
+};
+
+/**************************************************
+ * pmugrf reg, offset
+ **************************************************/
+#define PMUGRF_OSREG(n)		(0x300 + (n) * 4)
+
+/**************************************************
+ * DCF reg, offset
+ **************************************************/
+#define DCF_DCF_CTRL		0x0
+#define DCF_DCF_ADDR		0x8
+#define DCF_DCF_ISR		0xc
+#define DCF_DCF_TOSET		0x14
+#define DCF_DCF_TOCMD		0x18
+#define DCF_DCF_CMD_CFG		0x1c
+
+/* DCF_DCF_ISR */
+#define DCF_TIMEOUT		(1 << 2)
+#define DCF_ERR			(1 << 1)
+#define	DCF_DONE		(1 << 0)
+
+/* DCF_DCF_CTRL */
+#define DCF_VOP_HW_EN		(1 << 2)
+#define DCF_STOP		(1 << 1)
+#define DCF_START		(1 << 0)
+
+#define CYCL_24M_CNT_US(us)	(24 * us)
+#define CYCL_24M_CNT_MS(ms)	(ms * CYCL_24M_CNT_US(1000))
+#define CYCL_32K_CNT_MS(ms)	(ms * 32)
+
+/**************************************************
+ * cru reg, offset
+ **************************************************/
+#define CRU_SOFTRST_CON(n)	(0x400 + (n) * 4)
+
+#define CRU_DMAC0_RST		BIT_WITH_WMSK(3)
+/* reset release */
+#define CRU_DMAC0_RST_RLS	WMSK_BIT(3)
+
+#define CRU_DMAC1_RST		BIT_WITH_WMSK(4)
+/* reset release */
+#define CRU_DMAC1_RST_RLS	WMSK_BIT(4)
+
+#define CRU_GLB_RST_CON		0x0510
+#define CRU_GLB_SRST_FST	0x0500
+#define CRU_GLB_SRST_SND	0x0504
+
+#define CRU_CLKGATE_CON(n)	(0x300 + (n) * 4)
+#define PCLK_GPIO2_GATE_SHIFT	3
+#define PCLK_GPIO3_GATE_SHIFT	4
+#define PCLK_GPIO4_GATE_SHIFT	5
+
+/**************************************************
+ * pmu cru reg, offset
+ **************************************************/
+#define CRU_PMU_RSTHOLD_CON(n)		(0x120 + (n) * 4)
+/* reset hold */
+#define CRU_PMU_SGRF_RST_HOLD		BIT_WITH_WMSK(6)
+/* reset hold release */
+#define CRU_PMU_SGRF_RST_RLS		WMSK_BIT(6)
+
+#define CRU_PMU_WDTRST_MSK		(0x1 << 4)
+#define CRU_PMU_WDTRST_EN		0x0
+
+#define CRU_PMU_FIRST_SFTRST_MSK	(0x3 << 2)
+#define CRU_PMU_FIRST_SFTRST_EN		0x0
+
+#define CRU_PMU_CLKGATE_CON(n)		(0x100 + (n) * 4)
+#define PCLK_GPIO0_GATE_SHIFT		3
+#define PCLK_GPIO1_GATE_SHIFT		4
+
+#define CPU_BOOT_ADDR_WMASK	0xffff0000
+#define CPU_BOOT_ADDR_ALIGN	16
+
+#define GRF_IOMUX_2BIT_MASK     0x3
+#define GRF_IOMUX_GPIO          0x0
+
+#define GRF_GPIO4C2_IOMUX_SHIFT         4
+#define GRF_GPIO4C2_IOMUX_PWM           0x1
+#define GRF_GPIO4C6_IOMUX_SHIFT         12
+#define GRF_GPIO4C6_IOMUX_PWM           0x1
+
+#define PWM_CNT(n)			(0x0000 + 0x10 * (n))
+#define PWM_PERIOD_HPR(n)		(0x0004 + 0x10 * (n))
+#define PWM_DUTY_LPR(n)			(0x0008 + 0x10 * (n))
+#define PWM_CTRL(n)			(0x000c + 0x10 * (n))
+
+#define PWM_DISABLE			(0 << 0)
+#define PWM_ENABLE			(1 << 0)
+
+/* grf reg offset */
+#define GRF_USBPHY0_CTRL0	0x4480
+#define GRF_USBPHY0_CTRL2	0x4488
+#define GRF_USBPHY0_CTRL3	0x448c
+#define GRF_USBPHY0_CTRL12	0x44b0
+#define GRF_USBPHY0_CTRL13	0x44b4
+#define GRF_USBPHY0_CTRL15	0x44bc
+#define GRF_USBPHY0_CTRL16	0x44c0
+
+#define GRF_USBPHY1_CTRL0	0x4500
+#define GRF_USBPHY1_CTRL2	0x4508
+#define GRF_USBPHY1_CTRL3	0x450c
+#define GRF_USBPHY1_CTRL12	0x4530
+#define GRF_USBPHY1_CTRL13	0x4534
+#define GRF_USBPHY1_CTRL15	0x453c
+#define GRF_USBPHY1_CTRL16	0x4540
+
+#define GRF_GPIO2A_IOMUX	0xe000
+#define GRF_GPIO2D_HE		0xe18c
+#define GRF_DDRC0_CON0		0xe380
+#define GRF_DDRC0_CON1		0xe384
+#define GRF_DDRC1_CON0		0xe388
+#define GRF_DDRC1_CON1		0xe38c
+#define GRF_SOC_CON_BASE	0xe200
+#define GRF_SOC_CON(n)		(GRF_SOC_CON_BASE + (n) * 4)
+#define GRF_IO_VSEL		0xe640
+
+#define CRU_CLKSEL_CON0		0x0100
+#define CRU_CLKSEL_CON6		0x0118
+#define CRU_SDIO0_CON1		0x058c
+#define PMUCRU_CLKSEL_CON0	0x0080
+#define PMUCRU_CLKGATE_CON2	0x0108
+#define PMUCRU_SOFTRST_CON0	0x0110
+#define PMUCRU_GATEDIS_CON0	0x0130
+#define PMUCRU_SOFTRST_CON(n)	(PMUCRU_SOFTRST_CON0 + (n) * 4)
+
+/*
+ * When the system is reset while running (a system reboot), the CPUs should
+ * boot from the maskrom again, so the pmusgrf reset-hold bits need to be
+ * released.
+ * When the system wakes from deep suspend, parts of the SoC are reset on
+ * wakeup and the boot CPU should restart from pmusram, so the pmusgrf
+ * reset-hold bits need to be held.
+ */
+static inline void pmu_sgrf_rst_hld_release(void)
+{
+	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+		      CRU_PMU_SGRF_RST_RLS);
+}
+
+static inline void pmu_sgrf_rst_hld(void)
+{
+	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
+		      CRU_PMU_SGRF_RST_HOLD);
+}
+
+/* export related and operating SoC APIs */
+void __dead2 soc_global_soft_reset(void);
+void disable_dvfs_plls(void);
+void disable_nodvfs_plls(void);
+void enable_dvfs_plls(void);
+void enable_nodvfs_plls(void);
+void prepare_abpll_for_ddrctrl(void);
+void restore_abpll(void);
+void clk_gate_con_save(void);
+void clk_gate_con_disable(void);
+void clk_gate_con_restore(void);
+void set_pmu_rsthold(void);
+void restore_pmu_rsthold(void);
+#endif /* __SOC_H__ */
diff --git a/plat/rockchip/rk3399/include/addressmap.h b/plat/rockchip/rk3399/include/addressmap.h
new file mode 100644
index 0000000..023050c
--- /dev/null
+++ b/plat/rockchip/rk3399/include/addressmap.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ROCKCHIP_RK3399_INCLUDE_ADDRESSMAP_H__
+#define __ROCKCHIP_RK3399_INCLUDE_ADDRESSMAP_H__
+
+#include <addressmap_shared.h>
+
+/* Registers base address */
+#define MMIO_BASE		0xF8000000
+
+/* Aggregate of all devices in the first GB */
+#define DEV_RNG0_BASE		MMIO_BASE
+#define DEV_RNG0_SIZE		SIZE_M(125)
+
+#endif /* __ROCKCHIP_RK3399_INCLUDE_ADDRESSMAP_H__ */
diff --git a/plat/rockchip/rk3399/include/plat.ld.S b/plat/rockchip/rk3399/include/plat.ld.S
new file mode 100644
index 0000000..c42d9a9
--- /dev/null
+++ b/plat/rockchip/rk3399/include/plat.ld.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __ROCKCHIP_PLAT_LD_S__
+#define __ROCKCHIP_PLAT_LD_S__
+
+MEMORY {
+    SRAM (rwx): ORIGIN = SRAM_BASE, LENGTH = SRAM_SIZE
+    PMUSRAM (rwx): ORIGIN = PMUSRAM_BASE, LENGTH = PMUSRAM_RSIZE
+}
+
+SECTIONS
+{
+	. = SRAM_BASE;
+	ASSERT(. == ALIGN(4096),
+		"SRAM_BASE address is not aligned on a page boundary.")
+
+	/*
+	 * The SRAM space allocation for RK3399
+	 * ----------------
+	 * | m0 code bin
+	 * ----------------
+	 * | sram text
+	 * ----------------
+	 * | sram data
+	 * ----------------
+	 */
+	.incbin_sram : ALIGN(4096) {
+		__sram_incbin_start = .;
+		*(.sram.incbin)
+		 __sram_incbin_real_end = .;
+		. = ALIGN(4096);
+		__sram_incbin_end = .;
+	} >SRAM
+	ASSERT((__sram_incbin_real_end - __sram_incbin_start) <=
+		SRAM_BIN_LIMIT, ".incbin_sram has exceeded its limit")
+
+	.text_sram : ALIGN(4096) {
+		__bl31_sram_text_start = .;
+		*(.sram.text)
+		*(.sram.rodata)
+		__bl31_sram_text_real_end = .;
+		. = ALIGN(4096);
+		__bl31_sram_text_end = .;
+	} >SRAM
+	ASSERT((__bl31_sram_text_real_end - __bl31_sram_text_start) <=
+		SRAM_TEXT_LIMIT, ".text_sram has exceeded its limit")
+
+	.data_sram : ALIGN(4096) {
+		__bl31_sram_data_start = .;
+		*(.sram.data)
+		__bl31_sram_data_real_end = .;
+		. = ALIGN(4096);
+		__bl31_sram_data_end = .;
+	} >SRAM
+	ASSERT((__bl31_sram_data_real_end - __bl31_sram_data_start) <=
+		SRAM_DATA_LIMIT, ".data_sram has exceeded its limit")
+
+	.stack_sram : ALIGN(4096) {
+		__bl31_sram_stack_start = .;
+		. += 4096;
+		__bl31_sram_stack_end = .;
+	} >SRAM
+
+	. = PMUSRAM_BASE;
+
+	/*
+	 * pmu_cpuson_entrypoint must sit at a 64KB-aligned address to be
+	 * usable as the resume entry point, so place it at the very start
+	 * of pmusram.
+	 */
+	.pmusram : {
+		ASSERT(. == ALIGN(64 * 1024),
+			".pmusram.entry must be 64K aligned.");
+		*(.pmusram.entry)
+		__bl31_pmusram_text_start = .;
+		*(.pmusram.text)
+		*(.pmusram.rodata)
+		__bl31_pmusram_text_end = .;
+		__bl31_pmusram_data_start = .;
+		*(.pmusram.data)
+		__bl31_pmusram_data_end = .;
+
+	} >PMUSRAM
+}
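+
+/*
+ * Note (illustrative, not part of the original patch): functions tagged
+ * with __pmusramfunc (for example sram_secure_timer_init() in secure.c)
+ * are assumed to carry a section(".pmusram.text") attribute, so they are
+ * collected by the *(.pmusram.text) input rule above; similarly tagged
+ * data ends up in *(.pmusram.data).
+ */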
+
+#endif /* __ROCKCHIP_PLAT_LD_S__ */
diff --git a/plat/rockchip/rk3399/include/plat_sip_calls.h b/plat/rockchip/rk3399/include/plat_sip_calls.h
new file mode 100644
index 0000000..ead187e
--- /dev/null
+++ b/plat/rockchip/rk3399/include/plat_sip_calls.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+#define RK_PLAT_SIP_NUM_CALLS	0
+
+#endif /* __PLAT_SIP_CALLS_H__ */
diff --git a/plat/rockchip/rk3399/include/platform_def.h b/plat/rockchip/rk3399/include/platform_def.h
new file mode 100644
index 0000000..3df2f7d
--- /dev/null
+++ b/plat/rockchip/rk3399/include/platform_def.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <bl31_param.h>
+#include <common_def.h>
+#include <rk3399_def.h>
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL1)
+#define PLATFORM_STACK_SIZE 0x440
+#elif defined(IMAGE_BL2)
+#define PLATFORM_STACK_SIZE 0x400
+#elif defined(IMAGE_BL31)
+#define PLATFORM_STACK_SIZE 0x800
+#elif defined(IMAGE_BL32)
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR		"Booting Trusted Firmware\n"
+
+#define PLATFORM_MAX_AFFLVL		MPIDR_AFFLVL2
+#define PLATFORM_SYSTEM_COUNT		1
+#define PLATFORM_CLUSTER_COUNT		2
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	2
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER1_CORE_COUNT +	\
+					 PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
+#define PLATFORM_NUM_AFFS		(PLATFORM_SYSTEM_COUNT +	\
+					 PLATFORM_CLUSTER_COUNT +	\
+					 PLATFORM_CORE_COUNT)
+#define PLAT_RK_CLST_TO_CPUID_SHIFT	6
+#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE		1
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE		2
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE		(1ull << 32)
+#define MAX_XLAT_TABLES		20
+#define MAX_MMAP_REGIONS	25
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT	6
+#define CACHE_WRITEBACK_GRANULE	(1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Define GICD and GICC and GICR base
+ */
+#define PLAT_RK_GICD_BASE	BASE_GICD_BASE
+#define PLAT_RK_GICR_BASE	BASE_GICR_BASE
+#define PLAT_RK_GICC_BASE	0
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_RK_G1S_IRQS		RK3399_G1S_IRQS
+#define PLAT_RK_G0_IRQS			RK3399_G0_IRQS
+
+#define PLAT_RK_UART_BASE		UART2_BASE
+#define PLAT_RK_UART_CLOCK		RK3399_UART_CLOCK
+#define PLAT_RK_UART_BAUDRATE		RK3399_BAUDRATE
+
+#define PLAT_RK_CCI_BASE		CCI500_BASE
+
+#define PLAT_RK_PRIMARY_CPU		0x0
+
+#define PSRAM_DO_DDR_RESUME	1
+#define PSRAM_CHECK_WAKEUP_CPU	0
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/rockchip/rk3399/include/shared/addressmap_shared.h b/plat/rockchip/rk3399/include/shared/addressmap_shared.h
new file mode 100644
index 0000000..dc5c8d5
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/addressmap_shared.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ROCKCHIP_RK3399_INCLUDE_SHARED_ADDRESSMAP_SHARED_H__
+#define __ROCKCHIP_RK3399_INCLUDE_SHARED_ADDRESSMAP_SHARED_H__
+
+#define SIZE_K(n)		((n) * 1024)
+#define SIZE_M(n)		((n) * 1024 * 1024)
+#define SRAM_TEXT_LIMIT		(4 * 1024)
+#define SRAM_DATA_LIMIT		(4 * 1024)
+#define SRAM_BIN_LIMIT		(4 * 1024)
+
+/*
+ * These register base addresses are shared between the AP and the M0,
+ * so note and keep the defines below in this common form:
+ */
+#define GIC500_BASE		(MMIO_BASE + 0x06E00000)
+#define UART0_BASE		(MMIO_BASE + 0x07180000)
+#define UART1_BASE		(MMIO_BASE + 0x07190000)
+#define UART2_BASE		(MMIO_BASE + 0x071A0000)
+#define UART3_BASE		(MMIO_BASE + 0x071B0000)
+
+#define PMU_BASE		(MMIO_BASE + 0x07310000)
+#define PMUGRF_BASE		(MMIO_BASE + 0x07320000)
+#define SGRF_BASE		(MMIO_BASE + 0x07330000)
+#define PMUSRAM_BASE		(MMIO_BASE + 0x073B0000)
+#define PWM_BASE		(MMIO_BASE + 0x07420000)
+
+#define CIC_BASE		(MMIO_BASE + 0x07620000)
+#define PD_BUS0_BASE		(MMIO_BASE + 0x07650000)
+#define DCF_BASE		(MMIO_BASE + 0x076A0000)
+#define GPIO0_BASE		(MMIO_BASE + 0x07720000)
+#define GPIO1_BASE		(MMIO_BASE + 0x07730000)
+#define PMUCRU_BASE		(MMIO_BASE + 0x07750000)
+#define CRU_BASE		(MMIO_BASE + 0x07760000)
+#define GRF_BASE		(MMIO_BASE + 0x07770000)
+#define GPIO2_BASE		(MMIO_BASE + 0x07780000)
+#define GPIO3_BASE		(MMIO_BASE + 0x07788000)
+#define GPIO4_BASE		(MMIO_BASE + 0x07790000)
+#define WDT1_BASE		(MMIO_BASE + 0x07840000)
+#define WDT0_BASE		(MMIO_BASE + 0x07848000)
+#define TIMER_BASE		(MMIO_BASE + 0x07850000)
+#define STIME_BASE		(MMIO_BASE + 0x07860000)
+#define SRAM_BASE		(MMIO_BASE + 0x078C0000)
+#define SERVICE_NOC_0_BASE	(MMIO_BASE + 0x07A50000)
+#define DDRC0_BASE		(MMIO_BASE + 0x07A80000)
+#define SERVICE_NOC_1_BASE	(MMIO_BASE + 0x07A84000)
+#define DDRC1_BASE		(MMIO_BASE + 0x07A88000)
+#define SERVICE_NOC_2_BASE	(MMIO_BASE + 0x07A8C000)
+#define SERVICE_NOC_3_BASE	(MMIO_BASE + 0x07A90000)
+#define CCI500_BASE		(MMIO_BASE + 0x07B00000)
+#define COLD_BOOT_BASE		(MMIO_BASE + 0x07FF0000)
+
+/* Registers size */
+#define GIC500_SIZE		SIZE_M(2)
+#define UART0_SIZE		SIZE_K(64)
+#define UART1_SIZE		SIZE_K(64)
+#define UART2_SIZE		SIZE_K(64)
+#define UART3_SIZE		SIZE_K(64)
+#define PMU_SIZE		SIZE_K(64)
+#define PMUGRF_SIZE		SIZE_K(64)
+#define SGRF_SIZE		SIZE_K(64)
+#define PMUSRAM_SIZE		SIZE_K(64)
+#define PMUSRAM_RSIZE		SIZE_K(8)
+#define PWM_SIZE		SIZE_K(64)
+#define CIC_SIZE		SIZE_K(4)
+#define DCF_SIZE		SIZE_K(4)
+#define GPIO0_SIZE		SIZE_K(64)
+#define GPIO1_SIZE		SIZE_K(64)
+#define PMUCRU_SIZE		SIZE_K(64)
+#define CRU_SIZE		SIZE_K(64)
+#define GRF_SIZE		SIZE_K(64)
+#define GPIO2_SIZE		SIZE_K(32)
+#define GPIO3_SIZE		SIZE_K(32)
+#define GPIO4_SIZE		SIZE_K(32)
+#define STIME_SIZE		SIZE_K(64)
+#define SRAM_SIZE		SIZE_K(192)
+#define SERVICE_NOC_0_SIZE	SIZE_K(192)
+#define DDRC0_SIZE		SIZE_K(32)
+#define SERVICE_NOC_1_SIZE	SIZE_K(16)
+#define DDRC1_SIZE		SIZE_K(32)
+#define SERVICE_NOC_2_SIZE	SIZE_K(16)
+#define SERVICE_NOC_3_SIZE	SIZE_K(448)
+#define CCI500_SIZE		SIZE_M(1)
+#define PD_BUS0_SIZE		SIZE_K(448)
+
+/* DDR Registers address */
+#define CTL_BASE(ch)		(DDRC0_BASE + (ch) * 0x8000)
+#define CTL_REG(ch, n)		(CTL_BASE(ch) + (n) * 0x4)
+
+#define PI_OFFSET		0x800
+#define PI_BASE(ch)		(CTL_BASE(ch) + PI_OFFSET)
+#define PI_REG(ch, n)		(PI_BASE(ch) + (n) * 0x4)
+
+#define PHY_OFFSET		0x2000
+#define PHY_BASE(ch)		(CTL_BASE(ch) + PHY_OFFSET)
+#define PHY_REG(ch, n)		(PHY_BASE(ch) + (n) * 0x4)
+
+#define MSCH_BASE(ch)		(SERVICE_NOC_1_BASE + (ch) * 0x8000)
+
+#endif /* __ROCKCHIP_RK3399_INCLUDE_SHARED_ADDRESSMAP_SHARED_H__ */
diff --git a/plat/rockchip/rk3399/include/shared/bl31_param.h b/plat/rockchip/rk3399/include/shared/bl31_param.h
new file mode 100644
index 0000000..85a0f4a
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/bl31_param.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_ROCKCHIP_RK3399_INCLUDE_SHARED_BL31_PARAM_H__
+#define __PLAT_ROCKCHIP_RK3399_INCLUDE_SHARED_BL31_PARAM_H__
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* TF text, ro, rw, Size: 1MB */
+#define TZRAM_BASE		(0x0)
+#define TZRAM_SIZE		(0x100000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted RAM
+ */
+#define BL31_BASE		(TZRAM_BASE + 0x1000)
+#define BL31_LIMIT		(TZRAM_BASE + TZRAM_SIZE)
+
+#endif /*__PLAT_ROCKCHIP_RK3399_INCLUDE_SHARED_BL31_PARAM_H__*/
diff --git a/plat/rockchip/rk3399/include/shared/dram_regs.h b/plat/rockchip/rk3399/include/shared/dram_regs.h
new file mode 100644
index 0000000..afe16bb
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/dram_regs.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DRAM_REGS_H__
+#define __DRAM_REGS_H__
+
+#define CTL_REG_NUM		332
+#define PHY_REG_NUM		959
+#define PI_REG_NUM		200
+
+#define MSCH_ID_COREID		0x0
+#define MSCH_ID_REVISIONID	0x4
+#define MSCH_DEVICECONF		0x8
+#define MSCH_DEVICESIZE		0xc
+#define MSCH_DDRTIMINGA0	0x10
+#define MSCH_DDRTIMINGB0	0x14
+#define MSCH_DDRTIMINGC0	0x18
+#define MSCH_DEVTODEV0		0x1c
+#define MSCH_DDRMODE		0x110
+#define MSCH_AGINGX0		0x1000
+
+#define CIC_CTRL0		0x0
+#define CIC_CTRL1		0x4
+#define CIC_IDLE_TH		0x8
+#define CIC_CG_WAIT_TH		0xc
+#define CIC_STATUS0		0x10
+#define CIC_STATUS1		0x14
+#define CIC_CTRL2		0x18
+#define CIC_CTRL3		0x1c
+#define CIC_CTRL4		0x20
+
+/* DENALI_CTL_00 */
+#define START			1
+
+/* DENALI_CTL_68 */
+#define PWRUP_SREFRESH_EXIT	(1 << 16)
+
+/* DENALI_CTL_274 */
+#define MEM_RST_VALID		1
+
+#define PHY_DRV_ODT_Hi_Z	0x0
+#define PHY_DRV_ODT_240		0x1
+#define PHY_DRV_ODT_120		0x8
+#define PHY_DRV_ODT_80		0x9
+#define PHY_DRV_ODT_60		0xc
+#define PHY_DRV_ODT_48		0xd
+#define PHY_DRV_ODT_40		0xe
+#define PHY_DRV_ODT_34_3	0xf
+
+/*
+ * sys_reg bitfield struct
+ * [31] row_3_4_ch1
+ * [30] row_3_4_ch0
+ * [29:28] chinfo
+ * [27] rank_ch1
+ * [26:25] col_ch1
+ * [24] bk_ch1
+ * [23:22] cs0_row_ch1
+ * [21:20] cs1_row_ch1
+ * [19:18] bw_ch1
+ * [17:16] dbw_ch1
+ * [15:13] ddrtype
+ * [12] channelnum
+ * [11] rank_ch0
+ * [10:9] col_ch0
+ * [8] bk_ch0
+ * [7:6] cs0_row_ch0
+ * [5:4] cs1_row_ch0
+ * [3:2] bw_ch0
+ * [1:0] dbw_ch0
+ */
+#define SYS_REG_ENC_ROW_3_4(n, ch)	((n) << (30 + (ch)))
+#define SYS_REG_DEC_ROW_3_4(n, ch)	(((n) >> (30 + (ch))) & 0x1)
+#define SYS_REG_ENC_CHINFO(ch)		(1 << (28 + (ch)))
+#define SYS_REG_DEC_CHINFO(n, ch)	(((n) >> (28 + (ch))) & 0x1)
+#define SYS_REG_ENC_DDRTYPE(n)		((n) << 13)
+#define SYS_REG_DEC_DDRTYPE(n)		(((n) >> 13) & 0x7)
+#define SYS_REG_ENC_NUM_CH(n)		(((n) - 1) << 12)
+#define SYS_REG_DEC_NUM_CH(n)		(1 + (((n) >> 12) & 0x1))
+#define SYS_REG_ENC_RANK(n, ch)		(((n) - 1) << (11 + (ch) * 16))
+#define SYS_REG_DEC_RANK(n, ch)		(1 + (((n) >> (11 + (ch) * 16)) & 0x1))
+#define SYS_REG_ENC_COL(n, ch)		(((n) - 9) << (9 + (ch) * 16))
+#define SYS_REG_DEC_COL(n, ch)		(9 + (((n) >> (9 + (ch) * 16)) & 0x3))
+#define SYS_REG_ENC_BK(n, ch)		(((n) == 3 ? 0 : 1) << (8 + (ch) * 16))
+#define SYS_REG_DEC_BK(n, ch)		(3 - (((n) >> (8 + (ch) * 16)) & 0x1))
+#define SYS_REG_ENC_CS0_ROW(n, ch)	(((n) - 13) << (6 + (ch) * 16))
+#define SYS_REG_DEC_CS0_ROW(n, ch)	(13 + (((n) >> (6 + (ch) * 16)) & 0x3))
+#define SYS_REG_ENC_CS1_ROW(n, ch)	(((n) - 13) << (4 + (ch) * 16))
+#define SYS_REG_DEC_CS1_ROW(n, ch)	(13 + (((n) >> (4 + (ch) * 16)) & 0x3))
+#define SYS_REG_ENC_BW(n, ch)		((2 >> (n)) << (2 + (ch) * 16))
+#define SYS_REG_DEC_BW(n, ch)		(2 >> (((n) >> (2 + (ch) * 16)) & 0x3))
+#define SYS_REG_ENC_DBW(n, ch)		((2 >> (n)) << (0 + (ch) * 16))
+#define SYS_REG_DEC_DBW(n, ch)		(2 >> (((n) >> (0 + (ch) * 16)) & 0x3))
+#define DDR_STRIDE(n)		mmio_write_32(SGRF_BASE + SGRF_SOC_CON3_7(4), \
+					      (0x1f<<(10+16))|((n)<<10))
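+
+/*
+ * Illustrative example (not part of the original patch): decoding the
+ * channel-0 geometry out of a sys_reg value would look like
+ *
+ *	rank = SYS_REG_DEC_RANK(sys_reg, 0);
+ *	col  = SYS_REG_DEC_COL(sys_reg, 0);
+ *	bk   = SYS_REG_DEC_BK(sys_reg, 0);
+ *
+ * where each DEC macro simply reverses the corresponding ENC macro above.
+ */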
+
+#endif /* __DRAM_REGS_H__ */
diff --git a/plat/rockchip/rk3399/include/shared/m0_param.h b/plat/rockchip/rk3399/include/shared/m0_param.h
new file mode 100644
index 0000000..3edbf89
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/m0_param.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __M0_PARAM_H__
+#define __M0_PARAM_H__
+
+#ifndef __LINKER__
+enum {
+	M0_FUNC_SUSPEND = 0,
+	M0_FUNC_DRAM	= 1,
+};
+#endif /* __LINKER__ */
+
+#define PARAM_ADDR		0xc0
+
+#define PARAM_M0_FUNC		0x00
+#define PARAM_DRAM_FREQ		0x04
+#define PARAM_DPLL_CON0		0x08
+#define PARAM_DPLL_CON1		0x0c
+#define PARAM_DPLL_CON2		0x10
+#define PARAM_DPLL_CON3		0x14
+#define PARAM_DPLL_CON4		0x18
+#define PARAM_DPLL_CON5		0x1c
+#define PARAM_FREQ_SELECT	0x20
+#define PARAM_M0_DONE		0x24
+#define PARAM_M0_SIZE		0x28
+#define M0_DONE_FLAG		0xf59ec39a
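+
+/*
+ * Illustrative note (not part of the original patch): these offsets are
+ * relative to PARAM_ADDR inside the loaded M0 image, so a write from the
+ * AP side would look roughly like
+ *
+ *	mmio_write_32(m0_bin_base + PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
+ *
+ * where m0_bin_base is an assumed name for the M0 binary load address.
+ */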
+
+#endif /*__M0_PARAM_H__*/
diff --git a/plat/rockchip/rk3399/include/shared/misc_regs.h b/plat/rockchip/rk3399/include/shared/misc_regs.h
new file mode 100644
index 0000000..02e001b
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/misc_regs.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ROCKCHIP_RK3399_INCLUDE_SHARED_MISC_REGS_H__
+#define __ROCKCHIP_RK3399_INCLUDE_SHARED_MISC_REGS_H__
+
+/* CRU */
+#define CRU_DPLL_CON0		0x40
+#define CRU_DPLL_CON1		0x44
+#define CRU_DPLL_CON2		0x48
+#define CRU_DPLL_CON3		0x4c
+#define CRU_DPLL_CON4		0x50
+#define CRU_DPLL_CON5		0x54
+
+/* CRU_PLL_CON3 */
+#define PLL_SLOW_MODE		0
+#define PLL_NORMAL_MODE		1
+#define PLL_MODE(n)		((0x3 << (8 + 16)) | ((n) << 8))
+#define PLL_POWER_DOWN(n)	((0x1 << (0 + 16)) | ((n) << 0))
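+
+/*
+ * Illustrative example (not part of the original patch): the M0 firmware
+ * can switch the DPLL to slow mode with something like
+ *
+ *	writel(PLL_MODE(PLL_SLOW_MODE), CRU_BASE + CRU_DPLL_CON3);
+ *
+ * where writel() stands for whatever 32-bit register write helper the M0
+ * code uses; PLL_MODE(n) pairs the mode field with its write mask.
+ */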
+
+/* PMU CRU */
+#define PMU_CRU_GATEDIS_CON0	0x130
+
+#endif /* __ROCKCHIP_RK3399_INCLUDE_SHARED_MISC_REGS_H__ */
diff --git a/plat/rockchip/rk3399/include/shared/pmu_bits.h b/plat/rockchip/rk3399/include/shared/pmu_bits.h
new file mode 100644
index 0000000..7c25761
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/pmu_bits.h
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_BITS_H__
+#define __PMU_BITS_H__
+
+enum pmu_powerdomain_id {
+	PD_CPUL0 = 0,
+	PD_CPUL1,
+	PD_CPUL2,
+	PD_CPUL3,
+	PD_CPUB0,
+	PD_CPUB1,
+	PD_SCUL,
+	PD_SCUB,
+	PD_TCPD0,
+	PD_TCPD1,
+	PD_CCI,
+	PD_PERILP,
+	PD_PERIHP,
+	PD_CENTER,
+	PD_VIO,
+	PD_GPU,
+	PD_VCODEC,
+	PD_VDU,
+	PD_RGA,
+	PD_IEP,
+	PD_VO,
+	PD_ISP0 = 22,
+	PD_ISP1,
+	PD_HDCP,
+	PD_GMAC,
+	PD_EMMC,
+	PD_USB3,
+	PD_EDP,
+	PD_GIC,
+	PD_SD,
+	PD_SDIOAUDIO,
+	PD_END
+};
+
+enum powerdomain_state {
+	PMU_POWER_ON = 0,
+	PMU_POWER_OFF,
+};
+
+enum pmu_bus_id {
+	BUS_ID_GPU = 0,
+	BUS_ID_PERILP,
+	BUS_ID_PERIHP,
+	BUS_ID_VCODEC,
+	BUS_ID_VDU,
+	BUS_ID_RGA,
+	BUS_ID_IEP,
+	BUS_ID_VOPB,
+	BUS_ID_VOPL,
+	BUS_ID_ISP0,
+	BUS_ID_ISP1,
+	BUS_ID_HDCP,
+	BUS_ID_USB3,
+	BUS_ID_PERILPM0,
+	BUS_ID_CENTER,
+	BUS_ID_CCIM0,
+	BUS_ID_CCIM1,
+	BUS_ID_VIO,
+	BUS_ID_MSCH0,
+	BUS_ID_MSCH1,
+	BUS_ID_ALIVE,
+	BUS_ID_PMU,
+	BUS_ID_EDP,
+	BUS_ID_GMAC,
+	BUS_ID_EMMC,
+	BUS_ID_CENTER1,
+	BUS_ID_PMUM0,
+	BUS_ID_GIC,
+	BUS_ID_SD,
+	BUS_ID_SDIOAUDIO,
+};
+
+enum pmu_bus_state {
+	BUS_ACTIVE,
+	BUS_IDLE,
+};
+
+/* pmu_cpuapm bit */
+enum pmu_cores_pm_by_wfi {
+	core_pm_en = 0,
+	core_pm_int_wakeup_en,
+	core_pm_resv,
+	core_pm_sft_wakeup_en
+};
+
+enum pmu_wkup_cfg0 {
+	PMU_GPIO0A_POSE_WKUP_EN = 0,
+	PMU_GPIO0B_POSE_WKUP_EN = 8,
+	PMU_GPIO0C_POSE_WKUP_EN = 16,
+	PMU_GPIO0D_POSE_WKUP_EN = 24,
+};
+
+enum pmu_wkup_cfg1 {
+	PMU_GPIO0A_NEGEDGE_WKUP_EN = 0,
+	PMU_GPIO0B_NEGEDGE_WKUP_EN = 7,
+	PMU_GPIO0C_NEGEDGE_WKUP_EN = 16,
+	PMU_GPIO0D_NEGEDGE_WKUP_EN = 24,
+};
+
+enum pmu_wkup_cfg2 {
+	PMU_GPIO1A_POSE_WKUP_EN = 0,
+	PMU_GPIO1B_POSE_WKUP_EN = 7,
+	PMU_GPIO1C_POSE_WKUP_EN = 16,
+	PMU_GPIO1D_POSE_WKUP_EN = 24,
+};
+
+enum pmu_wkup_cfg3 {
+	PMU_GPIO1A_NEGEDGE_WKUP_EN = 0,
+	PMU_GPIO1B_NEGEDGE_WKUP_EN = 7,
+	PMU_GPIO1C_NEGEDGE_WKUP_EN = 16,
+	PMU_GPIO1D_NEGEDGE_WKUP_EN = 24,
+};
+
+/* pmu_wkup_cfg4 */
+enum pmu_wkup_cfg4 {
+	PMU_CLUSTER_L_WKUP_EN = 0,
+	PMU_CLUSTER_B_WKUP_EN,
+	PMU_GPIO_WKUP_EN,
+	PMU_SDIO_WKUP_EN,
+
+	PMU_SDMMC_WKUP_EN,
+	PMU_TIMER_WKUP_EN = 6,
+	PMU_USBDEV_WKUP_EN,
+
+	PMU_SFT_WKUP_EN,
+	PMU_M0_WDT_WKUP_EN,
+	PMU_TIMEOUT_WKUP_EN,
+	PMU_PWM_WKUP_EN,
+
+	PMU_PCIE_WKUP_EN = 13,
+};
+
+enum pmu_pwrdn_con {
+	PMU_A53_L0_PWRDWN_EN = 0,
+	PMU_A53_L1_PWRDWN_EN,
+	PMU_A53_L2_PWRDWN_EN,
+	PMU_A53_L3_PWRDWN_EN,
+
+	PMU_A72_B0_PWRDWN_EN,
+	PMU_A72_B1_PWRDWN_EN,
+	PMU_SCU_L_PWRDWN_EN,
+	PMU_SCU_B_PWRDWN_EN,
+
+	PMU_TCPD0_PWRDWN_EN,
+	PMU_TCPD1_PWRDWN_EN,
+	PMU_CCI_PWRDWN_EN,
+	PMU_PERILP_PWRDWN_EN,
+
+	PMU_PERIHP_PWRDWN_EN,
+	PMU_CENTER_PWRDWN_EN,
+	PMU_VIO_PWRDWN_EN,
+	PMU_GPU_PWRDWN_EN,
+
+	PMU_VCODEC_PWRDWN_EN,
+	PMU_VDU_PWRDWN_EN,
+	PMU_RGA_PWRDWN_EN,
+	PMU_IEP_PWRDWN_EN,
+
+	PMU_VO_PWRDWN_EN,
+	PMU_ISP0_PWRDWN_EN = 22,
+	PMU_ISP1_PWRDWN_EN,
+
+	PMU_HDCP_PWRDWN_EN,
+	PMU_GMAC_PWRDWN_EN,
+	PMU_EMMC_PWRDWN_EN,
+	PMU_USB3_PWRDWN_EN,
+
+	PMU_EDP_PWRDWN_EN,
+	PMU_GIC_PWRDWN_EN,
+	PMU_SD_PWRDWN_EN,
+	PMU_SDIOAUDIO_PWRDWN_EN,
+};
+
+enum pmu_pwrdn_st {
+	PMU_A53_L0_PWRDWN_ST = 0,
+	PMU_A53_L1_PWRDWN_ST,
+	PMU_A53_L2_PWRDWN_ST,
+	PMU_A53_L3_PWRDWN_ST,
+
+	PMU_A72_B0_PWRDWN_ST,
+	PMU_A72_B1_PWRDWN_ST,
+	PMU_SCU_L_PWRDWN_ST,
+	PMU_SCU_B_PWRDWN_ST,
+
+	PMU_TCPD0_PWRDWN_ST,
+	PMU_TCPD1_PWRDWN_ST,
+	PMU_CCI_PWRDWN_ST,
+	PMU_PERILP_PWRDWN_ST,
+
+	PMU_PERIHP_PWRDWN_ST,
+	PMU_CENTER_PWRDWN_ST,
+	PMU_VIO_PWRDWN_ST,
+	PMU_GPU_PWRDWN_ST,
+
+	PMU_VCODEC_PWRDWN_ST,
+	PMU_VDU_PWRDWN_ST,
+	PMU_RGA_PWRDWN_ST,
+	PMU_IEP_PWRDWN_ST,
+
+	PMU_VO_PWRDWN_ST,
+	PMU_ISP0_PWRDWN_ST = 22,
+	PMU_ISP1_PWRDWN_ST,
+
+	PMU_HDCP_PWRDWN_ST,
+	PMU_GMAC_PWRDWN_ST,
+	PMU_EMMC_PWRDWN_ST,
+	PMU_USB3_PWRDWN_ST,
+
+	PMU_EDP_PWRDWN_ST,
+	PMU_GIC_PWRDWN_ST,
+	PMU_SD_PWRDWN_ST,
+	PMU_SDIOAUDIO_PWRDWN_ST,
+
+};
+
+enum pmu_pll_con {
+	PMU_PLL_PD_CFG = 0,
+	PMU_SFT_PLL_PD = 8,
+};
+
+enum pmu_pwermode_con {
+	PMU_PWR_MODE_EN = 0,
+	PMU_WKUP_RST_EN,
+	PMU_INPUT_CLAMP_EN,
+	PMU_OSC_DIS,
+
+	PMU_ALIVE_USE_LF,
+	PMU_PMU_USE_LF,
+	PMU_POWER_OFF_REQ_CFG,
+	PMU_CHIP_PD_EN,
+
+	PMU_PLL_PD_EN,
+	PMU_CPU0_PD_EN,
+	PMU_L2_FLUSH_EN,
+	PMU_L2_IDLE_EN,
+
+	PMU_SCU_PD_EN,
+	PMU_CCI_PD_EN,
+	PMU_PERILP_PD_EN,
+	PMU_CENTER_PD_EN,
+
+	PMU_SREF0_ENTER_EN,
+	PMU_DDRC0_GATING_EN,
+	PMU_DDRIO0_RET_EN,
+	PMU_DDRIO0_RET_DE_REQ,
+
+	PMU_SREF1_ENTER_EN,
+	PMU_DDRC1_GATING_EN,
+	PMU_DDRIO1_RET_EN,
+	PMU_DDRIO1_RET_DE_REQ,
+
+	PMU_CLK_CENTER_SRC_GATE_EN = 26,
+	PMU_CLK_PERILP_SRC_GATE_EN,
+
+	PMU_CLK_CORE_SRC_GATE_EN,
+	PMU_DDRIO_RET_HW_DE_REQ,
+	PMU_SLP_OUTPUT_CFG,
+	PMU_MAIN_CLUSTER,
+};
+
+enum pmu_sft_con {
+	PMU_WKUP_SFT = 0,
+	PMU_INPUT_CLAMP_CFG,
+	PMU_OSC_DIS_CFG,
+	PMU_PMU_LF_EN_CFG,
+
+	PMU_ALIVE_LF_EN_CFG,
+	PMU_24M_EN_CFG,
+	PMU_DBG_PWRUP_L0_CFG,
+	PMU_WKUP_SFT_M0,
+
+	PMU_DDRCTL0_C_SYSREQ_CFG,
+	PMU_DDR0_IO_RET_CFG,
+
+	PMU_DDRCTL1_C_SYSREQ_CFG = 12,
+	PMU_DDR1_IO_RET_CFG,
+	DBG_PWRUP_B0_CFG = 15,
+
+	DBG_NOPWERDWN_L0_EN,
+	DBG_NOPWERDWN_L1_EN,
+	DBG_NOPWERDWN_L2_EN,
+	DBG_NOPWERDWN_L3_EN,
+
+	DBG_PWRUP_REQ_L_EN = 20,
+	CLUSTER_L_CLK_SRC_GATING_CFG,
+	L2_FLUSH_REQ_CLUSTER_L,
+	ACINACTM_CLUSTER_L_CFG,
+
+	DBG_NO_PWERDWN_B0_EN,
+	DBG_NO_PWERDWN_B1_EN,
+
+	DBG_PWRUP_REQ_B_EN = 28,
+	CLUSTER_B_CLK_SRC_GATING_CFG,
+	L2_FLUSH_REQ_CLUSTER_B,
+	ACINACTM_CLUSTER_B_CFG,
+};
+
+enum pmu_int_con {
+	PMU_PMU_INT_EN = 0,
+	PMU_PWRMD_WKUP_INT_EN,
+	PMU_WKUP_GPIO0_NEG_INT_EN,
+	PMU_WKUP_GPIO0_POS_INT_EN,
+	PMU_WKUP_GPIO1_NEG_INT_EN,
+	PMU_WKUP_GPIO1_POS_INT_EN,
+};
+
+enum pmu_int_st {
+	PMU_PWRMD_WKUP_INT_ST = 1,
+	PMU_WKUP_GPIO0_NEG_INT_ST,
+	PMU_WKUP_GPIO0_POS_INT_ST,
+	PMU_WKUP_GPIO1_NEG_INT_ST,
+	PMU_WKUP_GPIO1_POS_INT_ST,
+};
+
+enum pmu_gpio0_pos_int_con {
+	PMU_GPIO0A_POS_INT_EN = 0,
+	PMU_GPIO0B_POS_INT_EN = 8,
+	PMU_GPIO0C_POS_INT_EN = 16,
+	PMU_GPIO0D_POS_INT_EN = 24,
+};
+
+enum pmu_gpio0_neg_int_con {
+	PMU_GPIO0A_NEG_INT_EN = 0,
+	PMU_GPIO0B_NEG_INT_EN = 8,
+	PMU_GPIO0C_NEG_INT_EN = 16,
+	PMU_GPIO0D_NEG_INT_EN = 24,
+};
+
+enum pmu_gpio1_pos_int_con {
+	PMU_GPIO1A_POS_INT_EN = 0,
+	PMU_GPIO1B_POS_INT_EN = 8,
+	PMU_GPIO1C_POS_INT_EN = 16,
+	PMU_GPIO1D_POS_INT_EN = 24,
+};
+
+enum pmu_gpio1_neg_int_con {
+	PMU_GPIO1A_NEG_INT_EN = 0,
+	PMU_GPIO1B_NEG_INT_EN = 8,
+	PMU_GPIO1C_NEG_INT_EN = 16,
+	PMU_GPIO1D_NEG_INT_EN = 24,
+};
+
+enum pmu_gpio0_pos_int_st {
+	PMU_GPIO0A_POS_INT_ST = 0,
+	PMU_GPIO0B_POS_INT_ST = 8,
+	PMU_GPIO0C_POS_INT_ST = 16,
+	PMU_GPIO0D_POS_INT_ST = 24,
+};
+
+enum pmu_gpio0_neg_int_st {
+	PMU_GPIO0A_NEG_INT_ST = 0,
+	PMU_GPIO0B_NEG_INT_ST = 8,
+	PMU_GPIO0C_NEG_INT_ST = 16,
+	PMU_GPIO0D_NEG_INT_ST = 24,
+};
+
+enum pmu_gpio1_pos_int_st {
+	PMU_GPIO1A_POS_INT_ST = 0,
+	PMU_GPIO1B_POS_INT_ST = 8,
+	PMU_GPIO1C_POS_INT_ST = 16,
+	PMU_GPIO1D_POS_INT_ST = 24,
+};
+
+enum pmu_gpio1_neg_int_st {
+	PMU_GPIO1A_NEG_INT_ST = 0,
+	PMU_GPIO1B_NEG_INT_ST = 8,
+	PMU_GPIO1C_NEG_INT_ST = 16,
+	PMU_GPIO1D_NEG_INT_ST = 24,
+};
+
+/* pmu power down configure register 0x0050 */
+enum pmu_pwrdn_inten {
+	PMU_A53_L0_PWR_SWITCH_INT_EN = 0,
+	PMU_A53_L1_PWR_SWITCH_INT_EN,
+	PMU_A53_L2_PWR_SWITCH_INT_EN,
+	PMU_A53_L3_PWR_SWITCH_INT_EN,
+
+	PMU_A72_B0_PWR_SWITCH_INT_EN,
+	PMU_A72_B1_PWR_SWITCH_INT_EN,
+	PMU_SCU_L_PWR_SWITCH_INT_EN,
+	PMU_SCU_B_PWR_SWITCH_INT_EN,
+
+	PMU_TCPD0_PWR_SWITCH_INT_EN,
+	PMU_TCPD1_PWR_SWITCH_INT_EN,
+	PMU_CCI_PWR_SWITCH_INT_EN,
+	PMU_PERILP_PWR_SWITCH_INT_EN,
+
+	PMU_PERIHP_PWR_SWITCH_INT_EN,
+	PMU_CENTER_PWR_SWITCH_INT_EN,
+	PMU_VIO_PWR_SWITCH_INT_EN,
+	PMU_GPU_PWR_SWITCH_INT_EN,
+
+	PMU_VCODEC_PWR_SWITCH_INT_EN,
+	PMU_VDU_PWR_SWITCH_INT_EN,
+	PMU_RGA_PWR_SWITCH_INT_EN,
+	PMU_IEP_PWR_SWITCH_INT_EN,
+
+	PMU_VO_PWR_SWITCH_INT_EN,
+	PMU_ISP0_PWR_SWITCH_INT_EN = 22,
+	PMU_ISP1_PWR_SWITCH_INT_EN,
+
+	PMU_HDCP_PWR_SWITCH_INT_EN,
+	PMU_GMAC_PWR_SWITCH_INT_EN,
+	PMU_EMMC_PWR_SWITCH_INT_EN,
+	PMU_USB3_PWR_SWITCH_INT_EN,
+
+	PMU_EDP_PWR_SWITCH_INT_EN,
+	PMU_GIC_PWR_SWITCH_INT_EN,
+	PMU_SD_PWR_SWITCH_INT_EN,
+	PMU_SDIOAUDIO_PWR_SWITCH_INT_EN,
+};
+
+enum pmu_wkup_status {
+	PMU_WKUP_BY_CLSTER_L_INT = 0,
+	PMU_WKUP_BY_CLSTER_b_INT,
+	PMU_WKUP_BY_GPIO_INT,
+	PMU_WKUP_BY_SDIO_DET,
+
+	PMU_WKUP_BY_SDMMC_DET,
+	PMU_WKUP_BY_TIMER = 6,
+	PMU_WKUP_BY_USBDEV_DET,
+
+	PMU_WKUP_BY_M0_SFT,
+	PMU_WKUP_BY_M0_WDT_INT,
+	PMU_WKUP_BY_TIMEOUT,
+	PMU_WKUP_BY_PWM,
+
+	PMU_WKUP_BY_PCIE = 13,
+};
+
+enum pmu_bus_clr {
+	PMU_CLR_GPU = 0,
+	PMU_CLR_PERILP,
+	PMU_CLR_PERIHP,
+	PMU_CLR_VCODEC,
+
+	PMU_CLR_VDU,
+	PMU_CLR_RGA,
+	PMU_CLR_IEP,
+	PMU_CLR_VOPB,
+
+	PMU_CLR_VOPL,
+	PMU_CLR_ISP0,
+	PMU_CLR_ISP1,
+	PMU_CLR_HDCP,
+
+	PMU_CLR_USB3,
+	PMU_CLR_PERILPM0,
+	PMU_CLR_CENTER,
+	PMU_CLR_CCIM1,
+
+	PMU_CLR_CCIM0,
+	PMU_CLR_VIO,
+	PMU_CLR_MSCH0,
+	PMU_CLR_MSCH1,
+
+	PMU_CLR_ALIVE,
+	PMU_CLR_PMU,
+	PMU_CLR_EDP,
+	PMU_CLR_GMAC,
+
+	PMU_CLR_EMMC,
+	PMU_CLR_CENTER1,
+	PMU_CLR_PMUM0,
+	PMU_CLR_GIC,
+
+	PMU_CLR_SD,
+	PMU_CLR_SDIOAUDIO,
+};
+
+/* PMU bus idle request register */
+enum pmu_bus_idle_req {
+	PMU_IDLE_REQ_GPU = 0,
+	PMU_IDLE_REQ_PERILP,
+	PMU_IDLE_REQ_PERIHP,
+	PMU_IDLE_REQ_VCODEC,
+
+	PMU_IDLE_REQ_VDU,
+	PMU_IDLE_REQ_RGA,
+	PMU_IDLE_REQ_IEP,
+	PMU_IDLE_REQ_VOPB,
+
+	PMU_IDLE_REQ_VOPL,
+	PMU_IDLE_REQ_ISP0,
+	PMU_IDLE_REQ_ISP1,
+	PMU_IDLE_REQ_HDCP,
+
+	PMU_IDLE_REQ_USB3,
+	PMU_IDLE_REQ_PERILPM0,
+	PMU_IDLE_REQ_CENTER,
+	PMU_IDLE_REQ_CCIM0,
+
+	PMU_IDLE_REQ_CCIM1,
+	PMU_IDLE_REQ_VIO,
+	PMU_IDLE_REQ_MSCH0,
+	PMU_IDLE_REQ_MSCH1,
+
+	PMU_IDLE_REQ_ALIVE,
+	PMU_IDLE_REQ_PMU,
+	PMU_IDLE_REQ_EDP,
+	PMU_IDLE_REQ_GMAC,
+
+	PMU_IDLE_REQ_EMMC,
+	PMU_IDLE_REQ_CENTER1,
+	PMU_IDLE_REQ_PMUM0,
+	PMU_IDLE_REQ_GIC,
+
+	PMU_IDLE_REQ_SD,
+	PMU_IDLE_REQ_SDIOAUDIO,
+};
+
+/* pmu bus idle status register */
+enum pmu_bus_idle_st {
+	PMU_IDLE_ST_GPU = 0,
+	PMU_IDLE_ST_PERILP,
+	PMU_IDLE_ST_PERIHP,
+	PMU_IDLE_ST_VCODEC,
+
+	PMU_IDLE_ST_VDU,
+	PMU_IDLE_ST_RGA,
+	PMU_IDLE_ST_IEP,
+	PMU_IDLE_ST_VOPB,
+
+	PMU_IDLE_ST_VOPL,
+	PMU_IDLE_ST_ISP0,
+	PMU_IDLE_ST_ISP1,
+	PMU_IDLE_ST_HDCP,
+
+	PMU_IDLE_ST_USB3,
+	PMU_IDLE_ST_PERILPM0,
+	PMU_IDLE_ST_CENTER,
+	PMU_IDLE_ST_CCIM0,
+
+	PMU_IDLE_ST_CCIM1,
+	PMU_IDLE_ST_VIO,
+	PMU_IDLE_ST_MSCH0,
+	PMU_IDLE_ST_MSCH1,
+
+	PMU_IDLE_ST_ALIVE,
+	PMU_IDLE_ST_PMU,
+	PMU_IDLE_ST_EDP,
+	PMU_IDLE_ST_GMAC,
+
+	PMU_IDLE_ST_EMMC,
+	PMU_IDLE_ST_CENTER1,
+	PMU_IDLE_ST_PMUM0,
+	PMU_IDLE_ST_GIC,
+
+	PMU_IDLE_ST_SD,
+	PMU_IDLE_ST_SDIOAUDIO,
+};
+
+enum pmu_bus_idle_ack {
+	PMU_IDLE_ACK_GPU = 0,
+	PMU_IDLE_ACK_PERILP,
+	PMU_IDLE_ACK_PERIHP,
+	PMU_IDLE_ACK_VCODEC,
+
+	PMU_IDLE_ACK_VDU,
+	PMU_IDLE_ACK_RGA,
+	PMU_IDLE_ACK_IEP,
+	PMU_IDLE_ACK_VOPB,
+
+	PMU_IDLE_ACK_VOPL,
+	PMU_IDLE_ACK_ISP0,
+	PMU_IDLE_ACK_ISP1,
+	PMU_IDLE_ACK_HDCP,
+
+	PMU_IDLE_ACK_USB3,
+	PMU_IDLE_ACK_PERILPM0,
+	PMU_IDLE_ACK_CENTER,
+	PMU_IDLE_ACK_CCIM0,
+
+	PMU_IDLE_ACK_CCIM1,
+	PMU_IDLE_ACK_VIO,
+	PMU_IDLE_ACK_MSCH0,
+	PMU_IDLE_ACK_MSCH1,
+
+	PMU_IDLE_ACK_ALIVE,
+	PMU_IDLE_ACK_PMU,
+	PMU_IDLE_ACK_EDP,
+	PMU_IDLE_ACK_GMAC,
+
+	PMU_IDLE_ACK_EMMC,
+	PMU_IDLE_ACK_CENTER1,
+	PMU_IDLE_ACK_PMUM0,
+	PMU_IDLE_ACK_GIC,
+
+	PMU_IDLE_ACK_SD,
+	PMU_IDLE_ACK_SDIOAUDIO,
+};
+
+enum pmu_cci500_con {
+	PMU_PREQ_CCI500_CFG_SW = 0,
+	PMU_CLR_PREQ_CCI500_HW,
+	PMU_PSTATE_CCI500_0,
+	PMU_PSTATE_CCI500_1,
+
+	PMU_PSTATE_CCI500_2,
+	PMU_QREQ_CCI500_CFG_SW,
+	PMU_CLR_QREQ_CCI500_HW,
+	PMU_QGATING_CCI500_CFG,
+
+	PMU_PREQ_CCI500_CFG_SW_WMSK = 16,
+	PMU_CLR_PREQ_CCI500_HW_WMSK,
+	PMU_PSTATE_CCI500_0_WMSK,
+	PMU_PSTATE_CCI500_1_WMSK,
+
+	PMU_PSTATE_CCI500_2_WMSK,
+	PMU_QREQ_CCI500_CFG_SW_WMSK,
+	PMU_CLR_QREQ_CCI500_HW_WMSK,
+	PMU_QGATING_CCI500_CFG_WMSK,
+};
+
+enum pmu_adb400_con {
+	PMU_PWRDWN_REQ_CXCS_SW = 0,
+	PMU_PWRDWN_REQ_CORE_L_SW,
+	PMU_PWRDWN_REQ_CORE_L_2GIC_SW,
+	PMU_PWRDWN_REQ_GIC2_CORE_L_SW,
+
+	PMU_PWRDWN_REQ_CORE_B_SW,
+	PMU_PWRDWN_REQ_CORE_B_2GIC_SW,
+	PMU_PWRDWN_REQ_GIC2_CORE_B_SW,
+
+	PMU_CLR_CXCS_HW = 8,
+	PMU_CLR_CORE_L_HW,
+	PMU_CLR_CORE_L_2GIC_HW,
+	PMU_CLR_GIC2_CORE_L_HW,
+
+	PMU_CLR_CORE_B_HW,
+	PMU_CLR_CORE_B_2GIC_HW,
+	PMU_CLR_GIC2_CORE_B_HW,
+
+	PMU_PWRDWN_REQ_CXCS_SW_WMSK = 16,
+	PMU_PWRDWN_REQ_CORE_L_SW_WMSK,
+	PMU_PWRDWN_REQ_CORE_L_2GIC_SW_WMSK,
+	PMU_PWRDWN_REQ_GIC2_CORE_L_SW_WMSK,
+
+	PMU_PWRDWN_REQ_CORE_B_SW_WMSK,
+	PMU_PWRDWN_REQ_CORE_B_2GIC_SW_WMSK,
+	PMU_PWRDWN_REQ_GIC2_CORE_B_SW_WMSK,
+
+	PMU_CLR_CXCS_HW_WMSK = 24,
+	PMU_CLR_CORE_L_HW_WMSK,
+	PMU_CLR_CORE_L_2GIC_HW_WMSK,
+	PMU_CLR_GIC2_CORE_L_HW_WMSK,
+
+	PMU_CLR_CORE_B_HW_WMSK,
+	PMU_CLR_CORE_B_2GIC_HW_WMSK,
+	PMU_CLR_GIC2_CORE_B_HW_WMSK,
+};
+
+enum pmu_adb400_st {
+	PMU_PWRDWN_REQ_CXCS_SW_ST = 0,
+	PMU_PWRDWN_REQ_CORE_L_SW_ST,
+	PMU_PWRDWN_REQ_CORE_L_2GIC_SW_ST,
+	PMU_PWRDWN_REQ_GIC2_CORE_L_SW_ST,
+
+	PMU_PWRDWN_REQ_CORE_B_SW_ST,
+	PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST,
+	PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST,
+
+	PMU_CLR_CXCS_HW_ST = 8,
+	PMU_CLR_CORE_L_HW_ST,
+	PMU_CLR_CORE_L_2GIC_HW_ST,
+	PMU_CLR_GIC2_CORE_L_HW_ST,
+
+	PMU_CLR_CORE_B_HW_ST,
+	PMU_CLR_CORE_B_2GIC_HW_ST,
+	PMU_CLR_GIC2_CORE_B_HW_ST,
+};
+
+enum pmu_pwrdn_con1 {
+	PMU_VD_SCU_L_PWRDN_EN = 0,
+	PMU_VD_SCU_B_PWRDN_EN,
+	PMU_VD_CENTER_PWRDN_EN,
+};
+
+enum pmu_core_pwr_st {
+	L2_FLUSHDONE_CLUSTER_L = 0,
+	STANDBY_BY_WFIL2_CLUSTER_L,
+
+	L2_FLUSHDONE_CLUSTER_B = 10,
+	STANDBY_BY_WFIL2_CLUSTER_B,
+};
+
+#endif /* __PMU_BITS_H__ */
diff --git a/plat/rockchip/rk3399/include/shared/pmu_regs.h b/plat/rockchip/rk3399/include/shared/pmu_regs.h
new file mode 100644
index 0000000..41c6a24
--- /dev/null
+++ b/plat/rockchip/rk3399/include/shared/pmu_regs.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMU_REGS_H__
+#define __PMU_REGS_H__
+
+#define PMU_WKUP_CFG0		0x00
+#define PMU_WKUP_CFG1		0x04
+#define PMU_WKUP_CFG2		0x08
+#define PMU_WKUP_CFG3		0x0c
+#define PMU_WKUP_CFG4		0x10
+#define PMU_PWRDN_CON		0x14
+#define PMU_PWRDN_ST		0x18
+#define PMU_PLL_CON		0x1c
+#define PMU_PWRMODE_CON		0x20
+#define PMU_SFT_CON		0x24
+#define PMU_INT_CON		0x28
+#define PMU_INT_ST		0x2c
+#define PMU_GPIO0_POS_INT_CON	0x30
+#define PMU_GPIO0_NEG_INT_CON	0x34
+#define PMU_GPIO1_POS_INT_CON	0x38
+#define PMU_GPIO1_NEG_INT_CON	0x3c
+#define PMU_GPIO0_POS_INT_ST	0x40
+#define PMU_GPIO0_NEG_INT_ST	0x44
+#define PMU_GPIO1_POS_INT_ST	0x48
+#define PMU_GPIO1_NEG_INT_ST	0x4c
+#define PMU_PWRDN_INTEN		0x50
+#define PMU_PWRDN_STATUS	0x54
+#define PMU_WAKEUP_STATUS	0x58
+#define PMU_BUS_CLR		0x5c
+#define PMU_BUS_IDLE_REQ	0x60
+#define PMU_BUS_IDLE_ST		0x64
+#define PMU_BUS_IDLE_ACK	0x68
+#define PMU_CCI500_CON		0x6c
+#define PMU_ADB400_CON		0x70
+#define PMU_ADB400_ST		0x74
+#define PMU_POWER_ST		0x78
+#define PMU_CORE_PWR_ST		0x7c
+#define PMU_OSC_CNT		0x80
+#define PMU_PLLLOCK_CNT		0x84
+#define PMU_PLLRST_CNT		0x88
+#define PMU_STABLE_CNT		0x8c
+#define PMU_DDRIO_PWRON_CNT	0x90
+#define PMU_WAKEUP_RST_CLR_CNT	0x94
+#define PMU_DDR_SREF_ST		0x98
+#define PMU_SCU_L_PWRDN_CNT	0x9c
+#define PMU_SCU_L_PWRUP_CNT	0xa0
+#define PMU_SCU_B_PWRDN_CNT	0xa4
+#define PMU_SCU_B_PWRUP_CNT	0xa8
+#define PMU_GPU_PWRDN_CNT	0xac
+#define PMU_GPU_PWRUP_CNT	0xb0
+#define PMU_CENTER_PWRDN_CNT	0xb4
+#define PMU_CENTER_PWRUP_CNT	0xb8
+#define PMU_TIMEOUT_CNT		0xbc
+#define PMU_CPU0APM_CON		0xc0
+#define PMU_CPU1APM_CON		0xc4
+#define PMU_CPU2APM_CON		0xc8
+#define PMU_CPU3APM_CON		0xcc
+#define PMU_CPU0BPM_CON		0xd0
+#define PMU_CPU1BPM_CON		0xd4
+#define PMU_NOC_AUTO_ENA	0xd8
+#define PMU_PWRDN_CON1		0xdc
+
+#define PMUGRF_GPIO0A_IOMUX	0x00
+#define PMUGRF_GPIO1A_IOMUX	0x10
+#define PMUGRF_GPIO1C_IOMUX	0x18
+
+#define PMUGRF_GPIO0A6_IOMUX_SHIFT      12
+#define PMUGRF_GPIO0A6_IOMUX_PWM        0x1
+#define PMUGRF_GPIO1C3_IOMUX_SHIFT      6
+#define PMUGRF_GPIO1C3_IOMUX_PWM        0x1
+
+#define CPU_AXI_QOS_ID_COREID		0x00
+#define CPU_AXI_QOS_REVISIONID		0x04
+#define CPU_AXI_QOS_PRIORITY		0x08
+#define CPU_AXI_QOS_MODE		0x0c
+#define CPU_AXI_QOS_BANDWIDTH		0x10
+#define CPU_AXI_QOS_SATURATION		0x14
+#define CPU_AXI_QOS_EXTCONTROL		0x18
+#define CPU_AXI_QOS_NUM_REGS		0x07
+
+#define CPU_AXI_CCI_M0_QOS_BASE		0xffa50000
+#define CPU_AXI_CCI_M1_QOS_BASE		0xffad8000
+#define CPU_AXI_DMAC0_QOS_BASE		0xffa64200
+#define CPU_AXI_DMAC1_QOS_BASE		0xffa64280
+#define CPU_AXI_DCF_QOS_BASE		0xffa64180
+#define CPU_AXI_CRYPTO0_QOS_BASE	0xffa64100
+#define CPU_AXI_CRYPTO1_QOS_BASE	0xffa64080
+#define CPU_AXI_PMU_CM0_QOS_BASE	0xffa68000
+#define CPU_AXI_PERI_CM1_QOS_BASE	0xffa64300
+#define CPU_AXI_GIC_QOS_BASE		0xffa78000
+#define CPU_AXI_SDIO_QOS_BASE		0xffa76000
+#define CPU_AXI_SDMMC_QOS_BASE		0xffa74000
+#define CPU_AXI_EMMC_QOS_BASE		0xffa58000
+#define CPU_AXI_GMAC_QOS_BASE		0xffa5c000
+#define CPU_AXI_USB_OTG0_QOS_BASE	0xffa70000
+#define CPU_AXI_USB_OTG1_QOS_BASE	0xffa70080
+#define CPU_AXI_USB_HOST0_QOS_BASE	0xffa60100
+#define CPU_AXI_USB_HOST1_QOS_BASE	0xffa60180
+#define CPU_AXI_GPU_QOS_BASE		0xffae0000
+#define CPU_AXI_VIDEO_M0_QOS_BASE	0xffab8000
+#define CPU_AXI_VIDEO_M1_R_QOS_BASE	0xffac0000
+#define CPU_AXI_VIDEO_M1_W_QOS_BASE	0xffac0080
+#define CPU_AXI_RGA_R_QOS_BASE		0xffab0000
+#define CPU_AXI_RGA_W_QOS_BASE		0xffab0080
+#define CPU_AXI_IEP_QOS_BASE		0xffa98000
+#define CPU_AXI_VOP_BIG_R_QOS_BASE	0xffac8000
+#define CPU_AXI_VOP_BIG_W_QOS_BASE	0xffac8080
+#define CPU_AXI_VOP_LITTLE_QOS_BASE	0xffad0000
+#define CPU_AXI_ISP0_M0_QOS_BASE	0xffaa0000
+#define CPU_AXI_ISP0_M1_QOS_BASE	0xffaa0080
+#define CPU_AXI_ISP1_M0_QOS_BASE	0xffaa8000
+#define CPU_AXI_ISP1_M1_QOS_BASE	0xffaa8080
+#define CPU_AXI_HDCP_QOS_BASE		0xffa90000
+#define CPU_AXI_PERIHP_NSP_QOS_BASE	0xffad8080
+#define CPU_AXI_PERILP_NSP_QOS_BASE	0xffad8180
+#define CPU_AXI_PERILPSLV_NSP_QOS_BASE	0xffad8100
+
+#define GRF_GPIO2A_IOMUX	0xe000
+#define GRF_GPIO2B_IOMUX	0xe004
+#define GRF_GPIO2C_IOMUX	0xe008
+#define GRF_GPIO2D_IOMUX	0xe00c
+#define GRF_GPIO3A_IOMUX	0xe010
+#define GRF_GPIO3B_IOMUX	0xe014
+#define GRF_GPIO3C_IOMUX	0xe018
+#define GRF_GPIO3D_IOMUX	0xe01c
+#define GRF_GPIO4A_IOMUX	0xe020
+#define GRF_GPIO4B_IOMUX	0xe024
+#define GRF_GPIO4C_IOMUX	0xe028
+#define GRF_GPIO4D_IOMUX	0xe02c
+
+#define GRF_GPIO2A_P		0xe040
+#define GRF_GPIO2B_P		0xe044
+#define GRF_GPIO2C_P		0xe048
+#define GRF_GPIO2D_P		0xe04C
+#define GRF_GPIO3A_P		0xe050
+#define GRF_GPIO3B_P		0xe054
+#define GRF_GPIO3C_P		0xe058
+#define GRF_GPIO3D_P		0xe05C
+#define GRF_GPIO4A_P		0xe060
+#define GRF_GPIO4B_P		0xe064
+#define GRF_GPIO4C_P		0xe068
+#define GRF_GPIO4D_P		0xe06C
+
+#endif /* __PMU_REGS_H__ */
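Note on the definitions above: each CPU_AXI_*_QOS_BASE is the start of one NoC QoS register block laid out with the CPU_AXI_QOS_* offsets, and CPU_AXI_QOS_NUM_REGS (7) spans the registers from ID_COREID through EXTCONTROL. A minimal sketch of how a suspend path could save and restore one such block using these definitions, assuming TF-A's mmio.h accessors (the helper names here are invented for illustration, not part of this patch):

#include <mmio.h>
#include <stdint.h>

static void qos_block_save(uintptr_t base, uint32_t *buf)
{
	unsigned int i;

	/* Registers are packed at 4-byte strides from the block base. */
	for (i = 0; i < CPU_AXI_QOS_NUM_REGS; i++)
		buf[i] = mmio_read_32(base + 4 * i);
}

static void qos_block_restore(uintptr_t base, const uint32_t *buf)
{
	unsigned int i;

	for (i = 0; i < CPU_AXI_QOS_NUM_REGS; i++)
		mmio_write_32(base + 4 * i, buf[i]);
}

For example, qos_block_save(CPU_AXI_GPU_QOS_BASE, gpu_qos_regs) could be called before powering down the GPU domain, and the matching restore call after power-up.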
diff --git a/plat/rockchip/rk3399/plat_sip_calls.c b/plat/rockchip/rk3399/plat_sip_calls.c
new file mode 100644
index 0000000..074dc19
--- /dev/null
+++ b/plat/rockchip/rk3399/plat_sip_calls.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cdn_dp.h>
+#include <debug.h>
+#include <dfs.h>
+#include <mmio.h>
+#include <plat_sip_calls.h>
+#include <rockchip_sip_svc.h>
+#include <runtime_svc.h>
+
+#define RK_SIP_DDR_CFG		0x82000008
+#define DRAM_INIT		0x00
+#define DRAM_SET_RATE		0x01
+#define DRAM_ROUND_RATE		0x02
+#define DRAM_SET_AT_SR		0x03
+#define DRAM_GET_BW		0x04
+#define DRAM_GET_RATE		0x05
+#define DRAM_CLR_IRQ		0x06
+#define DRAM_SET_PARAM		0x07
+#define DRAM_SET_ODT_PD		0x08
+
+#define RK_SIP_HDCP_CONTROL	0x82000009
+#define RK_SIP_HDCP_KEY_DATA64	0xC200000A
+
+uint32_t ddr_smc_handler(uint64_t arg0, uint64_t arg1,
+			 uint64_t id, uint64_t arg2)
+{
+	switch (id) {
+	case DRAM_SET_RATE:
+		return ddr_set_rate((uint32_t)arg0);
+	case DRAM_ROUND_RATE:
+		return ddr_round_rate((uint32_t)arg0);
+	case DRAM_GET_RATE:
+		return ddr_get_rate();
+	case DRAM_SET_ODT_PD:
+		dram_set_odt_pd(arg0, arg1, arg2);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+uint64_t rockchip_plat_sip_handler(uint32_t smc_fid,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	uint64_t x5, x6;
+
+	switch (smc_fid) {
+	case RK_SIP_DDR_CFG:
+		SMC_RET1(handle, ddr_smc_handler(x1, x2, x3, x4));
+	case RK_SIP_HDCP_CONTROL:
+		SMC_RET1(handle, dp_hdcp_ctrl(x1));
+	case RK_SIP_HDCP_KEY_DATA64:
+		x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);
+		x6 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X6);
+		SMC_RET1(handle, dp_hdcp_store_key(x1, x2, x3, x4, x5, x6));
+	default:
+		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
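For context, the handler above is reached through a standard SMC from the non-secure world: x0 carries the SiP function ID and, for RK_SIP_DDR_CFG, x3 selects the DRAM sub-command handled by ddr_smc_handler(). A minimal sketch of a non-secure caller, assuming Linux's arm_smccc_smc() helper (the wrapper function itself is invented for illustration and is not part of this patch):

#include <linux/arm-smccc.h>

#define RK_SIP_DDR_CFG	0x82000008
#define DRAM_GET_RATE	0x05

/* Hypothetical caller: ask BL31 for the current DDR frequency. */
static unsigned long rk3399_ddr_get_rate(void)
{
	struct arm_smccc_res res;

	/* x1/x2 are unused here; x3 carries the DRAM sub-command ID. */
	arm_smccc_smc(RK_SIP_DDR_CFG, 0, 0, DRAM_GET_RATE, 0, 0, 0, 0, &res);

	return res.a0;	/* value returned via SMC_RET1() in BL31 */
}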
diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk
new file mode 100644
index 0000000..85cca1b
--- /dev/null
+++ b/plat/rockchip/rk3399/platform.mk
@@ -0,0 +1,94 @@
+#
+# Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RK_PLAT		:=	plat/rockchip
+RK_PLAT_SOC	:=	${RK_PLAT}/${PLAT}
+RK_PLAT_COMMON	:=	${RK_PLAT}/common
+
+PLAT_INCLUDES		:=	-I${RK_PLAT_COMMON}/			\
+				-I${RK_PLAT_COMMON}/include/		\
+				-I${RK_PLAT_COMMON}/pmusram		\
+				-I${RK_PLAT_COMMON}/drivers/pmu/	\
+				-I${RK_PLAT_SOC}/			\
+				-I${RK_PLAT_SOC}/drivers/pmu/		\
+				-I${RK_PLAT_SOC}/drivers/pwm/		\
+				-I${RK_PLAT_SOC}/drivers/secure/	\
+				-I${RK_PLAT_SOC}/drivers/soc/		\
+				-I${RK_PLAT_SOC}/drivers/dram/		\
+				-I${RK_PLAT_SOC}/drivers/dp/		\
+				-I${RK_PLAT_SOC}/include/		\
+				-I${RK_PLAT_SOC}/include/shared/	\
+
+RK_GIC_SOURCES		:=	drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v3/gicv3_main.c		\
+				drivers/arm/gic/v3/gicv3_helpers.c	\
+				plat/common/plat_gicv3.c		\
+				${RK_PLAT}/common/rockchip_gicv3.c
+
+PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c	\
+				lib/xlat_tables/aarch64/xlat_tables.c	\
+				plat/common/plat_psci_common.c
+
+BL31_SOURCES	+=	${RK_GIC_SOURCES}				\
+			drivers/arm/cci/cci.c				\
+			drivers/console/aarch64/console.S		\
+			drivers/ti/uart/aarch64/16550_console.S		\
+			drivers/delay_timer/delay_timer.c		\
+			drivers/delay_timer/generic_delay_timer.c	\
+			drivers/gpio/gpio.c				\
+			lib/cpus/aarch64/cortex_a53.S			\
+			lib/cpus/aarch64/cortex_a72.S			\
+			${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
+			${RK_PLAT_COMMON}/bl31_plat_setup.c		\
+			${RK_PLAT_COMMON}/params_setup.c		\
+			${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S	\
+			${RK_PLAT_COMMON}/plat_pm.c			\
+			${RK_PLAT_COMMON}/plat_topology.c		\
+			${RK_PLAT_COMMON}/aarch64/platform_common.c	\
+			${RK_PLAT_COMMON}/rockchip_sip_svc.c		\
+			${RK_PLAT_SOC}/plat_sip_calls.c			\
+			${RK_PLAT_SOC}/drivers/dp/cdn_dp.c		\
+			${RK_PLAT_SOC}/drivers/gpio/rk3399_gpio.c	\
+			${RK_PLAT_SOC}/drivers/pmu/pmu.c		\
+			${RK_PLAT_SOC}/drivers/pmu/pmu_fw.c		\
+			${RK_PLAT_SOC}/drivers/pmu/m0_ctl.c		\
+			${RK_PLAT_SOC}/drivers/pwm/pwm.c		\
+			${RK_PLAT_SOC}/drivers/secure/secure.c		\
+			${RK_PLAT_SOC}/drivers/soc/soc.c		\
+			${RK_PLAT_SOC}/drivers/dram/dfs.c		\
+			${RK_PLAT_SOC}/drivers/dram/dram.c		\
+			${RK_PLAT_SOC}/drivers/dram/dram_spec_timing.c	\
+			${RK_PLAT_SOC}/drivers/dram/suspend.c
+
+ENABLE_PLAT_COMPAT	:=	0
+
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_855873	:=	1
+
+# M0 source build
+PLAT_M0                 :=      ${PLAT}m0
+BUILD_M0		:=	${BUILD_PLAT}/m0
+
+RK3399M0FW=${BUILD_M0}/${PLAT_M0}.bin
+$(eval $(call add_define,RK3399M0FW))
+
+HDCPFW=${RK_PLAT_SOC}/drivers/dp/hdcp.bin
+$(eval $(call add_define,HDCPFW))
+
+# CCACHE_EXTRAFILES is needed because ccache doesn't handle .incbin
+export CCACHE_EXTRAFILES
+${BUILD_PLAT}/bl31/pmu_fw.o: CCACHE_EXTRAFILES=$(RK3399M0FW)
+${RK_PLAT_SOC}/drivers/pmu/pmu_fw.c: $(RK3399M0FW)
+
+${BUILD_PLAT}/bl31/cdn_dp.o: CCACHE_EXTRAFILES=$(HDCPFW)
+${RK_PLAT_SOC}/drivers/dp/cdn_dp.c: $(HDCPFW)
+
+$(eval $(call MAKE_PREREQ_DIR,${BUILD_M0},))
+.PHONY: $(RK3399M0FW)
+$(RK3399M0FW): | ${BUILD_M0}
+	$(MAKE) -C ${RK_PLAT_SOC}/drivers/m0 BUILD=$(abspath ${BUILD_PLAT}/m0)
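The CCACHE_EXTRAFILES export and the explicit prerequisites above exist because the M0 and HDCP binaries are pulled into pmu_fw.c and cdn_dp.c at compile time rather than linked as separate objects, typically through an .incbin directive that ccache cannot see as a dependency. The pattern is roughly as follows (illustrative only; the path and section are placeholders, not the actual contents of pmu_fw.c):

/* Illustrative .incbin embedding pattern; not the actual pmu_fw.c source. */
__asm__ (
	"	.section .rodata\n"
	"	.align	3\n"
	"	.incbin	\"build/rk3399/release/m0/rk3399m0.bin\"\n"	/* placeholder path */
);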
diff --git a/plat/rockchip/rk3399/rk3399_def.h b/plat/rockchip/rk3399/rk3399_def.h
new file mode 100644
index 0000000..9fc0809
--- /dev/null
+++ b/plat/rockchip/rk3399/rk3399_def.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_DEF_H__
+#define __PLAT_DEF_H__
+
+#include <addressmap.h>
+
+#define RK3399_PRIMARY_CPU		0x0
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define RK_BL31_PLAT_PARAM_VAL		0x0f1e2d3c4b5a6978ULL
+
+/**************************************************************************
+ * UART related constants
+ **************************************************************************/
+#define RK3399_BAUDRATE			115200
+#define RK3399_UART_CLOCK		24000000
+
+/******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS	24000000
+
+/* Base rockchip_platform compatible GIC memory map */
+#define BASE_GICD_BASE			(GIC500_BASE)
+#define BASE_GICR_BASE			(GIC500_BASE + SIZE_M(1))
+
+/*****************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX	0
+#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX	1
+
+/******************************************************************************
+ * sgi, ppi
+ ******************************************************************************/
+#define ARM_IRQ_SEC_PHY_TIMER		29
+
+#define ARM_IRQ_SEC_SGI_0		8
+#define ARM_IRQ_SEC_SGI_1		9
+#define ARM_IRQ_SEC_SGI_2		10
+#define ARM_IRQ_SEC_SGI_3		11
+#define ARM_IRQ_SEC_SGI_4		12
+#define ARM_IRQ_SEC_SGI_5		13
+#define ARM_IRQ_SEC_SGI_6		14
+#define ARM_IRQ_SEC_SGI_7		15
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define RK3399_G1S_IRQS			ARM_IRQ_SEC_PHY_TIMER
+#define RK3399_G0_IRQS			ARM_IRQ_SEC_SGI_6
+
+#endif /* __PLAT_DEF_H__ */
diff --git a/plat/socionext/uniphier/include/plat_macros.S b/plat/socionext/uniphier/include/plat_macros.S
new file mode 100644
index 0000000..6de4dde
--- /dev/null
+++ b/plat/socionext/uniphier/include/plat_macros.S
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+	.macro plat_crash_print_regs
+	.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/socionext/uniphier/include/platform_def.h b/plat/socionext/uniphier/include/platform_def.h
new file mode 100644
index 0000000..b5dc16a
--- /dev/null
+++ b/plat/socionext/uniphier/include/platform_def.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <common_def.h>
+#include <tbbr/tbbr_img_def.h>
+
+#define PLATFORM_STACK_SIZE		0x1000
+
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << (CACHE_WRITEBACK_SHIFT))
+
+/* topology */
+#define UNIPHIER_MAX_CPUS_PER_CLUSTER	4
+#define UNIPHIER_CLUSTER_COUNT		2
+
+#define PLATFORM_CORE_COUNT		\
+	((UNIPHIER_MAX_CPUS_PER_CLUSTER) * (UNIPHIER_CLUSTER_COUNT))
+
+#define PLAT_MAX_PWR_LVL		1
+
+#define PLAT_MAX_OFF_STATE		2
+#define PLAT_MAX_RET_STATE		1
+
+#define UNIPHIER_SEC_DRAM_BASE		0x81000000
+#define UNIPHIER_SEC_DRAM_LIMIT		0x82000000
+#define UNIPHIER_SEC_DRAM_SIZE		((UNIPHIER_SEC_DRAM_LIMIT) - \
+					 (UNIPHIER_SEC_DRAM_BASE))
+
+#define BL1_RO_BASE			0x80000000
+#define BL1_RO_LIMIT			0x80018000
+#define BL1_RW_LIMIT			(UNIPHIER_SEC_DRAM_LIMIT)
+#define BL1_RW_BASE			((BL1_RW_LIMIT) - 0x00040000)
+
+#define BL2_LIMIT			(BL1_RW_BASE)
+#define BL2_BASE			((BL2_LIMIT) - 0x00040000)
+
+#define BL31_BASE			(UNIPHIER_SEC_DRAM_BASE)
+#define BL31_LIMIT			((BL31_BASE) + 0x00080000)
+
+#define BL32_BASE			(BL31_LIMIT)
+#define BL32_LIMIT			(UNIPHIER_SEC_DRAM_LIMIT)
+
+#define UNIPHIER_BLOCK_BUF_SIZE		0x00400000
+#define UNIPHIER_BLOCK_BUF_BASE		((BL2_LIMIT) - \
+					 (UNIPHIER_BLOCK_BUF_SIZE))
+
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
+
+#define PLAT_XLAT_TABLES_DYNAMIC	1
+#define MAX_XLAT_TABLES			7
+#define MAX_MMAP_REGIONS		6
+
+#define MAX_IO_HANDLES			2
+#define MAX_IO_DEVICES			2
+#define MAX_IO_BLOCK_DEVICES		1
+
+#define TSP_SEC_MEM_BASE		(BL32_BASE)
+#define TSP_SEC_MEM_SIZE		((BL32_LIMIT) - (BL32_BASE))
+#define TSP_PROGBITS_LIMIT		(UNIPHIER_BLOCK_BUF_BASE)
+#define TSP_IRQ_SEC_PHY_TIMER		29
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/socionext/uniphier/platform.mk b/plat/socionext/uniphier/platform.mk
new file mode 100644
index 0000000..e6f510e
--- /dev/null
+++ b/plat/socionext/uniphier/platform.mk
@@ -0,0 +1,119 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+override COLD_BOOT_SINGLE_CPU	:= 1
+override ENABLE_PLAT_COMPAT	:= 0
+override ERROR_DEPRECATED	:= 1
+override LOAD_IMAGE_V2		:= 1
+override USE_COHERENT_MEM	:= 1
+override USE_TBBR_DEFS		:= 1
+
+# Cortex-A53 revision r0p4-51rel0
+# needed for LD20, unneeded for LD11, PXs3 (no ACE)
+ERRATA_A53_855873		:= 1
+
+FIP_ALIGN			:= 512
+
+ifeq ($(NEED_BL32),yes)
+$(eval $(call add_define,UNIPHIER_LOAD_BL32))
+endif
+
+# Libraries
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_PATH		:=	plat/socionext/uniphier
+PLAT_INCLUDES		:=	-I$(PLAT_PATH)/include
+
+# IO sources for BL1, BL2
+IO_SOURCES		:=	drivers/io/io_block.c			\
+				drivers/io/io_fip.c			\
+				drivers/io/io_memmap.c			\
+				drivers/io/io_storage.c			\
+				$(PLAT_PATH)/uniphier_boot_device.c	\
+				$(PLAT_PATH)/uniphier_emmc.c		\
+				$(PLAT_PATH)/uniphier_io_storage.c	\
+				$(PLAT_PATH)/uniphier_nand.c		\
+				$(PLAT_PATH)/uniphier_usb.c
+
+# common sources for BL1, BL2, BL31
+PLAT_BL_COMMON_SOURCES	+=	drivers/console/aarch64/console.S	\
+				$(PLAT_PATH)/uniphier_console.S		\
+				$(PLAT_PATH)/uniphier_helpers.S		\
+				$(PLAT_PATH)/uniphier_soc_info.c	\
+				$(PLAT_PATH)/uniphier_xlat_setup.c	\
+				${XLAT_TABLES_LIB_SRCS}
+
+BL1_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a72.S		\
+				$(PLAT_PATH)/uniphier_bl1_helpers.S	\
+				$(PLAT_PATH)/uniphier_bl1_setup.c	\
+				$(IO_SOURCES)
+
+BL2_SOURCES		+=	common/desc_image_load.c		\
+				$(PLAT_PATH)/uniphier_bl2_setup.c	\
+				$(PLAT_PATH)/uniphier_image_desc.c	\
+				$(PLAT_PATH)/uniphier_scp.c		\
+				$(IO_SOURCES)
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c			\
+				drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v3/gicv3_helpers.c	\
+				drivers/arm/gic/v3/gicv3_main.c		\
+				lib/cpus/aarch64/cortex_a53.S		\
+				lib/cpus/aarch64/cortex_a72.S		\
+				plat/common/plat_gicv3.c		\
+				plat/common/plat_psci_common.c		\
+				$(PLAT_PATH)/uniphier_bl31_setup.c	\
+				$(PLAT_PATH)/uniphier_cci.c		\
+				$(PLAT_PATH)/uniphier_gicv3.c		\
+				$(PLAT_PATH)/uniphier_psci.c		\
+				$(PLAT_PATH)/uniphier_scp.c		\
+				$(PLAT_PATH)/uniphier_smp.S		\
+				$(PLAT_PATH)/uniphier_syscnt.c		\
+				$(PLAT_PATH)/uniphier_topology.c
+
+ifeq (${TRUSTED_BOARD_BOOT},1)
+
+include drivers/auth/mbedtls/mbedtls_crypto.mk
+include drivers/auth/mbedtls/mbedtls_x509.mk
+
+PLAT_INCLUDES		+=	-Iinclude/common/tbbr
+
+TBB_SOURCES		:=	drivers/auth/auth_mod.c			\
+				drivers/auth/crypto_mod.c		\
+				drivers/auth/img_parser_mod.c		\
+				drivers/auth/tbbr/tbbr_cot.c		\
+				plat/common/tbbr/plat_tbbr.c		\
+				$(PLAT_PATH)/uniphier_rotpk.S		\
+				$(PLAT_PATH)/uniphier_tbbr.c
+
+BL1_SOURCES		+=	$(TBB_SOURCES)
+BL2_SOURCES		+=	$(TBB_SOURCES)
+
+ROT_KEY			= $(BUILD_PLAT)/rot_key.pem
+ROTPK_HASH		= $(BUILD_PLAT)/rotpk_sha256.bin
+
+$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+$(BUILD_PLAT)/bl1/uniphier_rotpk.o: $(ROTPK_HASH)
+$(BUILD_PLAT)/bl2/uniphier_rotpk.o: $(ROTPK_HASH)
+
+certificates: $(ROT_KEY)
+$(ROT_KEY):
+	@echo "  OPENSSL $@"
+	$(Q)openssl genrsa 2048 > $@ 2>/dev/null
+
+$(ROTPK_HASH): $(ROT_KEY)
+	@echo "  OPENSSL $@"
+	$(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+	openssl dgst -sha256 -binary > $@ 2>/dev/null
+
+endif
+
+.PHONY: bl1_gzip
+bl1_gzip: $(BUILD_PLAT)/bl1.bin.gzip
+%.gzip: %
+	@echo " GZIP     $@"
+	$(Q)(cat $< | gzip -n -f -9 > $@) || (rm -f $@ || false)
diff --git a/plat/socionext/uniphier/tsp/tsp-uniphier.mk b/plat/socionext/uniphier/tsp/tsp-uniphier.mk
new file mode 100644
index 0000000..54d4f51
--- /dev/null
+++ b/plat/socionext/uniphier/tsp/tsp-uniphier.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BL32_SOURCES		+=	plat/common/plat_gicv3.c		\
+				plat/common/aarch64/platform_mp_stack.S \
+				$(PLAT_PATH)/tsp/uniphier_tsp_setup.c
diff --git a/plat/socionext/uniphier/tsp/uniphier_tsp_setup.c b/plat/socionext/uniphier/tsp/uniphier_tsp_setup.c
new file mode 100644
index 0000000..7df17d3
--- /dev/null
+++ b/plat/socionext/uniphier/tsp/uniphier_tsp_setup.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <xlat_mmu_helpers.h>
+
+#include "../uniphier.h"
+
+#define BL32_END		(unsigned long)(&__BL32_END__)
+#define BL32_SIZE		((BL32_END) - (BL32_BASE))
+
+void tsp_early_platform_setup(void)
+{
+	uniphier_console_setup();
+}
+
+void tsp_platform_setup(void)
+{
+}
+
+void tsp_plat_arch_setup(void)
+{
+	uniphier_mmap_setup(BL32_BASE, BL32_SIZE, NULL);
+	enable_mmu_el1(0);
+}
diff --git a/plat/socionext/uniphier/uniphier.h b/plat/socionext/uniphier/uniphier.h
new file mode 100644
index 0000000..95b29b8
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UNIPHIER_H__
+#define __UNIPHIER_H__
+
+#include <stdint.h>
+#include <types.h>
+
+unsigned int uniphier_get_soc_type(void);
+unsigned int uniphier_get_soc_model(void);
+unsigned int uniphier_get_soc_revision(void);
+unsigned int uniphier_get_soc_id(void);
+
+#define UNIPHIER_SOC_LD11		0
+#define UNIPHIER_SOC_LD20		1
+#define UNIPHIER_SOC_PXS3		2
+#define UNIPHIER_SOC_UNKNOWN		0xffffffff
+
+unsigned int uniphier_get_boot_device(unsigned int soc);
+
+#define UNIPHIER_BOOT_DEVICE_EMMC	0
+#define UNIPHIER_BOOT_DEVICE_NAND	1
+#define UNIPHIER_BOOT_DEVICE_NOR	2
+#define UNIPHIER_BOOT_DEVICE_USB	3
+#define UNIPHIER_BOOT_DEVICE_RSV	0xffffffff
+
+unsigned int uniphier_get_boot_master(unsigned int soc);
+
+#define UNIPHIER_BOOT_MASTER_THIS	0
+#define UNIPHIER_BOOT_MASTER_SCP	1
+#define UNIPHIER_BOOT_MASTER_EXT	2
+
+void uniphier_console_setup(void);
+
+int uniphier_emmc_init(uintptr_t *block_dev_spec);
+int uniphier_nand_init(uintptr_t *block_dev_spec);
+int uniphier_usb_init(unsigned int soc, uintptr_t *block_dev_spec);
+
+int uniphier_io_setup(unsigned int soc);
+int uniphier_check_image(unsigned int image_id);
+void uniphier_image_descs_fixup(void);
+
+int uniphier_scp_is_running(void);
+void uniphier_scp_start(void);
+void uniphier_scp_open_com(void);
+void uniphier_scp_system_off(void);
+void uniphier_scp_system_reset(void);
+
+struct mmap_region;
+void uniphier_mmap_setup(uintptr_t total_base, size_t total_size,
+			 const struct mmap_region *mmap);
+
+void uniphier_cci_init(unsigned int soc);
+void uniphier_cci_enable(void);
+void uniphier_cci_disable(void);
+
+void uniphier_gic_driver_init(unsigned int soc);
+void uniphier_gic_init(void);
+void uniphier_gic_cpuif_enable(void);
+void uniphier_gic_cpuif_disable(void);
+void uniphier_gic_pcpu_init(void);
+
+unsigned int uniphier_calc_core_pos(u_register_t mpidr);
+
+#define UNIPHIER_NS_DRAM_BASE		0x84000000
+#define UNIPHIER_NS_DRAM_SIZE		0x01000000
+
+#define UNIPHIER_BL33_BASE		(UNIPHIER_NS_DRAM_BASE)
+#define UNIPHIER_BL33_MAX_SIZE		0x00100000
+
+#define UNIPHIER_SCP_BASE		((UNIPHIER_BL33_BASE) + \
+					 (UNIPHIER_BL33_MAX_SIZE))
+#define UNIPHIER_SCP_MAX_SIZE		0x00020000
+
+#endif /* __UNIPHIER_H__ */
diff --git a/plat/socionext/uniphier/uniphier_bl1_helpers.S b/plat/socionext/uniphier/uniphier_bl1_helpers.S
new file mode 100644
index 0000000..5818565
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_bl1_helpers.S
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	plat_get_my_entrypoint
+
+func plat_get_my_entrypoint
+	mov	x0, #0
+	ret
+endfunc plat_get_my_entrypoint
diff --git a/plat/socionext/uniphier/uniphier_bl1_setup.c b/plat/socionext/uniphier/uniphier_bl1_setup.c
new file mode 100644
index 0000000..da7740a
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_bl1_setup.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_mmu_helpers.h>
+
+#include "uniphier.h"
+
+void bl1_early_platform_setup(void)
+{
+	uniphier_console_setup();
+}
+
+void bl1_plat_arch_setup(void)
+{
+	uniphier_mmap_setup(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE,
+			    NULL);
+	enable_mmu_el3(0);
+}
+
+void bl1_platform_setup(void)
+{
+	unsigned int soc;
+	int ret;
+
+	soc = uniphier_get_soc_id();
+	if (soc == UNIPHIER_SOC_UNKNOWN) {
+		ERROR("unsupported SoC\n");
+		plat_error_handler(-ENOTSUP);
+	}
+
+	ret = uniphier_io_setup(soc);
+	if (ret) {
+		ERROR("failed to setup io devices\n");
+		plat_error_handler(ret);
+	}
+}
+
+static meminfo_t uniphier_tzram_layout = {
+	.total_base = UNIPHIER_SEC_DRAM_BASE,
+	.total_size = UNIPHIER_SEC_DRAM_SIZE,
+};
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+	return &uniphier_tzram_layout;
+}
diff --git a/plat/socionext/uniphier/uniphier_bl2_setup.c b/plat/socionext/uniphier/uniphier_bl2_setup.c
new file mode 100644
index 0000000..b83e700
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_bl2_setup.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <debug.h>
+#include <desc_image_load.h>
+#include <errno.h>
+#include <io/io_storage.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_tables_v2.h>
+
+#include "uniphier.h"
+
+static meminfo_t uniphier_bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+static int uniphier_bl2_kick_scp;
+
+void bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+	uniphier_bl2_tzram_layout = *mem_layout;
+
+	uniphier_console_setup();
+}
+
+static const struct mmap_region uniphier_bl2_mmap[] = {
+	/* for SCP, BL33 */
+	MAP_REGION_FLAT(UNIPHIER_NS_DRAM_BASE, UNIPHIER_NS_DRAM_SIZE,
+			MT_MEMORY | MT_RW | MT_NS),
+	{ .size = 0 },
+};
+
+void bl2_plat_arch_setup(void)
+{
+	unsigned int soc;
+	int skip_scp = 0;
+	int ret;
+
+	uniphier_mmap_setup(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE,
+			    uniphier_bl2_mmap);
+	enable_mmu_el1(0);
+
+	soc = uniphier_get_soc_id();
+	if (soc == UNIPHIER_SOC_UNKNOWN) {
+		ERROR("unsupported SoC\n");
+		plat_error_handler(-ENOTSUP);
+	}
+
+	ret = uniphier_io_setup(soc);
+	if (ret) {
+		ERROR("failed to setup io devices\n");
+		plat_error_handler(ret);
+	}
+
+	switch (uniphier_get_boot_master(soc)) {
+	case UNIPHIER_BOOT_MASTER_THIS:
+		INFO("Booting from this SoC\n");
+		skip_scp = 1;
+		break;
+	case UNIPHIER_BOOT_MASTER_SCP:
+		INFO("Booting from on-chip SCP\n");
+		if (uniphier_scp_is_running()) {
+			INFO("SCP is already running. SCP_BL2 load will be skipped.\n");
+			skip_scp = 1;
+		}
+
+		/*
+		 * The SCP must be kicked every time, even if it is already
+		 * running, because it polls for this event after the backend
+		 * is rebooted.
+		 */
+		uniphier_bl2_kick_scp = 1;
+		break;
+	case UNIPHIER_BOOT_MASTER_EXT:
+		INFO("Booting from external SCP\n");
+		skip_scp = 1;
+		break;
+	default:
+		plat_error_handler(-ENOTSUP);
+	}
+
+	if (!skip_scp) {
+		ret = uniphier_check_image(SCP_BL2_IMAGE_ID);
+		if (ret) {
+			WARN("SCP_BL2 image not found. SCP_BL2 load will be skipped.\n");
+			WARN("You must setup SCP by other means.\n");
+			skip_scp = 1;
+			uniphier_bl2_kick_scp = 0;
+		}
+	}
+
+	if (skip_scp)
+		uniphier_image_descs_fixup();
+}
+
+void bl2_platform_setup(void)
+{
+}
+
+void plat_flush_next_bl_params(void)
+{
+	flush_bl_params_desc();
+}
+
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+	return get_bl_load_info_from_mem_params_desc();
+}
+
+bl_params_t *plat_get_next_bl_params(void)
+{
+	return get_next_bl_params_from_mem_params_desc();
+}
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+	if (image_id == SCP_BL2_IMAGE_ID && uniphier_bl2_kick_scp)
+		uniphier_scp_start();
+
+	return 0;
+}
diff --git a/plat/socionext/uniphier/uniphier_bl31_setup.c b/plat/socionext/uniphier/uniphier_bl31_setup.c
new file mode 100644
index 0000000..d9c87bd
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_bl31_setup.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_mmu_helpers.h>
+
+#include "uniphier.h"
+
+#define BL31_END		(unsigned long)(&__BL31_END__)
+#define BL31_SIZE		((BL31_END) - (BL31_BASE))
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	assert(sec_state_is_valid(type));
+	return type == NON_SECURE ? &bl33_image_ep_info : &bl32_image_ep_info;
+}
+
+void bl31_early_platform_setup(void *from_bl2, void *plat_params_from_bl2)
+{
+	bl_params_node_t *bl_params = ((bl_params_t *)from_bl2)->head;
+
+	uniphier_console_setup();
+
+	while (bl_params) {
+		if (bl_params->image_id == BL32_IMAGE_ID)
+			bl32_image_ep_info = *bl_params->ep_info;
+
+		if (bl_params->image_id == BL33_IMAGE_ID)
+			bl33_image_ep_info = *bl_params->ep_info;
+
+		bl_params = bl_params->next_params_info;
+	}
+
+	if (bl33_image_ep_info.pc == 0)
+		panic();
+}
+
+#define UNIPHIER_SYS_CNTCTL_BASE	0x60E00000
+
+void bl31_platform_setup(void)
+{
+	unsigned int soc;
+
+	soc = uniphier_get_soc_id();
+	if (soc == UNIPHIER_SOC_UNKNOWN) {
+		ERROR("unsupported SoC\n");
+		plat_error_handler(-ENOTSUP);
+	}
+
+	uniphier_cci_init(soc);
+	uniphier_cci_enable();
+
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	uniphier_gic_driver_init(soc);
+	uniphier_gic_init();
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(UNIPHIER_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+}
+
+void bl31_plat_arch_setup(void)
+{
+	uniphier_mmap_setup(BL31_BASE, BL31_SIZE, NULL);
+	enable_mmu_el3(0);
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	/* Suppress any runtime logs unless DEBUG is defined */
+#if !DEBUG
+	console_uninit();
+#endif
+}
diff --git a/plat/socionext/uniphier/uniphier_boot_device.c b/plat/socionext/uniphier/uniphier_boot_device.c
new file mode 100644
index 0000000..78ca8ef
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_boot_device.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <mmio.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_PINMON0		0x5f900100
+#define UNIPHIER_PINMON2		0x5f900108
+
+static int uniphier_ld11_is_usb_boot(uint32_t pinmon)
+{
+	return !!(~pinmon & 0x00000080);
+}
+
+static int uniphier_ld20_is_usb_boot(uint32_t pinmon)
+{
+	return !!(~pinmon & 0x00000780);
+}
+
+static int uniphier_pxs3_is_usb_boot(uint32_t pinmon)
+{
+	uint32_t pinmon2 = mmio_read_32(UNIPHIER_PINMON2);
+
+	return !!(pinmon2 & BIT(31));
+}
+
+static const unsigned int uniphier_ld11_boot_device_table[] = {
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_NOR,
+};
+
+static unsigned int uniphier_ld11_get_boot_device(uint32_t pinmon)
+{
+	unsigned int boot_sel = (pinmon >> 1) & 0x1f;
+
+	assert(boot_sel < ARRAY_SIZE(uniphier_ld11_boot_device_table));
+
+	return uniphier_ld11_boot_device_table[boot_sel];
+}
+
+static const unsigned int uniphier_pxs3_boot_device_table[] = {
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_EMMC,
+	UNIPHIER_BOOT_DEVICE_NAND,
+	UNIPHIER_BOOT_DEVICE_NAND,
+};
+
+static unsigned int uniphier_pxs3_get_boot_device(uint32_t pinmon)
+{
+	unsigned int boot_sel = (pinmon >> 1) & 0xf;
+
+	assert(boot_sel < ARRAY_SIZE(uniphier_pxs3_boot_device_table));
+
+	return uniphier_pxs3_boot_device_table[boot_sel];
+}
+
+struct uniphier_boot_device_info {
+	int (*is_usb_boot)(uint32_t pinmon);
+	unsigned int (*get_boot_device)(uint32_t pinmon);
+};
+
+static const struct uniphier_boot_device_info uniphier_boot_device_info[] = {
+	[UNIPHIER_SOC_LD11] = {
+		.is_usb_boot = uniphier_ld11_is_usb_boot,
+		.get_boot_device = uniphier_ld11_get_boot_device,
+	},
+	[UNIPHIER_SOC_LD20] = {
+		.is_usb_boot = uniphier_ld20_is_usb_boot,
+		.get_boot_device = uniphier_ld11_get_boot_device,
+	},
+	[UNIPHIER_SOC_PXS3] = {
+		.is_usb_boot = uniphier_pxs3_is_usb_boot,
+		.get_boot_device = uniphier_pxs3_get_boot_device,
+	},
+};
+
+unsigned int uniphier_get_boot_device(unsigned int soc)
+{
+	const struct uniphier_boot_device_info *info;
+	uint32_t pinmon;
+
+	assert(soc < ARRAY_SIZE(uniphier_boot_device_info));
+	info = &uniphier_boot_device_info[soc];
+
+	pinmon = mmio_read_32(UNIPHIER_PINMON0);
+
+	if (!(pinmon & BIT(29)))
+		return UNIPHIER_BOOT_DEVICE_NOR;
+
+	if (info->is_usb_boot(pinmon))
+		return UNIPHIER_BOOT_DEVICE_USB;
+
+	return info->get_boot_device(pinmon);
+}
+
+static const bool uniphier_have_onchip_scp[] = {
+	[UNIPHIER_SOC_LD11] = true,
+	[UNIPHIER_SOC_LD20] = true,
+	[UNIPHIER_SOC_PXS3] = false,
+};
+
+unsigned int uniphier_get_boot_master(unsigned int soc)
+{
+	assert(soc < ARRAY_SIZE(uniphier_have_onchip_scp));
+
+	if (uniphier_have_onchip_scp[soc]) {
+		if (mmio_read_32(UNIPHIER_PINMON0) & BIT(27))
+			return UNIPHIER_BOOT_MASTER_THIS;
+		else
+			return UNIPHIER_BOOT_MASTER_SCP;
+	} else {
+		return UNIPHIER_BOOT_MASTER_EXT;
+	}
+}
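As a worked example of the decode above (the register value is hypothetical, chosen only to illustrate the lookup): if PINMON0 on an LD11 reads 0x200000b1, bit 29 is set, so the NOR fallback is skipped; bit 7 is set, so uniphier_ld11_is_usb_boot() returns 0; and boot_sel = (0xb1 >> 1) & 0x1f = 24, which indexes the first UNIPHIER_BOOT_DEVICE_EMMC entry in uniphier_ld11_boot_device_table, so the board boots from eMMC.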
diff --git a/plat/socionext/uniphier/uniphier_cci.c b/plat/socionext/uniphier/uniphier_cci.c
new file mode 100644
index 0000000..30f4b47
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_cci.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <cci.h>
+#include <stddef.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_CCI500_BASE	0x5FD00000
+
+static const int uniphier_cci_map[] = {0, 1};
+
+static void __uniphier_cci_init(void)
+{
+	cci_init(UNIPHIER_CCI500_BASE, uniphier_cci_map,
+		 ARRAY_SIZE(uniphier_cci_map));
+}
+
+static void __uniphier_cci_enable(void)
+{
+	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+static void __uniphier_cci_disable(void)
+{
+	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+struct uniphier_cci_ops {
+	void (*init)(void);
+	void (*enable)(void);
+	void (*disable)(void);
+};
+
+static const struct uniphier_cci_ops uniphier_cci_ops_table[] = {
+	[UNIPHIER_SOC_LD11] = {
+		.init = NULL,
+		.enable = NULL,
+		.disable = NULL,
+	},
+	[UNIPHIER_SOC_LD20] = {
+		.init = __uniphier_cci_init,
+		.enable = __uniphier_cci_enable,
+		.disable = __uniphier_cci_disable,
+	},
+	[UNIPHIER_SOC_PXS3] = {
+		.init = NULL,
+		.enable = NULL,
+		.disable = NULL,
+	},
+};
+
+static struct uniphier_cci_ops uniphier_cci_ops;
+
+void uniphier_cci_init(unsigned int soc)
+{
+	uniphier_cci_ops = uniphier_cci_ops_table[soc];
+	flush_dcache_range((uint64_t)&uniphier_cci_ops,
+			   sizeof(uniphier_cci_ops));
+
+	if (uniphier_cci_ops.init)
+		uniphier_cci_ops.init();
+}
+
+void uniphier_cci_enable(void)
+{
+	if (uniphier_cci_ops.enable)
+		uniphier_cci_ops.enable();
+}
+
+void uniphier_cci_disable(void)
+{
+	if (uniphier_cci_ops.disable)
+		uniphier_cci_ops.disable();
+}
diff --git a/plat/socionext/uniphier/uniphier_console.S b/plat/socionext/uniphier/uniphier_console.S
new file mode 100644
index 0000000..03aff48
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_console.S
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+#define UNIPHIER_UART_BASE	0x54006800
+#define UNIPHIER_UART_END	0x54006c00
+#define UNIPHIER_UART_OFFSET	0x100
+
+#define UNIPHIER_UART_RX	0x00	/* In:  Receive buffer */
+#define UNIPHIER_UART_TX	0x00	/* Out: Transmit buffer */
+
+#define UNIPHIER_UART_FCR	0x0c	/* Char/FIFO Control Register */
+#define   UNIPHIER_UART_FCR_ENABLE_FIFO	0x01	/* Enable the FIFO */
+
+#define UNIPHIER_UART_LCR_MCR	0x10	/* Line/Modem Control Register */
+#define   UNIPHIER_UART_LCR_WLEN8	0x03	/* Wordlength: 8 bits */
+#define UNIPHIER_UART_LSR	0x14	/* Line Status Register */
+#define   UNIPHIER_UART_LSR_TEMT_BIT	6	/* Transmitter empty */
+#define   UNIPHIER_UART_LSR_THRE_BIT	5	/* Transmit-hold-register empty */
+#define   UNIPHIER_UART_LSR_DR_BIT	0	/* Receiver data ready */
+#define UNIPHIER_UART_DLR	0x24	/* Divisor Latch Register */
+
+/*
+ * Uncomment for debug
+ */
+/* #define UNIPHIER_UART_INIT_DIVISOR */
+#define UNIPHIER_UART_DEFAULT_BASE	(UNIPHIER_UART_BASE)
+#define UNIPHIER_UART_CLK_RATE		58820000
+#define UNIPHIER_UART_DEFAULT_BAUDRATE	115200
+
+/*
+ * In: x0 - console base address
+ *     w1 - uart clock in Hz
+ *     w2 - baud rate
+ * Out: return 1 on success, or 0 on error
+ */
+	.globl	console_core_init
+func console_core_init
+	cbz	x0, 1f
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	cbz	w1, 1f
+	cbz	w2, 1f
+	/* divisor = uart_clock / (16 * baud_rate) */
+	udiv	w2, w1, w2
+	lsr	w2, w2, #4
+#endif
+	/* Make sure the transmitter is empty before the divisor set/change */
+0:	ldr	w1, [x0, #UNIPHIER_UART_LSR]
+	tbz	w1, #UNIPHIER_UART_LSR_TEMT_BIT, 0b
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	str	w2, [x0, #UNIPHIER_UART_DLR]
+#endif
+	mov	w2, #UNIPHIER_UART_FCR_ENABLE_FIFO
+	str	w2, [x0, #UNIPHIER_UART_FCR]
+
+	mov	w2, #(UNIPHIER_UART_LCR_WLEN8 << 8)
+	str	w2, [x0, #UNIPHIER_UART_LCR_MCR]
+
+	mov	w0, #1
+	ret
+1:	mov	w0, #0
+	ret
+endfunc console_core_init
+
+/*
+ * In: w0 - character to be printed
+ *     x1 - console base address
+ * Out: return the character written, or -1 on error
+ * Clobber: x2
+ */
+	.globl	console_core_putc
+func console_core_putc
+	/* Error out if the console is not initialized */
+	cbz	x1, 2f
+
+	/* Wait until the transmitter FIFO is empty */
+0:	ldr	w2, [x1, #UNIPHIER_UART_LSR]
+	tbz	w2, #UNIPHIER_UART_LSR_THRE_BIT, 0b
+
+	mov	w2, w0
+
+1:	str	w2, [x1, #UNIPHIER_UART_TX]
+
+	cmp	w2, #'\n'
+	b.ne	3f
+	mov	w2, #'\r'	/* Append '\r' to '\n' */
+	b	1b
+2:	mov	w0, #-1
+3:	ret
+endfunc console_core_putc
+
+/*
+ * In: x0 - console base address
+ * Out: return the character read
+ * Clobber: x1
+ */
+	.globl	console_core_getc
+func console_core_getc
+	/* Error out if the console is not initialized */
+	cbz	x0, 1f
+
+	/* Wait while the receiver FIFO is empty */
+0:	ldr	w1, [x0, #UNIPHIER_UART_LSR]
+	tbz	w1, #UNIPHIER_UART_LSR_DR_BIT, 0b
+
+	ldr	w0, [x0, #UNIPHIER_UART_RX]
+
+	ret
+1:	mov	w0, #-1
+	ret
+endfunc console_core_getc
+
+/*
+ * In:  x0 - console base address
+ * Out: return 0, or -1 on error
+ * Clobber: x1
+ */
+	.global console_core_flush
+func console_core_flush
+	/* Error out if the console is not initialized */
+	cbz	x0, 1f
+
+	/* Wait until the transmitter is empty */
+0:	ldr	w1, [x0, #UNIPHIER_UART_LSR]
+	tbz	w1, #UNIPHIER_UART_LSR_TEMT_BIT, 0b
+
+	mov	w0, #0
+	ret
+1:	mov	w0, #-1
+	ret
+endfunc console_core_flush
+
+/* find initialized UART port */
+.macro uniphier_console_get_base base, tmpx, tmpw
+	ldr	\base, =UNIPHIER_UART_BASE
+0000:	ldr	\tmpw, [\base, #UNIPHIER_UART_DLR]
+	mvn	\tmpw, \tmpw
+	uxth	\tmpw, \tmpw
+	cbnz	\tmpw, 0001f
+	add	\base, \base, #UNIPHIER_UART_OFFSET
+	ldr	\tmpx, =UNIPHIER_UART_END
+	cmp	\base, \tmpx
+	b.lo	0000b
+	mov	\base, #0
+0001:
+.endm
+
+/*
+ * int plat_crash_console_init(void)
+ * Clobber: x0-x2
+ */
+	.globl	plat_crash_console_init
+func plat_crash_console_init
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	ldr	x0, =UNIPHIER_UART_DEFAULT_BASE
+	ldr	x1, =UNIPHIER_UART_CLK_RATE
+	ldr	x2, =UNIPHIER_UART_DEFAULT_BAUDRATE
+	b	console_core_init
+#else
+	ret
+#endif
+endfunc plat_crash_console_init
+
+/*
+ * int plat_crash_console_putc(int c)
+ * Clobber: x1, x2
+ */
+	.globl	plat_crash_console_putc
+func plat_crash_console_putc
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	ldr	x1, =UNIPHIER_UART_DEFAULT_BASE
+#else
+	uniphier_console_get_base x1, x2, w2
+#endif
+	b	console_core_putc
+endfunc plat_crash_console_putc
+
+/*
+ * int plat_crash_console_flush(void)
+ * Clobber: x0, x1
+ */
+	.global plat_crash_console_flush
+func plat_crash_console_flush
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	ldr	x0, =UNIPHIER_UART_DEFAULT_BASE
+#else
+	uniphier_console_get_base x0, x1, w1
+#endif
+	b	console_core_flush
+endfunc plat_crash_console_flush
+
+/*
+ * void uniphier_console_setup(void)
+ * Clobber: x0-x2
+ */
+	.globl	uniphier_console_setup
+func uniphier_console_setup
+#ifdef UNIPHIER_UART_INIT_DIVISOR
+	ldr	x0, =UNIPHIER_UART_DEFAULT_BASE
+	ldr	w1, =UNIPHIER_UART_CLK_RATE
+	ldr	w2, =UNIPHIER_UART_DEFAULT_BAUDRATE
+#else
+	uniphier_console_get_base x0, x1, w1
+	mov	w1, #0
+	mov	w2, #0
+#endif
+	b	console_init
+endfunc uniphier_console_setup
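The uniphier_console_get_base macro above probes the four UART blocks between UNIPHIER_UART_BASE and UNIPHIER_UART_END and picks the first one whose divisor latch was programmed by earlier boot firmware; a block whose DLR reads back as all ones in the low 16 bits is treated as unused. A C-level sketch of the same walk, assuming TF-A's mmio.h accessors (the function name is invented for illustration):

#include <mmio.h>
#include <stdint.h>

static uintptr_t uniphier_console_probe(void)
{
	uintptr_t base;

	for (base = UNIPHIER_UART_BASE; base < UNIPHIER_UART_END;
	     base += UNIPHIER_UART_OFFSET) {
		/* A non-zero low halfword of ~DLR means the port is set up. */
		if ((~mmio_read_32(base + UNIPHIER_UART_DLR) & 0xffff) != 0)
			return base;
	}

	return 0;	/* no initialized port found */
}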
diff --git a/plat/socionext/uniphier/uniphier_emmc.c b/plat/socionext/uniphier/uniphier_emmc.c
new file mode 100644
index 0000000..fcd4cb4
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_emmc.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <io/io_block.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define MMC_CMD_SWITCH			6
+#define MMC_CMD_SELECT_CARD		7
+#define MMC_CMD_SEND_CSD		9
+#define MMC_CMD_READ_MULTIPLE_BLOCK	18
+
+#define EXT_CSD_PART_CONF		179	/* R/W */
+
+#define MMC_RSP_PRESENT BIT(0)
+#define MMC_RSP_136	BIT(1)		/* 136 bit response */
+#define MMC_RSP_CRC	BIT(2)		/* expect valid crc */
+#define MMC_RSP_BUSY	BIT(3)		/* card may send busy */
+#define MMC_RSP_OPCODE	BIT(4)		/* response contains opcode */
+
+#define MMC_RSP_NONE	(0)
+#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+#define MMC_RSP_R1b	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | \
+			MMC_RSP_BUSY)
+#define MMC_RSP_R2	(MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC)
+#define MMC_RSP_R3	(MMC_RSP_PRESENT)
+#define MMC_RSP_R4	(MMC_RSP_PRESENT)
+#define MMC_RSP_R5	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+#define MMC_RSP_R6	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+#define MMC_RSP_R7	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
+
+#define SDHCI_DMA_ADDRESS	0x00
+#define SDHCI_BLOCK_SIZE	0x04
+#define  SDHCI_MAKE_BLKSZ(dma, blksz) ((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))
+#define SDHCI_BLOCK_COUNT	0x06
+#define SDHCI_ARGUMENT		0x08
+#define SDHCI_TRANSFER_MODE	0x0C
+#define  SDHCI_TRNS_DMA		BIT(0)
+#define  SDHCI_TRNS_BLK_CNT_EN	BIT(1)
+#define  SDHCI_TRNS_ACMD12	BIT(2)
+#define  SDHCI_TRNS_READ	BIT(4)
+#define  SDHCI_TRNS_MULTI	BIT(5)
+#define SDHCI_COMMAND		0x0E
+#define  SDHCI_CMD_RESP_MASK	0x03
+#define  SDHCI_CMD_CRC		0x08
+#define  SDHCI_CMD_INDEX	0x10
+#define  SDHCI_CMD_DATA		0x20
+#define  SDHCI_CMD_ABORTCMD	0xC0
+#define  SDHCI_CMD_RESP_NONE	0x00
+#define  SDHCI_CMD_RESP_LONG	0x01
+#define  SDHCI_CMD_RESP_SHORT	0x02
+#define  SDHCI_CMD_RESP_SHORT_BUSY 0x03
+#define  SDHCI_MAKE_CMD(c, f) ((((c) & 0xff) << 8) | ((f) & 0xff))
+#define SDHCI_RESPONSE		0x10
+#define SDHCI_HOST_CONTROL	0x28
+#define  SDHCI_CTRL_DMA_MASK	0x18
+#define   SDHCI_CTRL_SDMA	0x00
+#define SDHCI_BLOCK_GAP_CONTROL	0x2A
+#define SDHCI_SOFTWARE_RESET	0x2F
+#define  SDHCI_RESET_CMD	0x02
+#define  SDHCI_RESET_DATA	0x04
+#define SDHCI_INT_STATUS	0x30
+#define  SDHCI_INT_RESPONSE	BIT(0)
+#define  SDHCI_INT_DATA_END	BIT(1)
+#define  SDHCI_INT_DMA_END	BIT(3)
+#define  SDHCI_INT_ERROR	BIT(15)
+#define SDHCI_SIGNAL_ENABLE	0x38
+
+/* RCA assigned by Boot ROM */
+#define UNIPHIER_EMMC_RCA	0x1000
+
+struct uniphier_mmc_cmd {
+	unsigned int cmdidx;
+	unsigned int resp_type;
+	unsigned int cmdarg;
+	unsigned int is_data;
+};
+
+static int uniphier_emmc_block_addressing;
+
+static int uniphier_emmc_send_cmd(uintptr_t host_base,
+				  struct uniphier_mmc_cmd *cmd)
+{
+	uint32_t mode = 0;
+	uint32_t end_bit;
+	uint32_t stat, flags, dma_addr;
+
+	mmio_write_32(host_base + SDHCI_INT_STATUS, -1);
+	mmio_write_32(host_base + SDHCI_SIGNAL_ENABLE, 0);
+	mmio_write_32(host_base + SDHCI_ARGUMENT, cmd->cmdarg);
+
+	if (cmd->is_data)
+		mode = SDHCI_TRNS_DMA | SDHCI_TRNS_BLK_CNT_EN |
+			SDHCI_TRNS_ACMD12 | SDHCI_TRNS_READ |
+			SDHCI_TRNS_MULTI;
+
+	mmio_write_16(host_base + SDHCI_TRANSFER_MODE, mode);
+
+	if (!(cmd->resp_type & MMC_RSP_PRESENT))
+		flags = SDHCI_CMD_RESP_NONE;
+	else if (cmd->resp_type & MMC_RSP_136)
+		flags = SDHCI_CMD_RESP_LONG;
+	else if (cmd->resp_type & MMC_RSP_BUSY)
+		flags = SDHCI_CMD_RESP_SHORT_BUSY;
+	else
+		flags = SDHCI_CMD_RESP_SHORT;
+
+	if (cmd->resp_type & MMC_RSP_CRC)
+		flags |= SDHCI_CMD_CRC;
+	if (cmd->resp_type & MMC_RSP_OPCODE)
+		flags |= SDHCI_CMD_INDEX;
+	if (cmd->is_data)
+		flags |= SDHCI_CMD_DATA;
+
+	if (cmd->resp_type & MMC_RSP_BUSY || cmd->is_data)
+		end_bit = SDHCI_INT_DATA_END;
+	else
+		end_bit = SDHCI_INT_RESPONSE;
+
+	mmio_write_16(host_base + SDHCI_COMMAND,
+		      SDHCI_MAKE_CMD(cmd->cmdidx, flags));
+
+	do {
+		stat = mmio_read_32(host_base + SDHCI_INT_STATUS);
+		if (stat & SDHCI_INT_ERROR)
+			return -EIO;
+
+		if (stat & SDHCI_INT_DMA_END) {
+			mmio_write_32(host_base + SDHCI_INT_STATUS, stat);
+			dma_addr = mmio_read_32(host_base + SDHCI_DMA_ADDRESS);
+			mmio_write_32(host_base + SDHCI_DMA_ADDRESS, dma_addr);
+		}
+	} while (!(stat & end_bit));
+
+	return 0;
+}
+
+static int uniphier_emmc_switch_part(uintptr_t host_base, int part_num)
+{
+	struct uniphier_mmc_cmd cmd = {0};
+
+	cmd.cmdidx = MMC_CMD_SWITCH;
+	cmd.resp_type = MMC_RSP_R1b;
+	cmd.cmdarg = (EXT_CSD_PART_CONF << 16) | (part_num << 8) | (3 << 24);
+
+	return uniphier_emmc_send_cmd(host_base, &cmd);
+}
+
+static int uniphier_emmc_is_over_2gb(uintptr_t host_base)
+{
+	struct uniphier_mmc_cmd cmd = {0};
+	uint32_t csd40, csd72;	/* CSD[71:40], CSD[103:72] */
+	int ret;
+
+	cmd.cmdidx = MMC_CMD_SEND_CSD;
+	cmd.resp_type = MMC_RSP_R2;
+	cmd.cmdarg = UNIPHIER_EMMC_RCA << 16;
+
+	ret = uniphier_emmc_send_cmd(host_base, &cmd);
+	if (ret)
+		return ret;
+
+	csd40 = mmio_read_32(host_base + SDHCI_RESPONSE + 4);
+	csd72 = mmio_read_32(host_base + SDHCI_RESPONSE + 8);
+
+	return !(~csd40 & 0xffc00380) && !(~csd72 & 0x3);
+}
+
+static int uniphier_emmc_load_image(uintptr_t host_base,
+				    uint32_t dev_addr,
+				    unsigned long load_addr,
+				    uint32_t block_cnt)
+{
+	struct uniphier_mmc_cmd cmd = {0};
+	uint8_t tmp;
+
+	assert((load_addr >> 32) == 0);
+
+	mmio_write_32(host_base + SDHCI_DMA_ADDRESS, load_addr);
+	mmio_write_16(host_base + SDHCI_BLOCK_SIZE, SDHCI_MAKE_BLKSZ(7, 512));
+	mmio_write_16(host_base + SDHCI_BLOCK_COUNT, block_cnt);
+
+	tmp = mmio_read_8(host_base + SDHCI_HOST_CONTROL);
+	tmp &= ~SDHCI_CTRL_DMA_MASK;
+	tmp |= SDHCI_CTRL_SDMA;
+	mmio_write_8(host_base + SDHCI_HOST_CONTROL, tmp);
+
+	tmp = mmio_read_8(host_base + SDHCI_BLOCK_GAP_CONTROL);
+	tmp &= ~1;		/* clear Stop At Block Gap Request */
+	mmio_write_8(host_base + SDHCI_BLOCK_GAP_CONTROL, tmp);
+
+	cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
+	cmd.resp_type = MMC_RSP_R1;
+	cmd.cmdarg = dev_addr;
+	cmd.is_data = 1;
+
+	return uniphier_emmc_send_cmd(host_base, &cmd);
+}
+
+static size_t uniphier_emmc_read(int lba, uintptr_t buf, size_t size)
+{
+	uintptr_t host_base = 0x5a000200;
+	int ret;
+
+	inv_dcache_range(buf, size);
+
+	if (!uniphier_emmc_block_addressing)
+		lba *= 512;
+
+	ret = uniphier_emmc_load_image(host_base, lba, buf, size / 512);
+
+	inv_dcache_range(buf, size);
+
+	return ret ? 0 : size;
+}
+
+static const struct io_block_dev_spec uniphier_emmc_dev_spec = {
+	.buffer = {
+		.offset = UNIPHIER_BLOCK_BUF_BASE,
+		.length = UNIPHIER_BLOCK_BUF_SIZE,
+	},
+	.ops = {
+		.read = uniphier_emmc_read,
+	},
+	.block_size = 512,
+};
+
+static int uniphier_emmc_hw_init(void)
+{
+	uintptr_t host_base = 0x5a000200;
+	struct uniphier_mmc_cmd cmd = {0};
+	int ret;
+
+	/*
+	 * Deselect the card before the SEND_CSD command.
+	 * Do not check the return code; the command is expected to fail
+	 * here, and that is fine.
+	 */
+	cmd.cmdidx = MMC_CMD_SELECT_CARD;
+	cmd.resp_type = MMC_RSP_R1;
+
+	uniphier_emmc_send_cmd(host_base, &cmd); /* CMD7 (arg=0) */
+
+	/* reset CMD Line */
+	mmio_write_8(host_base + SDHCI_SOFTWARE_RESET,
+		     SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+	while (mmio_read_8(host_base + SDHCI_SOFTWARE_RESET))
+		;
+
+	ret = uniphier_emmc_is_over_2gb(host_base);
+	if (ret < 0)
+		return ret;
+
+	uniphier_emmc_block_addressing = ret;
+
+	cmd.cmdarg = UNIPHIER_EMMC_RCA << 16;
+
+	/* select card again */
+	ret = uniphier_emmc_send_cmd(host_base, &cmd);
+	if (ret)
+		return ret;
+
+	/* switch to Boot Partition 1 */
+	ret = uniphier_emmc_switch_part(host_base, 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int uniphier_emmc_init(uintptr_t *block_dev_spec)
+{
+	int ret;
+
+	ret = uniphier_emmc_hw_init();
+	if (ret)
+		return ret;
+
+	*block_dev_spec = (uintptr_t)&uniphier_emmc_dev_spec;
+
+	return 0;
+}
diff --git a/plat/socionext/uniphier/uniphier_gicv3.c b/plat/socionext/uniphier/uniphier_gicv3.c
new file mode 100644
index 0000000..93bc73a
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_gicv3.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <gicv3.h>
+#include <interrupt_props.h>
+#include <platform.h>
+#include <platform_def.h>
+
+#include "uniphier.h"
+
+static uintptr_t uniphier_rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t uniphier_interrupt_props[] = {
+	/* G0 interrupts */
+
+	/* SGI0 */
+	INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+			GIC_INTR_CFG_EDGE),
+	/* SGI6 */
+	INTR_PROP_DESC(14, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+			GIC_INTR_CFG_EDGE),
+
+	/* G1S interrupts */
+
+	/* Timer */
+	INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_LEVEL),
+	/* SGI1 */
+	INTR_PROP_DESC(9, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI2 */
+	INTR_PROP_DESC(10, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI3 */
+	INTR_PROP_DESC(11, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI4 */
+	INTR_PROP_DESC(12, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI5 */
+	INTR_PROP_DESC(13, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI7 */
+	INTR_PROP_DESC(15, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE)
+};
+
+static unsigned int uniphier_mpidr_to_core_pos(u_register_t mpidr)
+{
+	return plat_core_pos_by_mpidr(mpidr);
+}
+
+static const struct gicv3_driver_data uniphier_gic_driver_data[] = {
+	[UNIPHIER_SOC_LD11] = {
+		.gicd_base = 0x5fe00000,
+		.gicr_base = 0x5fe40000,
+		.interrupt_props = uniphier_interrupt_props,
+		.interrupt_props_num = ARRAY_SIZE(uniphier_interrupt_props),
+		.rdistif_num = PLATFORM_CORE_COUNT,
+		.rdistif_base_addrs = uniphier_rdistif_base_addrs,
+		.mpidr_to_core_pos = uniphier_mpidr_to_core_pos,
+	},
+	[UNIPHIER_SOC_LD20] = {
+		.gicd_base = 0x5fe00000,
+		.gicr_base = 0x5fe80000,
+		.interrupt_props = uniphier_interrupt_props,
+		.interrupt_props_num = ARRAY_SIZE(uniphier_interrupt_props),
+		.rdistif_num = PLATFORM_CORE_COUNT,
+		.rdistif_base_addrs = uniphier_rdistif_base_addrs,
+		.mpidr_to_core_pos = uniphier_mpidr_to_core_pos,
+	},
+	[UNIPHIER_SOC_PXS3] = {
+		.gicd_base = 0x5fe00000,
+		.gicr_base = 0x5fe80000,
+		.interrupt_props = uniphier_interrupt_props,
+		.interrupt_props_num = ARRAY_SIZE(uniphier_interrupt_props),
+		.rdistif_num = PLATFORM_CORE_COUNT,
+		.rdistif_base_addrs = uniphier_rdistif_base_addrs,
+		.mpidr_to_core_pos = uniphier_mpidr_to_core_pos,
+	},
+};
+
+void uniphier_gic_driver_init(unsigned int soc)
+{
+	assert(soc < ARRAY_SIZE(uniphier_gic_driver_data));
+
+	gicv3_driver_init(&uniphier_gic_driver_data[soc]);
+}
+
+void uniphier_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void uniphier_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void uniphier_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+void uniphier_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
diff --git a/plat/socionext/uniphier/uniphier_helpers.S b/plat/socionext/uniphier/uniphier_helpers.S
new file mode 100644
index 0000000..105cf9e
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_helpers.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+
+	.global	uniphier_calc_core_pos
+	.global	plat_my_core_pos
+	.globl	platform_mem_init
+
+/*
+ * unsigned int uniphier_calc_core_pos(u_register_t mpidr)
+ * core_pos = (cluster_id * max_cpus_per_cluster) + core_id
+ */
+func uniphier_calc_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	lsr	x0, x0, #MPIDR_AFFINITY_BITS
+	mov	x2, #UNIPHIER_MAX_CPUS_PER_CLUSTER
+	madd	x0, x0, x2, x1
+	ret
+endfunc uniphier_calc_core_pos
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	b	uniphier_calc_core_pos
+endfunc plat_my_core_pos
+
+func platform_mem_init
+	ret
+endfunc platform_mem_init
diff --git a/plat/socionext/uniphier/uniphier_image_desc.c b/plat/socionext/uniphier/uniphier_image_desc.c
new file mode 100644
index 0000000..1e474c5
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_image_desc.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <desc_image_load.h>
+#include <platform_def.h>
+
+#include "uniphier.h"
+
+static struct bl_mem_params_node uniphier_image_descs[] = {
+	{
+		.image_id = SCP_BL2_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				      VERSION_2, image_info_t, 0),
+		.image_info.image_base = UNIPHIER_SCP_BASE,
+		.image_info.image_max_size = UNIPHIER_SCP_MAX_SIZE,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				      VERSION_2, entry_point_info_t,
+				      NON_SECURE | NON_EXECUTABLE),
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+	{
+		.image_id = BL31_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				      VERSION_2, image_info_t, 0),
+		.image_info.image_base = BL31_BASE,
+		.image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				      VERSION_2, entry_point_info_t,
+				      SECURE | EXECUTABLE | EP_FIRST_EXE),
+		.ep_info.pc = BL31_BASE,
+		.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS),
+
+#ifdef UNIPHIER_LOAD_BL32
+		.next_handoff_image_id = BL32_IMAGE_ID,
+#else
+		.next_handoff_image_id = BL33_IMAGE_ID,
+#endif
+	},
+#ifdef UNIPHIER_LOAD_BL32
+	{
+		.image_id = BL32_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				      VERSION_2, image_info_t, 0),
+		.image_info.image_base = BL32_BASE,
+		.image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				      VERSION_2, entry_point_info_t,
+				      SECURE | EXECUTABLE),
+		.ep_info.pc = BL32_BASE,
+		.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS),
+
+		.next_handoff_image_id = BL33_IMAGE_ID,
+	},
+#endif
+	{
+		.image_id = BL33_IMAGE_ID,
+
+		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+				      VERSION_2, image_info_t, 0),
+		.image_info.image_base = UNIPHIER_BL33_BASE,
+		.image_info.image_max_size = UNIPHIER_BL33_MAX_SIZE,
+
+		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+				      VERSION_2, entry_point_info_t,
+				      NON_SECURE | EXECUTABLE),
+		.ep_info.pc = UNIPHIER_BL33_BASE,
+		.ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS),
+
+		.next_handoff_image_id = INVALID_IMAGE_ID,
+	},
+};
+REGISTER_BL_IMAGE_DESCS(uniphier_image_descs)
+
+/* SCP is optional.  Allow run-time fixup of the descriptor array. */
+void uniphier_image_descs_fixup(void)
+{
+	struct bl_mem_params_node *desc;
+
+	desc = get_bl_mem_params_node(SCP_BL2_IMAGE_ID);
+	assert(desc != NULL);
+	desc->image_info.h.attr |= IMAGE_ATTRIB_SKIP_LOADING;
+}
diff --git a/plat/socionext/uniphier/uniphier_io_storage.c b/plat/socionext/uniphier/uniphier_io_storage.c
new file mode 100644
index 0000000..bc31350
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_io_storage.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <firmware_image_package.h>
+#include <io/io_block.h>
+#include <io/io_driver.h>
+#include <io/io_fip.h>
+#include <io/io_memmap.h>
+#include <platform_def.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_ROM_REGION_BASE	0x00000000
+#define UNIPHIER_ROM_REGION_SIZE	0x10000000
+
+static const io_dev_connector_t *uniphier_fip_dev_con;
+static uintptr_t uniphier_fip_dev_handle;
+
+static const io_dev_connector_t *uniphier_backend_dev_con;
+static uintptr_t uniphier_backend_dev_handle;
+
+static io_block_spec_t uniphier_fip_spec = {
+	/* .offset will be set by the io_setup func */
+	.length = 0x00200000,
+};
+
+static const io_uuid_spec_t uniphier_bl2_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t uniphier_scp_spec = {
+	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t uniphier_bl31_spec = {
+	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t uniphier_bl32_spec = {
+	.uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t uniphier_bl33_spec = {
+	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+#if TRUSTED_BOARD_BOOT
+static const io_uuid_spec_t uniphier_tb_fw_cert_spec = {
+	.uuid = UUID_TRUSTED_BOOT_FW_CERT,
+};
+
+static const io_uuid_spec_t uniphier_trusted_key_cert_spec = {
+	.uuid = UUID_TRUSTED_KEY_CERT,
+};
+
+static const io_uuid_spec_t uniphier_scp_fw_key_cert_spec = {
+	.uuid = UUID_SCP_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t uniphier_soc_fw_key_cert_spec = {
+	.uuid = UUID_SOC_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t uniphier_tos_fw_key_cert_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t uniphier_nt_fw_key_cert_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+};
+
+static const io_uuid_spec_t uniphier_scp_fw_cert_spec = {
+	.uuid = UUID_SCP_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t uniphier_soc_fw_cert_spec = {
+	.uuid = UUID_SOC_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t uniphier_tos_fw_cert_spec = {
+	.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+};
+
+static const io_uuid_spec_t uniphier_nt_fw_cert_spec = {
+	.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+};
+#endif /* TRUSTED_BOARD_BOOT */
+
+struct uniphier_io_policy {
+	uintptr_t *dev_handle;
+	uintptr_t image_spec;
+	uintptr_t init_params;
+};
+
+static const struct uniphier_io_policy uniphier_io_policies[] = {
+	[FIP_IMAGE_ID] = {
+		.dev_handle = &uniphier_backend_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_fip_spec,
+	},
+	[BL2_IMAGE_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_bl2_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[SCP_BL2_IMAGE_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_scp_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[BL31_IMAGE_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_bl31_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[BL32_IMAGE_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_bl32_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[BL33_IMAGE_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_bl33_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+#if TRUSTED_BOARD_BOOT
+	[TRUSTED_BOOT_FW_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_tb_fw_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[TRUSTED_KEY_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_trusted_key_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[SCP_FW_KEY_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_scp_fw_key_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[SOC_FW_KEY_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_soc_fw_key_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[TRUSTED_OS_FW_KEY_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_tos_fw_key_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[NON_TRUSTED_FW_KEY_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_nt_fw_key_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[SCP_FW_CONTENT_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_scp_fw_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[SOC_FW_CONTENT_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_soc_fw_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_tos_fw_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
+		.dev_handle = &uniphier_fip_dev_handle,
+		.image_spec = (uintptr_t)&uniphier_nt_fw_cert_spec,
+		.init_params = FIP_IMAGE_ID,
+	},
+#endif
+};
+
+static int uniphier_io_block_setup(size_t fip_offset, uintptr_t block_dev_spec)
+{
+	int ret;
+
+	uniphier_fip_spec.offset = fip_offset;
+
+	ret = register_io_dev_block(&uniphier_backend_dev_con);
+	if (ret)
+		return ret;
+
+	return io_dev_open(uniphier_backend_dev_con, block_dev_spec,
+			   &uniphier_backend_dev_handle);
+}
+
+static int uniphier_io_memmap_setup(size_t fip_offset)
+{
+	int ret;
+
+	uniphier_fip_spec.offset = fip_offset;
+
+	ret = mmap_add_dynamic_region(fip_offset, fip_offset,
+				      uniphier_fip_spec.length,
+				      MT_RO_DATA | MT_SECURE);
+	if (ret)
+		return ret;
+
+	ret = register_io_dev_memmap(&uniphier_backend_dev_con);
+	if (ret)
+		return ret;
+
+	return io_dev_open(uniphier_backend_dev_con, 0,
+			   &uniphier_backend_dev_handle);
+}
+
+static int uniphier_io_fip_setup(void)
+{
+	int ret;
+
+	ret = register_io_dev_fip(&uniphier_fip_dev_con);
+	if (ret)
+		return ret;
+
+	return io_dev_open(uniphier_fip_dev_con, 0, &uniphier_fip_dev_handle);
+}
+
+static int uniphier_io_emmc_setup(unsigned int soc_id)
+{
+	uintptr_t block_dev_spec;
+	int ret;
+
+	ret = uniphier_emmc_init(&block_dev_spec);
+	if (ret)
+		return ret;
+
+	return uniphier_io_block_setup(0x20000, block_dev_spec);
+}
+
+static int uniphier_io_nand_setup(unsigned int soc_id)
+{
+	uintptr_t block_dev_spec;
+	int ret;
+
+	ret = uniphier_nand_init(&block_dev_spec);
+	if (ret)
+		return ret;
+
+	return uniphier_io_block_setup(0x20000, block_dev_spec);
+}
+
+static int uniphier_io_nor_setup(unsigned int soc_id)
+{
+	return uniphier_io_memmap_setup(0x70000);
+}
+
+static int uniphier_io_usb_setup(unsigned int soc_id)
+{
+	uintptr_t block_dev_spec;
+	int ret;
+
+	/* use ROM API for loading images from USB storage */
+	ret = mmap_add_dynamic_region(UNIPHIER_ROM_REGION_BASE,
+				      UNIPHIER_ROM_REGION_BASE,
+				      UNIPHIER_ROM_REGION_SIZE,
+				      MT_CODE | MT_SECURE);
+	if (ret)
+		return ret;
+
+	ret = uniphier_usb_init(soc_id, &block_dev_spec);
+	if (ret)
+		return ret;
+
+	return uniphier_io_block_setup(0x20000, block_dev_spec);
+}
+
+static int (* const uniphier_io_setup_table[])(unsigned int) = {
+	[UNIPHIER_BOOT_DEVICE_EMMC] = uniphier_io_emmc_setup,
+	[UNIPHIER_BOOT_DEVICE_NAND] = uniphier_io_nand_setup,
+	[UNIPHIER_BOOT_DEVICE_NOR] = uniphier_io_nor_setup,
+	[UNIPHIER_BOOT_DEVICE_USB] = uniphier_io_usb_setup,
+};
+
+int uniphier_io_setup(unsigned int soc_id)
+{
+	int (*io_setup)(unsigned int soc_id);
+	unsigned int boot_dev;
+	int ret;
+
+	boot_dev = uniphier_get_boot_device(soc_id);
+	if (boot_dev == UNIPHIER_BOOT_DEVICE_RSV)
+		return -EINVAL;
+
+	io_setup = uniphier_io_setup_table[boot_dev];
+	ret = io_setup(soc_id);
+	if (ret)
+		return ret;
+
+	ret = uniphier_io_fip_setup();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+			  uintptr_t *image_spec)
+{
+	uintptr_t init_params;
+
+	assert(image_id < ARRAY_SIZE(uniphier_io_policies));
+
+	*dev_handle = *(uniphier_io_policies[image_id].dev_handle);
+	*image_spec = uniphier_io_policies[image_id].image_spec;
+	init_params = uniphier_io_policies[image_id].init_params;
+
+	return io_dev_init(*dev_handle, init_params);
+}
+
+int uniphier_check_image(unsigned int image_id)
+{
+	uintptr_t dev_handle, image_spec, image_handle;
+	int ret;
+
+	ret = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (ret)
+		return ret;
+
+	ret = io_open(dev_handle, image_spec, &image_handle);
+	if (ret)
+		return ret;
+
+	io_close(image_handle);
+
+	return 0;
+}
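
plat_get_image_source() above is a pure table lookup: the image ID indexes uniphier_io_policies, which records the backend device, the spec to open and the parameter handed to io_dev_init(). The standalone sketch below models only that dispatch shape; the enum values and struct are illustrative, not the TF-A IO framework.

    #include <stddef.h>
    #include <stdio.h>

    enum { FIP_ID, BL2_ID, BL31_ID, NUM_IDS };

    struct io_policy {
        const char *backend;  /* stands in for dev_handle */
        const char *spec;     /* stands in for image_spec */
        int init_param;       /* stands in for init_params */
    };

    static const struct io_policy policies[NUM_IDS] = {
        [FIP_ID]  = { "block", "fip@offset", 0 },
        [BL2_ID]  = { "fip",   "uuid:bl2",   FIP_ID },
        [BL31_ID] = { "fip",   "uuid:bl31",  FIP_ID },
    };

    static int get_image_source(unsigned int id, const char **backend,
                                const char **spec, int *init_param)
    {
        if (id >= NUM_IDS)
            return -1;
        *backend = policies[id].backend;
        *spec = policies[id].spec;
        *init_param = policies[id].init_param;  /* TF-A passes this to io_dev_init() */
        return 0;
    }

    int main(void)
    {
        const char *backend, *spec;
        int init_param;

        if (get_image_source(BL31_ID, &backend, &spec, &init_param) == 0)
            printf("BL31 -> backend %s, spec %s, init %d\n",
                   backend, spec, init_param);
        return 0;
    }
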
diff --git a/plat/socionext/uniphier/uniphier_nand.c b/plat/socionext/uniphier/uniphier_nand.c
new file mode 100644
index 0000000..a118b85
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_nand.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <io/io_block.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
+#define NAND_CMD_READ0		0
+#define NAND_CMD_READSTART	0x30
+
+#define DENALI_ECC_ENABLE			0x0e0
+#define DENALI_PAGES_PER_BLOCK			0x150
+#define DENALI_DEVICE_MAIN_AREA_SIZE		0x170
+#define DENALI_DEVICE_SPARE_AREA_SIZE		0x180
+#define DENALI_TWO_ROW_ADDR_CYCLES		0x190
+#define DENALI_INTR_STATUS0			0x410
+#define   DENALI_INTR_ECC_UNCOR_ERR			BIT(1)
+#define   DENALI_INTR_DMA_CMD_COMP			BIT(2)
+#define   DENALI_INTR_INT_ACT				BIT(12)
+
+#define DENALI_DMA_ENABLE			0x700
+
+#define DENALI_HOST_ADDR			0x00
+#define DENALI_HOST_DATA			0x10
+
+#define DENALI_MAP01				(1 << 26)
+#define DENALI_MAP10				(2 << 26)
+#define DENALI_MAP11				(3 << 26)
+
+#define DENALI_MAP11_CMD			((DENALI_MAP11) | 0)
+#define DENALI_MAP11_ADDR			((DENALI_MAP11) | 1)
+#define DENALI_MAP11_DATA			((DENALI_MAP11) | 2)
+
+#define DENALI_ACCESS_DEFAULT_AREA		0x42
+
+#define UNIPHIER_NAND_BBT_UNKNOWN		0xff
+
+struct uniphier_nand {
+	uintptr_t host_base;
+	uintptr_t reg_base;
+	int pages_per_block;
+	int page_size;
+	int two_row_addr_cycles;
+	uint8_t bbt[16];
+};
+
+struct uniphier_nand uniphier_nand;
+
+static void uniphier_nand_host_write(struct uniphier_nand *nand,
+				     uint32_t addr, uint32_t data)
+{
+	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
+	mmio_write_32(nand->host_base + DENALI_HOST_DATA, data);
+}
+
+static uint32_t uniphier_nand_host_read(struct uniphier_nand *nand,
+					uint32_t addr)
+{
+	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
+	return mmio_read_32(nand->host_base + DENALI_HOST_DATA);
+}
+
+static int uniphier_nand_block_isbad(struct uniphier_nand *nand, int block)
+{
+	int page = nand->pages_per_block * block;
+	int column = nand->page_size;
+	uint8_t bbm;
+	uint32_t status;
+	int is_bad;
+
+	/* use cache if available */
+	if (block < ARRAY_SIZE(nand->bbt) &&
+	    nand->bbt[block] != UNIPHIER_NAND_BBT_UNKNOWN)
+		return nand->bbt[block];
+
+	mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 0);
+
+	mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1);
+
+	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READ0);
+	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, column & 0xff);
+	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (column >> 8) & 0xff);
+	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, page & 0xff);
+	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (page >> 8) & 0xff);
+	if (!nand->two_row_addr_cycles)
+		uniphier_nand_host_write(nand, DENALI_MAP11_ADDR,
+					 (page >> 16) & 0xff);
+	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READSTART);
+
+	do {
+		status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0);
+	} while (!(status & DENALI_INTR_INT_ACT));
+
+	bbm = uniphier_nand_host_read(nand, DENALI_MAP11_DATA);
+
+	is_bad = bbm != 0xff;
+
+	/* if possible, save the result for future re-use */
+	if (block < ARRAY_SIZE(nand->bbt))
+		nand->bbt[block] = is_bad;
+
+	if (is_bad)
+		WARN("found bad block at %d. skip.\n", block);
+
+	return is_bad;
+}
+
+static int uniphier_nand_read_pages(struct uniphier_nand *nand, uintptr_t buf,
+				    int page_start, int page_count)
+{
+	uint32_t status;
+
+	mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 1);
+	mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 1);
+
+	mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1);
+
+	/* use Data DMA (64bit) */
+	mmio_write_32(nand->host_base + DENALI_HOST_ADDR,
+		      DENALI_MAP10 | page_start);
+
+	/*
+	 * 1. set up the transfer: type, interrupt on completion,
+	 *    burst length = 64 bytes, and the number of pages
+	 */
+	mmio_write_32(nand->host_base + DENALI_HOST_DATA,
+		      0x01002000 | (64 << 16) | page_count);
+
+	/* 2. set memory low address */
+	mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf);
+
+	/* 3. set memory high address */
+	mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf >> 32);
+
+	do {
+		status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0);
+	} while (!(status & DENALI_INTR_DMA_CMD_COMP));
+
+	mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 0);
+
+	if (status & DENALI_INTR_ECC_UNCOR_ERR) {
+		ERROR("uncorrectable error in page range %d-%d\n",
+		      page_start, page_start + page_count - 1);
+		return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static size_t __uniphier_nand_read(struct uniphier_nand *nand, int lba,
+				   uintptr_t buf, size_t size)
+{
+	int pages_per_block = nand->pages_per_block;
+	int page_size = nand->page_size;
+	int blocks_to_skip = lba / pages_per_block;
+	int pages_to_read = DIV_ROUND_UP(size, page_size);
+	int page = lba % pages_per_block;
+	int block = 0;
+	uintptr_t p = buf;
+	int page_count, ret;
+
+	while (blocks_to_skip) {
+		ret = uniphier_nand_block_isbad(nand, block);
+		if (ret < 0)
+			goto out;
+
+		if (!ret)
+			blocks_to_skip--;
+
+		block++;
+	}
+
+	while (pages_to_read) {
+		ret = uniphier_nand_block_isbad(nand, block);
+		if (ret < 0)
+			goto out;
+
+		if (ret) {
+			block++;
+			continue;
+		}
+
+		page_count = MIN(pages_per_block - page, pages_to_read);
+
+		ret = uniphier_nand_read_pages(nand, p,
+					       block * pages_per_block + page,
+					       page_count);
+		if (ret)
+			goto out;
+
+		block++;
+		page = 0;
+		p += page_size * page_count;
+		pages_to_read -= page_count;
+	}
+
+out:
+	/* number of bytes read */
+	return MIN(size, p - buf);
+}
+
+static size_t uniphier_nand_read(int lba, uintptr_t buf, size_t size)
+{
+	size_t count;
+
+	inv_dcache_range(buf, size);
+
+	count = __uniphier_nand_read(&uniphier_nand, lba, buf, size);
+
+	inv_dcache_range(buf, size);
+
+	return count;
+}
+
+static struct io_block_dev_spec uniphier_nand_dev_spec = {
+	.buffer = {
+		.offset = UNIPHIER_BLOCK_BUF_BASE,
+		.length = UNIPHIER_BLOCK_BUF_SIZE,
+	},
+	.ops = {
+		.read = uniphier_nand_read,
+	},
+	/* fill .block_size at run-time */
+};
+
+static int uniphier_nand_hw_init(struct uniphier_nand *nand)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nand->bbt); i++)
+		nand->bbt[i] = UNIPHIER_NAND_BBT_UNKNOWN;
+
+	nand->host_base = 0x68000000;
+	nand->reg_base = 0x68100000;
+
+	nand->pages_per_block =
+			mmio_read_32(nand->reg_base + DENALI_PAGES_PER_BLOCK);
+
+	nand->page_size =
+		mmio_read_32(nand->reg_base + DENALI_DEVICE_MAIN_AREA_SIZE);
+
+	if (mmio_read_32(nand->reg_base + DENALI_TWO_ROW_ADDR_CYCLES) & BIT(0))
+		nand->two_row_addr_cycles = 1;
+
+	uniphier_nand_host_write(nand, DENALI_MAP10,
+				 DENALI_ACCESS_DEFAULT_AREA);
+
+	return 0;
+}
+
+int uniphier_nand_init(uintptr_t *block_dev_spec)
+{
+	int ret;
+
+	ret = uniphier_nand_hw_init(&uniphier_nand);
+	if (ret)
+		return ret;
+
+	uniphier_nand_dev_spec.block_size = uniphier_nand.page_size;
+
+	*block_dev_spec = (uintptr_t)&uniphier_nand_dev_spec;
+
+	return 0;
+}
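
The read path above never trusts a logical block address directly: __uniphier_nand_read() walks physical blocks, consults the bad-block marker (cached in the small bbt[] array) and silently skips bad ones. The following standalone sketch simulates just that skip logic with a made-up bad-block set.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGES_PER_BLOCK 64

    /* pretend blocks 1 and 3 carry a bad-block marker */
    static bool block_is_bad(int block)
    {
        return block == 1 || block == 3;
    }

    /* map a logical block number onto the n-th good physical block */
    static int logical_to_physical(int logical_block)
    {
        int block = 0, good = 0;

        while (1) {
            if (!block_is_bad(block)) {
                if (good == logical_block)
                    return block;
                good++;
            }
            block++;
        }
    }

    int main(void)
    {
        for (int logical = 0; logical < 4; logical++)
            printf("logical block %d (lba %d) -> physical block %d\n",
                   logical, logical * PAGES_PER_BLOCK,
                   logical_to_physical(logical));
        return 0;
    }
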
diff --git a/plat/socionext/uniphier/uniphier_psci.c b/plat/socionext/uniphier/uniphier_psci.c
new file mode 100644
index 0000000..82081a0
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_psci.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mmio.h>
+#include <psci.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_ROM_RSV0		0x59801200
+
+#define UNIPHIER_SLFRSTSEL		0x61843010
+#define   UNIPHIER_SLFRSTSEL_MASK		(0x3 << 0)
+#define UNIPHIER_SLFRSTCTL		0x61843014
+#define   UNIPHIER_SLFRSTCTL_RST		(1 << 0)
+
+#define MPIDR_AFFINITY_INVALID		((u_register_t)-1)
+
+uintptr_t uniphier_sec_entrypoint;
+
+void uniphier_warmboot_entrypoint(void);
+void __dead2 uniphier_fake_pwr_down(void);
+u_register_t uniphier_holding_pen_release;
+static int uniphier_psci_scp_mode;
+
+static int uniphier_psci_pwr_domain_on(u_register_t mpidr)
+{
+	uniphier_holding_pen_release = mpidr;
+	flush_dcache_range((uint64_t)&uniphier_holding_pen_release,
+			   sizeof(uniphier_holding_pen_release));
+
+	mmio_write_64(UNIPHIER_ROM_RSV0,
+		      (uint64_t)&uniphier_warmboot_entrypoint);
+	sev();
+
+	return PSCI_E_SUCCESS;
+}
+
+static void uniphier_psci_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	uniphier_gic_cpuif_disable();
+}
+
+static void uniphier_psci_pwr_domain_on_finish(
+					const psci_power_state_t *target_state)
+{
+	uniphier_gic_pcpu_init();
+	uniphier_gic_cpuif_enable();
+
+	uniphier_cci_enable();
+}
+
+static void __dead2 uniphier_psci_pwr_domain_pwr_down_wfi(
+					const psci_power_state_t *target_state)
+{
+	/*
+	 * The Boot ROM cannot distinguish warm and cold resets.
+	 * Instead of resetting the CPU, fake the power-down by jumping
+	 * back to the warm-boot entry point.
+	 */
+	uniphier_holding_pen_release = MPIDR_AFFINITY_INVALID;
+	flush_dcache_range((uint64_t)&uniphier_holding_pen_release,
+			   sizeof(uniphier_holding_pen_release));
+
+	uniphier_fake_pwr_down();
+}
+
+static void uniphier_self_system_reset(void)
+{
+	mmio_clrbits_32(UNIPHIER_SLFRSTSEL, UNIPHIER_SLFRSTSEL_MASK);
+	mmio_setbits_32(UNIPHIER_SLFRSTCTL, UNIPHIER_SLFRSTCTL_RST);
+}
+
+static void __dead2 uniphier_psci_system_off(void)
+{
+	if (uniphier_psci_scp_mode) {
+		uniphier_scp_system_off();
+	} else {
+		NOTICE("SCP is disabled; cannot shut down the system.\n");
+		NOTICE("Resetting the system instead.\n");
+		uniphier_self_system_reset();
+	}
+
+	wfi();
+	ERROR("UniPhier System Off: operation not handled.\n");
+	panic();
+}
+
+static void __dead2 uniphier_psci_system_reset(void)
+{
+	if (uniphier_psci_scp_mode)
+		uniphier_scp_system_reset();
+	else
+		uniphier_self_system_reset();
+
+	wfi();
+	ERROR("UniPhier System Reset: operation not handled.\n");
+	panic();
+}
+
+static const struct plat_psci_ops uniphier_psci_ops = {
+	.pwr_domain_on = uniphier_psci_pwr_domain_on,
+	.pwr_domain_off = uniphier_psci_pwr_domain_off,
+	.pwr_domain_on_finish = uniphier_psci_pwr_domain_on_finish,
+	.pwr_domain_pwr_down_wfi = uniphier_psci_pwr_domain_pwr_down_wfi,
+	.system_off = uniphier_psci_system_off,
+	.system_reset = uniphier_psci_system_reset,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const struct plat_psci_ops **psci_ops)
+{
+	uniphier_sec_entrypoint = sec_entrypoint;
+	flush_dcache_range((uint64_t)&uniphier_sec_entrypoint,
+			   sizeof(uniphier_sec_entrypoint));
+
+	uniphier_psci_scp_mode = uniphier_scp_is_running();
+	flush_dcache_range((uint64_t)&uniphier_psci_scp_mode,
+			   sizeof(uniphier_psci_scp_mode));
+
+	if (uniphier_psci_scp_mode)
+		uniphier_scp_open_com();
+
+	*psci_ops = &uniphier_psci_ops;
+
+	return 0;
+}
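
The CPU_on path relies on a holding-pen handshake: uniphier_psci_pwr_domain_on() publishes the target core's MPIDR in uniphier_holding_pen_release (and the warm-boot entry in a ROM mailbox register), while each parked secondary spins in uniphier_warmboot_entrypoint until the value matches its own affinity. A host-side sketch of that protocol, with a thread and a C11 atomic standing in for the secondary CPU, cache maintenance and sev/wfe (build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define INVALID_ID (-1)

    static atomic_long holding_pen_release = INVALID_ID;

    static void *secondary(void *arg)
    {
        long my_id = (long)arg;

        /* analogous to the wfe/ldr/cmp loop in uniphier_smp.S */
        while (atomic_load(&holding_pen_release) != my_id)
            ;

        printf("core %ld released, jumping to entrypoint\n", my_id);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, secondary, (void *)1L);

        /* analogous to uniphier_psci_pwr_domain_on(mpidr) */
        atomic_store(&holding_pen_release, 1);

        pthread_join(t, NULL);
        return 0;
    }
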
diff --git a/plat/socionext/uniphier/uniphier_rotpk.S b/plat/socionext/uniphier/uniphier_rotpk.S
new file mode 100644
index 0000000..21c44b6
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_rotpk.S
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+	.global uniphier_rotpk_hash
+	.global uniphier_rotpk_hash_end
+	.section .rodata.uniphier_rotpk_hash, "a"
+uniphier_rotpk_hash:
+	/* DER header */
+	.byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+	.byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+	/* SHA256 */
+	.incbin ROTPK_HASH
+uniphier_rotpk_hash_end:
diff --git a/plat/socionext/uniphier/uniphier_scp.c b/plat/socionext/uniphier/uniphier_scp.c
new file mode 100644
index 0000000..9a921c4
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_scp.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_ROM_RSV3		0x5980120c
+
+#define UNIPHIER_STMBE2COM		0x5f800030
+#define UNIPHIER_BETOSTMIRQ0PT		0x5f800070
+
+#define UNIPHIER_SCP_READY_MAGIC	0x0000b6a5
+
+#define UNIPHIER_SCP_PACKET_START	0xA0
+#define UNIPHIER_SCP_PACKET_END		0xA5
+#define UNIPHIER_SCP_PACKET_ESC		0xA6
+#define UNIPHIER_SCP_IS_CTRL_CODE(c)	(0xA0 <= (c) && (c) <= 0xA6)
+
+int uniphier_scp_is_running(void)
+{
+	return mmio_read_32(UNIPHIER_STMBE2COM) == UNIPHIER_SCP_READY_MAGIC;
+}
+
+void uniphier_scp_start(void)
+{
+	uint32_t tmp;
+
+	mmio_write_32(UNIPHIER_STMBE2COM + 4, UNIPHIER_SCP_BASE);
+	mmio_write_32(UNIPHIER_STMBE2COM, UNIPHIER_SCP_READY_MAGIC);
+
+	do {
+		tmp = mmio_read_32(UNIPHIER_ROM_RSV3);
+	} while (!(tmp & BIT(8)));
+
+	mmio_write_32(UNIPHIER_ROM_RSV3, tmp | BIT(9));
+}
+
+static void uniphier_scp_send_packet(const uint8_t *packet, int packet_len)
+{
+	uintptr_t reg = UNIPHIER_STMBE2COM;
+	uint32_t word;
+	int len, i;
+
+	while (packet_len) {
+		len = MIN(packet_len, 4);
+		word = 0;
+
+		for (i = 0; i < len; i++)
+			word |= *packet++ << (8 * i);
+
+		mmio_write_32(reg, word);
+		reg += 4;
+		packet_len -= len;
+	}
+
+	mmio_write_8(UNIPHIER_BETOSTMIRQ0PT, 0x55);
+}
+
+static void uniphier_scp_send_cmd(const uint8_t *cmd, int cmd_len)
+{
+	uint8_t packet[32];	/* long enough */
+	uint8_t *p = packet;
+	uint8_t c;
+	int i;
+
+	*p++ = UNIPHIER_SCP_PACKET_START;
+	*p++ = cmd_len;
+
+	for (i = 0; i < cmd_len; i++) {
+		c = *cmd++;
+		if (UNIPHIER_SCP_IS_CTRL_CODE(c)) {
+			*p++ = UNIPHIER_SCP_PACKET_ESC;
+			*p++ = c ^ BIT(7);
+		} else {
+			*p++ = c;
+		}
+	}
+
+	*p++ = UNIPHIER_SCP_PACKET_END;
+
+	uniphier_scp_send_packet(packet, p - packet);
+}
+
+#define UNIPHIER_SCP_CMD(name, ...)					\
+static const uint8_t __uniphier_scp_##name##_cmd[] = {			\
+	__VA_ARGS__							\
+};									\
+void uniphier_scp_##name(void)						\
+{									\
+	uniphier_scp_send_cmd(__uniphier_scp_##name##_cmd,		\
+			      ARRAY_SIZE(__uniphier_scp_##name##_cmd));	\
+}
+
+UNIPHIER_SCP_CMD(open_com, 0x00, 0x00, 0x05)
+UNIPHIER_SCP_CMD(system_off, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0x01)
+UNIPHIER_SCP_CMD(system_reset, 0x00, 0x02, 0x00)
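
uniphier_scp_send_cmd() frames each command as START, length, payload, END, and byte-stuffs the payload: any byte in the 0xA0-0xA6 control range is replaced by ESC followed by the byte with bit 7 flipped, so control codes never appear literally inside a packet. A standalone encoder mirroring that scheme (the sample command bytes are made up):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PKT_START   0xA0
    #define PKT_END     0xA5
    #define PKT_ESC     0xA6
    #define IS_CTRL(c)  (0xA0 <= (c) && (c) <= 0xA6)

    static size_t encode(const uint8_t *cmd, size_t len, uint8_t *out)
    {
        uint8_t *p = out;

        *p++ = PKT_START;
        *p++ = (uint8_t)len;

        for (size_t i = 0; i < len; i++) {
            uint8_t c = cmd[i];

            if (IS_CTRL(c)) {
                *p++ = PKT_ESC;
                *p++ = c ^ 0x80;  /* flip bit 7 */
            } else {
                *p++ = c;
            }
        }

        *p++ = PKT_END;
        return p - out;
    }

    int main(void)
    {
        const uint8_t cmd[] = { 0x00, 0xA5, 0x05 };  /* 0xA5 must be escaped */
        uint8_t pkt[16];
        size_t n = encode(cmd, sizeof(cmd), pkt);

        for (size_t i = 0; i < n; i++)
            printf("%02x ", pkt[i]);
        printf("\n");  /* prints: a0 03 00 a6 25 05 a5 */
        return 0;
    }
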
diff --git a/plat/socionext/uniphier/uniphier_smp.S b/plat/socionext/uniphier/uniphier_smp.S
new file mode 100644
index 0000000..d6cb9ff
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_smp.S
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+	.globl	uniphier_warmboot_entrypoint
+	.globl	uniphier_fake_pwr_down
+
+func uniphier_warmboot_entrypoint
+	mrs	x0, mpidr_el1
+	mov_imm	x1, MPIDR_AFFINITY_MASK
+	and	x0, x0, x1
+	b	1f
+0:	wfe
+1:	ldr	x1, uniphier_holding_pen_release
+	cmp	x1, x0
+	b.ne	0b
+	ldr	x0, uniphier_sec_entrypoint
+	br	x0
+endfunc uniphier_warmboot_entrypoint
+
+func uniphier_fake_pwr_down
+	bl	disable_mmu_icache_el3
+	b	uniphier_warmboot_entrypoint
+endfunc uniphier_fake_pwr_down
diff --git a/plat/socionext/uniphier/uniphier_soc_info.c b/plat/socionext/uniphier/uniphier_soc_info.c
new file mode 100644
index 0000000..55688f3
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_soc_info.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_REVISION		0x5f800000
+
+static unsigned int uniphier_get_revision_field(unsigned int mask,
+						unsigned int shift)
+{
+	uint32_t revision = mmio_read_32(UNIPHIER_REVISION);
+
+	return (revision >> shift) & mask;
+}
+
+unsigned int uniphier_get_soc_type(void)
+{
+	return uniphier_get_revision_field(0xff, 16);
+}
+
+unsigned int uniphier_get_soc_model(void)
+{
+	return uniphier_get_revision_field(0x07, 8);
+}
+
+unsigned int uniphier_get_soc_revision(void)
+{
+	return uniphier_get_revision_field(0x1f, 0);
+}
+
+unsigned int uniphier_get_soc_id(void)
+{
+	uint32_t type = uniphier_get_soc_type();
+
+	switch (type) {
+	case 0x31:
+		return UNIPHIER_SOC_LD11;
+	case 0x32:
+		return UNIPHIER_SOC_LD20;
+	case 0x35:
+		return UNIPHIER_SOC_PXS3;
+	default:
+		return UNIPHIER_SOC_UNKNOWN;
+	}
+}
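
The revision register packs the SoC type in bits [23:16], the model in bits [10:8] and the revision in bits [4:0]; the helpers above are plain shift-and-mask reads. A trivial standalone decode of a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int get_field(uint32_t reg, unsigned int mask,
                                  unsigned int shift)
    {
        return (reg >> shift) & mask;
    }

    int main(void)
    {
        uint32_t revision = 0x00320102;  /* made-up sample value */

        printf("type  0x%02x\n", get_field(revision, 0xff, 16));  /* 0x32 -> LD20 */
        printf("model %u\n",     get_field(revision, 0x07, 8));   /* 1 */
        printf("rev   %u\n",     get_field(revision, 0x1f, 0));   /* 2 */
        return 0;
    }
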
diff --git a/plat/socionext/uniphier/uniphier_syscnt.c b/plat/socionext/uniphier/uniphier_syscnt.c
new file mode 100644
index 0000000..d7bcaf8
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_syscnt.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return 50000000;
+}
diff --git a/plat/socionext/uniphier/uniphier_tbbr.c b/plat/socionext/uniphier/uniphier_tbbr.c
new file mode 100644
index 0000000..1c83411
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_tbbr.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+extern char uniphier_rotpk_hash[], uniphier_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			unsigned int *flags)
+{
+	*key_ptr = uniphier_rotpk_hash;
+	*key_len = uniphier_rotpk_hash_end - uniphier_rotpk_hash;
+	*flags = ROTPK_IS_HASH;
+
+	return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	/*
+	 * There is no non-volatile counter support.  To protect the system
+	 * against rollback, update the ROT key instead.
+	 */
+	*nv_ctr = 0;
+
+	return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 0;
+}
diff --git a/plat/socionext/uniphier/uniphier_topology.c b/plat/socionext/uniphier/uniphier_topology.c
new file mode 100644
index 0000000..1f96f58
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_topology.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform.h>
+
+#include "uniphier.h"
+
+static unsigned char uniphier_power_domain_tree_desc[UNIPHIER_CLUSTER_COUNT + 1];
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	int i;
+
+	uniphier_power_domain_tree_desc[0] = UNIPHIER_CLUSTER_COUNT;
+
+	for (i = 0; i < UNIPHIER_CLUSTER_COUNT; i++)
+		uniphier_power_domain_tree_desc[i + 1] =
+						UNIPHIER_MAX_CPUS_PER_CLUSTER;
+
+	return uniphier_power_domain_tree_desc;
+}
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cluster_id >= UNIPHIER_CLUSTER_COUNT)
+		return -1;
+
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cpu_id >= UNIPHIER_MAX_CPUS_PER_CLUSTER)
+		return -1;
+
+	return uniphier_calc_core_pos(mpidr);
+}
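
plat_core_pos_by_mpidr() validates the Aff1 (cluster) and Aff0 (cpu) fields before delegating to uniphier_calc_core_pos(), which is defined elsewhere in the port. The sketch below assumes the common linear mapping cluster * max_cpus + cpu and uses illustrative limits; it is not the platform's actual topology.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CLUSTERS          2  /* illustrative limits */
    #define MAX_CPUS_PER_CLUSTER  4
    #define AFF_MASK              0xff

    /* the usual linear mapping; the real helper is uniphier_calc_core_pos() */
    static int core_pos_by_mpidr(uint64_t mpidr)
    {
        unsigned int cluster = (mpidr >> 8) & AFF_MASK;  /* Aff1 */
        unsigned int cpu = mpidr & AFF_MASK;             /* Aff0 */

        if (cluster >= MAX_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER)
            return -1;

        return cluster * MAX_CPUS_PER_CLUSTER + cpu;
    }

    int main(void)
    {
        printf("0x000 -> %d\n", core_pos_by_mpidr(0x000));  /* cluster 0, cpu 0 -> 0 */
        printf("0x103 -> %d\n", core_pos_by_mpidr(0x103));  /* cluster 1, cpu 3 -> 7 */
        printf("0x204 -> %d\n", core_pos_by_mpidr(0x204));  /* out of range -> -1 */
        return 0;
    }
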
diff --git a/plat/socionext/uniphier/uniphier_usb.c b/plat/socionext/uniphier/uniphier_usb.c
new file mode 100644
index 0000000..4be0e90
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_usb.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <io/io_block.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <sys/types.h>
+#include <utils_def.h>
+
+#include "uniphier.h"
+
+#define UNIPHIER_LD11_USB_DESC_BASE	0x30010000
+#define UNIPHIER_LD20_USB_DESC_BASE	0x30014000
+#define UNIPHIER_PXS3_USB_DESC_BASE	0x30014000
+
+#define UNIPHIER_SRB_OCM_CONT		0x61200000
+
+struct uniphier_ld11_trans_op {
+	uint8_t __pad[48];
+};
+
+struct uniphier_ld11_op {
+	uint8_t __pad[56];
+	struct uniphier_ld11_trans_op *trans_op;
+	void *__pad2;
+	void *dev_desc;
+};
+
+struct uniphier_ld20_trans_op {
+	uint8_t __pad[40];
+};
+
+struct uniphier_ld20_op {
+	uint8_t __pad[192];
+	struct uniphier_ld20_trans_op *trans_op;
+	void *__pad2;
+	void *dev_desc;
+};
+
+struct uniphier_pxs3_op {
+	uint8_t __pad[184];
+	struct uniphier_ld20_trans_op *trans_op;
+	void *__pad2;
+	void *dev_desc;
+};
+
+static int (*__uniphier_usb_read)(int lba, uintptr_t buf, size_t size);
+
+static void uniphier_ld11_usb_init(void)
+{
+	struct uniphier_ld11_op *op = (void *)UNIPHIER_LD11_USB_DESC_BASE;
+
+	op->trans_op = (void *)(op + 1);
+
+	op->dev_desc = op->trans_op + 1;
+}
+
+static int uniphier_ld11_usb_read(int lba, uintptr_t buf, size_t size)
+{
+	static int (*rom_usb_read)(uintptr_t desc, unsigned int lba,
+				   unsigned int size, uintptr_t buf);
+	uintptr_t func_addr;
+
+	func_addr = uniphier_get_soc_revision() == 1 ? 0x3880 : 0x3958;
+	rom_usb_read = (__typeof(rom_usb_read))func_addr;
+
+	return rom_usb_read(UNIPHIER_LD11_USB_DESC_BASE, lba, size, buf);
+}
+
+static void uniphier_ld20_usb_init(void)
+{
+	struct uniphier_ld20_op *op = (void *)UNIPHIER_LD20_USB_DESC_BASE;
+
+	op->trans_op = (void *)(op + 1);
+
+	op->dev_desc = op->trans_op + 1;
+}
+
+static int uniphier_ld20_usb_read(int lba, uintptr_t buf, size_t size)
+{
+	static int (*rom_usb_read)(uintptr_t desc, unsigned int lba,
+				   unsigned int size, uintptr_t buf);
+	int ret;
+
+	rom_usb_read = (__typeof(rom_usb_read))0x37f0;
+
+	mmio_write_32(UNIPHIER_SRB_OCM_CONT, 0x1ff);
+
+	/* ROM-API - return 1 on success, 0 on error */
+	ret = rom_usb_read(UNIPHIER_LD20_USB_DESC_BASE, lba, size, buf);
+
+	mmio_write_32(UNIPHIER_SRB_OCM_CONT, 0);
+
+	return ret ? 0 : -1;
+}
+
+static void uniphier_pxs3_usb_init(void)
+{
+	struct uniphier_pxs3_op *op = (void *)UNIPHIER_PXS3_USB_DESC_BASE;
+
+	op->trans_op = (void *)(op + 1);
+
+	op->dev_desc = op->trans_op + 1;
+}
+
+static int uniphier_pxs3_usb_read(int lba, uintptr_t buf, size_t size)
+{
+	static int (*rom_usb_read)(uintptr_t desc, unsigned int lba,
+				   unsigned int size, uintptr_t buf);
+	int ret;
+
+	rom_usb_read = (__typeof(rom_usb_read))0x39e8;
+
+	/* ROM-API - return 1 on success, 0 on error */
+	ret = rom_usb_read(UNIPHIER_PXS3_USB_DESC_BASE, lba, size, buf);
+
+	return ret ? 0 : -1;
+}
+
+struct uniphier_usb_rom_param {
+	void (*init)(void);
+	int (*read)(int lba, uintptr_t buf, size_t size);
+};
+
+static const struct uniphier_usb_rom_param uniphier_usb_rom_params[] = {
+	[UNIPHIER_SOC_LD11] = {
+		.init = uniphier_ld11_usb_init,
+		.read = uniphier_ld11_usb_read,
+	},
+	[UNIPHIER_SOC_LD20] = {
+		.init = uniphier_ld20_usb_init,
+		.read = uniphier_ld20_usb_read,
+	},
+	[UNIPHIER_SOC_PXS3] = {
+		.init = uniphier_pxs3_usb_init,
+		.read = uniphier_pxs3_usb_read,
+	},
+};
+
+static size_t uniphier_usb_read(int lba, uintptr_t buf, size_t size)
+{
+	int ret;
+
+	inv_dcache_range(buf, size);
+
+	ret = __uniphier_usb_read(lba, buf, size);
+
+	inv_dcache_range(buf, size);
+
+	return ret ? 0 : size;
+}
+
+static struct io_block_dev_spec uniphier_usb_dev_spec = {
+	.buffer = {
+		.offset = UNIPHIER_BLOCK_BUF_BASE,
+		.length = UNIPHIER_BLOCK_BUF_SIZE,
+	},
+	.ops = {
+		.read = uniphier_usb_read,
+	},
+	.block_size = 512,
+};
+
+int uniphier_usb_init(unsigned int soc, uintptr_t *block_dev_spec)
+{
+	const struct uniphier_usb_rom_param *param;
+
+	assert(soc < ARRAY_SIZE(uniphier_usb_rom_params));
+	param = &uniphier_usb_rom_params[soc];
+
+	if (param->init)
+		param->init();
+
+	__uniphier_usb_read = param->read;
+
+	*block_dev_spec = (uintptr_t)&uniphier_usb_dev_spec;
+
+	return 0;
+}
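
Note the double return-value adaptation above: the Boot ROM loaders report 1 on success and 0 on error, the per-SoC wrappers convert that to 0/-1, and uniphier_usb_read() finally converts to the byte count expected by the io_block layer (size on success, 0 on error). A compact standalone illustration, with rom_read() standing in for the ROM entry point:

    #include <stddef.h>
    #include <stdio.h>

    /* stand-in for the ROM entry point: 1 = success, 0 = error */
    static int rom_read(int lba)
    {
        return lba >= 0;
    }

    /* per-SoC wrapper: 0 on success, -1 on error */
    static int soc_usb_read(int lba)
    {
        return rom_read(lba) ? 0 : -1;
    }

    /* io_block-style read: returns the number of bytes read (0 on error) */
    static size_t usb_read(int lba, size_t size)
    {
        return soc_usb_read(lba) ? 0 : size;
    }

    int main(void)
    {
        printf("ok:    %zu bytes\n", usb_read(8, 512));
        printf("error: %zu bytes\n", usb_read(-1, 512));
        return 0;
    }
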
diff --git a/plat/socionext/uniphier/uniphier_xlat_setup.c b/plat/socionext/uniphier/uniphier_xlat_setup.c
new file mode 100644
index 0000000..6532c49
--- /dev/null
+++ b/plat/socionext/uniphier/uniphier_xlat_setup.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_v2.h>
+
+#define UNIPHIER_OCM_REGION_BASE	0x30000000
+#define UNIPHIER_OCM_REGION_SIZE	0x00040000
+
+#define UNIPHIER_REG_REGION_BASE	0x50000000
+#define UNIPHIER_REG_REGION_SIZE	0x20000000
+
+void uniphier_mmap_setup(uintptr_t total_base, size_t total_size,
+			 const struct mmap_region *mmap)
+{
+	VERBOSE("Trusted RAM seen by this BL image: %p - %p\n",
+		(void *)total_base, (void *)(total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size,
+			MT_MEMORY | MT_RW | MT_SECURE);
+
+	/* remap the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *)BL_CODE_BASE, (void *)BL_CODE_END);
+	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
+			round_up(BL_CODE_END, PAGE_SIZE) - BL_CODE_BASE,
+			MT_CODE | MT_SECURE);
+
+	/* remap the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *)BL_COHERENT_RAM_BASE, (void *)BL_COHERENT_RAM_END);
+	mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
+			BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+			MT_DEVICE | MT_RW | MT_SECURE);
+
+	/*
+	 * On-chip SRAM region: this must be mapped with the DEVICE attribute
+	 * because the USB load functions provided by the Boot ROM use this
+	 * region as a work area but do not maintain cache coherency.
+	 */
+	mmap_add_region(UNIPHIER_OCM_REGION_BASE, UNIPHIER_OCM_REGION_BASE,
+			UNIPHIER_OCM_REGION_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE);
+
+	/* register region */
+	mmap_add_region(UNIPHIER_REG_REGION_BASE, UNIPHIER_REG_REGION_BASE,
+			UNIPHIER_REG_REGION_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE);
+
+	/* additional regions if needed */
+	if (mmap)
+		mmap_add(mmap);
+
+	init_xlat_tables();
+}
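
The optional mmap argument lets each BL image append regions of its own: it is a zero-terminated array that uniphier_mmap_setup() simply forwards to mmap_add() after registering the fixed regions. The standalone sketch below models only that list-plus-terminator convention; struct region is a cut-down stand-in for mmap_region_t and the sample region is made up.

    #include <stdint.h>
    #include <stdio.h>

    /* cut-down stand-in for mmap_region_t: pa, va, size, attributes */
    struct region {
        uint64_t pa, va, size;
        unsigned int attr;
    };

    /* zero-terminated list, like the 'mmap' argument of uniphier_mmap_setup() */
    static const struct region extra_regions[] = {
        { 0x68000000, 0x68000000, 0x00200000, 1 /* device */ },
        { 0 },  /* terminator */
    };

    static void add_regions(const struct region *r)
    {
        for (; r != NULL && r->size != 0; r++)
            printf("map 0x%llx..0x%llx (attr %u)\n",
                   (unsigned long long)r->va,
                   (unsigned long long)(r->va + r->size - 1),
                   r->attr);
    }

    int main(void)
    {
        add_regions(extra_regions);
        return 0;
    }
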
diff --git a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
new file mode 100644
index 0000000..d7a7d4e
--- /dev/null
+++ b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <generic_delay_timer.h>
+#include <mmio.h>
+#include <platform.h>
+#include <xlat_tables.h>
+#include "../zynqmp_private.h"
+
+/*
+ * Table of regions to map using the MMU.
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * configure_mmu_elx() will give the available subset of that.
+ */
+const mmap_region_t plat_arm_mmap[] = {
+	{ DEVICE0_BASE, DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
+	{ DEVICE1_BASE, DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
+	{ CRF_APB_BASE, CRF_APB_BASE, CRF_APB_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
+	{0}
+};
+
+static unsigned int zynqmp_get_silicon_ver(void)
+{
+	static unsigned int ver;
+
+	if (!ver) {
+		ver = mmio_read_32(ZYNQMP_CSU_BASEADDR +
+				   ZYNQMP_CSU_VERSION_OFFSET);
+		ver &= ZYNQMP_SILICON_VER_MASK;
+		ver >>= ZYNQMP_SILICON_VER_SHIFT;
+	}
+
+	return ver;
+}
+
+unsigned int zynqmp_get_uart_clk(void)
+{
+	unsigned int ver = zynqmp_get_silicon_ver();
+
+	switch (ver) {
+	case ZYNQMP_CSU_VERSION_VELOCE:
+		return 48000;
+	case ZYNQMP_CSU_VERSION_EP108:
+		return 25000000;
+	case ZYNQMP_CSU_VERSION_QEMU:
+		return 133000000;
+	}
+
+	return 100000000;
+}
+
+#if LOG_LEVEL >= LOG_LEVEL_NOTICE
+static const struct {
+	unsigned int id;
+	char *name;
+} zynqmp_devices[] = {
+	{
+		.id = 0x10,
+		.name = "3EG",
+	},
+	{
+		.id = 0x11,
+		.name = "2EG",
+	},
+	{
+		.id = 0x20,
+		.name = "5EV",
+	},
+	{
+		.id = 0x21,
+		.name = "4EV",
+	},
+	{
+		.id = 0x30,
+		.name = "7EV",
+	},
+	{
+		.id = 0x38,
+		.name = "9EG",
+	},
+	{
+		.id = 0x39,
+		.name = "6EG",
+	},
+	{
+		.id = 0x40,
+		.name = "11EG",
+	},
+	{
+		.id = 0x50,
+		.name = "15EG",
+	},
+	{
+		.id = 0x58,
+		.name = "19EG",
+	},
+	{
+		.id = 0x59,
+		.name = "17EG",
+	},
+};
+
+static unsigned int zynqmp_get_silicon_id(void)
+{
+	uint32_t id;
+
+	id = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_IDCODE_OFFSET);
+
+	id &= ZYNQMP_CSU_IDCODE_DEVICE_CODE_MASK | ZYNQMP_CSU_IDCODE_SVD_MASK;
+	id >>= ZYNQMP_CSU_IDCODE_SVD_SHIFT;
+
+	return id;
+}
+
+static char *zynqmp_get_silicon_idcode_name(void)
+{
+	unsigned int id;
+
+	id = zynqmp_get_silicon_id();
+	for (size_t i = 0; i < ARRAY_SIZE(zynqmp_devices); i++) {
+		if (zynqmp_devices[i].id == id)
+			return zynqmp_devices[i].name;
+	}
+	return "UNKN";
+}
+
+static unsigned int zynqmp_get_rtl_ver(void)
+{
+	uint32_t ver;
+
+	ver = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_VERSION_OFFSET);
+	ver &= ZYNQMP_RTL_VER_MASK;
+	ver >>= ZYNQMP_RTL_VER_SHIFT;
+
+	return ver;
+}
+
+static char *zynqmp_print_silicon_idcode(void)
+{
+	uint32_t id, maskid, tmp;
+
+	id = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_IDCODE_OFFSET);
+
+	tmp = id;
+	tmp &= ZYNQMP_CSU_IDCODE_XILINX_ID_MASK |
+	       ZYNQMP_CSU_IDCODE_FAMILY_MASK;
+	maskid = ZYNQMP_CSU_IDCODE_XILINX_ID << ZYNQMP_CSU_IDCODE_XILINX_ID_SHIFT |
+		 ZYNQMP_CSU_IDCODE_FAMILY << ZYNQMP_CSU_IDCODE_FAMILY_SHIFT;
+	if (tmp != maskid) {
+		ERROR("Incorrect XILINX IDCODE 0x%x, maskid 0x%x\n", id, maskid);
+		return "UNKN";
+	}
+	VERBOSE("Xilinx IDCODE 0x%x\n", id);
+	return zynqmp_get_silicon_idcode_name();
+}
+
+static unsigned int zynqmp_get_ps_ver(void)
+{
+	uint32_t ver = mmio_read_32(ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_VERSION_OFFSET);
+
+	ver &= ZYNQMP_PS_VER_MASK;
+	ver >>= ZYNQMP_PS_VER_SHIFT;
+
+	return ver + 1;
+}
+
+static void zynqmp_print_platform_name(void)
+{
+	unsigned int ver = zynqmp_get_silicon_ver();
+	unsigned int rtl = zynqmp_get_rtl_ver();
+	char *label = "Unknown";
+
+	switch (ver) {
+	case ZYNQMP_CSU_VERSION_VELOCE:
+		label = "VELOCE";
+		break;
+	case ZYNQMP_CSU_VERSION_EP108:
+		label = "EP108";
+		break;
+	case ZYNQMP_CSU_VERSION_QEMU:
+		label = "QEMU";
+		break;
+	case ZYNQMP_CSU_VERSION_SILICON:
+		label = "silicon";
+		break;
+	}
+
+	NOTICE("ATF running on XCZU%s/%s v%d/RTL%d.%d at 0x%x%s\n",
+	       zynqmp_print_silicon_idcode(), label, zynqmp_get_ps_ver(),
+	       (rtl & 0xf0) >> 4, rtl & 0xf, BL31_BASE,
+	       zynqmp_is_pmu_up() ? ", with PMU firmware" : "");
+}
+#else
+static inline void zynqmp_print_platform_name(void) { }
+#endif
+
+/*
+ * Indicator for PMUFW discovery:
+ *   0 = No FW found
+ *   non-zero = FW is present
+ */
+static int zynqmp_pmufw_present;
+
+/*
+ * zynqmp_discover_pmufw - Discover presence of PMUFW
+ *
+ * Discover the presence of PMUFW and store it for later run-time queries
+ * through zynqmp_is_pmu_up.
+ * NOTE: This discovery method is fragile and will break if:
+ *  - setting FW_PRESENT is left to PMUFW itself, so a given PMUFW build may
+ *    omit it (be it by error or intentionally)
+ *  - the XPPU/XMPU restricts ATF's access to the PMU address space
+ */
+static int zynqmp_discover_pmufw(void)
+{
+	zynqmp_pmufw_present = mmio_read_32(PMU_GLOBAL_CNTRL);
+	zynqmp_pmufw_present &= PMU_GLOBAL_CNTRL_FW_IS_PRESENT;
+
+	return !!zynqmp_pmufw_present;
+}
+
+/*
+ * zynqmp_is_pmu_up - Find if PMU firmware is up and running
+ *
+ * Return 0 if firmware is not available, non 0 otherwise
+ */
+int zynqmp_is_pmu_up(void)
+{
+	return zynqmp_pmufw_present;
+}
+
+unsigned int zynqmp_get_bootmode(void)
+{
+	uint32_t r = mmio_read_32(CRL_APB_BOOT_MODE_USER);
+
+	return r & CRL_APB_BOOT_MODE_MASK;
+}
+
+void zynqmp_config_setup(void)
+{
+	zynqmp_discover_pmufw();
+	zynqmp_print_platform_name();
+	generic_delay_timer_init();
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	unsigned int ver = zynqmp_get_silicon_ver();
+
+	switch (ver) {
+	case ZYNQMP_CSU_VERSION_VELOCE:
+		return 10000;
+	case ZYNQMP_CSU_VERSION_EP108:
+		return 4000000;
+	case ZYNQMP_CSU_VERSION_QEMU:
+		return 50000000;
+	}
+
+	return mmio_read_32(IOU_SCNTRS_BASEFREQ);
+}
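
zynqmp_get_silicon_idcode_name() is a linear scan of the id/name table with a fixed "UNKN" fallback. The standalone sketch below reproduces only a few table entries to show the lookup shape; the full table is the one above.

    #include <stddef.h>
    #include <stdio.h>

    static const struct {
        unsigned int id;
        const char *name;
    } devices[] = {
        { 0x10, "3EG" },
        { 0x20, "5EV" },
        { 0x30, "7EV" },
    };

    static const char *idcode_name(unsigned int id)
    {
        for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
            if (devices[i].id == id)
                return devices[i].name;
        return "UNKN";  /* unknown device code */
    }

    int main(void)
    {
        printf("0x20 -> XCZU%s\n", idcode_name(0x20));
        printf("0x99 -> XCZU%s\n", idcode_name(0x99));
        return 0;
    }
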
diff --git a/plat/xilinx/zynqmp/aarch64/zynqmp_helpers.S b/plat/xilinx/zynqmp/aarch64/zynqmp_helpers.S
new file mode 100644
index 0000000..ad960f4
--- /dev/null
+++ b/plat/xilinx/zynqmp/aarch64/zynqmp_helpers.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <gicv2.h>
+#include <platform_def.h>
+
+	.globl	plat_secondary_cold_boot_setup
+	.globl	plat_is_my_cpu_primary
+
+	/* -----------------------------------------------------
+	 * void plat_secondary_cold_boot_setup (void);
+	 *
+	 * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset e.g
+	 * mark the cpu's presence, mechanism to place it in a
+	 * holding pen etc.
+	 * TODO: Should we read the PSYS register to make sure
+	 * that the request has gone through?
+	 * -----------------------------------------------------
+	 */
+func plat_secondary_cold_boot_setup
+	mrs	x0, mpidr_el1
+
+	/* Deactivate the gic cpu interface */
+	ldr	x1, =BASE_GICC_BASE
+	mov	w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
+	orr	w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
+	str	w0, [x1, #GICC_CTLR]
+
+	/*
+	 * There is no sane reason for a secondary cpu to come back from
+	 * here; it will be powered on and reset via the cpu_on pm api.
+	 */
+	dsb	sy
+1:
+	no_ret	plat_panic_handler
+endfunc plat_secondary_cold_boot_setup
+
+func plat_is_my_cpu_primary
+	mov	x9, x30
+	bl	plat_my_core_pos
+	cmp	x0, #ZYNQMP_PRIMARY_CPU
+	cset	x0, eq
+	ret	x9
+endfunc plat_is_my_cpu_primary
diff --git a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
new file mode 100644
index 0000000..1edbd0f
--- /dev/null
+++ b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include "zynqmp_private.h"
+
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/*
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ */
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	assert(sec_state_is_valid(type));
+
+	if (type == NON_SECURE)
+		return &bl33_image_ep_info;
+
+	return &bl32_image_ep_info;
+}
+
+/*
+ * Perform any BL31 specific platform actions. Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
+ * are potentially lost. This needs to be done before the MMU is initialized
+ * so that the memory layout can be used while creating page tables.
+ */
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+			       void *plat_params_from_bl2)
+{
+	/* Initialize the console to provide early debug support */
+	console_init(ZYNQMP_UART_BASE, zynqmp_get_uart_clk(),
+		     ZYNQMP_UART_BAUDRATE);
+
+	/* Initialize the platform config for future decision making */
+	zynqmp_config_setup();
+
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+	/*
+	 * Do initial security configuration to allow DRAM/device access. On
+	 * Base ZYNQMP only DRAM security is programmable (via TrustZone), but
+	 * other platforms might have more programmable security devices
+	 * present.
+	 */
+
+	/* Populate common information for BL32 and BL33 */
+	SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+	if (zynqmp_get_bootmode() == ZYNQMP_BOOTMODE_JTAG) {
+		/* use build time defaults in JTAG boot mode */
+		bl32_image_ep_info.pc = BL32_BASE;
+		bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();
+		bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+		bl33_image_ep_info.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
+						  DISABLE_ALL_EXCEPTIONS);
+	} else {
+		/* use parameters from FSBL */
+		fsbl_atf_handover(&bl32_image_ep_info, &bl33_image_ep_info);
+	}
+
+	NOTICE("BL31: Secure code at 0x%lx\n", bl32_image_ep_info.pc);
+	NOTICE("BL31: Non secure code at 0x%lx\n", bl33_image_ep_info.pc);
+}
+
+/* Enable the test setup */
+#ifndef ZYNQMP_TESTING
+static void zynqmp_testing_setup(void) { }
+#else
+static void zynqmp_testing_setup(void)
+{
+	uint32_t actlr_el3, actlr_el2;
+
+	/* Enable CPU ACTLR and L2ACTLR RW access from the non-secure world */
+	actlr_el3 = read_actlr_el3();
+	actlr_el2 = read_actlr_el2();
+
+	actlr_el3 |= ACTLR_EL3_L2ACTLR_BIT | ACTLR_EL3_CPUACTLR_BIT;
+	actlr_el2 |= ACTLR_EL3_L2ACTLR_BIT | ACTLR_EL3_CPUACTLR_BIT;
+	write_actlr_el3(actlr_el3);
+	write_actlr_el2(actlr_el2);
+}
+#endif
+
+void bl31_platform_setup(void)
+{
+	/* Initialize the gic cpu and distributor interfaces */
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+	zynqmp_testing_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+}
+
+/*
+ * Perform the very early platform specific architectural setup here.
+ */
+void bl31_plat_arch_setup(void)
+{
+	plat_arm_interconnect_init();
+	plat_arm_interconnect_enter_coherency();
+
+	arm_setup_page_tables(BL31_BASE,
+			      BL31_END - BL31_BASE,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END,
+			      BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END);
+	enable_mmu_el3(0);
+}
diff --git a/plat/xilinx/zynqmp/include/plat_macros.S b/plat/xilinx/zynqmp/include/plat_macros.S
new file mode 100644
index 0000000..41bc174
--- /dev/null
+++ b/plat/xilinx/zynqmp/include/plat_macros.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <arm_macros.S>
+#include <cci_macros.S>
+#include "../zynqmp_def.h"
+
+	/* ---------------------------------------------
+	 * The below required platform porting macro
+	 * prints out relevant GIC and CCI registers
+	 * whenever an unhandled exception is taken in
+	 * BL31.
+	 * Clobbers: x0 - x10, x16, x17, sp
+	 * ---------------------------------------------
+	 */
+	.macro plat_crash_print_regs
+	mov_imm	x17, BASE_GICC_BASE
+	mov_imm	x16, BASE_GICD_BASE
+	arm_print_gic_regs
+	print_cci_regs
+	.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/xilinx/zynqmp/include/platform_def.h b/plat/xilinx/zynqmp/include/platform_def.h
new file mode 100644
index 0000000..5dd8d86
--- /dev/null
+++ b/plat/xilinx/zynqmp/include/platform_def.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <gic_common.h>
+#include <interrupt_props.h>
+#include "../zynqmp_def.h"
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE 0x440
+
+#define PLATFORM_CORE_COUNT		4
+#define PLAT_NUM_POWER_DOMAINS		5
+#define PLAT_MAX_PWR_LVL		1
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL31 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL31 debug size plus a
+ * little space for growth.
+ */
+#ifndef ZYNQMP_ATF_MEM_BASE
+# define BL31_BASE			0xfffea000
+# define BL31_LIMIT			0xffffffff
+#else
+# define BL31_BASE			(ZYNQMP_ATF_MEM_BASE)
+# define BL31_LIMIT			(ZYNQMP_ATF_MEM_BASE + ZYNQMP_ATF_MEM_SIZE - 1)
+# ifdef ZYNQMP_ATF_MEM_PROGBITS_SIZE
+#  define BL31_PROGBITS_LIMIT		(ZYNQMP_ATF_MEM_BASE + ZYNQMP_ATF_MEM_PROGBITS_SIZE - 1)
+# endif
+#endif
+
+/*******************************************************************************
+ * BL32 specific defines.
+ ******************************************************************************/
+#ifndef ZYNQMP_BL32_MEM_BASE
+# define BL32_BASE			0x60000000
+# define BL32_LIMIT			0x7fffffff
+#else
+# define BL32_BASE			(ZYNQMP_BL32_MEM_BASE)
+# define BL32_LIMIT			(ZYNQMP_BL32_MEM_BASE + ZYNQMP_BL32_MEM_SIZE - 1)
+#endif
+
+/*******************************************************************************
+ * BL33 specific defines.
+ ******************************************************************************/
+#ifndef PRELOADED_BL33_BASE
+# define PLAT_ARM_NS_IMAGE_OFFSET	0x8000000
+#else
+# define PLAT_ARM_NS_IMAGE_OFFSET	PRELOADED_BL33_BASE
+#endif
+
+/*******************************************************************************
+ * TSP  specific defines.
+ ******************************************************************************/
+#define TSP_SEC_MEM_BASE		BL32_BASE
+#define TSP_SEC_MEM_SIZE		(BL32_LIMIT - BL32_BASE + 1)
+
+/* ID of the secure physical generic timer interrupt used by the TSP */
+#define TSP_IRQ_SEC_PHY_TIMER		ARM_IRQ_SEC_PHY_TIMER
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)
+#define MAX_MMAP_REGIONS		7
+#define MAX_XLAT_TABLES			5
+
+#define CACHE_WRITEBACK_SHIFT   6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+#define PLAT_ARM_GICD_BASE	BASE_GICD_BASE
+#define PLAT_ARM_GICC_BASE	BASE_GICC_BASE
+/*
+ * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_ARM_G1S_IRQ_PROPS(grp) \
+	INTR_PROP_DESC(ARM_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_LEVEL), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE), \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+			GIC_INTR_CFG_EDGE)
+
+#define PLAT_ARM_G0_IRQ_PROPS(grp)
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/xilinx/zynqmp/plat_psci.c b/plat/xilinx/zynqmp/plat_psci.c
new file mode 100644
index 0000000..c9fd361
--- /dev/null
+++ b/plat/xilinx/zynqmp/plat_psci.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <gicv2.h>
+#include <mmio.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <psci.h>
+#include "pm_api_sys.h"
+#include "pm_client.h"
+#include "zynqmp_private.h"
+
+uintptr_t zynqmp_sec_entry;
+
+void zynqmp_cpu_standby(plat_local_state_t cpu_state)
+{
+	VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);
+
+	dsb();
+	wfi();
+}
+
+static int zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
+{
+	uint32_t r;
+	unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);
+
+	if (cpu_id == -1)
+		return PSCI_E_INTERN_FAIL;
+
+	/* program RVBAR */
+	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
+	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);
+
+	/* clear VINITHI */
+	r = mmio_read_32(APU_CONFIG_0);
+	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
+	mmio_write_32(APU_CONFIG_0, r);
+
+	/* clear power down request */
+	r = mmio_read_32(APU_PWRCTL);
+	r &= ~(1 << cpu_id);
+	mmio_write_32(APU_PWRCTL, r);
+
+	/* power up island */
+	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
+	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_TRIG, 1 << cpu_id);
+	/* FIXME: we should have a way to break out */
+	while (mmio_read_32(PMU_GLOBAL_REQ_PWRUP_STATUS) & (1 << cpu_id))
+		;
+
+	/* release core reset */
+	r = mmio_read_32(CRF_APB_RST_FPD_APU);
+	r &= ~((CRF_APB_RST_FPD_APU_ACPU_PWRON_RESET |
+			CRF_APB_RST_FPD_APU_ACPU_RESET) << cpu_id);
+	mmio_write_32(CRF_APB_RST_FPD_APU, r);
+
+	return PSCI_E_SUCCESS;
+}
+
+static int zynqmp_pwr_domain_on(u_register_t mpidr)
+{
+	unsigned int cpu_id = plat_core_pos_by_mpidr(mpidr);
+	const struct pm_proc *proc;
+
+	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);
+
+	if (cpu_id == -1)
+		return PSCI_E_INTERN_FAIL;
+
+	proc = pm_get_proc(cpu_id);
+
+	/* Send request to PMU to wake up selected APU CPU core */
+	pm_req_wakeup(proc->node_id, 1, zynqmp_sec_entry, REQ_ACK_BLOCKING);
+
+	return PSCI_E_SUCCESS;
+}
+
+static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	uint32_t r;
+	unsigned int cpu_id = plat_my_core_pos();
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	gicv2_cpuif_disable();
+
+	/* set power down request */
+	r = mmio_read_32(APU_PWRCTL);
+	r |= (1 << cpu_id);
+	mmio_write_32(APU_PWRCTL, r);
+}
+
+static void zynqmp_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	unsigned int cpu_id = plat_my_core_pos();
+	const struct pm_proc *proc = pm_get_proc(cpu_id);
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	gicv2_cpuif_disable();
+
+	/*
+	 * Send request to PMU to power down the appropriate APU CPU
+	 * core.
+	 * According to the PSCI specification, the CPU_off function takes
+	 * no resume address; the core can only be woken up again by
+	 * invoking CPU_on, which is when the resume address is set.
+	 */
+	pm_self_suspend(proc->node_id, MAX_LATENCY, PM_STATE_CPU_IDLE, 0);
+}
+
+static void zynqmp_nopmu_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	uint32_t r;
+	unsigned int cpu_id = plat_my_core_pos();
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	/* set power down request */
+	r = mmio_read_32(APU_PWRCTL);
+	r |= (1 << cpu_id);
+	mmio_write_32(APU_PWRCTL, r);
+
+	/* program RVBAR */
+	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
+	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);
+
+	/* clear VINITHI */
+	r = mmio_read_32(APU_CONFIG_0);
+	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
+	mmio_write_32(APU_CONFIG_0, r);
+
+	/* enable power up on IRQ */
+	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
+}
+
+static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+	unsigned int state;
+	unsigned int cpu_id = plat_my_core_pos();
+	const struct pm_proc *proc = pm_get_proc(cpu_id);
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	state = target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE ?
+		PM_STATE_SUSPEND_TO_RAM : PM_STATE_CPU_IDLE;
+
+	/* Send request to PMU to suspend this core */
+	pm_self_suspend(proc->node_id, MAX_LATENCY, state, zynqmp_sec_entry);
+
+	/* APU is to be turned off */
+	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
+		/* disable coherency */
+		plat_arm_interconnect_exit_coherency();
+	}
+}
+
+static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	gicv2_cpuif_enable();
+	gicv2_pcpu_distif_init();
+}
+
+static void zynqmp_nopmu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	uint32_t r;
+	unsigned int cpu_id = plat_my_core_pos();
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	/* disable power up on IRQ */
+	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_DIS, 1 << cpu_id);
+
+	/* clear powerdown bit */
+	r = mmio_read_32(APU_PWRCTL);
+	r &= ~(1 << cpu_id);
+	mmio_write_32(APU_PWRCTL, r);
+}
+
+static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	unsigned int cpu_id = plat_my_core_pos();
+	const struct pm_proc *proc = pm_get_proc(cpu_id);
+
+	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+			__func__, i, target_state->pwr_domain_state[i]);
+
+	/* Clear the APU power control register for this cpu */
+	pm_client_wakeup(proc);
+
+	/* enable coherency */
+	plat_arm_interconnect_enter_coherency();
+	/* APU was turned off */
+	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
+		plat_arm_gic_init();
+	} else {
+		gicv2_cpuif_enable();
+		gicv2_pcpu_distif_init();
+	}
+}
+
+/*******************************************************************************
+ * ZynqMP handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 zynqmp_nopmu_system_off(void)
+{
+	ERROR("ZynqMP System Off: operation not handled.\n");
+
+	/* disable coherency */
+	plat_arm_interconnect_exit_coherency();
+
+	panic();
+}
+
+static void __dead2 zynqmp_system_off(void)
+{
+	/* disable coherency */
+	plat_arm_interconnect_exit_coherency();
+
+	/* Send the power down request to the PMU */
+	pm_system_shutdown(PMF_SHUTDOWN_TYPE_SHUTDOWN,
+			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);
+
+	while (1)
+		wfi();
+}
+
+static void __dead2 zynqmp_nopmu_system_reset(void)
+{
+	/*
+	 * This currently triggers a full system reset, i.e. the whole
+	 * system, including RPUs, PMU, PL, etc., will be reset.
+	 */
+
+	/* disable coherency */
+	plat_arm_interconnect_exit_coherency();
+
+	/* bypass RPLL (needed on 1.0 silicon) */
+	uint32_t reg = mmio_read_32(CRL_APB_RPLL_CTRL);
+	reg |= CRL_APB_RPLL_CTRL_BYPASS;
+	mmio_write_32(CRL_APB_RPLL_CTRL, reg);
+
+	/* trigger system reset */
+	mmio_write_32(CRL_APB_RESET_CTRL, CRL_APB_RESET_CTRL_SOFT_RESET);
+
+	while (1)
+		wfi();
+}
+
+static void __dead2 zynqmp_system_reset(void)
+{
+	/* disable coherency */
+	plat_arm_interconnect_exit_coherency();
+
+	/* Send the system reset request to the PMU */
+	pm_system_shutdown(PMF_SHUTDOWN_TYPE_RESET,
+			   PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM);
+
+	while (1)
+		wfi();
+}
+
+int zynqmp_validate_power_state(unsigned int power_state,
+				psci_power_state_t *req_state)
+{
+	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
+
+	int pstate = psci_get_pstate_type(power_state);
+
+	assert(req_state);
+
+	/* Sanity check the requested state */
+	if (pstate == PSTATE_TYPE_STANDBY)
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
+	else
+		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_OFF_STATE;
+
+	/* We expect the 'state id' to be zero */
+	if (psci_get_pstate_id(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+int zynqmp_validate_ns_entrypoint(unsigned long ns_entrypoint)
+{
+	VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);
+
+	/* FIXME: Actually validate */
+	return PSCI_E_SUCCESS;
+}
+
+void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
+	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const struct plat_psci_ops zynqmp_psci_ops = {
+	.cpu_standby			= zynqmp_cpu_standby,
+	.pwr_domain_on			= zynqmp_pwr_domain_on,
+	.pwr_domain_off			= zynqmp_pwr_domain_off,
+	.pwr_domain_suspend		= zynqmp_pwr_domain_suspend,
+	.pwr_domain_on_finish		= zynqmp_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish	= zynqmp_pwr_domain_suspend_finish,
+	.system_off			= zynqmp_system_off,
+	.system_reset			= zynqmp_system_reset,
+	.validate_power_state		= zynqmp_validate_power_state,
+	.validate_ns_entrypoint		= zynqmp_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= zynqmp_get_sys_suspend_power_state,
+};
+
+static const struct plat_psci_ops zynqmp_nopmu_psci_ops = {
+	.cpu_standby			= zynqmp_cpu_standby,
+	.pwr_domain_on			= zynqmp_nopmu_pwr_domain_on,
+	.pwr_domain_off			= zynqmp_nopmu_pwr_domain_off,
+	.pwr_domain_suspend		= zynqmp_nopmu_pwr_domain_suspend,
+	.pwr_domain_on_finish		= zynqmp_pwr_domain_on_finish,
+	.pwr_domain_suspend_finish	= zynqmp_nopmu_pwr_domain_suspend_finish,
+	.system_off			= zynqmp_nopmu_system_off,
+	.system_reset			= zynqmp_nopmu_system_reset,
+	.validate_power_state		= zynqmp_validate_power_state,
+	.validate_ns_entrypoint		= zynqmp_validate_ns_entrypoint,
+	.get_sys_suspend_power_state	= zynqmp_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops.
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const struct plat_psci_ops **psci_ops)
+{
+	zynqmp_sec_entry = sec_entrypoint;
+
+	if (zynqmp_is_pmu_up())
+		*psci_ops = &zynqmp_psci_ops;
+	else
+		*psci_ops = &zynqmp_nopmu_psci_ops;
+
+	return 0;
+}
diff --git a/plat/xilinx/zynqmp/plat_startup.c b/plat/xilinx/zynqmp/plat_startup.c
new file mode 100644
index 0000000..3ec492e
--- /dev/null
+++ b/plat/xilinx/zynqmp/plat_startup.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include "zynqmp_def.h"
+
+/*
+ * ATFHandoffParams
+ * Parameter		bitfield	encoding
+ * -----------------------------------------------------------------------------
+ * Exec State		0		0 -> AArch64, 1 -> AArch32
+ * Endianness		1		0 -> LE, 1 -> BE
+ * Secure (TZ)		2		0 -> Non-secure, 1 -> Secure
+ * EL			3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
+ * CPU#			5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
+ */
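+
+/*
+ * Illustrative decode using the table above and the masks below: a flags
+ * value of 0x10 describes an AArch64 (bit 0 = 0), little-endian (bit 1 = 0),
+ * non-secure (bit 2 = 0) image entered in EL2 (bits 3:4 = 0b10) on A53_0
+ * (bits 5:6 = 0b00), which is the usual shape of a BL33 entry.
+ */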
+
+#define FSBL_FLAGS_ESTATE_SHIFT		0
+#define FSBL_FLAGS_ESTATE_MASK		(1 << FSBL_FLAGS_ESTATE_SHIFT)
+#define FSBL_FLAGS_ESTATE_A64		0
+#define FSBL_FLAGS_ESTATE_A32		1
+
+#define FSBL_FLAGS_ENDIAN_SHIFT		1
+#define FSBL_FLAGS_ENDIAN_MASK		(1 << FSBL_FLAGS_ENDIAN_SHIFT)
+#define FSBL_FLAGS_ENDIAN_LE		0
+#define FSBL_FLAGS_ENDIAN_BE		1
+
+#define FSBL_FLAGS_TZ_SHIFT		2
+#define FSBL_FLAGS_TZ_MASK		(1 << FSBL_FLAGS_TZ_SHIFT)
+#define FSBL_FLAGS_NON_SECURE		0
+#define FSBL_FLAGS_SECURE		1
+
+#define FSBL_FLAGS_EL_SHIFT		3
+#define FSBL_FLAGS_EL_MASK		(3 << FSBL_FLAGS_EL_SHIFT)
+#define FSBL_FLAGS_EL0			0
+#define FSBL_FLAGS_EL1			1
+#define FSBL_FLAGS_EL2			2
+#define FSBL_FLAGS_EL3			3
+
+#define FSBL_FLAGS_CPU_SHIFT		5
+#define FSBL_FLAGS_CPU_MASK		(3 << FSBL_FLAGS_CPU_SHIFT)
+#define FSBL_FLAGS_A53_0		0
+#define FSBL_FLAGS_A53_1		1
+#define FSBL_FLAGS_A53_2		2
+#define FSBL_FLAGS_A53_3		3
+
+#define FSBL_MAX_PARTITIONS		8
+
+/* Structure corresponding to each partition entry */
+struct xfsbl_partition {
+	uint64_t entry_point;
+	uint64_t flags;
+};
+
+/* Structure for handoff parameters to ARM Trusted Firmware (ATF) */
+struct xfsbl_atf_handoff_params {
+	uint8_t magic[4];
+	uint32_t num_entries;
+	struct xfsbl_partition partition[FSBL_MAX_PARTITIONS];
+};
+
+/**
+ * @partition: Pointer to partition struct
+ *
+ * Get the target CPU for @partition.
+ *
+ * Return: FSBL_FLAGS_A53_0, FSBL_FLAGS_A53_1, FSBL_FLAGS_A53_2 or FSBL_FLAGS_A53_3
+ */
+static int get_fsbl_cpu(const struct xfsbl_partition *partition)
+{
+	uint64_t flags = partition->flags & FSBL_FLAGS_CPU_MASK;
+
+	return flags >> FSBL_FLAGS_CPU_SHIFT;
+}
+
+/**
+ * @partition: Pointer to partition struct
+ *
+ * Get the target exception level for @partition.
+ *
+ * Return: FSBL_FLAGS_EL0, FSBL_FLAGS_EL1, FSBL_FLAGS_EL2 or FSBL_FLAGS_EL3
+ */
+static int get_fsbl_el(const struct xfsbl_partition *partition)
+{
+	uint64_t flags = partition->flags & FSBL_FLAGS_EL_MASK;
+
+	return flags >> FSBL_FLAGS_EL_SHIFT;
+}
+
+/**
+ * @partition: Pointer to partition struct
+ *
+ * Get the target security state for @partition.
+ *
+ * Return: FSBL_FLAGS_NON_SECURE or FSBL_FLAGS_SECURE
+ */
+static int get_fsbl_ss(const struct xfsbl_partition *partition)
+{
+	uint64_t flags = partition->flags & FSBL_FLAGS_TZ_MASK;
+
+	return flags >> FSBL_FLAGS_TZ_SHIFT;
+}
+
+/**
+ * @partition: Pointer to partition struct
+ *
+ * Get the target endianness for @partition.
+ *
+ * Return: SPSR_E_LITTLE or SPSR_E_BIG
+ */
+static int get_fsbl_endian(const struct xfsbl_partition *partition)
+{
+	uint64_t flags = partition->flags & FSBL_FLAGS_ENDIAN_MASK;
+
+	flags >>= FSBL_FLAGS_ENDIAN_SHIFT;
+
+	if (flags == FSBL_FLAGS_ENDIAN_BE)
+		return SPSR_E_BIG;
+	else
+		return SPSR_E_LITTLE;
+}
+
+/**
+ * @partition: Pointer to partition struct
+ *
+ * Get the target execution state for @partition.
+ *
+ * Return: FSBL_FLAGS_ESTATE_A32 or FSBL_FLAGS_ESTATE_A64
+ */
+static int get_fsbl_estate(const struct xfsbl_partition *partition)
+{
+	uint64_t flags = partition->flags & FSBL_FLAGS_ESTATE_MASK;
+
+	return flags >> FSBL_FLAGS_ESTATE_SHIFT;
+}
+
+/**
+ * Populates the bl32 and bl33 image info structures
+ * @bl32:	BL32 image info structure
+ * @bl33:	BL33 image info structure
+ *
+ * Process the handoff parameters from the FSBL and populate the BL32 and BL33
+ * image info structures accordingly.
+ */
+void fsbl_atf_handover(entry_point_info_t *bl32, entry_point_info_t *bl33)
+{
+	uint64_t atf_handoff_addr;
+	const struct xfsbl_atf_handoff_params *ATFHandoffParams;
+
+	atf_handoff_addr = mmio_read_32(PMU_GLOBAL_GEN_STORAGE6);
+	assert((atf_handoff_addr < BL31_BASE) ||
+	       (atf_handoff_addr > (uint64_t)&__BL31_END__));
+	if (!atf_handoff_addr) {
+		ERROR("BL31: No ATF handoff structure passed\n");
+		panic();
+	}
+
+	ATFHandoffParams = (struct xfsbl_atf_handoff_params *)atf_handoff_addr;
+	if ((ATFHandoffParams->magic[0] != 'X') ||
+	    (ATFHandoffParams->magic[1] != 'L') ||
+	    (ATFHandoffParams->magic[2] != 'N') ||
+	    (ATFHandoffParams->magic[3] != 'X')) {
+		ERROR("BL31: invalid ATF handoff structure at %lx\n",
+		      atf_handoff_addr);
+		panic();
+	}
+
+	VERBOSE("BL31: ATF handoff params at:0x%lx, entries:%u\n",
+		atf_handoff_addr, ATFHandoffParams->num_entries);
+	if (ATFHandoffParams->num_entries > FSBL_MAX_PARTITIONS) {
+		ERROR("BL31: ATF handoff params: too many partitions (%u/%u)\n",
+		      ATFHandoffParams->num_entries, FSBL_MAX_PARTITIONS);
+		panic();
+	}
+
+	/*
+	 * We loop over all passed entries but only populate two image
+	 * structs (bl32, bl33), i.e. the last applicable images in the
+	 * handoff structure are the ones used for the handoff.
+	 */
+	for (size_t i = 0; i < ATFHandoffParams->num_entries; i++) {
+		entry_point_info_t *image;
+		int target_estate, target_secure;
+		int target_cpu, target_endianness, target_el;
+
+		VERBOSE("BL31: %zd: entry:0x%lx, flags:0x%lx\n", i,
+			ATFHandoffParams->partition[i].entry_point,
+			ATFHandoffParams->partition[i].flags);
+
+		target_cpu = get_fsbl_cpu(&ATFHandoffParams->partition[i]);
+		if (target_cpu != FSBL_FLAGS_A53_0) {
+			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
+			continue;
+		}
+
+		target_el = get_fsbl_el(&ATFHandoffParams->partition[i]);
+		if ((target_el == FSBL_FLAGS_EL3) ||
+		    (target_el == FSBL_FLAGS_EL0)) {
+			WARN("BL31: invalid exception level (%i)\n", target_el);
+			continue;
+		}
+
+		target_secure = get_fsbl_ss(&ATFHandoffParams->partition[i]);
+		if (target_secure == FSBL_FLAGS_SECURE &&
+		    target_el == FSBL_FLAGS_EL2) {
+			WARN("BL31: invalid security state (%i) for exception level (%i)\n",
+			     target_secure, target_el);
+			continue;
+		}
+
+		target_estate = get_fsbl_estate(&ATFHandoffParams->partition[i]);
+		target_endianness = get_fsbl_endian(&ATFHandoffParams->partition[i]);
+
+		if (target_secure == FSBL_FLAGS_SECURE) {
+			image = bl32;
+
+			if (target_estate == FSBL_FLAGS_ESTATE_A32)
+				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+							 target_endianness,
+							 DISABLE_ALL_EXCEPTIONS);
+			else
+				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+						     DISABLE_ALL_EXCEPTIONS);
+		} else {
+			image = bl33;
+
+			if (target_estate == FSBL_FLAGS_ESTATE_A32) {
+				if (target_el == FSBL_FLAGS_EL2)
+					target_el = MODE32_hyp;
+				else
+					target_el = MODE32_sys;
+
+				bl33->spsr = SPSR_MODE32(target_el, SPSR_T_ARM,
+							 target_endianness,
+							 DISABLE_ALL_EXCEPTIONS);
+			} else {
+				if (target_el == FSBL_FLAGS_EL2)
+					target_el = MODE_EL2;
+				else
+					target_el = MODE_EL1;
+
+				bl33->spsr = SPSR_64(target_el, MODE_SP_ELX,
+						     DISABLE_ALL_EXCEPTIONS);
+			}
+		}
+
+		VERBOSE("Setting up %s entry point to:%lx, el:%x\n",
+			target_secure == FSBL_FLAGS_SECURE ? "BL32" : "BL33",
+			ATFHandoffParams->partition[i].entry_point,
+			target_el);
+		image->pc = ATFHandoffParams->partition[i].entry_point;
+
+		if (target_endianness == SPSR_E_BIG)
+			EP_SET_EE(image->h.attr, EP_EE_BIG);
+		else
+			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
+	}
+}
diff --git a/plat/xilinx/zynqmp/plat_topology.c b/plat/xilinx/zynqmp/plat_topology.c
new file mode 100644
index 0000000..aab24aa
--- /dev/null
+++ b/plat/xilinx/zynqmp/plat_topology.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
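+/*
+ * Power domain tree description consumed by the PSCI framework: one
+ * top-level power domain (the APU cluster) with four CPU power domains
+ * beneath it.
+ */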
+static const unsigned char plat_power_domain_tree_desc[] = {1, 4};
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	return plat_power_domain_tree_desc;
+}
diff --git a/plat/xilinx/zynqmp/plat_zynqmp.c b/plat/xilinx/zynqmp/plat_zynqmp.c
new file mode 100644
index 0000000..cbfa935
--- /dev/null
+++ b/plat/xilinx/zynqmp/plat_zynqmp.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_arm.h>
+
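+/*
+ * Map an MPIDR to a linear core position. The ZynqMP APU is a single
+ * quad-core Cortex-A53 cluster, so any MPIDR with a non-zero cluster field
+ * or a CPU field beyond the core count is rejected; e.g. MPIDRs 0x0-0x3 are
+ * accepted (assuming plat_arm_calc_core_pos() reduces to the CPU id in this
+ * single-cluster configuration) while 0x100 is not.
+ */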
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	if (mpidr & MPIDR_CLUSTER_MASK)
+		return -1;
+
+	if ((mpidr & MPIDR_CPU_MASK) >= PLATFORM_CORE_COUNT)
+		return -1;
+
+	return plat_arm_calc_core_pos(mpidr);
+}
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
new file mode 100644
index 0000000..ca87cc8
--- /dev/null
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -0,0 +1,74 @@
+#
+# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+override ERRATA_A53_855873 := 1
+override ENABLE_PLAT_COMPAT := 0
+override PROGRAMMABLE_RESET_ADDRESS := 1
+PSCI_EXTENDED_STATE_ID := 1
+A53_DISABLE_NON_TEMPORAL_HINT := 0
+SEPARATE_CODE_AND_RODATA := 1
+override RESET_TO_BL31 := 1
+
+ifdef ZYNQMP_ATF_MEM_BASE
+    $(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
+
+    ifndef ZYNQMP_ATF_MEM_SIZE
+        $(error "ZYNQMP_ATF_MEM_BASE defined without ZYNQMP_ATF_MEM_SIZE")
+    endif
+    $(eval $(call add_define,ZYNQMP_ATF_MEM_SIZE))
+
+    ifdef ZYNQMP_ATF_MEM_PROGBITS_SIZE
+        $(eval $(call add_define,ZYNQMP_ATF_MEM_PROGBITS_SIZE))
+    endif
+endif
+
+ifdef ZYNQMP_BL32_MEM_BASE
+    $(eval $(call add_define,ZYNQMP_BL32_MEM_BASE))
+
+    ifndef ZYNQMP_BL32_MEM_SIZE
+        $(error "ZYNQMP_BL32_MEM_BASE defined without ZYNQMP_BL32_MEM_SIZE")
+    endif
+    $(eval $(call add_define,ZYNQMP_BL32_MEM_SIZE))
+endif
+
+ZYNQMP_CONSOLE	?=	cadence
+$(eval $(call add_define_val,ZYNQMP_CONSOLE,ZYNQMP_CONSOLE_ID_${ZYNQMP_CONSOLE}))
+
+PLAT_INCLUDES		:=	-Iinclude/plat/arm/common/			\
+				-Iinclude/plat/arm/common/aarch64/		\
+				-Iplat/xilinx/zynqmp/include/			\
+				-Iplat/xilinx/zynqmp/pm_service/
+
+PLAT_BL_COMMON_SOURCES	:=	lib/xlat_tables/xlat_tables_common.c		\
+				lib/xlat_tables/aarch64/xlat_tables.c		\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				drivers/arm/gic/common/gic_common.c		\
+				drivers/arm/gic/v2/gicv2_main.c			\
+				drivers/arm/gic/v2/gicv2_helpers.c		\
+				drivers/cadence/uart/aarch64/cdns_console.S	\
+				drivers/console/aarch64/console.S		\
+				plat/arm/common/aarch64/arm_helpers.S		\
+				plat/arm/common/arm_cci.c			\
+				plat/arm/common/arm_common.c			\
+				plat/arm/common/arm_gicv2.c			\
+				plat/common/plat_gicv2.c			\
+				plat/xilinx/zynqmp/aarch64/zynqmp_helpers.S	\
+				plat/xilinx/zynqmp/aarch64/zynqmp_common.c
+
+BL31_SOURCES		+=	drivers/arm/cci/cci.c				\
+				lib/cpus/aarch64/aem_generic.S			\
+				lib/cpus/aarch64/cortex_a53.S			\
+				plat/common/plat_psci_common.c			\
+				plat/xilinx/zynqmp/bl31_zynqmp_setup.c		\
+				plat/xilinx/zynqmp/plat_psci.c			\
+				plat/xilinx/zynqmp/plat_zynqmp.c		\
+				plat/xilinx/zynqmp/plat_startup.c		\
+				plat/xilinx/zynqmp/plat_topology.c		\
+				plat/xilinx/zynqmp/sip_svc_setup.c		\
+				plat/xilinx/zynqmp/pm_service/pm_svc_main.c	\
+				plat/xilinx/zynqmp/pm_service/pm_api_sys.c	\
+				plat/xilinx/zynqmp/pm_service/pm_ipi.c		\
+				plat/xilinx/zynqmp/pm_service/pm_client.c
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
new file mode 100644
index 0000000..90c670d
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * ZynqMP system level PM-API functions and communication with PMU via
+ * IPI interrupts
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include "pm_api_sys.h"
+#include "pm_client.h"
+#include "pm_common.h"
+#include "pm_ipi.h"
+
+/**
+ * Helper macros to assign argument values into the payload array elements.
+ */
+#define PM_PACK_PAYLOAD1(pl, arg0) {	\
+	pl[0] = (uint32_t)(arg0);	\
+}
+
+#define PM_PACK_PAYLOAD2(pl, arg0, arg1) {	\
+	pl[1] = (uint32_t)(arg1);		\
+	PM_PACK_PAYLOAD1(pl, arg0);		\
+}
+
+#define PM_PACK_PAYLOAD3(pl, arg0, arg1, arg2) {	\
+	pl[2] = (uint32_t)(arg2);			\
+	PM_PACK_PAYLOAD2(pl, arg0, arg1);		\
+}
+
+#define PM_PACK_PAYLOAD4(pl, arg0, arg1, arg2, arg3) {	\
+	pl[3] = (uint32_t)(arg3);			\
+	PM_PACK_PAYLOAD3(pl, arg0, arg1, arg2);		\
+}
+
+#define PM_PACK_PAYLOAD5(pl, arg0, arg1, arg2, arg3, arg4) {	\
+	pl[4] = (uint32_t)(arg4);				\
+	PM_PACK_PAYLOAD4(pl, arg0, arg1, arg2, arg3);		\
+}
+
+#define PM_PACK_PAYLOAD6(pl, arg0, arg1, arg2, arg3, arg4, arg5) {	\
+	pl[5] = (uint32_t)(arg5);					\
+	PM_PACK_PAYLOAD5(pl, arg0, arg1, arg2, arg3, arg4);		\
+}
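+
+/*
+ * As a rough illustration, PM_PACK_PAYLOAD3(payload, PM_RESET_ASSERT, reset,
+ * assert) expands (via PAYLOAD2 and PAYLOAD1) to:
+ *
+ *	payload[2] = (uint32_t)(assert);
+ *	payload[1] = (uint32_t)(reset);
+ *	payload[0] = (uint32_t)(PM_RESET_ASSERT);
+ *
+ * i.e. element 0 always carries the API id and the call arguments follow in
+ * order; the whole PAYLOAD_ARG_CNT-word array is then written to the IPI
+ * request buffer by pm_ipi_send() or pm_ipi_send_sync().
+ */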
+
+/**
+ * pm_self_suspend() - PM call for processor to suspend itself
+ * @nid		Node id of the processor or subsystem
+ * @latency	Requested maximum wakeup latency (not supported)
+ * @state	Requested state
+ * @address	Resume address
+ *
+ * This is a blocking call, it will return only once PMU has responded.
+ * On a wakeup, resume address will be automatically set by PMU.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_self_suspend(enum pm_node_id nid,
+				   unsigned int latency,
+				   unsigned int state,
+				   uintptr_t address)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+	unsigned int cpuid = plat_my_core_pos();
+	const struct pm_proc *proc = pm_get_proc(cpuid);
+
+	/*
+	 * Do client specific suspend operations
+	 * (e.g. set powerdown request bit)
+	 */
+	pm_client_suspend(proc, state);
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD6(payload, PM_SELF_SUSPEND, proc->node_id, latency,
+			 state, address, (address >> 32));
+	return pm_ipi_send_sync(proc, payload, NULL, 0);
+}
+
+/**
+ * pm_req_suspend() - PM call to request for another PU or subsystem to
+ *		      be suspended gracefully.
+ * @target	Node id of the targeted PU or subsystem
+ * @ack		Flag to specify whether acknowledge is requested
+ * @latency	Requested wakeup latency (not supported)
+ * @state	Requested state (not supported)
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_req_suspend(enum pm_node_id target,
+				  enum pm_request_ack ack,
+				  unsigned int latency, unsigned int state)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD5(payload, PM_REQ_SUSPEND, target, ack, latency, state);
+	if (ack == REQ_ACK_BLOCKING)
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	else
+		return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_req_wakeup() - PM call for processor to wake up selected processor
+ *		     or subsystem
+ * @target	Node id of the processor or subsystem to wake up
+ * @ack		Flag to specify whether acknowledge requested
+ * @set_address	Resume address presence indicator
+ *				1 resume address specified, 0 otherwise
+ * @address	Resume address
+ *
+ * This API function is either used to power up another APU core for SMP
+ * (by PSCI) or to power up an entirely different PU or subsystem, such
+ * as RPU0, RPU, or PL_CORE_xx. Resume address for the target PU will be
+ * automatically set by PMU.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_req_wakeup(enum pm_node_id target,
+				 unsigned int set_address,
+				 uintptr_t address,
+				 enum pm_request_ack ack)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+	uint64_t encoded_address;
+	const struct pm_proc *proc = pm_get_proc_by_node(target);
+
+	/* invoke APU-specific code for waking up another APU core */
+	pm_client_wakeup(proc);
+
+	/* encode the set_address flag into bit 0 of the address */
+	encoded_address = address;
+	encoded_address |= !!set_address;
+
+	/* Send request to the PMU to perform the wake of the PU */
+	PM_PACK_PAYLOAD5(payload, PM_REQ_WAKEUP, target, encoded_address,
+			 encoded_address >> 32, ack);
+
+	if (ack == REQ_ACK_BLOCKING)
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	else
+		return pm_ipi_send(primary_proc, payload);
+}
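+
+/*
+ * For illustration: with set_address = 1 and address = 0xFFFC0000 the code
+ * above produces encoded_address = 0xFFFC0001, so payload word 2 carries
+ * 0xFFFC0001 and payload word 3 carries 0x0. This presumably relies on
+ * resume addresses being at least word aligned, leaving bit 0 free for the
+ * "address valid" flag.
+ */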
+
+/**
+ * pm_force_powerdown() - PM call to request for another PU or subsystem to
+ *			  be powered down forcefully
+ * @target	Node id of the targeted PU or subsystem
+ * @ack		Flag to specify whether acknowledge is requested
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_force_powerdown(enum pm_node_id target,
+				      enum pm_request_ack ack)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD3(payload, PM_FORCE_POWERDOWN, target, ack);
+
+	if (ack == REQ_ACK_BLOCKING)
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	else
+		return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_abort_suspend() - PM call to announce that a prior suspend request
+ *			is to be aborted.
+ * @reason	Reason for the abort
+ *
+ * Calling PU expects the PMU to abort the initiated suspend procedure.
+ * This is a non-blocking call without any acknowledge.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/*
+	 * Do client specific abort suspend operations
+	 * (e.g. enable interrupts and clear powerdown request bit)
+	 */
+	pm_client_abort_suspend();
+	/* Send request to the PMU */
+	/* TODO: allow passing the node ID of the affected CPU */
+	PM_PACK_PAYLOAD3(payload, PM_ABORT_SUSPEND, reason,
+			 primary_proc->node_id);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_set_wakeup_source() - PM call to specify the wakeup source while suspended
+ * @target	Node id of the targeted PU or subsystem
+ * @wkup_node	Node id of the wakeup peripheral
+ * @enable	Enable or disable the specified peripheral as wake source
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_set_wakeup_source(enum pm_node_id target,
+					enum pm_node_id wkup_node,
+					unsigned int enable)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD4(payload, PM_SET_WAKEUP_SOURCE, target, wkup_node,
+			 enable);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_system_shutdown() - PM call to request a system shutdown or restart
+ * @type	Type of shutdown: PMF_SHUTDOWN_TYPE_SHUTDOWN or
+ *		PMF_SHUTDOWN_TYPE_RESET
+ * @subtype	Shutdown subtype: PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ *		PMF_SHUTDOWN_SUBTYPE_PS_ONLY or PMF_SHUTDOWN_SUBTYPE_SYSTEM
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_system_shutdown(unsigned int type, unsigned int subtype)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD3(payload, PM_SYSTEM_SHUTDOWN, type, subtype);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/* APIs for managing PM slaves: */
+
+/**
+ * pm_req_node() - PM call to request a node with specific capabilities
+ * @nid		Node id of the slave
+ * @capabilities Requested capabilities of the slave
+ * @qos		Quality of service (not supported)
+ * @ack		Flag to specify whether acknowledge is requested
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_req_node(enum pm_node_id nid,
+			       unsigned int capabilities,
+			       unsigned int qos,
+			       enum pm_request_ack ack)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD5(payload, PM_REQ_NODE, nid, capabilities, qos, ack);
+
+	if (ack == REQ_ACK_BLOCKING)
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	else
+		return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_set_requirement() - PM call to set requirement for PM slaves
+ * @nid		Node id of the slave
+ * @capabilities Requested capabilities of the slave
+ * @qos		Quality of service (not supported)
+ * @ack		Flag to specify whether acknowledge is requested
+ *
+ * This API function is to be used for slaves that a PU has already requested
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_set_requirement(enum pm_node_id nid,
+				      unsigned int capabilities,
+				      unsigned int qos,
+				      enum pm_request_ack ack)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD5(payload, PM_SET_REQUIREMENT, nid, capabilities, qos,
+			 ack);
+
+	if (ack == REQ_ACK_BLOCKING)
+		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	else
+		return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_release_node() - PM call to release a node
+ * @nid		Node id of the slave
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_release_node(enum pm_node_id nid)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD2(payload, PM_RELEASE_NODE, nid);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_set_max_latency() - PM call to set wakeup latency requirements
+ * @nid		Node id of the slave
+ * @latency	Requested maximum wakeup latency
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_set_max_latency(enum pm_node_id nid,
+				      unsigned int latency)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD3(payload, PM_SET_MAX_LATENCY, nid, latency);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/* Miscellaneous API functions */
+
+/**
+ * pm_get_api_version() - Get version number of PMU PM firmware
+ * @version	Returns 32-bit version number of PMU Power Management Firmware
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_get_api_version(unsigned int *version)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD1(payload, PM_GET_API_VERSION);
+	return pm_ipi_send_sync(primary_proc, payload, version, 1);
+}
+
+/**
+ * pm_set_configuration() - PM call to set system configuration
+ * @phys_addr	Physical 32-bit address of data structure in memory
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_set_configuration(unsigned int phys_addr)
+{
+	return PM_RET_ERROR_NOTSUPPORTED;
+}
+
+/**
+ * pm_get_node_status() - PM call to request a node's current power state
+ * @nid		Node id of the slave
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_get_node_status(enum pm_node_id nid)
+{
+	/* TODO: Add power state argument!! */
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD2(payload, PM_GET_NODE_STATUS, nid);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_register_notifier() - Register the PU to be notified of PM events
+ * @nid		Node id of the slave
+ * @event	The event to be notified about
+ * @wake	Wake up on event
+ * @enable	Enable or disable the notifier
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_register_notifier(enum pm_node_id nid,
+					unsigned int event,
+					unsigned int wake,
+					unsigned int enable)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	PM_PACK_PAYLOAD5(payload, PM_REGISTER_NOTIFIER,
+			 nid, event, wake, enable);
+
+	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+}
+
+/**
+ * pm_get_op_characteristic() - PM call to request operating characteristics
+ *				of a node
+ * @nid		Node id of the slave
+ * @type	Type of the operating characteristic
+ *		(power, temperature and latency)
+ * @result	Returns the operating characteristic for the requested node,
+ *		specified by the type
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_get_op_characteristic(enum pm_node_id nid,
+					    enum pm_opchar_type type,
+					    uint32_t *result)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD3(payload, PM_GET_OP_CHARACTERISTIC, nid, type);
+	return pm_ipi_send_sync(primary_proc, payload, result, 1);
+}
+
+/* Direct-Control API functions */
+
+/**
+ * pm_reset_assert() - Assert reset
+ * @reset	Reset ID
+ * @assert	Assert (1) or de-assert (0)
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_reset_assert(unsigned int reset,
+				   unsigned int assert)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD3(payload, PM_RESET_ASSERT, reset, assert);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_reset_get_status() - Get current status of a reset line
+ * @reset	Reset ID
+ * @reset_status Returns current status of selected reset line
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_reset_get_status(unsigned int reset,
+				       unsigned int *reset_status)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD2(payload, PM_RESET_GET_STATUS, reset);
+	return pm_ipi_send_sync(primary_proc, payload, reset_status, 1);
+}
+
+/**
+ * pm_mmio_write() - Perform write to protected mmio
+ * @address	Address to write to
+ * @mask	Mask to apply
+ * @value	Value to write
+ *
+ * This function provides access to PM-related control registers
+ * that may not be directly accessible by a particular PU.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_mmio_write(uintptr_t address,
+				 unsigned int mask,
+				 unsigned int value)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD4(payload, PM_MMIO_WRITE, address, mask, value);
+	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+}
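+
+/*
+ * A minimal usage sketch (REG_ADDR here is a hypothetical protected
+ * register, not a real ZynqMP address): to request that only bit 0 of that
+ * register be set, a caller would issue
+ *
+ *	pm_mmio_write(REG_ADDR, 0x1, 0x1);
+ *
+ * The mask is simply forwarded to the PMU, which is expected to restrict
+ * the write to the masked bits.
+ */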
+
+/**
+ * pm_mmio_read() - Read value from protected mmio
+ * @address	Address to read from
+ * @value	Used to return the read value
+ *
+ * This function provides access to PM-related control registers
+ * that may not be directly accessible by a particular PU.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_mmio_read(uintptr_t address, unsigned int *value)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD2(payload, PM_MMIO_READ, address);
+	return pm_ipi_send_sync(primary_proc, payload, value, 1);
+}
+
+/**
+ * pm_fpga_load() - Load the bitstream into the PL
+ * @address_low		Lower 32 bits of the linear memory space address
+ * @address_high	Higher 32 bits of the linear memory space address
+ * @size		Number of 32-bit words
+ * @flags		Flags forwarded to the PMU along with the request
+ *
+ * This function provides access to the xilfpga library to load
+ * the bitstream into the PL.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_fpga_load(uint32_t address_low,
+				uint32_t address_high,
+				uint32_t size,
+				uint32_t flags)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD5(payload, PM_FPGA_LOAD, address_high, address_low,
+						size, flags);
+	return pm_ipi_send(primary_proc, payload);
+}
+
+/**
+ * pm_fpga_get_status() - Read the FPGA status register
+ * @value	Used to return the FPGA status value
+ *
+ * This function provides access to the xilfpga library to get
+ * the FPGA status.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_fpga_get_status(unsigned int *value)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD1(payload, PM_FPGA_GET_STATUS);
+	return pm_ipi_send_sync(primary_proc, payload, value, 1);
+}
+
+/**
+ * pm_get_chipid() - Read silicon ID registers
+ * @value       Buffer for return values. Must be large enough
+ *		to hold 8 bytes.
+ *
+ * @return	Returns status, either success or error+reason; the silicon
+ *		ID registers are returned through @value
+ */
+enum pm_ret_status pm_get_chipid(uint32_t *value)
+{
+	uint32_t payload[PAYLOAD_ARG_CNT];
+
+	/* Send request to the PMU */
+	PM_PACK_PAYLOAD1(payload, PM_GET_CHIPID);
+	return pm_ipi_send_sync(primary_proc, payload, value, 2);
+}
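+
+/*
+ * A minimal usage sketch: since two response words are read back, the
+ * caller must provide at least 8 bytes of storage, e.g.
+ *
+ *	uint32_t chipid[2];
+ *
+ *	if (pm_get_chipid(chipid) == PM_RET_SUCCESS)
+ *		INFO("idcode 0x%x, version 0x%x\n", chipid[0], chipid[1]);
+ *
+ * The meaning of the two words (idcode/version here) is an assumption about
+ * the PMU firmware; only the buffer size requirement comes from this file.
+ */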
+
+/**
+ * pm_get_callbackdata() - Read from IPI response buffer
+ * @data	Array of PAYLOAD_ARG_CNT elements used to return the data
+ * @count	Number of words to read from the buffer
+ *
+ * Read the callback data from the IPI buffer and clear the IPI interrupt.
+ */
+void pm_get_callbackdata(uint32_t *data, size_t count)
+{
+
+	pm_ipi_buff_read_callb(data, count);
+	pm_ipi_irq_clear();
+}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
new file mode 100644
index 0000000..af7b252
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _PM_API_SYS_H_
+#define _PM_API_SYS_H_
+
+#include <stdint.h>
+#include "pm_defs.h"
+
+/**********************************************************
+ * System-level API function declarations
+ **********************************************************/
+enum pm_ret_status pm_req_suspend(enum pm_node_id nid,
+				  enum pm_request_ack ack,
+				  unsigned int latency,
+				  unsigned int state);
+
+enum pm_ret_status pm_self_suspend(enum pm_node_id nid,
+				   unsigned int latency,
+				   unsigned int state,
+				   uintptr_t address);
+
+enum pm_ret_status pm_force_powerdown(enum pm_node_id nid,
+				      enum pm_request_ack ack);
+
+enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason);
+
+enum pm_ret_status pm_req_wakeup(enum pm_node_id nid,
+				 unsigned int set_address,
+				 uintptr_t address,
+				 enum pm_request_ack ack);
+
+enum pm_ret_status pm_set_wakeup_source(enum pm_node_id target,
+					enum pm_node_id wkup_node,
+					unsigned int enable);
+
+enum pm_ret_status pm_system_shutdown(unsigned int type, unsigned int subtype);
+
+enum pm_ret_status pm_init_suspend_cb(enum pm_suspend_reason reason,
+				      unsigned int latency,
+				      unsigned int state,
+				      unsigned int timeout);
+
+/* API functions for managing PM Slaves */
+enum pm_ret_status pm_req_node(enum pm_node_id nid,
+			       unsigned int capabilities,
+			       unsigned int qos,
+			       enum pm_request_ack ack);
+enum pm_ret_status pm_release_node(enum pm_node_id nid);
+
+enum pm_ret_status pm_set_requirement(enum pm_node_id nid,
+				      unsigned int capabilities,
+				      unsigned int qos,
+				      enum pm_request_ack ack);
+enum pm_ret_status pm_set_max_latency(enum pm_node_id nid,
+				      unsigned int latency);
+
+/* Miscellaneous API functions */
+enum pm_ret_status pm_get_api_version(unsigned int *version);
+enum pm_ret_status pm_set_configuration(unsigned int phys_addr);
+enum pm_ret_status pm_get_node_status(enum pm_node_id node);
+enum pm_ret_status pm_register_notifier(enum pm_node_id nid,
+					unsigned int event,
+					unsigned int wake,
+					unsigned int enable);
+enum pm_ret_status pm_get_op_characteristic(enum pm_node_id nid,
+					    enum pm_opchar_type type,
+					    uint32_t *result);
+enum pm_ret_status pm_acknowledge_cb(enum pm_node_id nid,
+				     enum pm_ret_status status,
+				     unsigned int oppoint);
+enum pm_ret_status pm_notify_cb(enum pm_node_id nid,
+				unsigned int event,
+				unsigned int oppoint);
+
+/* Direct-Control API functions */
+enum pm_ret_status pm_reset_assert(unsigned int reset_id,
+				   unsigned int assert);
+enum pm_ret_status pm_reset_get_status(unsigned int reset_id,
+				       unsigned int *reset_status);
+enum pm_ret_status pm_mmio_write(uintptr_t address,
+				 unsigned int mask,
+				 unsigned int value);
+enum pm_ret_status pm_mmio_read(uintptr_t address, unsigned int *value);
+enum pm_ret_status pm_fpga_load(uint32_t address_low,
+				uint32_t address_high,
+				uint32_t size,
+				uint32_t flags);
+enum pm_ret_status pm_fpga_get_status(unsigned int *value);
+
+enum pm_ret_status pm_get_chipid(uint32_t *value);
+void pm_get_callbackdata(uint32_t *data, size_t count);
+
+#endif /* _PM_API_SYS_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_client.c b/plat/xilinx/zynqmp/pm_service/pm_client.c
new file mode 100644
index 0000000..9016fd6
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_client.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * APU specific definition of processors in the subsystem as well as functions
+ * for getting information about and changing state of the APU.
+ */
+
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <gic_common.h>
+#include <gicv2.h>
+#include <mmio.h>
+#include <string.h>
+#include <utils.h>
+#include "../zynqmp_def.h"
+#include "pm_api_sys.h"
+#include "pm_client.h"
+#include "pm_ipi.h"
+
+#define IRQ_MAX		84
+#define NUM_GICD_ISENABLER	((IRQ_MAX >> 5) + 1)
+#define UNDEFINED_CPUID		(~0)
+
+DEFINE_BAKERY_LOCK(pm_client_secure_lock);
+
+extern const struct pm_ipi apu_ipi;
+
+/* Order in pm_procs_all array must match cpu ids */
+static const struct pm_proc pm_procs_all[] = {
+	{
+		.node_id = NODE_APU_0,
+		.pwrdn_mask = APU_0_PWRCTL_CPUPWRDWNREQ_MASK,
+		.ipi = &apu_ipi,
+	},
+	{
+		.node_id = NODE_APU_1,
+		.pwrdn_mask = APU_1_PWRCTL_CPUPWRDWNREQ_MASK,
+		.ipi = &apu_ipi,
+	},
+	{
+		.node_id = NODE_APU_2,
+		.pwrdn_mask = APU_2_PWRCTL_CPUPWRDWNREQ_MASK,
+		.ipi = &apu_ipi,
+	},
+	{
+		.node_id = NODE_APU_3,
+		.pwrdn_mask = APU_3_PWRCTL_CPUPWRDWNREQ_MASK,
+		.ipi = &apu_ipi,
+	},
+};
+
+/* Interrupt to PM node ID map */
+static enum pm_node_id irq_node_map[IRQ_MAX + 1] = {
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 3 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 7 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 11 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_NAND,
+	NODE_QSPI,	/* 15 */
+	NODE_GPIO,
+	NODE_I2C_0,
+	NODE_I2C_1,
+	NODE_SPI_0,	/* 19 */
+	NODE_SPI_1,
+	NODE_UART_0,
+	NODE_UART_1,
+	NODE_CAN_0,	/* 23 */
+	NODE_CAN_1,
+	NODE_UNKNOWN,
+	NODE_RTC,
+	NODE_RTC,	/* 27 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 31 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 35, NODE_IPI_APU */
+	NODE_TTC_0,
+	NODE_TTC_0,
+	NODE_TTC_0,
+	NODE_TTC_1,	/* 39 */
+	NODE_TTC_1,
+	NODE_TTC_1,
+	NODE_TTC_2,
+	NODE_TTC_2,	/* 43 */
+	NODE_TTC_2,
+	NODE_TTC_3,
+	NODE_TTC_3,
+	NODE_TTC_3,	/* 47 */
+	NODE_SD_0,
+	NODE_SD_1,
+	NODE_SD_0,
+	NODE_SD_1,	/* 51 */
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,
+	NODE_UNKNOWN,	/* 55 */
+	NODE_UNKNOWN,
+	NODE_ETH_0,
+	NODE_ETH_0,
+	NODE_ETH_1,	/* 59 */
+	NODE_ETH_1,
+	NODE_ETH_2,
+	NODE_ETH_2,
+	NODE_ETH_3,	/* 63 */
+	NODE_ETH_3,
+	NODE_USB_0,
+	NODE_USB_0,
+	NODE_USB_0,	/* 67 */
+	NODE_USB_0,
+	NODE_USB_0,
+	NODE_USB_1,
+	NODE_USB_1,	/* 71 */
+	NODE_USB_1,
+	NODE_USB_1,
+	NODE_USB_1,
+	NODE_USB_0,	/* 75 */
+	NODE_USB_0,
+	NODE_ADMA,
+	NODE_ADMA,
+	NODE_ADMA,	/* 79 */
+	NODE_ADMA,
+	NODE_ADMA,
+	NODE_ADMA,
+	NODE_ADMA,	/* 83 */
+	NODE_ADMA,
+};
+
+/**
+ * irq_to_pm_node - Get PM node ID corresponding to the interrupt number
+ * @irq:	Interrupt number
+ *
+ * Return:	PM node ID corresponding to the specified interrupt
+ */
+static enum pm_node_id irq_to_pm_node(unsigned int irq)
+{
+	assert(irq <= IRQ_MAX);
+	return irq_node_map[irq];
+}
+
+/**
+ * pm_client_set_wakeup_sources - Set all slaves with enabled interrupts as wake
+ *				sources in the PMU firmware
+ */
+static void pm_client_set_wakeup_sources(void)
+{
+	uint32_t reg_num;
+	uint8_t pm_wakeup_nodes_set[NODE_MAX];
+	uintptr_t isenabler1 = BASE_GICD_BASE + GICD_ISENABLER + 4;
+
+	zeromem(&pm_wakeup_nodes_set, sizeof(pm_wakeup_nodes_set));
+
+	for (reg_num = 0; reg_num < NUM_GICD_ISENABLER; reg_num++) {
+		uint32_t base_irq = reg_num << ISENABLER_SHIFT;
+		uint32_t reg = mmio_read_32(isenabler1 + (reg_num << 2));
+
+		if (!reg)
+			continue;
+
+		while (reg) {
+			enum pm_node_id node;
+			uint32_t idx, ret, irq, lowest_set = reg & (-reg);
+
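+			/*
+			 * reg & (-reg) isolates the lowest set bit of the
+			 * ISENABLER word and __builtin_ctz() turns it into a
+			 * bit index, so each loop iteration handles exactly
+			 * one enabled interrupt.
+			 */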
+			idx = __builtin_ctz(lowest_set);
+			irq = base_irq + idx;
+
+			if (irq > IRQ_MAX)
+				break;
+
+			node = irq_to_pm_node(irq);
+			reg &= ~lowest_set;
+
+			if ((node != NODE_UNKNOWN) &&
+			    (!pm_wakeup_nodes_set[node])) {
+				ret = pm_set_wakeup_source(NODE_APU, node, 1);
+				pm_wakeup_nodes_set[node] = !ret;
+			}
+		}
+	}
+}
+
+/**
+ * pm_get_proc() - returns pointer to the proc structure
+ * @cpuid:	id of the cpu whose proc struct pointer should be returned
+ *
+ * Return: pointer to a proc structure if proc is found, otherwise NULL
+ */
+const struct pm_proc *pm_get_proc(unsigned int cpuid)
+{
+	if (cpuid < ARRAY_SIZE(pm_procs_all))
+		return &pm_procs_all[cpuid];
+
+	return NULL;
+}
+
+/**
+ * pm_get_proc_by_node() - returns pointer to the proc structure
+ * @nid:	node id of the processor
+ *
+ * Return: pointer to a proc structure if proc is found, otherwise NULL
+ */
+const struct pm_proc *pm_get_proc_by_node(enum pm_node_id nid)
+{
+	for (size_t i = 0; i < ARRAY_SIZE(pm_procs_all); i++) {
+		if (nid == pm_procs_all[i].node_id)
+			return &pm_procs_all[i];
+	}
+	return NULL;
+}
+
+/**
+ * pm_get_cpuid() - get the local cpu ID for a global node ID
+ * @nid:	node id of the processor
+ *
+ * Return: the cpu ID (starting from 0) for the subsystem
+ */
+static unsigned int pm_get_cpuid(enum pm_node_id nid)
+{
+	for (size_t i = 0; i < ARRAY_SIZE(pm_procs_all); i++) {
+		if (pm_procs_all[i].node_id == nid)
+			return i;
+	}
+	return UNDEFINED_CPUID;
+}
+
+const struct pm_proc *primary_proc = &pm_procs_all[0];
+
+/**
+ * pm_client_suspend() - Client-specific suspend actions
+ *
+ * This function should contain any PU-specific actions
+ * required prior to sending the suspend request to the PMU.
+ * Actions taken depend on the state the system is suspending to.
+ */
+void pm_client_suspend(const struct pm_proc *proc, unsigned int state)
+{
+	bakery_lock_get(&pm_client_secure_lock);
+
+	if (state == PM_STATE_SUSPEND_TO_RAM)
+		pm_client_set_wakeup_sources();
+
+	/* Set powerdown request */
+	mmio_write_32(APU_PWRCTL, mmio_read_32(APU_PWRCTL) | proc->pwrdn_mask);
+
+	bakery_lock_release(&pm_client_secure_lock);
+}
+
+
+/**
+ * pm_client_abort_suspend() - Client-specific abort-suspend actions
+ *
+ * This function should contain any PU-specific actions
+ * required for aborting a prior suspend request
+ */
+void pm_client_abort_suspend(void)
+{
+	/* Enable interrupts at processor level (for current cpu) */
+	gicv2_cpuif_enable();
+
+	bakery_lock_get(&pm_client_secure_lock);
+
+	/* Clear powerdown request */
+	mmio_write_32(APU_PWRCTL,
+		 mmio_read_32(APU_PWRCTL) & ~primary_proc->pwrdn_mask);
+
+	bakery_lock_release(&pm_client_secure_lock);
+}
+
+/**
+ * pm_client_wakeup() - Client-specific wakeup actions
+ *
+ * This function should contain any PU-specific actions
+ * required for waking up another APU core
+ */
+void pm_client_wakeup(const struct pm_proc *proc)
+{
+	unsigned int cpuid = pm_get_cpuid(proc->node_id);
+
+	if (cpuid == UNDEFINED_CPUID)
+		return;
+
+	bakery_lock_get(&pm_client_secure_lock);
+
+	/* clear powerdown bit for affected cpu */
+	uint32_t val = mmio_read_32(APU_PWRCTL);
+	val &= ~(proc->pwrdn_mask);
+	mmio_write_32(APU_PWRCTL, val);
+
+	bakery_lock_release(&pm_client_secure_lock);
+}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_client.h b/plat/xilinx/zynqmp/pm_service/pm_client.h
new file mode 100644
index 0000000..16e37d5
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_client.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Contains APU specific macros and macros to be defined depending on
+ * the execution environment.
+ */
+
+#ifndef _PM_CLIENT_H_
+#define _PM_CLIENT_H_
+
+#include "pm_common.h"
+#include "pm_defs.h"
+
+/* Functions to be implemented by each PU */
+void pm_client_suspend(const struct pm_proc *proc, unsigned int state);
+void pm_client_abort_suspend(void);
+void pm_client_wakeup(const struct pm_proc *proc);
+enum pm_ret_status set_ocm_retention(void);
+
+/* Global variables to be set in pm_client.c */
+extern const struct pm_proc *primary_proc;
+
+#endif /* _PM_CLIENT_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_common.h b/plat/xilinx/zynqmp/pm_service/pm_common.h
new file mode 100644
index 0000000..03351c2
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_common.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Contains definitions of commonly used macros and data types needed
+ * for PU Power Management. This file should be common for all PUs.
+ */
+
+#ifndef _PM_COMMON_H_
+#define _PM_COMMON_H_
+
+#include <debug.h>
+#include <stdint.h>
+#include "pm_defs.h"
+
+#define PAYLOAD_ARG_CNT		6U
+#define PAYLOAD_ARG_SIZE	4U	/* size in bytes */
+
+/**
+ * pm_ipi - struct for capturing IPI-channel specific info
+ * @mask	mask for enabling/disabling and triggering the IPI
+ * @base	base address for IPI
+ * @buffer_base	base address for payload buffer
+ */
+struct pm_ipi {
+	const unsigned int mask;
+	const uintptr_t base;
+	const uintptr_t buffer_base;
+};
+
+/**
+ * pm_proc - struct for capturing processor related info
+ * @node_id	node-ID of the processor
+ * @pwrdn_mask	cpu-specific mask to be used for power control register
+ * @ipi		pointer to IPI channel structure
+ *		(in APU all processors share one IPI channel)
+ */
+struct pm_proc {
+	const enum pm_node_id node_id;
+	const unsigned int pwrdn_mask;
+	const struct pm_ipi *ipi;
+};
+
+const struct pm_proc *pm_get_proc(unsigned int cpuid);
+const struct pm_proc *pm_get_proc_by_node(enum pm_node_id nid);
+
+#endif /* _PM_COMMON_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_defs.h b/plat/xilinx/zynqmp/pm_service/pm_defs.h
new file mode 100644
index 0000000..ba0c52a
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_defs.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* ZynqMP power management enums and defines */
+
+#ifndef _PM_DEFS_H_
+#define _PM_DEFS_H_
+
+/*********************************************************************
+ * Macro definitions
+ ********************************************************************/
+
+/*
+ * Version number is a 32bit value, like:
+ * (PM_VERSION_MAJOR << 16) | PM_VERSION_MINOR
+ */
+#define PM_VERSION_MAJOR	0
+#define PM_VERSION_MINOR	2
+
+#define PM_VERSION	((PM_VERSION_MAJOR << 16) | PM_VERSION_MINOR)
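+
+/*
+ * With major 0 and minor 2 this evaluates to 0x00000002; the major and
+ * minor parts can be recovered as (PM_VERSION >> 16) and
+ * (PM_VERSION & 0xFFFF) respectively.
+ */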
+
+/* Capabilities for RAM */
+#define PM_CAP_ACCESS	0x1U
+#define PM_CAP_CONTEXT	0x2U
+
+#define MAX_LATENCY	(~0U)
+#define MAX_QOS		100U
+
+/* State arguments of the self suspend */
+#define PM_STATE_CPU_IDLE		0x0U
+#define PM_STATE_SUSPEND_TO_RAM		0xFU
+
+/*********************************************************************
+ * Enum definitions
+ ********************************************************************/
+
+enum pm_api_id {
+	/* Miscellaneous API functions: */
+	PM_GET_API_VERSION = 1, /* Do not change or move */
+	PM_SET_CONFIGURATION,
+	PM_GET_NODE_STATUS,
+	PM_GET_OP_CHARACTERISTIC,
+	PM_REGISTER_NOTIFIER,
+	/* API for suspending of PUs: */
+	PM_REQ_SUSPEND,
+	PM_SELF_SUSPEND,
+	PM_FORCE_POWERDOWN,
+	PM_ABORT_SUSPEND,
+	PM_REQ_WAKEUP,
+	PM_SET_WAKEUP_SOURCE,
+	PM_SYSTEM_SHUTDOWN,
+	/* API for managing PM slaves: */
+	PM_REQ_NODE,
+	PM_RELEASE_NODE,
+	PM_SET_REQUIREMENT,
+	PM_SET_MAX_LATENCY,
+	/* Direct control API functions: */
+	PM_RESET_ASSERT,
+	PM_RESET_GET_STATUS,
+	PM_MMIO_WRITE,
+	PM_MMIO_READ,
+	PM_INIT,
+	PM_FPGA_LOAD,
+	PM_FPGA_GET_STATUS,
+	PM_GET_CHIPID,
+	PM_API_MAX
+};
+
+enum pm_node_id {
+	NODE_UNKNOWN = 0,
+	NODE_APU,
+	NODE_APU_0,
+	NODE_APU_1,
+	NODE_APU_2,
+	NODE_APU_3,
+	NODE_RPU,
+	NODE_RPU_0,
+	NODE_RPU_1,
+	NODE_PL,
+	NODE_FPD,
+	NODE_OCM_BANK_0,
+	NODE_OCM_BANK_1,
+	NODE_OCM_BANK_2,
+	NODE_OCM_BANK_3,
+	NODE_TCM_0_A,
+	NODE_TCM_0_B,
+	NODE_TCM_1_A,
+	NODE_TCM_1_B,
+	NODE_L2,
+	NODE_GPU_PP_0,
+	NODE_GPU_PP_1,
+	NODE_USB_0,
+	NODE_USB_1,
+	NODE_TTC_0,
+	NODE_TTC_1,
+	NODE_TTC_2,
+	NODE_TTC_3,
+	NODE_SATA,
+	NODE_ETH_0,
+	NODE_ETH_1,
+	NODE_ETH_2,
+	NODE_ETH_3,
+	NODE_UART_0,
+	NODE_UART_1,
+	NODE_SPI_0,
+	NODE_SPI_1,
+	NODE_I2C_0,
+	NODE_I2C_1,
+	NODE_SD_0,
+	NODE_SD_1,
+	NODE_DP,
+	NODE_GDMA,
+	NODE_ADMA,
+	NODE_NAND,
+	NODE_QSPI,
+	NODE_GPIO,
+	NODE_CAN_0,
+	NODE_CAN_1,
+	NODE_AFI,
+	NODE_APLL,
+	NODE_VPLL,
+	NODE_DPLL,
+	NODE_RPLL,
+	NODE_IOPLL,
+	NODE_DDR,
+	NODE_IPI_APU,
+	NODE_IPI_RPU_0,
+	NODE_GPU,
+	NODE_PCIE,
+	NODE_PCAP,
+	NODE_RTC,
+	NODE_MAX
+};
+
+enum pm_request_ack {
+	REQ_ACK_NO = 1,
+	REQ_ACK_BLOCKING,
+	REQ_ACK_NON_BLOCKING,
+};
+
+enum pm_abort_reason {
+	ABORT_REASON_WKUP_EVENT = 100,
+	ABORT_REASON_PU_BUSY,
+	ABORT_REASON_NO_PWRDN,
+	ABORT_REASON_UNKNOWN,
+};
+
+enum pm_suspend_reason {
+	SUSPEND_REASON_PU_REQ = 201,
+	SUSPEND_REASON_ALERT,
+	SUSPEND_REASON_SYS_SHUTDOWN,
+};
+
+enum pm_ram_state {
+	PM_RAM_STATE_OFF = 1,
+	PM_RAM_STATE_RETENTION,
+	PM_RAM_STATE_ON,
+};
+
+enum pm_opchar_type {
+	PM_OPCHAR_TYPE_POWER = 1,
+	PM_OPCHAR_TYPE_TEMP,
+	PM_OPCHAR_TYPE_LATENCY,
+};
+
+/**
+ * @PM_RET_SUCCESS:		success
+ * @PM_RET_ERROR_ARGS:		illegal arguments provided
+ * @PM_RET_ERROR_ACCESS:	access rights violation
+ * @PM_RET_ERROR_TIMEOUT:	timeout in communication with PMU
+ * @PM_RET_ERROR_NOTSUPPORTED:	feature not supported
+ * @PM_RET_ERROR_PROC:		node is not a processor node
+ * @PM_RET_ERROR_API_ID:	illegal API ID
+ * @PM_RET_ERROR_OTHER:		other error
+ */
+enum pm_ret_status {
+	PM_RET_SUCCESS,
+	PM_RET_ERROR_ARGS,
+	PM_RET_ERROR_ACCESS,
+	PM_RET_ERROR_TIMEOUT,
+	PM_RET_ERROR_NOTSUPPORTED,
+	PM_RET_ERROR_PROC,
+	PM_RET_ERROR_API_ID,
+	PM_RET_ERROR_FAILURE,
+	PM_RET_ERROR_COMMUNIC,
+	PM_RET_ERROR_DOUBLEREQ,
+	PM_RET_ERROR_OTHER,
+};
+
+/**
+ * @PM_INITIAL_BOOT:	boot is a fresh system startup
+ * @PM_RESUME:		boot is a resume
+ * @PM_BOOT_ERROR:	error, boot cause cannot be identified
+ */
+enum pm_boot_status {
+	PM_INITIAL_BOOT,
+	PM_RESUME,
+	PM_BOOT_ERROR,
+};
+
+enum pm_shutdown_type {
+	PMF_SHUTDOWN_TYPE_SHUTDOWN,
+	PMF_SHUTDOWN_TYPE_RESET,
+};
+
+enum pm_shutdown_subtype {
+	PMF_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+	PMF_SHUTDOWN_SUBTYPE_PS_ONLY,
+	PMF_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
+#endif /* _PM_DEFS_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.c b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
new file mode 100644
index 0000000..fdffde7
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bakery_lock.h>
+#include <mmio.h>
+#include <platform.h>
+#include "../zynqmp_private.h"
+#include "pm_ipi.h"
+
+/* IPI message buffers */
+#define IPI_BUFFER_BASEADDR	0xFF990000U
+
+#define IPI_BUFFER_RPU_0_BASE	(IPI_BUFFER_BASEADDR + 0x0U)
+#define IPI_BUFFER_RPU_1_BASE	(IPI_BUFFER_BASEADDR + 0x200U)
+#define IPI_BUFFER_APU_BASE	(IPI_BUFFER_BASEADDR + 0x400U)
+#define IPI_BUFFER_PL_0_BASE	(IPI_BUFFER_BASEADDR + 0x600U)
+#define IPI_BUFFER_PL_1_BASE	(IPI_BUFFER_BASEADDR + 0x800U)
+#define IPI_BUFFER_PL_2_BASE	(IPI_BUFFER_BASEADDR + 0xA00U)
+#define IPI_BUFFER_PL_3_BASE	(IPI_BUFFER_BASEADDR + 0xC00U)
+#define IPI_BUFFER_PMU_BASE	(IPI_BUFFER_BASEADDR + 0xE00U)
+
+#define IPI_BUFFER_TARGET_RPU_0_OFFSET	0x0U
+#define IPI_BUFFER_TARGET_RPU_1_OFFSET	0x40U
+#define IPI_BUFFER_TARGET_APU_OFFSET	0x80U
+#define IPI_BUFFER_TARGET_PL_0_OFFSET	0xC0U
+#define IPI_BUFFER_TARGET_PL_1_OFFSET	0x100U
+#define IPI_BUFFER_TARGET_PL_2_OFFSET	0x140U
+#define IPI_BUFFER_TARGET_PL_3_OFFSET	0x180U
+#define IPI_BUFFER_TARGET_PMU_OFFSET	0x1C0U
+
+#define IPI_BUFFER_MAX_WORDS	8
+
+#define IPI_BUFFER_REQ_OFFSET	0x0U
+#define IPI_BUFFER_RESP_OFFSET	0x20U
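+
+/*
+ * Worked example of how these offsets combine: the buffer the APU uses for
+ * requests towards the PMU sits at
+ *
+ *	IPI_BUFFER_APU_BASE + IPI_BUFFER_TARGET_PMU_OFFSET +
+ *	IPI_BUFFER_REQ_OFFSET = 0xFF990400 + 0x1C0 + 0x0 = 0xFF9905C0
+ *
+ * which is exactly the buffer_base computed in pm_ipi_send_common() below;
+ * the corresponding response area lies 0x20 bytes further on.
+ */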
+
+/* IPI Base Address */
+#define IPI_BASEADDR		0XFF300000
+
+/* APU's IPI registers */
+#define IPI_APU_ISR		(IPI_BASEADDR + 0X00000010)
+#define IPI_APU_IER		(IPI_BASEADDR + 0X00000018)
+#define IPI_APU_IDR		(IPI_BASEADDR + 0X0000001C)
+#define IPI_APU_IXR_PMU_0_MASK		(1 << 16)
+
+#define IPI_TRIG_OFFSET		0
+#define IPI_OBS_OFFSET		4
+
+/* Power Management IPI interrupt number */
+#define PM_INT_NUM		0
+#define IPI_PMU_PM_INT_BASE	(IPI_PMU_0_TRIG + (PM_INT_NUM * 0x1000))
+#define IPI_PMU_PM_INT_MASK	(IPI_APU_IXR_PMU_0_MASK << PM_INT_NUM)
+#if (PM_INT_NUM < 0 || PM_INT_NUM > 3)
+	#error PM_INT_NUM value out of range
+#endif
+
+#define IPI_APU_MASK		1U
+
+DEFINE_BAKERY_LOCK(pm_secure_lock);
+
+const struct pm_ipi apu_ipi = {
+	.mask = IPI_APU_MASK,
+	.base = IPI_BASEADDR,
+	.buffer_base = IPI_BUFFER_APU_BASE,
+};
+
+/**
+ * pm_ipi_init() - Initialize IPI peripheral for communication with PMU
+ *
+ * @return	On success, the initialization function must return 0.
+ *		Any other return value will cause the framework to ignore
+ *		the service
+ *
+ * Called from pm_setup initialization function
+ */
+int pm_ipi_init(void)
+{
+	bakery_lock_init(&pm_secure_lock);
+
+	/* IPI Interrupts Clear & Disable */
+	mmio_write_32(IPI_APU_ISR, 0xffffffff);
+	mmio_write_32(IPI_APU_IDR, 0xffffffff);
+
+	return 0;
+}
+
+/**
+ * pm_ipi_wait() - wait for pmu to handle request
+ * @proc	proc which is waiting for PMU to handle request
+ */
+static enum pm_ret_status pm_ipi_wait(const struct pm_proc *proc)
+{
+	int status;
+
+	/* Wait until previous interrupt is handled by PMU */
+	do {
+		status = mmio_read_32(proc->ipi->base + IPI_OBS_OFFSET) &
+					IPI_PMU_PM_INT_MASK;
+		/* TODO: 1) Use timer to add delay between read attempts */
+		/* TODO: 2) Return PM_RET_ERR_TIMEOUT if this times out */
+	} while (status);
+
+	return PM_RET_SUCCESS;
+}
+
+/**
+ * pm_ipi_send_common() - Sends IPI request to the PMU
+ * @proc	Pointer to the processor who is initiating request
+ * @payload	API id and call arguments to be written in IPI buffer
+ *
+ * Send an IPI request to the power controller. Caller needs to hold
+ * the 'pm_secure_lock' lock.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+static enum pm_ret_status pm_ipi_send_common(const struct pm_proc *proc,
+					     uint32_t payload[PAYLOAD_ARG_CNT])
+{
+	unsigned int offset = 0;
+	uintptr_t buffer_base = proc->ipi->buffer_base +
+					IPI_BUFFER_TARGET_PMU_OFFSET +
+					IPI_BUFFER_REQ_OFFSET;
+
+	/* Wait until previous interrupt is handled by PMU */
+	pm_ipi_wait(proc);
+
+	/* Write payload into IPI buffer */
+	for (size_t i = 0; i < PAYLOAD_ARG_CNT; i++) {
+		mmio_write_32(buffer_base + offset, payload[i]);
+		offset += PAYLOAD_ARG_SIZE;
+	}
+	/* Generate IPI to PMU */
+	mmio_write_32(proc->ipi->base + IPI_TRIG_OFFSET, IPI_PMU_PM_INT_MASK);
+
+	return PM_RET_SUCCESS;
+}
+
+/**
+ * pm_ipi_send() - Sends IPI request to the PMU
+ * @proc	Pointer to the processor who is initiating request
+ * @payload	API id and call arguments to be written in IPI buffer
+ *
+ * Send an IPI request to the power controller.
+ *
+ * @return	Returns status, either success or error+reason
+ */
+enum pm_ret_status pm_ipi_send(const struct pm_proc *proc,
+			       uint32_t payload[PAYLOAD_ARG_CNT])
+{
+	enum pm_ret_status ret;
+
+	bakery_lock_get(&pm_secure_lock);
+
+	ret = pm_ipi_send_common(proc, payload);
+
+	bakery_lock_release(&pm_secure_lock);
+
+	return ret;
+}
+
+
+/**
+ * pm_ipi_buff_read() - Reads IPI response after PMU has handled interrupt
+ * @proc	Pointer to the processor who is waiting and reading response
+ * @value	Used to return value from IPI buffer element (optional)
+ * @count	Number of values to return in @value
+ *
+ * @return	Returns status, either success or error+reason
+ */
+static enum pm_ret_status pm_ipi_buff_read(const struct pm_proc *proc,
+					   unsigned int *value, size_t count)
+{
+	size_t i;
+	uintptr_t buffer_base = proc->ipi->buffer_base +
+				IPI_BUFFER_TARGET_PMU_OFFSET +
+				IPI_BUFFER_RESP_OFFSET;
+
+	pm_ipi_wait(proc);
+
+	/*
+	 * Read response from IPI buffer
+	 * buf-0: success or error+reason
+	 * buf-1: value
+	 * buf-2: unused
+	 * buf-3: unused
+	 */
+	for (i = 1; i <= count; i++) {
+		*value = mmio_read_32(buffer_base + (i * PAYLOAD_ARG_SIZE));
+		value++;
+	}
+
+	return mmio_read_32(buffer_base);
+}
+
+/**
+ * pm_ipi_buff_read_callb() - Read callback data from the IPI buffer
+ * @value	Used to return values from the IPI buffer
+ * @count	Number of values to return in @value
+ *
+ * Unlike pm_ipi_buff_read(), this reads the PMU-to-APU request (callback)
+ * buffer and does not return a status.
+ */
+void pm_ipi_buff_read_callb(unsigned int *value, size_t count)
+{
+	size_t i;
+	uintptr_t buffer_base = IPI_BUFFER_PMU_BASE +
+				IPI_BUFFER_TARGET_APU_OFFSET +
+				IPI_BUFFER_REQ_OFFSET;
+
+	if (count > IPI_BUFFER_MAX_WORDS)
+		count = IPI_BUFFER_MAX_WORDS;
+
+	for (i = 0; i <= count; i++) {
+		*value = mmio_read_32(buffer_base + (i * PAYLOAD_ARG_SIZE));
+		value++;
+	}
+}
+
+/**
+ * pm_ipi_send_sync() - Sends IPI request to the PMU
+ * @proc	Pointer to the processor who is initiating request
+ * @payload	API id and call arguments to be written in IPI buffer
+ * @value	Used to return value from IPI buffer element (optional)
+ * @count	Number of values to return in @value
+ *
+ * Send an IPI request to the power controller and wait for it to be handled.
+ *
+ * @return	Returns status, either success or error+reason and, optionally,
+ *		@value
+ */
+enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
+				    uint32_t payload[PAYLOAD_ARG_CNT],
+				    unsigned int *value, size_t count)
+{
+	enum pm_ret_status ret;
+
+	bakery_lock_get(&pm_secure_lock);
+
+	ret = pm_ipi_send_common(proc, payload);
+	if (ret != PM_RET_SUCCESS)
+		goto unlock;
+
+	ret = pm_ipi_buff_read(proc, value, count);
+
+unlock:
+	bakery_lock_release(&pm_secure_lock);
+
+	return ret;
+}
+
+void pm_ipi_irq_enable(void)
+{
+	mmio_write_32(IPI_APU_IER, IPI_APU_IXR_PMU_0_MASK);
+}
+
+void pm_ipi_irq_disable(void)
+{
+	mmio_write_32(IPI_APU_IDR, IPI_APU_IXR_PMU_0_MASK);
+}
+
+void pm_ipi_irq_clear(void)
+{
+	mmio_write_32(IPI_APU_ISR, IPI_APU_IXR_PMU_0_MASK);
+}
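For readers following the IPI protocol implemented above, here is a minimal, hypothetical sketch (not part of this patch) of how a PM API wrapper might drive pm_ipi_send_sync(): the API id goes in word 0 of the request buffer, and the response comes back with the status in buf-0 and any value in buf-1. The wrapper name is illustrative; the patch's own PM API layer (see pm_api_sys.h, included by pm_svc_main.c below) follows the same pattern.

```c
/*
 * Hypothetical wrapper, for illustration only: request word 0 carries the
 * API id, and pm_ipi_send_sync() returns the status from buf-0 while
 * copying buf-1 into 'version'.
 */
static enum pm_ret_status example_get_api_version(const struct pm_proc *proc,
						  unsigned int *version)
{
	uint32_t payload[PAYLOAD_ARG_CNT] = { 0 };

	payload[0] = PM_GET_API_VERSION;	/* API id, no further arguments */

	return pm_ipi_send_sync(proc, payload, version, 1);
}
```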
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.h b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
new file mode 100644
index 0000000..a76298b
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _PM_IPI_H_
+#define _PM_IPI_H_
+
+#include "pm_common.h"
+
+int pm_ipi_init(void);
+
+enum pm_ret_status pm_ipi_send(const struct pm_proc *proc,
+			       uint32_t payload[PAYLOAD_ARG_CNT]);
+enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
+				    uint32_t payload[PAYLOAD_ARG_CNT],
+				    unsigned int *value, size_t count);
+void pm_ipi_buff_read_callb(unsigned int *value, size_t count);
+void pm_ipi_irq_enable(void);
+void pm_ipi_irq_disable(void);
+void pm_ipi_irq_clear(void);
+
+#endif /* _PM_IPI_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
new file mode 100644
index 0000000..f4e679b
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Top-level SMC handler for ZynqMP power management calls and
+ * IPI setup functions for communication with PMU.
+ */
+
+#include <errno.h>
+#include <gic_common.h>
+#include <runtime_svc.h>
+#include <string.h>
+#include "../zynqmp_private.h"
+#include "pm_api_sys.h"
+#include "pm_client.h"
+#include "pm_ipi.h"
+
+#define PM_GET_CALLBACK_DATA	0xa01
+
+/* 0 - UP, !0 - DOWN */
+static int32_t pm_down = !0;
+
+/**
+ * pm_ctx - Structure that contains data for power management
+ * @api_version		version of PM API, must match with one on PMU side
+ * @payload		payload array used to store received
+ *			data from ipi buffer registers
+ */
+static struct {
+	uint32_t api_version;
+	uint32_t payload[PAYLOAD_ARG_CNT];
+} pm_ctx;
+
+/**
+ * pm_setup() - PM service setup
+ *
+ * @return	On success, the initialization function must return 0.
+ *		Any other return value will cause the framework to ignore
+ *		the service
+ *
+ * Initialization function for ZynqMP power management, for
+ * communication with the PMU.
+ *
+ * Called from sip_svc_setup initialization function with the
+ * rt_svc_init signature.
+ */
+int pm_setup(void)
+{
+	int status;
+
+	if (!zynqmp_is_pmu_up())
+		return -ENODEV;
+
+	status = pm_ipi_init();
+
+	if (status == 0)
+		INFO("BL31: PM Service Init Complete: API v%d.%d\n",
+		     PM_VERSION_MAJOR, PM_VERSION_MINOR);
+	else
+		INFO("BL31: PM Service Init Failed, Error Code %d!\n", status);
+
+	pm_down = status;
+
+	return status;
+}
+
+/**
+ * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
+ * @smc_fid - Function Identifier
+ * @x1 - x4 - Arguments
+ * @cookie  - Unused
+ * @handle  - Pointer to caller's context structure
+ *
+ * @return  - Unused
+ *
+ * Determines whether smc_fid is a valid and supported PM SMC Function ID from
+ * the list of pm_api_ids; otherwise completes the request with
+ * the unknown SMC Function ID.
+ *
+ * The SMC calls for PM service are forwarded from SIP Service SMC handler
+ * function with rt_svc_handle signature
+ */
+uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
+			uint64_t x4, void *cookie, void *handle, uint64_t flags)
+{
+	enum pm_ret_status ret;
+
+	uint32_t pm_arg[4];
+
+	/* Handle case where PM wasn't initialized properly */
+	if (pm_down)
+		SMC_RET1(handle, SMC_UNK);
+
+	pm_arg[0] = (uint32_t)x1;
+	pm_arg[1] = (uint32_t)(x1 >> 32);
+	pm_arg[2] = (uint32_t)x2;
+	pm_arg[3] = (uint32_t)(x2 >> 32);
+
+	switch (smc_fid & FUNCID_NUM_MASK) {
+	/* PM API Functions */
+	case PM_SELF_SUSPEND:
+		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
+				      pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_REQ_SUSPEND:
+		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
+				     pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_REQ_WAKEUP:
+		ret = pm_req_wakeup(pm_arg[0], pm_arg[1], pm_arg[2],
+				    pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_FORCE_POWERDOWN:
+		ret = pm_force_powerdown(pm_arg[0], pm_arg[1]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_ABORT_SUSPEND:
+		ret = pm_abort_suspend(pm_arg[0]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_SET_WAKEUP_SOURCE:
+		ret = pm_set_wakeup_source(pm_arg[0], pm_arg[1], pm_arg[2]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_SYSTEM_SHUTDOWN:
+		ret = pm_system_shutdown(pm_arg[0], pm_arg[1]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_REQ_NODE:
+		ret = pm_req_node(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_RELEASE_NODE:
+		ret = pm_release_node(pm_arg[0]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_SET_REQUIREMENT:
+		ret = pm_set_requirement(pm_arg[0], pm_arg[1], pm_arg[2],
+					 pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_SET_MAX_LATENCY:
+		ret = pm_set_max_latency(pm_arg[0], pm_arg[1]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_GET_API_VERSION:
+		/* Check if the PM API version has already been verified */
+		if (pm_ctx.api_version == PM_VERSION) {
+			SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
+				 ((uint64_t)PM_VERSION << 32));
+		}
+
+		ret = pm_get_api_version(&pm_ctx.api_version);
+		/*
+		 * Enable the IPI IRQ and assume the rich OS is ready to
+		 * handle callback IRQs now. Even if that assumption is
+		 * wrong, this does not enable the IRQ in the GIC.
+		 */
+		pm_ipi_irq_enable();
+		SMC_RET1(handle, (uint64_t)ret |
+			 ((uint64_t)pm_ctx.api_version << 32));
+
+	case PM_SET_CONFIGURATION:
+		ret = pm_set_configuration(pm_arg[0]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_GET_NODE_STATUS:
+		ret = pm_get_node_status(pm_arg[0]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_GET_OP_CHARACTERISTIC:
+	{
+		uint32_t result;
+
+		ret = pm_get_op_characteristic(pm_arg[0], pm_arg[1], &result);
+		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result << 32));
+	}
+
+	case PM_REGISTER_NOTIFIER:
+		ret = pm_register_notifier(pm_arg[0], pm_arg[1], pm_arg[2],
+					   pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_RESET_ASSERT:
+		ret = pm_reset_assert(pm_arg[0], pm_arg[1]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_RESET_GET_STATUS:
+	{
+		uint32_t reset_status;
+
+		ret = pm_reset_get_status(pm_arg[0], &reset_status);
+		SMC_RET1(handle, (uint64_t)ret |
+			 ((uint64_t)reset_status << 32));
+	}
+
+	/* PM memory access functions */
+	case PM_MMIO_WRITE:
+		ret = pm_mmio_write(pm_arg[0], pm_arg[1], pm_arg[2]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_MMIO_READ:
+	{
+		uint32_t value;
+
+		ret = pm_mmio_read(pm_arg[0], &value);
+		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
+	}
+
+	case PM_FPGA_LOAD:
+		ret = pm_fpga_load(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3]);
+		SMC_RET1(handle, (uint64_t)ret);
+
+	case PM_FPGA_GET_STATUS:
+	{
+		uint32_t value;
+
+		ret = pm_fpga_get_status(&value);
+		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
+	}
+
+	case PM_GET_CHIPID:
+	{
+		uint32_t result[2];
+
+		ret = pm_get_chipid(result);
+		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32),
+			 result[1]);
+	}
+
+	case PM_GET_CALLBACK_DATA:
+	{
+		uint32_t result[4];
+
+		pm_get_callbackdata(result, sizeof(result));
+		SMC_RET2(handle,
+			 (uint64_t)result[0] | ((uint64_t)result[1] << 32),
+			 (uint64_t)result[2] | ((uint64_t)result[3] << 32));
+	}
+
+	default:
+		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
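To make the register packing in pm_smc_handler() above easier to follow, here is a hedged, non-secure-side sketch (not part of this patch): each 64-bit SMC argument carries two 32-bit PM arguments, and each 64-bit return value carries the status in the low word with any payload value in the high word. Both smc_call() and PM_MMIO_READ_FID are hypothetical stand-ins for the caller's SMC conduit and the full SiP function id.

```c
#include <stdint.h>

struct smc_ret { uint64_t x0; };

/* Hypothetical conduit: issues an SMC and returns x0 as seen by the caller */
extern struct smc_ret smc_call(uint32_t fid, uint64_t x1, uint64_t x2);

static uint32_t example_pm_mmio_read(uint32_t address, uint32_t *value)
{
	/* pm_arg[0] travels in the low half of x1; pm_arg[1] would be the high half */
	struct smc_ret r = smc_call(PM_MMIO_READ_FID /* hypothetical */, address, 0);

	/* The handler packs: x0 = status | (read value << 32) */
	*value = (uint32_t)(r.x0 >> 32);
	return (uint32_t)r.x0;
}
```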
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.h b/plat/xilinx/zynqmp/pm_service/pm_svc_main.h
new file mode 100644
index 0000000..d895321
--- /dev/null
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _PM_SVC_MAIN_H_
+#define _PM_SVC_MAIN_H_
+
+#include "pm_common.h"
+
+int pm_setup(void);
+uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
+			uint64_t x4, void *cookie, void *handle,
+			uint64_t flags);
+
+#endif /*  _PM_SVC_MAIN_H_ */
diff --git a/plat/xilinx/zynqmp/sip_svc_setup.c b/plat/xilinx/zynqmp/sip_svc_setup.c
new file mode 100644
index 0000000..ae6ecaf
--- /dev/null
+++ b/plat/xilinx/zynqmp/sip_svc_setup.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Top level SMC handler for SiP calls. Dispatch PM calls to PM SMC handler. */
+
+#include <runtime_svc.h>
+#include <uuid.h>
+#include "pm_svc_main.h"
+
+/* SMC function IDs for SiP Service queries */
+#define ZYNQMP_SIP_SVC_CALL_COUNT	0x8200ff00
+#define ZYNQMP_SIP_SVC_UID		0x8200ff01
+#define ZYNQMP_SIP_SVC_VERSION		0x8200ff03
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR	0
+#define SIP_SVC_VERSION_MINOR	1
+
+/* These macros are used to identify PM calls from the SMC function ID */
+#define PM_FID_MASK	0xf000u
+#define PM_FID_VALUE	0u
+#define is_pm_fid(_fid) (((_fid) & PM_FID_MASK) == PM_FID_VALUE)
+
+/* SiP Service UUID */
+DEFINE_SVC_UUID(zynqmp_sip_uuid,
+		0x2a1d9b5c, 0x8605, 0x4023, 0xa6, 0x1b,
+		0xb9, 0x25, 0x82, 0x2d, 0xe3, 0xa5);
+
+/**
+ * sip_svc_setup() - Setup SiP Service
+ *
+ * Invokes PM setup
+ */
+static int32_t sip_svc_setup(void)
+{
+	/* PM implementation as SiP Service */
+	pm_setup();
+
+	return 0;
+}
+
+/**
+ * sip_svc_smc_handler() - Top-level SiP Service SMC handler
+ *
+ * Handler for all SiP SMC calls. Handles standard SiP requests
+ * and calls PM SMC handler if the call is for a PM-API function.
+ */
+uint64_t sip_svc_smc_handler(uint32_t smc_fid,
+			     uint64_t x1,
+			     uint64_t x2,
+			     uint64_t x3,
+			     uint64_t x4,
+			     void *cookie,
+			     void *handle,
+			     uint64_t flags)
+{
+	/* Let PM SMC handler deal with PM-related requests */
+	if (is_pm_fid(smc_fid)) {
+		return pm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+				      flags);
+	}
+
+	switch (smc_fid) {
+	case ZYNQMP_SIP_SVC_CALL_COUNT:
+		/* PM functions + default functions */
+		SMC_RET1(handle, PM_API_MAX + 2);
+
+	case ZYNQMP_SIP_SVC_UID:
+		SMC_UUID_RET(handle, zynqmp_sip_uuid);
+
+	case ZYNQMP_SIP_SVC_VERSION:
+		SMC_RET2(handle, SIP_SVC_VERSION_MAJOR, SIP_SVC_VERSION_MINOR);
+
+	default:
+		WARN("Unimplemented SiP Service Call: 0x%x\n", smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
+
+/* Register PM Service Calls as runtime service */
+DECLARE_RT_SVC(
+		sip_svc,
+		OEN_SIP_START,
+		OEN_SIP_END,
+		SMC_TYPE_FAST,
+		sip_svc_setup,
+		sip_svc_smc_handler);
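A small, self-contained check (illustrative only, not part of this patch) of the FID routing above: any SiP FID whose function-number bits [15:12] are zero is treated as a PM call and forwarded to pm_smc_handler(), while the 0x8200ffxx query FIDs stay in the local switch. The example FID values are assumptions for demonstration.

```c
#include <assert.h>
#include <stdint.h>

#define PM_FID_MASK	0xf000u
#define PM_FID_VALUE	0u
#define is_pm_fid(_fid)	(((_fid) & PM_FID_MASK) == PM_FID_VALUE)

int main(void)
{
	/* Hypothetical PM API call (SMC64 SiP, function number below 0x1000) */
	assert(is_pm_fid(0xc2000002u));

	/* ZYNQMP_SIP_SVC_UID: function number 0xff01, handled locally */
	assert(!is_pm_fid(0x8200ff01u));

	return 0;
}
```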
diff --git a/plat/xilinx/zynqmp/tsp/tsp-zynqmp.mk b/plat/xilinx/zynqmp/tsp/tsp-zynqmp.mk
new file mode 100644
index 0000000..318b01d
--- /dev/null
+++ b/plat/xilinx/zynqmp/tsp/tsp-zynqmp.mk
@@ -0,0 +1,8 @@
+#
+# Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# TSP source files specific to ZynqMP platform
+BL32_SOURCES		+=	plat/common/aarch64/platform_mp_stack.S		\
+				plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
diff --git a/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c b/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
new file mode 100644
index 0000000..ecc4d0a
--- /dev/null
+++ b/plat/xilinx/zynqmp/tsp/tsp_plat_setup.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <console.h>
+#include <debug.h>
+#include <plat_arm.h>
+#include <platform_tsp.h>
+#include "../zynqmp_private.h"
+
+#define BL32_END (unsigned long)(&__BL32_END__)
+
+/*******************************************************************************
+ * Initialize the UART
+ ******************************************************************************/
+void tsp_early_platform_setup(void)
+{
+	/*
+	 * Initialize a different console than already in use to display
+	 * messages from TSP
+	 */
+	console_init(ZYNQMP_UART_BASE, zynqmp_get_uart_clk(),
+		     ZYNQMP_UART_BAUDRATE);
+
+	/* Initialize the platform config for future decision making */
+	zynqmp_config_setup();
+}
+
+/*******************************************************************************
+ * Perform platform specific setup
+ ******************************************************************************/
+void tsp_platform_setup(void)
+{
+	plat_arm_gic_driver_init();
+	plat_arm_gic_init();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU.
+ ******************************************************************************/
+void tsp_plat_arch_setup(void)
+{
+	arm_setup_page_tables(BL32_BASE,
+			      BL32_END - BL32_BASE,
+			      BL_CODE_BASE,
+			      BL_CODE_END,
+			      BL_RO_DATA_BASE,
+			      BL_RO_DATA_END,
+			      BL_COHERENT_RAM_BASE,
+			      BL_COHERENT_RAM_END
+			      );
+	enable_mmu_el1(0);
+}
diff --git a/plat/xilinx/zynqmp/zynqmp_def.h b/plat/xilinx/zynqmp/zynqmp_def.h
new file mode 100644
index 0000000..e90ad02
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_def.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ZYNQMP_DEF_H__
+#define __ZYNQMP_DEF_H__
+
+#include <common_def.h>
+
+#define ZYNQMP_CONSOLE_ID_cadence	1
+#define ZYNQMP_CONSOLE_ID_cadence0	1
+#define ZYNQMP_CONSOLE_ID_cadence1	2
+#define ZYNQMP_CONSOLE_ID_dcc		3
+
+#define ZYNQMP_CONSOLE_IS(con)	(ZYNQMP_CONSOLE_ID_ ## con == ZYNQMP_CONSOLE)
+
+/* Firmware Image Package */
+#define ZYNQMP_PRIMARY_CPU		0
+
+/* Memory location options for Shared data and TSP in ZYNQMP */
+#define ZYNQMP_IN_TRUSTED_SRAM		0
+#define ZYNQMP_IN_TRUSTED_DRAM		1
+
+/*******************************************************************************
+ * ZYNQMP memory map related constants
+ ******************************************************************************/
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE		0xFF000000
+#define DEVICE0_SIZE		0x00E00000
+#define DEVICE1_BASE		0xF9000000
+#define DEVICE1_SIZE		0x00800000
+
+/* For cpu reset APU space here too 0xFE5F1000 CRF_APB */
+#define CRF_APB_BASE		0xFD1A0000
+#define CRF_APB_SIZE		0x00600000
+
+/* CRF registers and bitfields */
+#define CRF_APB_RST_FPD_APU	(CRF_APB_BASE + 0x00000104)
+
+#define CRF_APB_RST_FPD_APU_ACPU_RESET		(1 << 0)
+#define CRF_APB_RST_FPD_APU_ACPU_PWRON_RESET	(1 << 10)
+
+/* CRL registers and bitfields */
+#define CRL_APB_BASE			0xFF5E0000
+#define CRL_APB_RPLL_CTRL		(CRL_APB_BASE + 0x30)
+#define CRL_APB_BOOT_MODE_USER		(CRL_APB_BASE + 0x200)
+#define CRL_APB_RESET_CTRL		(CRL_APB_BASE + 0x218)
+
+#define CRL_APB_RPLL_CTRL_BYPASS	(1 << 3)
+
+#define CRL_APB_RESET_CTRL_SOFT_RESET	(1 << 4)
+
+#define CRL_APB_BOOT_MODE_MASK		(0xf << 0)
+#define ZYNQMP_BOOTMODE_JTAG		0
+
+/* system counter registers and bitfields */
+#define IOU_SCNTRS_BASE			0xFF260000
+#define IOU_SCNTRS_BASEFREQ		(IOU_SCNTRS_BASE + 0x20)
+
+/* APU registers and bitfields */
+#define APU_BASE		0xFD5C0000
+#define APU_CONFIG_0		(APU_BASE + 0x20)
+#define APU_RVBAR_L_0		(APU_BASE + 0x40)
+#define APU_RVBAR_H_0		(APU_BASE + 0x44)
+#define APU_PWRCTL		(APU_BASE + 0x90)
+
+#define APU_CONFIG_0_VINITHI_SHIFT	8
+#define APU_0_PWRCTL_CPUPWRDWNREQ_MASK		1
+#define APU_1_PWRCTL_CPUPWRDWNREQ_MASK		2
+#define APU_2_PWRCTL_CPUPWRDWNREQ_MASK		4
+#define APU_3_PWRCTL_CPUPWRDWNREQ_MASK		8
+
+/* PMU registers and bitfields */
+#define PMU_GLOBAL_BASE			0xFFD80000
+#define PMU_GLOBAL_CNTRL		(PMU_GLOBAL_BASE + 0)
+#define PMU_GLOBAL_GEN_STORAGE6		(PMU_GLOBAL_BASE + 0x48)
+#define PMU_GLOBAL_REQ_PWRUP_STATUS	(PMU_GLOBAL_BASE + 0x110)
+#define PMU_GLOBAL_REQ_PWRUP_EN		(PMU_GLOBAL_BASE + 0x118)
+#define PMU_GLOBAL_REQ_PWRUP_DIS	(PMU_GLOBAL_BASE + 0x11c)
+#define PMU_GLOBAL_REQ_PWRUP_TRIG	(PMU_GLOBAL_BASE + 0x120)
+
+#define PMU_GLOBAL_CNTRL_FW_IS_PRESENT	(1 << 4)
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_ARM_CCI_BASE		0xFD6E0000
+#define PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX	3
+#define PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX	4
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+#define BASE_GICD_BASE		0xF9010000
+#define BASE_GICC_BASE		0xF9020000
+#define BASE_GICH_BASE		0xF9040000
+#define BASE_GICV_BASE		0xF9060000
+
+#define ARM_IRQ_SEC_PHY_TIMER		29
+
+#define ARM_IRQ_SEC_SGI_0		8
+#define ARM_IRQ_SEC_SGI_1		9
+#define ARM_IRQ_SEC_SGI_2		10
+#define ARM_IRQ_SEC_SGI_3		11
+#define ARM_IRQ_SEC_SGI_4		12
+#define ARM_IRQ_SEC_SGI_5		13
+#define ARM_IRQ_SEC_SGI_6		14
+#define ARM_IRQ_SEC_SGI_7		15
+
+#define MAX_INTR_EL3			128
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define ZYNQMP_UART0_BASE		0xFF000000
+#define ZYNQMP_UART1_BASE		0xFF010000
+
+#if ZYNQMP_CONSOLE_IS(cadence)
+# define ZYNQMP_UART_BASE	ZYNQMP_UART0_BASE
+#elif ZYNQMP_CONSOLE_IS(cadence1)
+# define ZYNQMP_UART_BASE	ZYNQMP_UART1_BASE
+#else
+# error "invalid ZYNQMP_CONSOLE"
+#endif
+
+#define PLAT_ARM_CRASH_UART_BASE	ZYNQMP_UART_BASE
+/* A C routine cannot be called the way this is used now - hardcode a value */
+#define	PLAT_ARM_CRASH_UART_CLK_IN_HZ	100000000 /* FIXME */
+
+/* Must be non zero */
+#define ZYNQMP_UART_BAUDRATE	115200
+#define ARM_CONSOLE_BAUDRATE	ZYNQMP_UART_BAUDRATE
+
+/* Silicon version detection */
+#define ZYNQMP_SILICON_VER_MASK		0xF000
+#define ZYNQMP_SILICON_VER_SHIFT	12
+#define ZYNQMP_CSU_VERSION_SILICON	0
+#define ZYNQMP_CSU_VERSION_EP108	1
+#define ZYNQMP_CSU_VERSION_VELOCE	2
+#define ZYNQMP_CSU_VERSION_QEMU		3
+
+#define ZYNQMP_RTL_VER_MASK		0xFF0
+#define ZYNQMP_RTL_VER_SHIFT		4
+
+#define ZYNQMP_PS_VER_MASK		0xF
+#define ZYNQMP_PS_VER_SHIFT		0
+
+#define ZYNQMP_CSU_BASEADDR		0xFFCA0000
+#define ZYNQMP_CSU_IDCODE_OFFSET	0x40
+
+#define ZYNQMP_CSU_IDCODE_XILINX_ID_SHIFT	0
+#define ZYNQMP_CSU_IDCODE_XILINX_ID_MASK	(0xFFF << ZYNQMP_CSU_IDCODE_XILINX_ID_SHIFT)
+#define ZYNQMP_CSU_IDCODE_XILINX_ID		0x093
+
+#define ZYNQMP_CSU_IDCODE_SVD_SHIFT		12
+#define ZYNQMP_CSU_IDCODE_SVD_MASK		(0xE << ZYNQMP_CSU_IDCODE_SVD_SHIFT)
+#define ZYNQMP_CSU_IDCODE_DEVICE_CODE_SHIFT	15
+#define ZYNQMP_CSU_IDCODE_DEVICE_CODE_MASK	(0xF << ZYNQMP_CSU_IDCODE_DEVICE_CODE_SHIFT)
+#define ZYNQMP_CSU_IDCODE_SUB_FAMILY_SHIFT	19
+#define ZYNQMP_CSU_IDCODE_SUB_FAMILY_MASK	(0x3 << ZYNQMP_CSU_IDCODE_SUB_FAMILY_SHIFT)
+#define ZYNQMP_CSU_IDCODE_FAMILY_SHIFT		21
+#define ZYNQMP_CSU_IDCODE_FAMILY_MASK		(0x7F << ZYNQMP_CSU_IDCODE_FAMILY_SHIFT)
+#define ZYNQMP_CSU_IDCODE_FAMILY		0x23
+
+#define ZYNQMP_CSU_IDCODE_REVISION_SHIFT	28
+#define ZYNQMP_CSU_IDCODE_REVISION_MASK		(0xF << ZYNQMP_CSU_IDCODE_REVISION_SHIFT)
+#define ZYNQMP_CSU_IDCODE_REVISION		0
+
+#define ZYNQMP_CSU_VERSION_OFFSET	0x44
+
+/* Access control register defines */
+#define ACTLR_EL3_L2ACTLR_BIT	(1 << 6)
+#define ACTLR_EL3_CPUACTLR_BIT	(1 << 0)
+
+#endif /* __ZYNQMP_DEF_H__ */
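As an illustration of the CSU IDCODE bit fields defined above, here is a hedged sketch (not part of this patch) of how the manufacturer id and device family could be decoded; the raw value would normally be read from ZYNQMP_CSU_BASEADDR + ZYNQMP_CSU_IDCODE_OFFSET, and the helper name is illustrative.

```c
/* Hedged sketch: decode the IDCODE fields using the definitions above */
static int example_idcode_is_zynqmp(uint32_t idcode)
{
	uint32_t xilinx_id = (idcode & ZYNQMP_CSU_IDCODE_XILINX_ID_MASK) >>
			     ZYNQMP_CSU_IDCODE_XILINX_ID_SHIFT;
	uint32_t family = (idcode & ZYNQMP_CSU_IDCODE_FAMILY_MASK) >>
			  ZYNQMP_CSU_IDCODE_FAMILY_SHIFT;

	/* 0x093 is the Xilinx id, 0x23 the ZynqMP family, per the defines */
	return xilinx_id == ZYNQMP_CSU_IDCODE_XILINX_ID &&
	       family == ZYNQMP_CSU_IDCODE_FAMILY;
}
```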
diff --git a/plat/xilinx/zynqmp/zynqmp_private.h b/plat/xilinx/zynqmp/zynqmp_private.h
new file mode 100644
index 0000000..94a99f4
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ZYNQMP_PRIVATE_H__
+#define __ZYNQMP_PRIVATE_H__
+
+#include <interrupt_mgmt.h>
+
+void zynqmp_config_setup(void);
+
+/* ZynqMP specific functions */
+unsigned int zynqmp_get_uart_clk(void);
+int zynqmp_is_pmu_up(void);
+unsigned int zynqmp_get_bootmode(void);
+
+/* For FSBL handover */
+void fsbl_atf_handover(entry_point_info_t *bl32_image_ep_info,
+		       entry_point_info_t *bl33_image_ep_info);
+
+#endif /* __ZYNQMP_PRIVATE_H__ */
diff --git a/readme.md b/readme.md
deleted file mode 100644
index 454b5f1..0000000
--- a/readme.md
+++ /dev/null
@@ -1,162 +0,0 @@
-ARM Trusted Firmware - version 1.1
-==================================
-
-ARM Trusted Firmware provides a reference implementation of secure world
-software for [ARMv8-A], including Exception Level 3 (EL3) software. This release
-provides complete support for version 0.2 of the [PSCI] specification, initial
-support for the new version 1.0 of that specification, and prototype support for
-the Trusted Board Boot Requirements specification.
-
-The intent is to provide a reference implementation of various ARM interface
-standards, such as the Power State Coordination Interface ([PSCI]), Trusted
-Board Boot Requirements (TBBR) and [Secure Monitor] [TEE-SMC] code. As far as
-possible the code is designed for reuse or porting to other ARMv8-A model and
-hardware platforms.
-
-ARM will continue development in collaboration with interested parties to
-provide a full reference implementation of PSCI, TBBR and Secure Monitor code
-to the benefit of all developers working with ARMv8-A TrustZone technology.
-
-
-License
--------
-
-The software is provided under a BSD 3-Clause [license]. Certain source files
-are derived from FreeBSD code: the original license is included in these
-source files.
-
-
-This Release
-------------
-
-This release is a limited functionality implementation of the Trusted Firmware.
-It provides a suitable starting point for productization. Future versions will
-contain new features, optimizations and quality improvements.
-
-### Functionality
-
-*   Prototype implementation of a subset of the Trusted Board Boot Requirements
-    Platform Design Document (PDD). This includes packaging the various firmware
-    images into a Firmware Image Package (FIP) to be loaded from non-volatile
-    storage, and a prototype of authenticated boot using key certificates stored
-    in the FIP.
-
-*   Initializes the secure world (for example, exception vectors, control
-    registers, GIC and interrupts for the platform), before transitioning into
-    the normal world.
-
-*   Supports both GICv2 and GICv3 initialization for use by normal world
-    software.
-
-*   Starts the normal world at the Exception Level and Register Width specified
-    by the platform port. Typically this is AArch64 EL2 if available.
-
-*   Handles SMCs (Secure Monitor Calls) conforming to the [SMC Calling
-    Convention PDD] [SMCCC] using an EL3 runtime services framework.
-
-*   Handles SMCs relating to the [Power State Coordination Interface PDD] [PSCI]
-    for the Secondary CPU Boot, CPU Hotplug, CPU Idle and System Shutdown/Reset
-    use-cases.
-
-*   A Test Secure-EL1 Payload and Dispatcher to demonstrate Secure Monitor
-    functionality such as world switching, EL1 context management and interrupt
-    routing. This also demonstrates Secure-EL1 interaction with PSCI. Some of
-    this functionality is provided in library form for re-use by other
-    Secure-EL1 Payload Dispatchers.
-
-*   Support for alternative Trusted Boot Firmware. Some platforms have their own
-    Trusted Boot implementation and only require the Secure Monitor
-    functionality provided by ARM Trusted Firmware.
-
-*   Isolation of memory accessible by the secure world from the normal world
-    through programming of a TrustZone controller.
-
-*   Support for CPU specific reset sequences, power down sequences and register
-    dumping during crash reporting. The CPU specific reset sequences include
-    support for errata workarounds.
-
-For a full description of functionality and implementation details, please
-see the [Firmware Design] and supporting documentation. The [Change Log]
-provides details of changes made since the last release.
-
-### Platforms
-
-This release of the Trusted Firmware has been tested on Revision B of the
-[Juno ARM Development Platform] [Juno] with Version r0p0-00rel7 of the
-[ARM SCP Firmware] [SCP download].
-
-The Trusted Firmware has also been tested on the 64-bit Linux versions of the
-following ARM [FVP]s:
-
-*   `Foundation_Platform` (Version 9.1, Build 9.1.33)
-*   `FVP_Base_AEMv8A-AEMv8A` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x4-A53x4` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x1-A53x1` (Version 6.2, Build 0.8.6202)
-*   `FVP_Base_Cortex-A57x2-A53x4` (Version 6.2, Build 0.8.6202)
-
-The Foundation FVP can be downloaded free of charge. The Base FVPs can be
-licensed from ARM: see [www.arm.com/fvp] [FVP].
-
-### Still to Come
-
-*   Complete and more flexible Trusted Board Boot implementation.
-
-*   Complete implementation of the [PSCI] v1.0 specification.
-
-*   Support for alternative types of Secure-EL1 Payloads.
-
-*   Extending the GICv3 support to the secure world.
-
-*   Support for new System IP devices.
-
-For a full list of detailed issues in the current code, please see the [Change
-Log] and the [GitHub issue tracker].
-
-
-Getting Started
----------------
-
-Get the Trusted Firmware source code from
-[GitHub](https://www.github.com/ARM-software/arm-trusted-firmware).
-
-See the [User Guide] for instructions on how to install, build and use
-the Trusted Firmware with the ARM [FVP]s.
-
-See the [Firmware Design] for information on how the ARM Trusted Firmware works.
-
-See the [Porting Guide] as well for information about how to use this
-software on another ARMv8-A platform.
-
-See the [Contributing Guidelines] for information on how to contribute to this
-project and the [Acknowledgments] file for a list of contributors to the
-project.
-
-### Feedback and support
-
-ARM welcomes any feedback on the Trusted Firmware. Please send feedback using
-the [GitHub issue tracker].
-
-ARM licensees may contact ARM directly via their partner managers.
-
-
-- - - - - - - - - - - - - - - - - - - - - - - - - -
-
-_Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved._
-
-
-[License]:                  ./license.md "BSD license for ARM Trusted Firmware"
-[Contributing Guidelines]:  ./contributing.md "Guidelines for contributors"
-[Acknowledgments]:          ./acknowledgements.md "Contributor acknowledgments"
-[Change Log]:               ./docs/change-log.md
-[User Guide]:               ./docs/user-guide.md
-[Firmware Design]:          ./docs/firmware-design.md
-[Porting Guide]:            ./docs/porting-guide.md
-
-[ARMv8-A]:               http://www.arm.com/products/processors/armv8-architecture.php "ARMv8-A Architecture"
-[FVP]:                   http://www.arm.com/fvp "ARM's Fixed Virtual Platforms"
-[Juno]:                  http://www.arm.com/products/tools/development-boards/versatile-express/juno-arm-development-platform.php "Juno ARM Development Platform"
-[SCP download]:          https://silver.arm.com/download/download.tm?pv=1764630
-[PSCI]:                  http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
-[SMCCC]:                 http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
-[TEE-SMC]:               http://www.arm.com/products/processors/technologies/trustzone/tee-smc.php "Secure Monitor and TEEs"
-[GitHub issue tracker]:  https://github.com/ARM-software/tf-issues/issues
diff --git a/readme.rst b/readme.rst
new file mode 100644
index 0000000..c3c0319
--- /dev/null
+++ b/readme.rst
@@ -0,0 +1,221 @@
+ARM Trusted Firmware - version 1.4
+==================================
+
+ARM Trusted Firmware provides a reference implementation of secure world
+software for `ARMv8-A`_, including a `Secure Monitor`_ executing at
+Exception Level 3 (EL3). It implements various ARM interface standards, such as:
+
+-  The `Power State Coordination Interface (PSCI)`_
+-  Trusted Board Boot Requirements (TBBR, ARM DEN0006C-1)
+-  `SMC Calling Convention`_
+-  `System Control and Management Interface`_
+
+As far as possible the code is designed for reuse or porting to other ARMv8-A
+model and hardware platforms.
+
+ARM will continue development in collaboration with interested parties to
+provide a full reference implementation of Secure Monitor code and ARM standards
+to the benefit of all developers working with ARMv8-A TrustZone technology.
+
+License
+-------
+
+The software is provided under a BSD-3-Clause `license`_. Contributions to this
+project are accepted under the same license with developer sign-off as
+described in the `Contributing Guidelines`_.
+
+This project contains code from other projects as listed below. The original
+license text is included in those source files.
+
+-  The stdlib source code is derived from FreeBSD code, which uses various
+   BSD licenses, including BSD-3-Clause and BSD-2-Clause.
+
+-  The libfdt source code is dual licensed. It is used by this project under
+   the terms of the BSD-2-Clause license.
+
+-  The LLVM compiler-rt source code is dual licensed. It is used by this
+   project under the terms of the NCSA license (also known as the University of
+   Illinois/NCSA Open Source License).
+
+This Release
+------------
+
+This release provides a suitable starting point for productization of secure
+world boot and runtime firmware, in either the AArch32 or AArch64 execution
+state.
+
+Users are encouraged to do their own security validation, including penetration
+testing, on any secure world code derived from ARM Trusted Firmware.
+
+Functionality
+~~~~~~~~~~~~~
+
+-  Initialization of the secure world, for example exception vectors, control
+   registers and interrupts for the platform.
+
+-  Library support for CPU specific reset and power down sequences. This
+   includes support for errata workarounds and the latest ARM DynamIQ CPUs.
+
+-  Drivers to enable standard initialization of ARM System IP, for example
+   Generic Interrupt Controller (GIC), Cache Coherent Interconnect (CCI),
+   Cache Coherent Network (CCN), Network Interconnect (NIC) and TrustZone
+   Controller (TZC).
+
+-  A generic `SCMI`_ driver to interface with conforming power controllers, for
+   example the ARM System Control Processor (SCP).
+
+-  SMC (Secure Monitor Call) handling, conforming to the `SMC Calling
+   Convention`_ using an EL3 runtime services framework.
+
+-  `PSCI`_ library support for CPU, cluster and system power management
+   use-cases.
+   This library is pre-integrated with the AArch64 EL3 Runtime Software, and
+   is also suitable for integration with other AArch32 EL3 Runtime Software,
+   for example an AArch32 Secure OS.
+
+-  A minimal AArch32 Secure Payload (SP\_MIN) to demonstrate `PSCI`_ library
+   integration with AArch32 EL3 Runtime Software.
+
+-  Secure Monitor library code such as world switching, EL1 context management
+   and interrupt routing.
+   When a Secure-EL1 Payload (SP) is present, for example a Secure OS, the
+   AArch64 EL3 Runtime Software must be integrated with a dispatcher component
+   (SPD) to customize the interaction with the SP.
+
+-  A Test SP/SPD to demonstrate AArch64 Secure Monitor functionality and SP
+   interaction with PSCI.
+
+-  SPDs for the `OP-TEE Secure OS`_, `NVidia Trusted Little Kernel`_
+   and `Trusty Secure OS`_.
+
+-  A Trusted Board Boot implementation, conforming to all mandatory TBBR
+   requirements. This includes image authentication, Firmware Update (or
+   recovery mode), and packaging of the various firmware images into a
+   Firmware Image Package (FIP).
+
+-  Pre-integration of TBB with the ARM TrustZone CryptoCell product, to take
+   advantage of its hardware Root of Trust and crypto acceleration services.
+
+-  Support for alternative boot flows, for example to support platforms where
+   the EL3 Runtime Software is loaded using other firmware or a separate
+   secure system processor.
+
+-  Support for the GCC, LLVM and ARM Compiler 6 toolchains.
+
+For a full description of functionality and implementation details, please
+see the `Firmware Design`_ and supporting documentation. The `Change Log`_
+provides details of changes made since the last release.
+
+Platforms
+~~~~~~~~~
+
+Various AArch32 and AArch64 builds of this release have been tested on variants
+r0, r1 and r2 of the `Juno ARM Development Platform`_.
+
+Various AArch64 builds of this release have been tested on the following ARM
+`FVP`_\ s (64-bit host machine only):
+
+NOTE: Unless otherwise stated, the FVP Version is 11.0, Build 11.0.34.
+
+-  ``Foundation_Platform``
+-  ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.5, Build 0.8.8502)
+-  ``FVP_Base_Cortex-A35x4``
+-  ``FVP_Base_Cortex-A53x4``
+-  ``FVP_Base_Cortex-A57x4-A53x4``
+-  ``FVP_Base_Cortex-A57x4``
+-  ``FVP_Base_Cortex-A72x4-A53x4``
+-  ``FVP_Base_Cortex-A72x4``
+-  ``FVP_Base_Cortex-A73x4-A53x4``
+-  ``FVP_Base_Cortex-A73x4``
+-  ``FVP_CSS_SGM-775`` (Version 11.0, Build 11.0.36)
+
+Various AArch32 builds of this release have been tested on the following ARM
+`FVP`_\ s (64-bit host machine only):
+
+-  ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.5, Build 0.8.8502)
+-  ``FVP_Base_Cortex-A32x4``
+
+The Foundation FVP can be downloaded free of charge. The Base FVPs can be
+licensed from ARM. See the `ARM FVP website`_.
+
+All the above platforms have been tested with `Linaro Release 17.04`_.
+
+This release also contains the following platform support:
+
+-  HiKey and HiKey960 boards
+-  MediaTek MT6795 and MT8173 SoCs
+-  NVidia T132, T186 and T210 SoCs
+-  QEMU emulator
+-  RockChip RK3328, RK3368 and RK3399 SoCs
+-  Socionext UniPhier SoC family
+-  Xilinx Zynq UltraScale+ MPSoC
+
+Still to Come
+~~~~~~~~~~~~~
+
+-  More platform support.
+
+-  Ongoing support for new architectural features, CPUs and System IP.
+
+-  Ongoing support for new `PSCI`_, `SCMI`_ and TBBR features.
+
+-  Ongoing security hardening, optimization and quality improvements.
+
+For a full list of detailed issues in the current code, please see the `Change
+Log`_ and the `GitHub issue tracker`_.
+
+Getting Started
+---------------
+
+Get the Trusted Firmware source code from `GitHub`_.
+
+See the `User Guide`_ for instructions on how to install, build and use
+the Trusted Firmware with the ARM `FVP`_\ s.
+
+See the `Firmware Design`_ for information on how the Trusted Firmware works.
+
+See the `Porting Guide`_ as well for information about how to use this
+software on another ARMv8-A platform.
+
+See the `Contributing Guidelines`_ for information on how to contribute to this
+project and the `Acknowledgments`_ file for a list of contributors to the
+project.
+
+Feedback and support
+~~~~~~~~~~~~~~~~~~~~
+
+ARM welcomes any feedback on Trusted Firmware. If you think you have found a
+security vulnerability, please report this using the process defined in the
+Trusted Firmware `Security Centre`_. For all other feedback, please use the
+`GitHub issue tracker`_.
+
+ARM licensees may contact ARM directly via their partner managers.
+
+--------------
+
+*Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.*
+
+.. _ARMv8-A: http://www.arm.com/products/processors/armv8-architecture.php
+.. _Secure Monitor: http://www.arm.com/products/processors/technologies/trustzone/tee-smc.php
+.. _Power State Coordination Interface (PSCI): PSCI_
+.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf
+.. _SMC Calling Convention: http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf
+.. _System Control and Management Interface: SCMI_
+.. _SCMI: http://infocenter.arm.com/help/topic/com.arm.doc.den0056a/DEN0056A_System_Control_and_Management_Interface.pdf
+.. _Juno ARM Development Platform: http://www.arm.com/products/tools/development-boards/versatile-express/juno-arm-development-platform.php
+.. _ARM FVP website: FVP_
+.. _FVP: https://developer.arm.com/products/system-design/fixed-virtual-platforms
+.. _Linaro Release 17.04: https://community.arm.com/dev-platforms/b/documents/posts/linaro-release-notes-deprecated#LinaroRelease17.04
+.. _OP-TEE Secure OS: https://github.com/OP-TEE/optee_os
+.. _NVidia Trusted Little Kernel: http://nv-tegra.nvidia.com/gitweb/?p=3rdparty/ote_partner/tlk.git;a=summary
+.. _Trusty Secure OS: https://source.android.com/security/trusty
+.. _GitHub: https://www.github.com/ARM-software/arm-trusted-firmware
+.. _GitHub issue tracker: https://github.com/ARM-software/tf-issues/issues
+.. _Security Centre: https://github.com/ARM-software/arm-trusted-firmware/wiki/ARM-Trusted-Firmware-Security-Centre
+.. _license: ./license.rst
+.. _Contributing Guidelines: ./contributing.rst
+.. _Acknowledgments: ./acknowledgements.rst
+.. _Firmware Design: ./docs/firmware-design.rst
+.. _Change Log: ./docs/change-log.rst
+.. _User Guide: ./docs/user-guide.rst
+.. _Porting Guide: ./docs/porting-guide.rst
diff --git a/services/spd/opteed/opteed.mk b/services/spd/opteed/opteed.mk
index 8057dcc..b1b2175 100644
--- a/services/spd/opteed/opteed.mk
+++ b/services/spd/opteed/opteed.mk
@@ -1,31 +1,7 @@
 #
 # Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 OPTEED_DIR		:=	services/spd/opteed
diff --git a/services/spd/opteed/opteed_common.c b/services/spd/opteed/opteed_common.c
index 5743102..2693e7d 100644
--- a/services/spd/opteed/opteed_common.c
+++ b/services/spd/opteed/opteed_common.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -33,6 +9,7 @@
 #include <bl_common.h>
 #include <context_mgmt.h>
 #include <string.h>
+#include <utils.h>
 #include "opteed_private.h"
 
 /*******************************************************************************
@@ -42,8 +19,8 @@
  ******************************************************************************/
 void opteed_init_optee_ep_state(struct entry_point_info *optee_entry_point,
 				uint32_t rw, uint64_t pc,
-				uint64_t paged_part, uint64_t mem_limit,
-				optee_context_t *optee_ctx)
+				uint64_t pageable_part, uint64_t mem_limit,
+				uint64_t dt_addr, optee_context_t *optee_ctx)
 {
 	uint32_t ep_attr;
 
@@ -74,9 +51,10 @@
 						      DAIF_FIQ_BIT |
 							DAIF_IRQ_BIT |
 							DAIF_ABT_BIT);
-	memset(&optee_entry_point->args, 0, sizeof(optee_entry_point->args));
-	optee_entry_point->args.arg0 = paged_part;
+	zeromem(&optee_entry_point->args, sizeof(optee_entry_point->args));
+	optee_entry_point->args.arg0 = pageable_part;
 	optee_entry_point->args.arg1 = mem_limit;
+	optee_entry_point->args.arg2 = dt_addr;
 }
 
 /*******************************************************************************
diff --git a/services/spd/opteed/opteed_helpers.S b/services/spd/opteed/opteed_helpers.S
index ef59540..075a71b 100644
--- a/services/spd/opteed/opteed_helpers.S
+++ b/services/spd/opteed/opteed_helpers.S
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <asm_macros.S>
@@ -64,6 +40,7 @@
 	 * ---------------------------------------------
 	 */
 	b	el3_exit
+endfunc opteed_enter_sp
 
 	/* ---------------------------------------------
 	 * This function is called 'x0' pointing to a C
@@ -99,3 +76,4 @@
 	 */
 	mov	x0, x1
 	ret
+endfunc opteed_exit_sp
diff --git a/services/spd/opteed/opteed_main.c b/services/spd/opteed/opteed_main.c
index 49a301a..13a307a 100644
--- a/services/spd/opteed/opteed_main.c
+++ b/services/spd/opteed/opteed_main.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 
@@ -40,36 +16,19 @@
  ******************************************************************************/
 #include <arch_helpers.h>
 #include <assert.h>
-#include <bl_common.h>
 #include <bl31.h>
+#include <bl_common.h>
 #include <context_mgmt.h>
 #include <debug.h>
 #include <errno.h>
 #include <platform.h>
 #include <runtime_svc.h>
 #include <stddef.h>
-#include <string.h>
 #include <uuid.h>
 #include "opteed_private.h"
-#include "teesmc_opteed_macros.h"
 #include "teesmc_opteed.h"
+#include "teesmc_opteed_macros.h"
 
-#define OPTEE_MAGIC		0x4554504f
-#define OPTEE_VERSION		1
-#define OPTEE_ARCH_ARM32	0
-#define OPTEE_ARCH_ARM64	1
-
-struct optee_header {
-	uint32_t magic;
-	uint8_t version;
-	uint8_t arch;
-	uint16_t flags;
-	uint32_t init_size;
-	uint32_t init_load_addr_hi;
-	uint32_t init_load_addr_lo;
-	uint32_t init_mem_usage;
-	uint32_t paged_size;
-};
 
 /*******************************************************************************
  * Address of the entrypoint vector table in OPTEE. It is
@@ -83,8 +42,6 @@
 optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
 uint32_t opteed_rw;
 
-
-
 static int32_t opteed_init(void);
 
 /*******************************************************************************
@@ -98,26 +55,19 @@
 					    void *cookie)
 {
 	uint32_t linear_id;
-	uint64_t mpidr;
 	optee_context_t *optee_ctx;
 
 	/* Check the security state when the exception was generated */
 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
 
-#if IMF_READ_INTERRUPT_ID
-	/* Check the security status of the interrupt */
-	assert(plat_ic_get_interrupt_type(id) == INTR_TYPE_S_EL1);
-#endif
-
 	/* Sanity check the pointer to this cpu's context */
-	mpidr = read_mpidr();
 	assert(handle == cm_get_context(NON_SECURE));
 
 	/* Save the non-secure context before entering the OPTEE */
 	cm_el1_sysregs_context_save(NON_SECURE);
 
 	/* Get a reference to this cpu's OPTEE context */
-	linear_id = platform_get_core_pos(mpidr);
+	linear_id = plat_my_core_pos();
 	optee_ctx = &opteed_sp_context[linear_id];
 	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
 
@@ -135,13 +85,6 @@
 	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
 }
 
-
-static int is_mem_free(uint64_t free_base, size_t free_size,
-		       uint64_t addr, size_t size)
-{
-	return (addr >= free_base) && (addr + size <= free_base + free_size);
-}
-
 /*******************************************************************************
  * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
  * (aarch32/aarch64) if not already known and initialises the context for entry
@@ -149,136 +92,47 @@
  ******************************************************************************/
 int32_t opteed_setup(void)
 {
-	entry_point_info_t *ep_info;
-	struct optee_header *header;
-	uint64_t mpidr = read_mpidr();
+	entry_point_info_t *optee_ep_info;
 	uint32_t linear_id;
-	uintptr_t init_load_addr;
-	size_t init_size;
-	size_t init_mem_usage;
-	uintptr_t payload_addr;
-	uintptr_t mem_limit;
-	uintptr_t paged_part;
-	uintptr_t paged_size;
+	uint64_t opteed_pageable_part;
+	uint64_t opteed_mem_limit;
+	uint64_t dt_addr;
 
-	linear_id = platform_get_core_pos(mpidr);
+	linear_id = plat_my_core_pos();
 
 	/*
 	 * Get information about the Secure Payload (BL32) image. Its
 	 * absence is a critical failure.  TODO: Add support to
 	 * conditionally include the SPD service
 	 */
-	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
-	if (!ep_info) {
-		WARN("No OPTEE provided by BL2 boot loader.\n");
-		goto err;
+	optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!optee_ep_info) {
+		WARN("No OPTEE provided by BL2 boot loader, Booting device"
+			" without OPTEE initialization. SMC`s destined for OPTEE"
+			" will return SMC_UNK\n");
+		return 1;
 	}
 
-	header = (struct optee_header *)ep_info->pc;
-
-	if (header->magic != OPTEE_MAGIC || header->version != OPTEE_VERSION) {
-		WARN("Invalid OPTEE header.\n");
-		goto err;
-	}
-
-	if (header->arch == OPTEE_ARCH_ARM32)
-		opteed_rw = OPTEE_AARCH32;
-	else if (header->arch == OPTEE_ARCH_ARM64)
-		opteed_rw = OPTEE_AARCH64;
-	else {
-		WARN("Invalid OPTEE architecture (%d)\n", header->arch);
-		goto err;
-	}
-
-	init_load_addr = ((uint64_t)header->init_load_addr_hi << 32) |
-				header->init_load_addr_lo;
-	init_size = header->init_size;
-	init_mem_usage = header->init_mem_usage;
-	payload_addr = (uintptr_t)(header + 1);
-	paged_size = header->paged_size;
-
 	/*
-	 * Move OPTEE binary to the required location in memory.
-	 *
-	 * There's two ways OPTEE can be running in memory:
-	 * 1. A memory large enough to keep the entire OPTEE binary
-	 *    (DRAM currently)
-	 * 2. A part of OPTEE in a smaller (and more secure) memory
-	 *    (SRAM currently). This is achieved with demand paging
-	 *    of read-only data/code against a backing store in some
-	 *    larger memory (DRAM currently).
-	 *
-	 * In either case dictates init_load_addr in the OPTEE
-	 * header the address where what's after the header
-	 * (payload) should be residing when started. init_size in
-	 * the header tells how much of the payload that need to be
-	 * copied. init_mem_usage tells how much runtime memory in
-	 * total is needed by OPTEE.
-	 *
-	 * In alternative 2 there's additional data after
-	 * init_size, this is the rest of OPTEE which is demand
-	 * paged into memory.  A pointer to that data is supplied
-	 * to OPTEE when initializing.
-	 *
-	 * Alternative 1 only uses DRAM when executing OPTEE while
-	 * alternative 2 uses both SRAM and DRAM to execute.
-	 *
-	 * All data written which is later read by OPTEE must be flushed
-	 * out to memory since OPTEE starts with MMU turned off and caches
-	 * disabled.
+	 * If there's no valid entry point for the SP, we return a non-zero
+	 * value signalling that the service failed to initialize. We bail out
+	 * without registering any handlers.
 	 */
-	if (is_mem_free(BL32_SRAM_BASE,
-			 BL32_SRAM_LIMIT - BL32_SRAM_BASE,
-			 init_load_addr, init_mem_usage)) {
-		/* Running in SRAM, paging some code against DRAM */
-		memcpy((void *)init_load_addr, (void *)payload_addr,
-		       init_size);
-		flush_dcache_range(init_load_addr, init_size);
-		paged_part = payload_addr + init_size;
-		mem_limit = BL32_SRAM_LIMIT;
-	} else if (is_mem_free(BL32_DRAM_BASE,
-			       BL32_DRAM_LIMIT - BL32_DRAM_BASE,
-			       init_load_addr, init_mem_usage)) {
-		/*
-		 * Running in DRAM.
-		 *
-		 * The paged part normally empty, but if it isn't,
-		 * move it to the end of DRAM before moving the
-		 * init part in place.
-		 */
-		paged_part = BL32_DRAM_LIMIT - paged_size;
-		if (paged_size) {
-			if (!is_mem_free(BL32_DRAM_BASE,
-					 BL32_DRAM_LIMIT - BL32_DRAM_BASE,
-					 init_load_addr,
-					 init_mem_usage + paged_size)) {
-				WARN("Failed to reserve memory 0x%lx - 0x%lx\n",
-				      init_load_addr,
-				      init_load_addr + init_mem_usage +
-					paged_size);
-				goto err;
-			}
+	if (!optee_ep_info->pc)
+		return 1;
 
-			memcpy((void *)paged_part,
-				(void *)(payload_addr + init_size),
-				paged_size);
-			flush_dcache_range(paged_part, paged_size);
-		}
+	opteed_rw = optee_ep_info->args.arg0;
+	opteed_pageable_part = optee_ep_info->args.arg1;
+	opteed_mem_limit = optee_ep_info->args.arg2;
+	dt_addr = optee_ep_info->args.arg3;
 
-		memmove((void *)init_load_addr, (void *)payload_addr,
-			init_size);
-		flush_dcache_range(init_load_addr, init_size);
-		mem_limit = BL32_DRAM_LIMIT;
-	} else {
-		WARN("Failed to reserve memory 0x%lx - 0x%lx\n",
-			init_load_addr, init_load_addr + init_mem_usage);
-		goto err;
-	}
-
-
-	opteed_init_optee_ep_state(ep_info, opteed_rw, init_load_addr,
-				   paged_part, mem_limit,
-				   &opteed_sp_context[linear_id]);
+	opteed_init_optee_ep_state(optee_ep_info,
+				opteed_rw,
+				optee_ep_info->pc,
+				opteed_pageable_part,
+				opteed_mem_limit,
+				dt_addr,
+				&opteed_sp_context[linear_id]);
 
 	/*
 	 * All OPTEED initialization done. Now register our init function with
@@ -287,11 +141,6 @@
 	bl31_register_bl32_init(&opteed_init);
 
 	return 0;
-
-err:
-	WARN("Booting device without OPTEE initialization.\n");
-	WARN("SMC`s destined for OPTEE will return SMC_UNK\n");
-	return 1;
 }
 
 /*******************************************************************************
@@ -305,8 +154,7 @@
  ******************************************************************************/
 static int32_t opteed_init(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 	entry_point_info_t *optee_entry_point;
 	uint64_t rc;
@@ -318,7 +166,7 @@
 	optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
 	assert(optee_entry_point);
 
-	cm_init_context(mpidr, optee_entry_point);
+	cm_init_my_context(optee_entry_point);
 
 	/*
 	 * Arrange for an entry into OPTEE. It will be returned via
@@ -349,8 +197,7 @@
 			 uint64_t flags)
 {
 	cpu_context_t *ns_cpu_context;
-	unsigned long mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 	uint64_t rc;
 
@@ -359,9 +206,6 @@
 	 */
 
 	if (is_caller_non_secure(flags)) {
-		gp_regs_t *sec_gpregs = get_gpregs_ctx(&optee_ctx->cpu_ctx);
-		gp_regs_t *ns_gpregs = get_gpregs_ctx(handle);
-
 		/*
 		 * This is a fresh request from the non-secure client.
 		 * The parameters are in x1 and x2. Figure out which
@@ -395,21 +239,29 @@
 					&optee_vectors->fast_smc_entry);
 		} else {
 			cm_set_elr_el3(SECURE, (uint64_t)
-					&optee_vectors->std_smc_entry);
+					&optee_vectors->yield_smc_entry);
 		}
 
 		cm_el1_sysregs_context_restore(SECURE);
 		cm_set_next_eret_context(SECURE);
 
-		/* Propagate X4-X7 */
-		write_ctx_reg(sec_gpregs, CTX_GPREG_X4,
-			      read_ctx_reg(ns_gpregs, CTX_GPREG_X4));
-		write_ctx_reg(sec_gpregs, CTX_GPREG_X5,
-			      read_ctx_reg(ns_gpregs, CTX_GPREG_X5));
-		write_ctx_reg(sec_gpregs, CTX_GPREG_X6,
-			      read_ctx_reg(ns_gpregs, CTX_GPREG_X6));
-		write_ctx_reg(sec_gpregs, CTX_GPREG_X7,
-			      read_ctx_reg(ns_gpregs, CTX_GPREG_X7));
+		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+			      CTX_GPREG_X4,
+			      read_ctx_reg(get_gpregs_ctx(handle),
+					   CTX_GPREG_X4));
+		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+			      CTX_GPREG_X5,
+			      read_ctx_reg(get_gpregs_ctx(handle),
+					   CTX_GPREG_X5));
+		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+			      CTX_GPREG_X6,
+			      read_ctx_reg(get_gpregs_ctx(handle),
+					   CTX_GPREG_X6));
+		/* Propagate hypervisor client ID */
+		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+			      CTX_GPREG_X7,
+			      read_ctx_reg(get_gpregs_ctx(handle),
+					   CTX_GPREG_X7));
 
 		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
 	}
@@ -553,13 +405,13 @@
 	opteed_smc_handler
 );
 
-/* Define an OPTEED runtime service descriptor for standard SMC calls */
+/* Define an OPTEED runtime service descriptor for yielding SMC calls */
 DECLARE_RT_SVC(
 	opteed_std,
 
 	OEN_TOS_START,
 	OEN_TOS_END,
-	SMC_TYPE_STD,
+	SMC_TYPE_YIELD,
 	NULL,
 	opteed_smc_handler
 );
diff --git a/services/spd/opteed/opteed_pm.c b/services/spd/opteed/opteed_pm.c
index bce0d2f..2420b1e 100644
--- a/services/spd/opteed/opteed_pm.c
+++ b/services/spd/opteed/opteed_pm.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -51,8 +27,7 @@
 static int32_t opteed_cpu_off_handler(uint64_t unused)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
 	assert(optee_vectors);
@@ -82,11 +57,10 @@
  * This cpu is being suspended. S-EL1 state must have been saved in the
  * resident cpu (mpidr format) if it is a UP/UP migratable OPTEE.
  ******************************************************************************/
-static void opteed_cpu_suspend_handler(uint64_t unused)
+static void opteed_cpu_suspend_handler(uint64_t max_off_pwrlvl)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
 	assert(optee_vectors);
@@ -116,8 +90,7 @@
 static void opteed_cpu_on_finish_handler(uint64_t unused)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 	entry_point_info_t optee_on_entrypoint;
 
@@ -126,10 +99,10 @@
 
 	opteed_init_optee_ep_state(&optee_on_entrypoint, opteed_rw,
 				(uint64_t)&optee_vectors->cpu_on_entry,
-				0, 0, optee_ctx);
+				0, 0, 0, optee_ctx);
 
 	/* Initialise this cpu's secure context */
-	cm_init_context(mpidr, &optee_on_entrypoint);
+	cm_init_my_context(&optee_on_entrypoint);
 
 	/* Enter OPTEE */
 	rc = opteed_synchronous_sp_entry(optee_ctx);
@@ -150,20 +123,19 @@
  * completed the preceding suspend call. Use that context to program an entry
  * into OPTEE to allow it to do any remaining book keeping
  ******************************************************************************/
-static void opteed_cpu_suspend_finish_handler(uint64_t suspend_level)
+static void opteed_cpu_suspend_finish_handler(uint64_t max_off_pwrlvl)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
 	assert(optee_vectors);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_SUSPEND);
 
-	/* Program the entry point, suspend_level and enter the SP */
+	/* Program the entry point, max_off_pwrlvl and enter the SP */
 	write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
 		      CTX_GPREG_X0,
-		      suspend_level);
+		      max_off_pwrlvl);
 	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->cpu_resume_entry);
 	rc = opteed_synchronous_sp_entry(optee_ctx);
 
@@ -193,8 +165,7 @@
  ******************************************************************************/
 static void opteed_system_off(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
 	assert(optee_vectors);
@@ -214,8 +185,7 @@
  ******************************************************************************/
 static void opteed_system_reset(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
 	assert(optee_vectors);
@@ -246,4 +216,3 @@
 	.svc_system_off = opteed_system_off,
 	.svc_system_reset = opteed_system_reset,
 };
-
diff --git a/services/spd/opteed/opteed_private.h b/services/spd/opteed/opteed_private.h
index 72fd97a..6cda2c8 100644
--- a/services/spd/opteed/opteed_private.h
+++ b/services/spd/opteed/opteed_private.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __OPTEED_PRIVATE_H__
@@ -110,7 +86,7 @@
 typedef uint32_t optee_vector_isn_t;
 
 typedef struct optee_vectors {
-	optee_vector_isn_t std_smc_entry;
+	optee_vector_isn_t yield_smc_entry;
 	optee_vector_isn_t fast_smc_entry;
 	optee_vector_isn_t cpu_on_entry;
 	optee_vector_isn_t cpu_off_entry;
@@ -169,8 +145,11 @@
 uint64_t opteed_synchronous_sp_entry(optee_context_t *optee_ctx);
 void __dead2 opteed_synchronous_sp_exit(optee_context_t *optee_ctx, uint64_t ret);
 void opteed_init_optee_ep_state(struct entry_point_info *optee_ep,
-				uint32_t rw, uint64_t pc,
-				uint64_t paged_part, uint64_t mem_limit,
+				uint32_t rw,
+				uint64_t pc,
+				uint64_t pageable_part,
+				uint64_t mem_limit,
+				uint64_t dt_addr,
 				optee_context_t *optee_ctx);
 
 extern optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
diff --git a/services/spd/opteed/teesmc_opteed.h b/services/spd/opteed/teesmc_opteed.h
index 7968d1f..71b8d71 100644
--- a/services/spd/opteed/teesmc_opteed.h
+++ b/services/spd/opteed/teesmc_opteed.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 /* Copyright (c) 2014, Linaro Limited. All rights reserved. */
diff --git a/services/spd/opteed/teesmc_opteed_macros.h b/services/spd/opteed/teesmc_opteed_macros.h
index 2453c9a..14f8a2d 100644
--- a/services/spd/opteed/teesmc_opteed_macros.h
+++ b/services/spd/opteed/teesmc_opteed_macros.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 #ifndef __TEESMC_OPTEED_MACROS_H__
 #define __TEESMC_OPTEED_MACROS_H__
diff --git a/services/spd/tlkd/tlkd.mk b/services/spd/tlkd/tlkd.mk
new file mode 100644
index 0000000..3010035
--- /dev/null
+++ b/services/spd/tlkd/tlkd.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPD_INCLUDES		:=	-Iinclude/bl32/payloads
+
+SPD_SOURCES		:=	services/spd/tlkd/tlkd_common.c		\
+				services/spd/tlkd/tlkd_helpers.S	\
+				services/spd/tlkd/tlkd_main.c		\
+				services/spd/tlkd/tlkd_pm.c
diff --git a/services/spd/tlkd/tlkd_common.c b/services/spd/tlkd/tlkd_common.c
new file mode 100644
index 0000000..599d7a3
--- /dev/null
+++ b/services/spd/tlkd/tlkd_common.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <string.h>
+#include "tlkd_private.h"
+
+#define AT_MASK		3
+
+/*******************************************************************************
+ * This function helps the SP to translate NS/S virtual addresses.
+ ******************************************************************************/
+uint64_t tlkd_va_translate(uintptr_t va, int type)
+{
+	uint64_t pa;
+
+	if (type & TLK_TRANSLATE_NS_VADDR) {
+
+		/* save secure context */
+		cm_el1_sysregs_context_save(SECURE);
+
+		/* restore non-secure context */
+		cm_el1_sysregs_context_restore(NON_SECURE);
+
+		/* switch NS bit to start using 64-bit, non-secure mappings */
+		write_scr(cm_get_scr_el3(NON_SECURE));
+		isb();
+	}
+
+	int at = type & AT_MASK;
+	switch (at) {
+	case 0:
+		ats12e1r(va);
+		break;
+	case 1:
+		ats12e1w(va);
+		break;
+	case 2:
+		ats12e0r(va);
+		break;
+	case 3:
+		ats12e0w(va);
+		break;
+	default:
+		assert(0);
+	}
+
+	/* get the (NS/S) physical address */
+	isb();
+	pa = read_par_el1();
+
+	/* Restore secure state */
+	if (type & TLK_TRANSLATE_NS_VADDR) {
+
+		/* restore secure context */
+		cm_el1_sysregs_context_restore(SECURE);
+
+		/* switch NS bit to start using 32-bit, secure mappings */
+		write_scr(cm_get_scr_el3(SECURE));
+		isb();
+	}
+
+	return pa;
+}
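
For reference, a minimal caller-side sketch of how the 'type' argument is encoded, judging by the AT_MASK switch above: bit 0 selects a write access, bit 1 selects the EL0 translation regime, and TLK_TRANSLATE_NS_VADDR (bit 2) runs the lookup against the non-secure context. The VA_TYPE_* names below are illustrative assumptions, not part of the TLK ABI:

/* Illustrative encoding of tlkd_va_translate()'s 'type' argument. */
#define VA_TYPE_WRITE	(1 << 0)	/* 0 -> ats12e1r, 1 -> ats12e1w, ... */
#define VA_TYPE_EL0	(1 << 1)	/* use the EL0 translation regime */

/* Translate a non-secure virtual address for a write access (sketch): */
/* uint64_t pa = tlkd_va_translate(va, VA_TYPE_WRITE | TLK_TRANSLATE_NS_VADDR); */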
+
+/*******************************************************************************
+ * Given a secure payload entrypoint, register width, cpu id & pointer to a
+ * context data structure, this function will create a secure context ready for
+ * programming an entry into the secure payload.
+ ******************************************************************************/
+void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
+			    uint32_t rw,
+			    uint64_t pc,
+			    tlk_context_t *tlk_ctx)
+{
+	uint32_t ep_attr, spsr;
+
+	/* Passing a NULL context is a critical programming error */
+	assert(tlk_ctx);
+	assert(tlk_entry_point);
+	assert(pc);
+
+	/* Associate this context with the cpu specified */
+	tlk_ctx->mpidr = read_mpidr_el1();
+	clr_yield_smc_active_flag(tlk_ctx->state);
+	cm_set_context(&tlk_ctx->cpu_ctx, SECURE);
+
+	if (rw == SP_AARCH64)
+		spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	else
+		spsr = SPSR_MODE32(MODE32_svc,
+				   SPSR_T_ARM,
+				   read_sctlr_el3() & SCTLR_EE_BIT,
+				   DISABLE_ALL_EXCEPTIONS);
+
+	/* initialise an entrypoint to set up the CPU context */
+	ep_attr = SECURE | EP_ST_ENABLE;
+	if (read_sctlr_el3() & SCTLR_EE_BIT)
+		ep_attr |= EP_EE_BIG;
+	SET_PARAM_HEAD(tlk_entry_point, PARAM_EP, VERSION_1, ep_attr);
+
+	tlk_entry_point->pc = pc;
+	tlk_entry_point->spsr = spsr;
+}
+
+/*******************************************************************************
+ * This function takes a TLK context pointer and:
+ * 1. Applies the S-EL1 system register context from tlk_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ *    frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ *    from the tlk_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx)
+{
+	uint64_t rc;
+
+	/* Passing a NULL context is a critical programming error */
+	assert(tlk_ctx);
+	assert(tlk_ctx->c_rt_ctx == 0);
+
+	/* Apply the Secure EL1 system register context and switch to it */
+	assert(cm_get_context(SECURE) == &tlk_ctx->cpu_ctx);
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	rc = tlkd_enter_sp(&tlk_ctx->c_rt_ctx);
+#if DEBUG
+	tlk_ctx->c_rt_ctx = 0;
+#endif
+
+	return rc;
+}
+
+/*******************************************************************************
+ * This function takes a TLK context pointer and:
+ * 1. Saves the S-EL1 system register context to tlk_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ *    stack frame using reference to this state saved in tlkd_enter_sp().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ *    as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void tlkd_synchronous_sp_exit(tlk_context_t *tlk_ctx, uint64_t ret)
+{
+	/* Passing a NULL context is a critical programming error */
+	assert(tlk_ctx);
+
+	/* Save the Secure EL1 system register context */
+	assert(cm_get_context(SECURE) == &tlk_ctx->cpu_ctx);
+	cm_el1_sysregs_context_save(SECURE);
+
+	assert(tlk_ctx->c_rt_ctx != 0);
+	tlkd_exit_sp(tlk_ctx->c_rt_ctx, ret);
+
+	/* Should never reach here */
+	assert(0);
+}
diff --git a/services/spd/tlkd/tlkd_helpers.S b/services/spd/tlkd/tlkd_helpers.S
new file mode 100644
index 0000000..6e616a6
--- /dev/null
+++ b/services/spd/tlkd/tlkd_helpers.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "tlkd_private.h"
+
+	.global tlkd_enter_sp
+	.global tlkd_exit_sp
+
+	/* ---------------------------------------------
+	 * This function is called with SP_EL0 as stack.
+	 * Here we stash our EL3 callee-saved registers
+	 * on to the stack as a part of saving the C
+	 * runtime and enter the secure payload.
+	 * 'x0' contains a pointer to the memory where
+	 * the address of the C runtime context is to be
+	 * saved.
+	 * ---------------------------------------------
+	 */
+func tlkd_enter_sp
+	/* Make space for the registers that we're going to save */
+	mov	x3, sp
+	str	x3, [x0, #0]
+	sub	sp, sp, #TLKD_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers on to the stack */
+	stp	x19, x20, [sp, #TLKD_C_RT_CTX_X19]
+	stp	x21, x22, [sp, #TLKD_C_RT_CTX_X21]
+	stp	x23, x24, [sp, #TLKD_C_RT_CTX_X23]
+	stp	x25, x26, [sp, #TLKD_C_RT_CTX_X25]
+	stp	x27, x28, [sp, #TLKD_C_RT_CTX_X27]
+	stp	x29, x30, [sp, #TLKD_C_RT_CTX_X29]
+
+	/* ----------------------------------------------
+	 * Everything is set up now. el3_exit() will
+	 * use the secure context to restore the
+	 * general purpose and EL3 system registers
+	 * and ERET into the secure payload.
+	 * ----------------------------------------------
+	 */
+	b	el3_exit
+endfunc tlkd_enter_sp
+
+	/* ----------------------------------------------
+	 * This function is called with 'x0' pointing to
+	 * a C runtime context saved in tlkd_enter_sp().
+	 * It restores the saved registers and jumps to
+	 * that runtime with 'x0' as the new sp. This
+	 * destroys the C runtime context that had been
+	 * built on the stack below the saved context by
+	 * the caller. The second parameter 'x1' is
+	 * then passed to the caller as the return value.
+	 * ----------------------------------------------
+	 */
+func tlkd_exit_sp
+	/* Restore the previous stack */
+	mov	sp, x0
+
+	/* Restore callee-saved registers from the stack */
+	ldp	x19, x20, [x0, #(TLKD_C_RT_CTX_X19 - TLKD_C_RT_CTX_SIZE)]
+	ldp	x21, x22, [x0, #(TLKD_C_RT_CTX_X21 - TLKD_C_RT_CTX_SIZE)]
+	ldp	x23, x24, [x0, #(TLKD_C_RT_CTX_X23 - TLKD_C_RT_CTX_SIZE)]
+	ldp	x25, x26, [x0, #(TLKD_C_RT_CTX_X25 - TLKD_C_RT_CTX_SIZE)]
+	ldp	x27, x28, [x0, #(TLKD_C_RT_CTX_X27 - TLKD_C_RT_CTX_SIZE)]
+	ldp	x29, x30, [x0, #(TLKD_C_RT_CTX_X29 - TLKD_C_RT_CTX_SIZE)]
+
+	/* ------------------------------------------------
+	 * This should take us back to the instruction
+	 * after the call to the last tlkd_enter_sp().
+	 * Place the second parameter to x0 so that the
+	 * caller will see it as a return value from the
+	 * original entry call
+	 * ------------------------------------------------
+	 */
+	mov	x0, x1
+	ret
+endfunc tlkd_exit_sp
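
Conceptually, tlkd_enter_sp/tlkd_exit_sp behave like setjmp/longjmp across the world switch: tlkd_enter_sp stashes the EL3 callee-saved state and "returns" a second time when the SP later comes back through tlkd_exit_sp with a return value. A self-contained C sketch of the same control flow (an analogy only, not part of the patch):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf c_rt_ctx;		/* plays the role of tlk_ctx->c_rt_ctx */

static void secure_payload(void)
{
	/* ... secure-world work ..., then return synchronously with a value */
	longjmp(c_rt_ctx, 42);		/* analogous to tlkd_exit_sp(ctx, 42) */
}

int main(void)
{
	int ret = setjmp(c_rt_ctx);	/* analogous to tlkd_enter_sp(&c_rt_ctx) */
	if (ret == 0)
		secure_payload();	/* analogous to el3_exit() into the SP */
	else
		printf("synchronous entry returned %d\n", ret);
	return 0;
}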
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
new file mode 100644
index 0000000..78e9853
--- /dev/null
+++ b/services/spd/tlkd/tlkd_main.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stddef.h>
+#include <tlk.h>
+#include <uuid.h>
+#include "tlkd_private.h"
+
+extern const spd_pm_ops_t tlkd_pm_ops;
+
+/*******************************************************************************
+ * Per-cpu Secure Payload state
+ ******************************************************************************/
+tlk_context_t tlk_ctx;
+
+/*******************************************************************************
+ * CPU number on which TLK booted up
+ ******************************************************************************/
+static uint32_t boot_cpu;
+
+/* TLK UID: RFC-4122 compliant UUID (version-5, sha-1) */
+DEFINE_SVC_UUID(tlk_uuid,
+		0xbd11e9c9, 0x2bba, 0x52ee, 0xb1, 0x72,
+		0x46, 0x1f, 0xba, 0x97, 0x7f, 0x63);
+
+int32_t tlkd_init(void);
+
+/*******************************************************************************
+ * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into the SP for its initialisation.
+ ******************************************************************************/
+int32_t tlkd_setup(void)
+{
+	entry_point_info_t *tlk_ep_info;
+
+	/*
+	 * Get information about the Secure Payload (BL32) image. Its
+	 * absence is a critical failure.
+	 */
+	tlk_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!tlk_ep_info) {
+		WARN("No SP provided. Booting device without SP"
+			" initialization. SMCs destined for the SP"
+			" will return SMC_UNK\n");
+		return 1;
+	}
+
+	/*
+	 * If there's no valid entry point for the SP, return a non-zero value
+	 * to signal failure to initialize the service. We bail out without
+	 * registering any handlers.
+	 */
+	if (!tlk_ep_info->pc)
+		return 1;
+
+	/*
+	 * Inspect the SP image's SPSR and determine its execution state,
+	 * i.e. whether AArch32 or AArch64.
+	 */
+	tlkd_init_tlk_ep_state(tlk_ep_info,
+		(tlk_ep_info->spsr >> MODE_RW_SHIFT) & MODE_RW_MASK,
+		tlk_ep_info->pc,
+		&tlk_ctx);
+
+	/*
+	 * All TLK SPD initialization done. Now register our init function
+	 * with BL31 for deferred invocation
+	 */
+	bl31_register_bl32_init(&tlkd_init);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Payload image (BL32) for the first
+ * time on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by tlkd_setup() which can be directly
+ * used. This function performs a synchronous entry into the Secure payload.
+ * The SP passes control back to this routine through a SMC.
+ ******************************************************************************/
+int32_t tlkd_init(void)
+{
+	entry_point_info_t *tlk_entry_point;
+
+	/*
+	 * Get information about the Secure Payload (BL32) image. Its
+	 * absence is a critical failure.
+	 */
+	tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+	assert(tlk_entry_point);
+
+	cm_init_my_context(tlk_entry_point);
+
+	/*
+	 * TLK runs only on a single CPU. Store the value of the boot
+	 * CPU for sanity checking later.
+	 */
+	boot_cpu = plat_my_core_pos();
+
+	/*
+	 * Arrange for an entry into the test secure payload.
+	 */
+	return tlkd_synchronous_sp_entry(&tlk_ctx);
+}
+
+/*******************************************************************************
+ * This function is responsible for handling all SMCs in the Trusted OS/App
+ * range from the non-secure state as defined in the SMC Calling Convention
+ * Document. It is also responsible for communicating with the Secure payload
+ * to delegate work and return results back to the non-secure state. Lastly it
+ * will also return any information that the secure payload needs to do the
+ * work assigned to it.
+ ******************************************************************************/
+uint64_t tlkd_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	cpu_context_t *ns_cpu_context;
+	gp_regs_t *gp_regs;
+	uint32_t ns;
+	uint64_t par;
+
+	/* Passing a NULL context is a critical programming error */
+	assert(handle);
+
+	/* These SMCs are only supported by a single CPU */
+	if (boot_cpu != plat_my_core_pos())
+		SMC_RET1(handle, SMC_UNK);
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+
+	switch (smc_fid) {
+
+	/*
+	 * This function ID is used by SP to indicate that it was
+	 * preempted by a non-secure world IRQ.
+	 */
+	case TLK_PREEMPTED:
+
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		assert(handle == cm_get_context(SECURE));
+		cm_el1_sysregs_context_save(SECURE);
+
+		/* Get a reference to the non-secure context */
+		ns_cpu_context = cm_get_context(NON_SECURE);
+		assert(ns_cpu_context);
+
+		/*
+		 * Restore non-secure state. There is no need to save the
+		 * secure system register context since the SP was supposed
+		 * to preserve it during S-EL1 interrupt handling.
+		 */
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+
+		SMC_RET1(ns_cpu_context, x1);
+
+	/*
+	 * This is a request from the non-secure context to:
+	 *
+	 * a. register shared memory with the SP for storing its
+	 *    activity logs.
+	 * b. register shared memory with the SP for passing args
+	 *    required for maintaining sessions with the Trusted
+	 *    Applications.
+	 * c. open/close sessions
+	 * d. issue commands to the Trusted Apps
+	 * e. resume the preempted yielding SMC call.
+	 */
+	case TLK_REGISTER_LOGBUF:
+	case TLK_REGISTER_REQBUF:
+	case TLK_OPEN_TA_SESSION:
+	case TLK_CLOSE_TA_SESSION:
+	case TLK_TA_LAUNCH_OP:
+	case TLK_TA_SEND_EVENT:
+	case TLK_RESUME_FID:
+
+		if (!ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * This is a fresh request from the non-secure client.
+		 * The parameters are in x1 and x2. Figure out which
+		 * registers need to be preserved, save the non-secure
+		 * state and send the request to the secure payload.
+		 */
+		assert(handle == cm_get_context(NON_SECURE));
+
+		/*
+		 * Check if we are already processing a yielding SMC
+		 * call. Of all the supported fids, only the "resume"
+		 * fid expects the flag to be set.
+		 */
+		if (smc_fid == TLK_RESUME_FID) {
+			if (!get_yield_smc_active_flag(tlk_ctx.state))
+				SMC_RET1(handle, SMC_UNK);
+		} else {
+			if (get_yield_smc_active_flag(tlk_ctx.state))
+				SMC_RET1(handle, SMC_UNK);
+		}
+
+		cm_el1_sysregs_context_save(NON_SECURE);
+
+		/*
+		 * Verify if there is a valid context to use.
+		 */
+		assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));
+
+		/*
+		 * Mark the SP state as active.
+		 */
+		set_yield_smc_active_flag(tlk_ctx.state);
+
+		/*
+		 * We are done stashing the non-secure context. Ask the
+		 * secure payload to do the work now.
+		 */
+		cm_el1_sysregs_context_restore(SECURE);
+		cm_set_next_eret_context(SECURE);
+
+		/*
+		 * TLK is a 32-bit Trusted OS and so expects the SMC
+		 * arguments via r0-r7. TLK expects the monitor frame
+		 * registers to be 64 bits long. Hence, we pass x0 in
+		 * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
+		 *
+		 * As smc_fid is a uint32 value, r1 contains 0.
+		 */
+		gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+		write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
+		write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
+		write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
+		write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));
+		SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
+			(uint32_t)(x1 >> 32));
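
For illustration only (an assumed 32-bit callee-side view, not TLK source), a receiver could reassemble the 64-bit values from the register pairs described above; pair_to_u64 and the rN names are hypothetical:

/* Hypothetical 32-bit side of the packing described above. */
static inline unsigned long long pair_to_u64(unsigned int lo, unsigned int hi)
{
	return (unsigned long long)lo | ((unsigned long long)hi << 32);
}

/*
 * With the mapping above:
 *   fid  = r0;                   r1 is always 0
 *   arg1 = pair_to_u64(r2, r3);  from x1
 *   arg2 = pair_to_u64(r4, r5);  from x2
 *   arg3 = pair_to_u64(r6, r7);  from x3
 */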
+
+	/*
+	 * Translate NS/EL1-S virtual addresses.
+	 *
+	 * x1 = virtual address
+	 * x3 = type (NS/S)
+	 *
+	 * Returns PA:lo in r0, PA:hi in r1.
+	 */
+	case TLK_VA_TRANSLATE:
+
+		/* Should be invoked only by secure world */
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/* NS virtual addresses are 64-bit long */
+		if (x3 & TLK_TRANSLATE_NS_VADDR)
+			x1 = (uint32_t)x1 | (x2 << 32);
+
+		if (!x1)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * TODO: Sanity check x1. This would require platform
+		 * support.
+		 */
+
+		/* virtual address and type: ns/s */
+		par = tlkd_va_translate(x1, x3);
+
+		/* return physical address in r0-r1 */
+		SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);
+
+	/*
+	 * This is a request from the SP to mark completion of
+	 * a yielding function ID.
+	 */
+	case TLK_REQUEST_DONE:
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * Mark the SP state as inactive.
+		 */
+		clr_yield_smc_active_flag(tlk_ctx.state);
+
+		/* Get a reference to the non-secure context */
+		ns_cpu_context = cm_get_context(NON_SECURE);
+		assert(ns_cpu_context);
+
+		/*
+		 * This is a request completion SMC and we must switch to
+		 * the non-secure world to pass the result.
+		 */
+		cm_el1_sysregs_context_save(SECURE);
+
+		/*
+		 * We are done stashing the secure context. Switch to the
+		 * non-secure context and return the result.
+		 */
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+		SMC_RET1(ns_cpu_context, x1);
+
+	/*
+	 * This function ID is used only by the SP to indicate it has
+	 * finished initialising itself after a cold boot
+	 */
+	case TLK_ENTRY_DONE:
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * SP has been successfully initialized. Register power
+		 * management hooks with PSCI
+		 */
+		psci_register_spd_pm_hook(&tlkd_pm_ops);
+
+		/*
+		 * TLK reports completion. The SPD must have initiated
+		 * the original request through a synchronous entry
+		 * into the SP. Jump back to the original C runtime
+		 * context.
+		 */
+		tlkd_synchronous_sp_exit(&tlk_ctx, x1);
+
+	/*
+	 * These function IDs are used only by TLK to indicate it has
+	 * finished:
+	 * 1. suspending itself after an earlier psci cpu_suspend
+	 *    request.
+	 * 2. resuming itself after an earlier psci cpu_suspend
+	 *    request.
+	 * 3. powering down after an earlier psci system_off/system_reset
+	 *    request.
+	 */
+	case TLK_SUSPEND_DONE:
+	case TLK_RESUME_DONE:
+	case TLK_SYSTEM_OFF_DONE:
+
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * TLK reports completion. TLKD must have initiated the
+		 * original request through a synchronous entry into the SP.
+		 * Jump back to the original C runtime context, and pass x1 as
+		 * return value to the caller
+		 */
+		tlkd_synchronous_sp_exit(&tlk_ctx, x1);
+
+	/*
+	 * Return the number of service function IDs implemented to
+	 * provide service to the non-secure world
+	 */
+	case TOS_CALL_COUNT:
+		SMC_RET1(handle, TLK_NUM_FID);
+
+	/*
+	 * Return TLK's UID to the caller
+	 */
+	case TOS_UID:
+		SMC_UUID_RET(handle, tlk_uuid);
+
+	/*
+	 * Return the version of current implementation
+	 */
+	case TOS_CALL_VERSION:
+		SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);
+
+	default:
+		break;
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	tlkd_tos_fast,
+
+	OEN_TOS_START,
+	OEN_TOS_END,
+	SMC_TYPE_FAST,
+	tlkd_setup,
+	tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+	tlkd_tos_std,
+
+	OEN_TOS_START,
+	OEN_TOS_END,
+	SMC_TYPE_YIELD,
+	NULL,
+	tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	tlkd_tap_fast,
+
+	OEN_TAP_START,
+	OEN_TAP_END,
+	SMC_TYPE_FAST,
+	NULL,
+	tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+	tlkd_tap_std,
+
+	OEN_TAP_START,
+	OEN_TAP_END,
+	SMC_TYPE_YIELD,
+	NULL,
+	tlkd_smc_handler
+);
diff --git a/services/spd/tlkd/tlkd_pm.c b/services/spd/tlkd/tlkd_pm.c
new file mode 100644
index 0000000..8b4c457
--- /dev/null
+++ b/services/spd/tlkd/tlkd_pm.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <psci.h>
+#include <tlk.h>
+
+#include "tlkd_private.h"
+
+extern tlk_context_t tlk_ctx;
+
+#define MPIDR_CPU0	0x80000000
+
+/*******************************************************************************
+ * Return the type of payload TLKD is dealing with. Report the current
+ * resident cpu (mpidr format) if it is a UP/UP migratable payload.
+ ******************************************************************************/
+static int32_t cpu_migrate_info(uint64_t *resident_cpu)
+{
+	/* the payload runs only on CPU0 */
+	*resident_cpu = MPIDR_CPU0;
+
+	/* Uniprocessor, not migrate capable payload */
+	return PSCI_TOS_NOT_UP_MIG_CAP;
+}
+
+/*******************************************************************************
+ * This cpu is being suspended. Inform TLK of the SYSTEM_SUSPEND event, so
+ * that it can pass this information to its Trusted Apps.
+ ******************************************************************************/
+static void cpu_suspend_handler(uint64_t suspend_level)
+{
+	gp_regs_t *gp_regs;
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int32_t rc = 0;
+
+	/*
+	 * TLK runs only on CPU0 and suspends its Trusted Apps during
+	 * SYSTEM_SUSPEND. It has no role to play during CPU_SUSPEND.
+	 */
+	if ((cpu != 0) || (suspend_level != PLAT_MAX_PWR_LVL))
+		return;
+
+	/* pass system suspend event to TLK */
+	gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+	write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_SUSPEND);
+
+	/* Program the entry point and enter TLK */
+	rc = tlkd_synchronous_sp_entry(&tlk_ctx);
+
+	/*
+	 * Read the response from TLK. A non-zero return means that
+	 * something went wrong while communicating with it.
+	 */
+	if (rc != 0)
+		panic();
+}
+
+/*******************************************************************************
+ * This cpu is being resumed. Inform TLK of the SYSTEM_SUSPEND exit, so
+ * that it can pass this information to its Trusted Apps.
+ ******************************************************************************/
+static void cpu_resume_handler(uint64_t suspend_level)
+{
+	gp_regs_t *gp_regs;
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	int32_t rc = 0;
+
+	/*
+	 * TLK runs only on CPU0 and resumes its Trusted Apps during
+	 * SYSTEM_SUSPEND exit. It has no role to play during CPU_SUSPEND
+	 * exit.
+	 */
+	if ((cpu != 0) || (suspend_level != PLAT_MAX_PWR_LVL))
+		return;
+
+	/* pass system resume event to TLK */
+	gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+	write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_RESUME);
+
+	/* Program the entry point and enter TLK */
+	rc = tlkd_synchronous_sp_entry(&tlk_ctx);
+
+	/*
+	 * Read the response from TLK. A non-zero return means that
+	 * something went wrong while communicating with it.
+	 */
+	if (rc != 0)
+		panic();
+}
+
+/*******************************************************************************
+ * System is about to be powered off or reset. Inform the SP to allow any
+ * book-keeping.
+ ******************************************************************************/
+static void system_off_handler(void)
+{
+	int cpu = read_mpidr() & MPIDR_CPU_MASK;
+	gp_regs_t *gp_regs;
+
+	/* TLK runs only on CPU0 */
+	if (cpu != 0)
+		return;
+
+	/* pass system off/reset events to TLK */
+	gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+	write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_OFF);
+
+	/*
+	 * Enter the SP. We do not care about the return value because we
+	 * must continue with the shutdown anyway.
+	 */
+	(void)tlkd_synchronous_sp_entry(&tlk_ctx);
+}
+
+/*******************************************************************************
+ * Structure populated by the Dispatcher to be given a chance to perform any
+ * bookkeeping before PSCI executes a power management operation.
+ ******************************************************************************/
+const spd_pm_ops_t tlkd_pm_ops = {
+	.svc_migrate_info = cpu_migrate_info,
+	.svc_suspend = cpu_suspend_handler,
+	.svc_suspend_finish = cpu_resume_handler,
+	.svc_system_off = system_off_handler,
+	.svc_system_reset = system_off_handler
+};
diff --git a/services/spd/tlkd/tlkd_private.h b/services/spd/tlkd/tlkd_private.h
new file mode 100644
index 0000000..ba66098
--- /dev/null
+++ b/services/spd/tlkd/tlkd_private.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TLKD_PRIVATE_H__
+#define __TLKD_PRIVATE_H__
+
+#include <arch.h>
+#include <context.h>
+#include <interrupt_mgmt.h>
+#include <platform_def.h>
+#include <psci.h>
+
+/*
+ * This flag is used by the TLKD to determine if the SP is servicing a yielding
+ * SMC request prior to programming the next entry into the SP e.g. if SP
+ * execution is preempted by a non-secure interrupt and handed control to the
+ * normal world. If another request which is distinct from what the SP was
+ * previously doing arrives, then this flag will be help the TLKD to either
+ * reject the new request or service it while ensuring that the previous context
+ * is not corrupted.
+ */
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
+#define YIELD_SMC_ACTIVE_FLAG_MASK	1
+#define get_yield_smc_active_flag(state)				\
+			(((state) >> YIELD_SMC_ACTIVE_FLAG_SHIFT)	\
+			& YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state)	((state) |=		\
+					 (1 << YIELD_SMC_ACTIVE_FLAG_SHIFT))
+#define clr_yield_smc_active_flag(state)	((state) &=		\
+					 ~(YIELD_SMC_ACTIVE_FLAG_MASK	\
+					 << YIELD_SMC_ACTIVE_FLAG_SHIFT))
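
A minimal sketch (not part of the patch) of how these helpers behave on a plain state word, assuming the macros above are in scope:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t state = 0;

	assert(get_yield_smc_active_flag(state) == 0);
	set_yield_smc_active_flag(state);	/* sets bit 2 of the state word */
	assert(get_yield_smc_active_flag(state) == 1);
	clr_yield_smc_active_flag(state);	/* clears bit 2 again */
	assert(get_yield_smc_active_flag(state) == 0);
	return 0;
}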
+
+/*******************************************************************************
+ * Translate virtual address received from the NS world
+ ******************************************************************************/
+#define TLK_TRANSLATE_NS_VADDR		4
+
+/*******************************************************************************
+ * Secure Payload execution state information i.e. aarch32 or aarch64
+ ******************************************************************************/
+#define SP_AARCH32		MODE_RW_32
+#define SP_AARCH64		MODE_RW_64
+
+/*******************************************************************************
+ * Number of cpus present on this platform. TODO: Rely on a topology
+ * tree to determine this in the future to avoid assumptions about mpidr
+ * allocation
+ ******************************************************************************/
+#define TLKD_CORE_COUNT		PLATFORM_CORE_COUNT
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define TLKD_C_RT_CTX_X19		0x0
+#define TLKD_C_RT_CTX_X20		0x8
+#define TLKD_C_RT_CTX_X21		0x10
+#define TLKD_C_RT_CTX_X22		0x18
+#define TLKD_C_RT_CTX_X23		0x20
+#define TLKD_C_RT_CTX_X24		0x28
+#define TLKD_C_RT_CTX_X25		0x30
+#define TLKD_C_RT_CTX_X26		0x38
+#define TLKD_C_RT_CTX_X27		0x40
+#define TLKD_C_RT_CTX_X28		0x48
+#define TLKD_C_RT_CTX_X29		0x50
+#define TLKD_C_RT_CTX_X30		0x58
+#define TLKD_C_RT_CTX_SIZE		0x60
+#define TLKD_C_RT_CTX_ENTRIES		(TLKD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, TLKD_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(TLKD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),	\
+	assert_tlkd_c_rt_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the SPD to maintain the per-cpu state of the SP.
+ * 'state'          - collection of flags to track SP state e.g. on/off
+ * 'mpidr'          - mpidr to associate a context with a cpu
+ * 'c_rt_ctx'       - stack address to restore C runtime context from after
+ *                    returning from a synchronous entry into the SP.
+ * 'cpu_ctx'        - space to maintain SP architectural state
+ ******************************************************************************/
+typedef struct tlk_context {
+	uint32_t state;
+	uint64_t mpidr;
+	uint64_t c_rt_ctx;
+	cpu_context_t cpu_ctx;
+} tlk_context_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t tlkd_va_translate(uintptr_t va, int type);
+uint64_t tlkd_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 tlkd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx);
+void __dead2 tlkd_synchronous_sp_exit(tlk_context_t *tlk_ctx,
+			uint64_t ret);
+void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
+				uint32_t rw,
+				uint64_t pc,
+				tlk_context_t *tlk_ctx);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __TLKD_PRIVATE_H__ */
diff --git a/services/spd/trusty/sm_err.h b/services/spd/trusty/sm_err.h
new file mode 100644
index 0000000..4342466
--- /dev/null
+++ b/services/spd/trusty/sm_err.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __LIB_SM_SM_ERR_H
+#define __LIB_SM_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC		0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A (0.9.0)) */
+#define SM_ERR_INVALID_PARAMETERS	-2
+#define SM_ERR_INTERRUPTED		-3	/* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART	-4	/* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY			-5	/* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC		-6	/* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE		-7	/* Unknown error */
+#define SM_ERR_NOT_SUPPORTED		-8
+#define SM_ERR_NOT_ALLOWED		-9	/* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT		-10
+
+#endif
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
new file mode 100644
index 0000000..99f1608
--- /dev/null
+++ b/services/spd/trusty/smcall.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __LIB_SM_SMCALL_H
+#define __LIB_SM_SMCALL_H
+
+#define SMC_NUM_ENTITIES	64
+#define SMC_NUM_ARGS		4
+#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64)			\
+		(((((unsigned int) (fastcall)) & 0x1) << 31) |	\
+		(((smc64) & 0x1) << 30) |			\
+		(((entity) & 0x3F) << 24) |			\
+		((fn) & 0xFFFF)					\
+		)
+
+#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1, 0)
+#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1, 1)
+#define SMC_YIELDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0, 0)
+#define SMC_YIELDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0, 1)
+
+#define	SMC_ENTITY_ARCH			0	/* ARM Architecture calls */
+#define	SMC_ENTITY_CPU			1	/* CPU Service calls */
+#define	SMC_ENTITY_SIP			2	/* SIP Service calls */
+#define	SMC_ENTITY_OEM			3	/* OEM Service calls */
+#define	SMC_ENTITY_STD			4	/* Standard Service calls */
+#define	SMC_ENTITY_RESERVED		5	/* Reserved for future use */
+#define	SMC_ENTITY_TRUSTED_APP		48	/* Trusted Application calls */
+#define	SMC_ENTITY_TRUSTED_OS		50	/* Trusted OS calls */
+#define	SMC_ENTITY_LOGGING		51	/* Used for secure -> nonsecure logging */
+#define	SMC_ENTITY_SECURE_MONITOR	60	/* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, YC = Yielding call */
+#define SMC_YC_RESTART_LAST	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_YC_NOP		SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 1)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_YC_NS_RETURN	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
+#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
+#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4)
+
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
+#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6)
+
+#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10)
+
+/* Trusted OS entity calls */
+#define SMC_YC_VIRTIO_GET_DESCR	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
+#define SMC_YC_VIRTIO_START	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
+#define SMC_YC_VIRTIO_STOP	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+
+#define SMC_YC_VDEV_RESET	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
+#define SMC_YC_VDEV_KICK_VQ	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+#define SMC_YC_SET_ROT_PARAMS	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535)
+
+#endif /* __LIB_SM_SMCALL_H */
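
For orientation, expanding the encoding macros above for a few of the function IDs defined in this header gives the following values (worked example, not part of the patch):

/*
 * SMC_FC_CPU_SUSPEND       = SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
 *                          = (1u << 31) | (0 << 30) | (60 << 24) | 7 = 0xBC000007
 * SMC_FC64_SET_FIQ_HANDLER = SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
 *                          = (1u << 31) | (1 << 30) | (60 << 24) | 5 = 0xFC000005
 * SMC_YC_NOP               = SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
 *                          = (0 << 31)  | (0 << 30) | (60 << 24) | 1 = 0x3C000001
 */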
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
new file mode 100644
index 0000000..ecbcfae
--- /dev/null
+++ b/services/spd/trusty/trusty.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h> /* for context_mgmt.h */
+#include <bl31.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <string.h>
+
+#include "sm_err.h"
+#include "smcall.h"
+
+/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
+#define HYP_ENABLE_FLAG		0x286001
+
+/* length of Trusty's input parameters (in bytes) */
+#define TRUSTY_PARAMS_LEN_BYTES	(4096*2)
+
+struct trusty_stack {
+	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
+	uint32_t end;
+};
+
+struct trusty_cpu_ctx {
+	cpu_context_t	cpu_ctx;
+	void		*saved_sp;
+	uint32_t	saved_security_state;
+	int		fiq_handler_active;
+	uint64_t	fiq_handler_pc;
+	uint64_t	fiq_handler_cpsr;
+	uint64_t	fiq_handler_sp;
+	uint64_t	fiq_pc;
+	uint64_t	fiq_cpsr;
+	uint64_t	fiq_sp_el1;
+	gp_regs_t	fiq_gpregs;
+	struct trusty_stack	secure_stack;
+};
+
+struct args {
+	uint64_t	r0;
+	uint64_t	r1;
+	uint64_t	r2;
+	uint64_t	r3;
+	uint64_t	r4;
+	uint64_t	r5;
+	uint64_t	r6;
+	uint64_t	r7;
+};
+
+struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
+
+struct args trusty_init_context_stack(void **sp, void *new_stack);
+struct args trusty_context_switch_helper(void **sp, void *smc_params);
+
+static uint32_t current_vmid;
+
+static struct trusty_cpu_ctx *get_trusty_ctx(void)
+{
+	return &trusty_cpu_ctx[plat_my_core_pos()];
+}
+
+static uint32_t is_hypervisor_mode(void)
+{
+	uint64_t hcr = read_hcr();
+
+	return !!(hcr & HYP_ENABLE_FLAG);
+}
+
+static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
+					 uint64_t r1, uint64_t r2, uint64_t r3)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	struct trusty_cpu_ctx *ctx_smc;
+
+	assert(ctx->saved_security_state != security_state);
+
+	ret.r7 = 0;
+	if (is_hypervisor_mode()) {
+		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
+		ctx_smc = cm_get_context(NON_SECURE);
+		assert(ctx_smc);
+		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
+	}
+	/* r4, r5, r6 reserved for future use. */
+	ret.r6 = 0;
+	ret.r5 = 0;
+	ret.r4 = 0;
+	ret.r3 = r3;
+	ret.r2 = r2;
+	ret.r1 = r1;
+	ret.r0 = r0;
+
+	/*
+	 * To avoid the additional overhead in PSCI flow, skip FP context
+	 * saving/restoring in case of CPU suspend and resume, assuming that
+	 * when it's needed the PSCI caller has preserved FP context before
+	 * going here.
+	 */
+#if CTX_INCLUDE_FPREGS
+	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
+		fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
+#endif
+	cm_el1_sysregs_context_save(security_state);
+
+	ctx->saved_security_state = security_state;
+	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
+
+	assert(ctx->saved_security_state == !security_state);
+
+	cm_el1_sysregs_context_restore(security_state);
+#if CTX_INCLUDE_FPREGS
+	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
+		fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
+#endif
+
+	cm_set_next_eret_context(security_state);
+
+	return ret;
+}
+
+static uint64_t trusty_fiq_handler(uint32_t id,
+				   uint32_t flags,
+				   void *handle,
+				   void *cookie)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	assert(!is_caller_secure(flags));
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
+	if (ret.r0) {
+		SMC_RET0(handle);
+	}
+
+	if (ctx->fiq_handler_active) {
+		INFO("%s: fiq handler already active\n", __func__);
+		SMC_RET0(handle);
+	}
+
+	ctx->fiq_handler_active = 1;
+	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
+	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
+
+	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);
+
+	SMC_RET0(handle);
+}
+
+static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
+			uint64_t handler, uint64_t stack)
+{
+	struct trusty_cpu_ctx *ctx;
+
+	if (cpu >= PLATFORM_CORE_COUNT) {
+		ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
+		return SM_ERR_INVALID_PARAMETERS;
+	}
+
+	ctx = &trusty_cpu_ctx[cpu];
+	ctx->fiq_handler_pc = handler;
+	ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+	ctx->fiq_handler_sp = stack;
+
+	SMC_RET1(handle, 0);
+}
+
+static uint64_t trusty_get_fiq_regs(void *handle)
+{
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);
+
+	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
+}
+
+static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+	struct args ret;
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	if (!ctx->fiq_handler_active) {
+		NOTICE("%s: fiq handler not active\n", __func__);
+		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
+	}
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
+	if (ret.r0 != 1) {
+		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
+		       __func__, handle, ret.r0);
+	}
+
+	/*
+	 * Restore register state to state recorded on fiq entry.
+	 *
+	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
+	 * restore them.
+	 *
+	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
+	 * corrupts them (el1 code also restored them).
+	 */
+	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+	ctx->fiq_handler_active = 0;
+	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);
+
+	SMC_RET0(handle);
+}
+
+static uint64_t trusty_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	struct args ret;
+	uint32_t vmid = 0;
+	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+
+	/*
+	 * Return success for SET_ROT_PARAMS if Trusty is not present, as
+	 * Verified Boot is not even supported and returning success here
+	 * would not compromise the boot process.
+	 */
+	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
+		SMC_RET1(handle, 0);
+	} else if (!ep_info) {
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	if (is_caller_secure(flags)) {
+		if (smc_fid == SMC_YC_NS_RETURN) {
+			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
+			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
+				 ret.r4, ret.r5, ret.r6, ret.r7);
+		}
+		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) "
+		     "cpu %d, unknown smc\n",
+		     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
+		     plat_my_core_pos());
+		SMC_RET1(handle, SMC_UNK);
+	} else {
+		switch (smc_fid) {
+		case SMC_FC64_SET_FIQ_HANDLER:
+			return trusty_set_fiq_handler(handle, x1, x2, x3);
+		case SMC_FC64_GET_FIQ_REGS:
+			return trusty_get_fiq_regs(handle);
+		case SMC_FC_FIQ_EXIT:
+			return trusty_fiq_exit(handle, x1, x2, x3);
+		default:
+			if (is_hypervisor_mode())
+				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			if ((current_vmid != 0) && (current_vmid != vmid)) {
+				/*
+				 * In a multi-guest environment this message
+				 * would disturb the SMC mechanism, so it is
+				 * kept at VERBOSE. Change it to WARN if you
+				 * need it.
+				 */
+				VERBOSE("Previous SMC not finished.\n");
+				SMC_RET1(handle, SM_ERR_BUSY);
+			}
+			current_vmid = vmid;
+			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
+				x2, x3);
+			current_vmid = 0;
+			SMC_RET1(handle, ret.r0);
+		}
+	}
+}
+
+static int32_t trusty_init(void)
+{
+	void el3_exit(void);
+	entry_point_info_t *ep_info;
+	struct args zero_args = {0};
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+	uint32_t cpu = plat_my_core_pos();
+	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+			       CTX_SPSR_EL3));
+
+	/*
+	 * Get information about the Trusty image. Its absence is a critical
+	 * failure.
+	 */
+	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	assert(ep_info);
+
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	cm_set_context(&ctx->cpu_ctx, SECURE);
+	cm_init_my_context(ep_info);
+
+	/*
+	 * Adjust the secondary cpu entry point for 32-bit images to the
+	 * end of the exception vectors
+	 */
+	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
+		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
+		     cpu, ep_info->pc + (1U << 5));
+		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
+	}
+
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	ctx->saved_security_state = ~0; /* initial saved state is invalid */
+	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
+
+	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
+
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	return 0;
+}
+
+static void trusty_cpu_suspend(void)
+{
+	struct args ret;
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, 0, 0, 0);
+	if (ret.r0 != 0) {
+		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
+		     __func__, plat_my_core_pos(), ret.r0);
+	}
+}
+
+static void trusty_cpu_resume(void)
+{
+	struct args ret;
+
+	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, 0, 0, 0);
+	if (ret.r0 != 0) {
+		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
+		     __func__, plat_my_core_pos(), ret.r0);
+	}
+}
+
+static int32_t trusty_cpu_off_handler(uint64_t unused)
+{
+	trusty_cpu_suspend();
+
+	return 0;
+}
+
+static void trusty_cpu_on_finish_handler(uint64_t unused)
+{
+	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+	if (!ctx->saved_sp) {
+		trusty_init();
+	} else {
+		trusty_cpu_resume();
+	}
+}
+
+static void trusty_cpu_suspend_handler(uint64_t unused)
+{
+	trusty_cpu_suspend();
+}
+
+static void trusty_cpu_suspend_finish_handler(uint64_t unused)
+{
+	trusty_cpu_resume();
+}
+
+static const spd_pm_ops_t trusty_pm = {
+	.svc_off = trusty_cpu_off_handler,
+	.svc_suspend = trusty_cpu_suspend_handler,
+	.svc_on_finish = trusty_cpu_on_finish_handler,
+	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
+};
+
+static int32_t trusty_setup(void)
+{
+	entry_point_info_t *ep_info;
+	uint32_t flags;
+	int ret;
+
+	/* Get trusty's entry point info */
+	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!ep_info) {
+		INFO("Trusty image missing.\n");
+		return -1;
+	}
+
+	/* Trusty runs in AARCH64 mode */
+	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+
+	/*
+	 * arg0 = TZDRAM aperture available for BL32
+	 * arg1 = BL32 boot params
+	 * arg2 = BL32 boot params length
+	 */
+	ep_info->args.arg1 = ep_info->args.arg2;
+	ep_info->args.arg2 = TRUSTY_PARAMS_LEN_BYTES;
+
+	/* register init handler */
+	bl31_register_bl32_init(trusty_init);
+
+	/* register power management hooks */
+	psci_register_spd_pm_hook(&trusty_pm);
+
+	/* register interrupt handler */
+	flags = 0;
+	set_interrupt_rm_flag(flags, NON_SECURE);
+	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+					      trusty_fiq_handler,
+					      flags);
+	if (ret)
+		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+
+	return 0;
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	trusty_fast,
+
+	OEN_TOS_START,
+	SMC_ENTITY_SECURE_MONITOR,
+	SMC_TYPE_FAST,
+	trusty_setup,
+	trusty_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+	trusty_std,
+
+	OEN_TAP_START,
+	SMC_ENTITY_SECURE_MONITOR,
+	SMC_TYPE_YIELD,
+	NULL,
+	trusty_smc_handler
+);
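
Note on the return bundle used above: trusty_context_switch() hands back a 'struct args' whose members r0-r7 are forwarded via SMC_RET8() and are filled in by the eight 64-bit stores that trusty_context_switch_helper performs through x8 (see trusty_helpers.S below). The sketch here is only an illustration consistent with that usage; the authoritative definition sits earlier in trusty.c and is not reproduced in this hunk.

struct args {
	uint64_t r0, r1, r2, r3;
	uint64_t r4, r5, r6, r7;
};
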
diff --git a/services/spd/trusty/trusty.mk b/services/spd/trusty/trusty.mk
new file mode 100644
index 0000000..beca875
--- /dev/null
+++ b/services/spd/trusty/trusty.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPD_INCLUDES		:=
+
+SPD_SOURCES		:=	services/spd/trusty/trusty.c		\
+				services/spd/trusty/trusty_helpers.S
diff --git a/services/spd/trusty/trusty_helpers.S b/services/spd/trusty/trusty_helpers.S
new file mode 100644
index 0000000..da5cb57
--- /dev/null
+++ b/services/spd/trusty/trusty_helpers.S
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+.macro push ra, rb, sp=sp
+	stp \ra, \rb, [\sp,#-16]!
+.endm
+
+.macro pop ra, rb, sp=sp
+	ldp \ra, \rb, [\sp], #16
+.endm
+
+	.global trusty_context_switch_helper
+func trusty_context_switch_helper
+	push	x8, xzr
+	push	x19, x20
+	push	x21, x22
+	push	x23, x24
+	push	x25, x26
+	push	x27, x28
+	push	x29, x30
+
+	mov	x9, sp
+	ldr	x10, [x0]
+	mov	sp, x10
+	str	x9, [x0]
+
+	pop	x29, x30
+	pop	x27, x28
+	pop	x25, x26
+	pop	x23, x24
+	pop	x21, x22
+	pop	x19, x20
+	pop	x8, xzr
+
+        ldr     x2, [x1]
+        ldr     x3, [x1, #0x08]
+        ldr     x4, [x1, #0x10]
+        ldr     x5, [x1, #0x18]
+        ldr     x6, [x1, #0x20]
+        ldr     x7, [x1, #0x28]
+        ldr     x10, [x1, #0x30]
+        ldr     x11, [x1, #0x38]
+
+        stp     x2, x3, [x8]
+        stp     x4, x5, [x8, #16]
+        stp     x6, x7, [x8, #32]
+        stp     x10, x11, [x8, #48]
+
+	ret
+endfunc trusty_context_switch_helper
+
+	.global trusty_init_context_stack
+func trusty_init_context_stack
+	push	x8, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	push	xzr, xzr, x1
+	adr	x9, el3_exit
+	push	xzr, x9, x1
+	str	x1, [x0]
+	ret
+endfunc trusty_init_context_stack
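
For reference, the frame built by trusty_init_context_stack is seven 16-byte pushes (112 bytes). The hypothetical C view below lists it in the order trusty_context_switch_helper pops it after swapping stacks; the type name is illustrative only. x8 is preserved because AAPCS64 reserves it as the indirect result location register, which is how the resumed context receives the 'struct args' values stored just before the final ret, and x30 is seeded with el3_exit so the very first switch into this frame drops straight into el3_exit.

/* Illustrative layout only, not part of the patch. */
struct trusty_initial_frame {
	uint64_t x29, x30;	/* x30 = el3_exit: the first 'ret' leaves EL3 */
	uint64_t x27, x28;
	uint64_t x25, x26;
	uint64_t x23, x24;
	uint64_t x21, x22;
	uint64_t x19, x20;	/* callee-saved registers, initially zero */
	uint64_t x8, unused;	/* x8: AAPCS64 indirect-result register */
};
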
diff --git a/services/spd/tspd/tspd.mk b/services/spd/tspd/tspd.mk
index 139c7d7..223e418 100644
--- a/services/spd/tspd/tspd.mk
+++ b/services/spd/tspd/tspd.mk
@@ -1,31 +1,7 @@
 #
 # Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 TSPD_DIR		:=	services/spd/tspd
@@ -55,7 +31,17 @@
 
 # Flag used to enable routing of non-secure interrupts to EL3 when they are
 # generated while the code is executing in S-EL1/0.
-TSPD_ROUTE_IRQ_TO_EL3	:=	0
+TSP_NS_INTR_ASYNC_PREEMPT	:=	0
 
-$(eval $(call assert_boolean,TSPD_ROUTE_IRQ_TO_EL3))
-$(eval $(call add_define,TSPD_ROUTE_IRQ_TO_EL3))
+# If TSPD_ROUTE_IRQ_TO_EL3 build flag is defined, use it to define value for
+# TSP_NS_INTR_ASYNC_PREEMPT for backward compatibility.
+ifdef TSPD_ROUTE_IRQ_TO_EL3
+ifeq (${ERROR_DEPRECATED},1)
+$(error "TSPD_ROUTE_IRQ_TO_EL3 is deprecated. Please use the new build flag TSP_NS_INTR_ASYNC_PREEMPT")
+endif
+$(warning "TSPD_ROUTE_IRQ_TO_EL3 is deprecated. Please use the new build flag TSP_NS_INTR_ASYNC_PREEMPT")
+TSP_NS_INTR_ASYNC_PREEMPT	:= ${TSPD_ROUTE_IRQ_TO_EL3}
+endif
+
+$(eval $(call assert_boolean,TSP_NS_INTR_ASYNC_PREEMPT))
+$(eval $(call add_define,TSP_NS_INTR_ASYNC_PREEMPT))
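
The add_define call above is expected to surface the flag to the compiler as -DTSP_NS_INTR_ASYNC_PREEMPT=<0|1>, which the TSPD sources in this patch test with the preprocessor. A minimal sketch of that compile-time gate (the helper name is hypothetical):

#ifndef TSP_NS_INTR_ASYNC_PREEMPT
#define TSP_NS_INTR_ASYNC_PREEMPT 0	/* default mirrors the makefile above */
#endif

static inline int tsp_ns_preemption_enabled(void)
{
#if TSP_NS_INTR_ASYNC_PREEMPT
	return 1;	/* NS interrupts taken in S-EL1/0 are routed to EL3 */
#else
	return 0;	/* NS interrupt routing to EL3 stays disabled */
#endif
}
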
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 322413c..1538232 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -1,38 +1,17 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
 #include <assert.h>
 #include <bl_common.h>
 #include <context_mgmt.h>
+#include <debug.h>
 #include <string.h>
+#include <tsp.h>
+#include <utils.h>
 #include "tspd_private.h"
 
 /*******************************************************************************
@@ -62,7 +41,7 @@
 	tsp_ctx->mpidr = read_mpidr_el1();
 	tsp_ctx->state = 0;
 	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
-	clr_std_smc_active_flag(tsp_ctx->state);
+	clr_yield_smc_active_flag(tsp_ctx->state);
 
 	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
 
@@ -76,7 +55,7 @@
 	tsp_entry_point->spsr = SPSR_64(MODE_EL1,
 					MODE_SP_ELX,
 					DISABLE_ALL_EXCEPTIONS);
-	memset(&tsp_entry_point->args, 0, sizeof(tsp_entry_point->args));
+	zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
 }
 
 /*******************************************************************************
@@ -129,3 +108,31 @@
 	/* Should never reach here */
 	assert(0);
 }
+
+/*******************************************************************************
+ * This function takes an SP context pointer and aborts any preempted SMC
+ * request.
+ * Return 1 if there was a preempted SMC request, 0 otherwise.
+ ******************************************************************************/
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
+{
+	if (!get_yield_smc_active_flag(tsp_ctx->state))
+		return 0;
+
+	/* Abort any preempted SMC request */
+	clr_yield_smc_active_flag(tsp_ctx->state);
+
+	/*
+	 * Arrange for an entry into the test secure payload. It will
+	 * be returned via TSP_ABORT_DONE case in tspd_smc_handler.
+	 */
+	cm_set_elr_el3(SECURE,
+		       (uint64_t) &tsp_vectors->abort_yield_smc_entry);
+	uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+	if (rc != 0)
+		panic();
+
+	return 1;
+}
+
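
tspd_abort_preempted_smc() is invoked from the power-management hooks and the TSP_FID_ABORT path later in this patch, always just before the SECURE cpu_context is reprogrammed. A hedged sketch of that calling pattern, assuming the declarations from tspd_private.h (the wrapper name is hypothetical; the other calls are the ones used below):

static void tspd_pm_enter_sp(tsp_context_t *tsp_ctx, uint64_t entry)
{
	/* Drop any half-finished Yielding SMC; a no-op if none is pending. */
	tspd_abort_preempted_smc(tsp_ctx);

	/* Program the requested entry point and enter the TSP synchronously. */
	cm_set_elr_el3(SECURE, entry);
	if (tspd_synchronous_sp_entry(tsp_ctx) != 0)
		panic();
}
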
diff --git a/services/spd/tspd/tspd_helpers.S b/services/spd/tspd/tspd_helpers.S
index dd3b07b..f15d66b 100644
--- a/services/spd/tspd/tspd_helpers.S
+++ b/services/spd/tspd/tspd_helpers.S
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <asm_macros.S>
@@ -64,6 +40,7 @@
 	 * ---------------------------------------------
 	 */
 	b	el3_exit
+endfunc tspd_enter_sp
 
 	/* ---------------------------------------------
 	 * This function is called 'x0' pointing to a C
@@ -99,3 +76,4 @@
 	 */
 	mov	x0, x1
 	ret
+endfunc tspd_exit_sp
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index ee17483..2ba9f84 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 
@@ -40,8 +16,8 @@
  ******************************************************************************/
 #include <arch_helpers.h>
 #include <assert.h>
-#include <bl_common.h>
 #include <bl31.h>
+#include <bl_common.h>
 #include <context_mgmt.h>
 #include <debug.h>
 #include <errno.h>
@@ -72,9 +48,16 @@
 
 int32_t tspd_init(void);
 
+/*
+ * This helper function handles Secure EL1 preemption. The preemption could be
+ * due to Non-secure interrupts or EL3 interrupts. In both cases we context
+ * switch to the normal world; in the case of EL3 interrupts, the interrupt
+ * will again be routed to EL3 and handled at the exception vectors.
+ */
 uint64_t tspd_handle_sp_preemption(void *handle)
 {
 	cpu_context_t *ns_cpu_context;
+
 	assert(handle == cm_get_context(SECURE));
 	cm_el1_sysregs_context_save(SECURE);
 	/* Get a reference to the non-secure context */
@@ -82,18 +65,30 @@
 	assert(ns_cpu_context);
 
 	/*
-	 * Restore non-secure state. The secure system
-	 * register context will be saved when required.
+	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
+	 * the TSP is preempted, the secure system register context which will
+	 * get overwritten must be additionally saved. This is currently done
+	 * by the TSPD S-EL1 interrupt handler.
+	 */
+
+	/*
+	 * Restore non-secure state.
 	 */
 	cm_el1_sysregs_context_restore(NON_SECURE);
 	cm_set_next_eret_context(NON_SECURE);
 
+	/*
+	 * The TSP was preempted during execution of a Yielding SMC Call.
+	 * Return to the normal world with SMC_PREEMPTED as the error
+	 * code in x0.
+	 */
 	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
 }
+
 /*******************************************************************************
  * This function is the handler registered for S-EL1 interrupts by the TSPD. It
  * validates the interrupt and upon success arranges entry into the TSP at
- * 'tsp_fiq_entry()' for handling the interrupt.
+ * 'tsp_sel1_intr_entry()' for handling the interrupt.
  ******************************************************************************/
 static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
 					    uint32_t flags,
@@ -101,26 +96,19 @@
 					    void *cookie)
 {
 	uint32_t linear_id;
-	uint64_t mpidr;
 	tsp_context_t *tsp_ctx;
 
 	/* Check the security state when the exception was generated */
 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
 
-#if IMF_READ_INTERRUPT_ID
-	/* Check the security status of the interrupt */
-	assert(plat_ic_get_interrupt_type(id) == INTR_TYPE_S_EL1);
-#endif
-
 	/* Sanity check the pointer to this cpu's context */
-	mpidr = read_mpidr();
 	assert(handle == cm_get_context(NON_SECURE));
 
 	/* Save the non-secure context before entering the TSP */
 	cm_el1_sysregs_context_save(NON_SECURE);
 
 	/* Get a reference to this cpu's TSP context */
-	linear_id = platform_get_core_pos(mpidr);
+	linear_id = plat_my_core_pos();
 	tsp_ctx = &tspd_sp_context[linear_id];
 	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
 
@@ -128,44 +116,44 @@
 	 * Determine if the TSP was previously preempted. Its last known
 	 * context has to be preserved in this case.
 	 * The TSP should return control to the TSPD after handling this
-	 * FIQ. Preserve essential EL3 context to allow entry into the
-	 * TSP at the FIQ entry point using the 'cpu_context' structure.
-	 * There is no need to save the secure system register context
-	 * since the TSP is supposed to preserve it during S-EL1 interrupt
-	 * handling.
+	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
+	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
+	 * structure. There is no need to save the secure system register
+	 * context since the TSP is supposed to preserve it during S-EL1
+	 * interrupt handling.
 	 */
-	if (get_std_smc_active_flag(tsp_ctx->state)) {
+	if (get_yield_smc_active_flag(tsp_ctx->state)) {
 		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
 						      CTX_SPSR_EL3);
 		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
 						     CTX_ELR_EL3);
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 		/*Need to save the previously interrupted secure context */
 		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
 #endif
 	}
 
 	cm_el1_sysregs_context_restore(SECURE);
-	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry,
+	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
 		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
 
 	cm_set_next_eret_context(SECURE);
 
 	/*
-	 * Tell the TSP that it has to handle an FIQ synchronously. Also the
-	 * instruction in normal world where the interrupt was generated is
-	 * passed for debugging purposes. It is safe to retrieve this address
-	 * from ELR_EL3 as the secure context will not take effect until
-	 * el3_exit().
+	 * Tell the TSP that it has to handle a S-EL1 interrupt synchronously.
+	 * Also the instruction in normal world where the interrupt was
+	 * generated is passed for debugging purposes. It is safe to retrieve
+	 * this address from ELR_EL3 as the secure context will not take effect
+	 * until el3_exit().
 	 */
-	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3());
+	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
 }
 
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 /*******************************************************************************
- * This function is the handler registered for S-EL1 interrupts by the TSPD. It
- * validates the interrupt and upon success arranges entry into the TSP at
- * 'tsp_fiq_entry()' for handling the interrupt.
+ * This function is the handler registered for non-secure interrupts by the
+ * TSPD. It validates the interrupt and upon success arranges entry into the
+ * normal world for handling the interrupt.
  ******************************************************************************/
 static uint64_t tspd_ns_interrupt_handler(uint32_t id,
 					    uint32_t flags,
@@ -175,10 +163,6 @@
 	/* Check the security state when the exception was generated */
 	assert(get_interrupt_src_ss(flags) == SECURE);
 
-#if IMF_READ_INTERRUPT_ID
-	/* Check the security status of the interrupt */
-	assert(plat_ic_get_interrupt_type(id) == INTR_TYPE_NS);
-#endif
 	/*
 	 * Disable the routing of NS interrupts from secure world to EL3 while
 	 * interrupted on this core.
@@ -197,10 +181,9 @@
 int32_t tspd_setup(void)
 {
 	entry_point_info_t *tsp_ep_info;
-	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id;
 
-	linear_id = platform_get_core_pos(mpidr);
+	linear_id = plat_my_core_pos();
 
 	/*
 	 * Get information about the Secure Payload (BL32) image. Its
@@ -224,7 +207,7 @@
 		return 1;
 
 	/*
-	 * We could inspect the SP image and determine it's execution
+	 * We could inspect the SP image and determine its execution
 	 * state i.e whether AArch32 or AArch64. Assuming it's AArch64
 	 * for the time being.
 	 */
@@ -256,8 +239,7 @@
  ******************************************************************************/
 int32_t tspd_init(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 	entry_point_info_t *tsp_entry_point;
 	uint64_t rc;
@@ -269,7 +251,7 @@
 	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
 	assert(tsp_entry_point);
 
-	cm_init_context(mpidr, tsp_entry_point);
+	cm_init_my_context(tsp_entry_point);
 
 	/*
 	 * Arrange for an entry into the test secure payload. It will be
@@ -300,8 +282,7 @@
 			 uint64_t flags)
 {
 	cpu_context_t *ns_cpu_context;
-	unsigned long mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
+	uint32_t linear_id = plat_my_core_pos(), ns;
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 	uint64_t rc;
 #if TSP_INIT_ASYNC
@@ -326,10 +307,11 @@
 
 	/*
 	 * This function ID is used only by the TSP to indicate that it has
-	 * finished handling a S-EL1 FIQ interrupt. Execution should resume
+	 * finished handling a S-EL1 interrupt or was preempted by a higher
+	 * priority pending EL3 interrupt. Execution should resume
 	 * in the normal world.
 	 */
-	case TSP_HANDLED_S_EL1_FIQ:
+	case TSP_HANDLED_S_EL1_INTR:
 		if (ns)
 			SMC_RET1(handle, SMC_UNK);
 
@@ -339,14 +321,14 @@
 		 * Restore the relevant EL3 state which saved to service
 		 * this SMC.
 		 */
-		if (get_std_smc_active_flag(tsp_ctx->state)) {
+		if (get_yield_smc_active_flag(tsp_ctx->state)) {
 			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
 				    CTX_SPSR_EL3,
 				    tsp_ctx->saved_spsr_el3);
 			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
 				    CTX_ELR_EL3,
 				    tsp_ctx->saved_elr_el3);
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 			/*
 			 * Need to restore the previously interrupted
 			 * secure context.
@@ -370,35 +352,6 @@
 
 		SMC_RET0((uint64_t) ns_cpu_context);
 
-
-	/*
-	 * This function ID is used only by the TSP to indicate that it was
-	 * interrupted due to a EL3 FIQ interrupt. Execution should resume
-	 * in the normal world.
-	 */
-	case TSP_EL3_FIQ:
-		if (ns)
-			SMC_RET1(handle, SMC_UNK);
-
-		assert(handle == cm_get_context(SECURE));
-
-		/* Assert that standard SMC execution has been preempted */
-		assert(get_std_smc_active_flag(tsp_ctx->state));
-
-		/* Save the secure system register state */
-		cm_el1_sysregs_context_save(SECURE);
-
-		/* Get a reference to the non-secure context */
-		ns_cpu_context = cm_get_context(NON_SECURE);
-		assert(ns_cpu_context);
-
-		/* Restore non-secure state */
-		cm_el1_sysregs_context_restore(NON_SECURE);
-		cm_set_next_eret_context(NON_SECURE);
-
-		SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);
-
-
 	/*
 	 * This function ID is used only by the SP to indicate it has
 	 * finished initialising itself after a cold boot
@@ -436,7 +389,7 @@
 			if (rc)
 				panic();
 
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 			/*
 			 * Register an interrupt handler for NS interrupts when
 			 * generated during code executing in secure state are
@@ -452,8 +405,7 @@
 				panic();
 
 			/*
-			 * Disable the interrupt NS locally since it will be enabled globally
-			 * within cm_init_context.
+			 * Disable the NS interrupt locally.
 			 */
 			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
 #endif
@@ -471,7 +423,7 @@
 		assert(NON_SECURE ==
 				GET_SECURITY_STATE(next_image_info->h.attr));
 
-		cm_init_context(read_mpidr_el1(), next_image_info);
+		cm_init_my_context(next_image_info);
 		cm_prepare_el3_exit(NON_SECURE);
 		SMC_RET0(cm_get_context(NON_SECURE));
 #else
@@ -483,9 +435,14 @@
 		 */
 		tspd_synchronous_sp_exit(tsp_ctx, x1);
 #endif
+	/*
+	 * This function ID is used only by the SP to indicate it has finished
+	 * aborting a preempted Yielding SMC Call.
+	 */
+	case TSP_ABORT_DONE:
 
 	/*
-	 * These function IDs is used only by the SP to indicate it has
+	 * These function IDs are used only by the SP to indicate it has
 	 * finished:
 	 * 1. turning itself on in response to an earlier psci
 	 *    cpu_on request
@@ -496,7 +453,7 @@
 	case TSP_RESUME_DONE:
 
 	/*
-	 * These function IDs is used only by the SP to indicate it has
+	 * These function IDs are used only by the SP to indicate it has
 	 * finished:
 	 * 1. suspending itself after an earlier psci cpu_suspend
 	 *    request.
@@ -528,10 +485,10 @@
 	case TSP_FAST_FID(TSP_MUL):
 	case TSP_FAST_FID(TSP_DIV):
 
-	case TSP_STD_FID(TSP_ADD):
-	case TSP_STD_FID(TSP_SUB):
-	case TSP_STD_FID(TSP_MUL):
-	case TSP_STD_FID(TSP_DIV):
+	case TSP_YIELD_FID(TSP_ADD):
+	case TSP_YIELD_FID(TSP_SUB):
+	case TSP_YIELD_FID(TSP_MUL):
+	case TSP_YIELD_FID(TSP_DIV):
 		if (ns) {
 			/*
 			 * This is a fresh request from the non-secure client.
@@ -542,7 +499,7 @@
 			assert(handle == cm_get_context(NON_SECURE));
 
 			/* Check if we are already preempted */
-			if (get_std_smc_active_flag(tsp_ctx->state))
+			if (get_yield_smc_active_flag(tsp_ctx->state))
 				SMC_RET1(handle, SMC_UNK);
 
 			cm_el1_sysregs_context_save(NON_SECURE);
@@ -572,13 +529,14 @@
 				cm_set_elr_el3(SECURE, (uint64_t)
 						&tsp_vectors->fast_smc_entry);
 			} else {
-				set_std_smc_active_flag(tsp_ctx->state);
+				set_yield_smc_active_flag(tsp_ctx->state);
 				cm_set_elr_el3(SECURE, (uint64_t)
-						&tsp_vectors->std_smc_entry);
-#if TSPD_ROUTE_IRQ_TO_EL3
+						&tsp_vectors->yield_smc_entry);
+#if TSP_NS_INTR_ASYNC_PREEMPT
 				/*
 				 * Enable the routing of NS interrupts to EL3
-				 * during STD SMC processing on this core.
+				 * during processing of a Yielding SMC Call on
+				 * this core.
 				 */
 				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
 #endif
@@ -604,13 +562,13 @@
 			/* Restore non-secure state */
 			cm_el1_sysregs_context_restore(NON_SECURE);
 			cm_set_next_eret_context(NON_SECURE);
-			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) {
-				clr_std_smc_active_flag(tsp_ctx->state);
-#if TSPD_ROUTE_IRQ_TO_EL3
+			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
+				clr_yield_smc_active_flag(tsp_ctx->state);
+#if TSP_NS_INTR_ASYNC_PREEMPT
 				/*
 				 * Disable the routing of NS interrupts to EL3
-				 * after STD SMC processing is finished on this
-				 * core.
+				 * after processing of a Yielding SMC Call on
+				 * this core is finished.
 				 */
 				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
 #endif
@@ -620,10 +578,41 @@
 		}
 
 		break;
+	/*
+	 * Request from the non-secure world to abort a preempted Yielding SMC
+	 * Call.
+	 */
+	case TSP_FID_ABORT:
+		/* ABORT should only be invoked by normal world */
+		if (!ns) {
+			assert(0);
+			break;
+		}
+
+		assert(handle == cm_get_context(NON_SECURE));
+		cm_el1_sysregs_context_save(NON_SECURE);
+
+		/* Abort the preempted SMC request */
+		if (!tspd_abort_preempted_smc(tsp_ctx)) {
+			/*
+			 * If there was no preempted SMC to abort, return
+			 * SMC_UNK.
+			 *
+			 * Restoring the NON_SECURE context is not necessary as
+			 * the synchronous entry did not take place if the
+			 * return code of tspd_abort_preempted_smc is zero.
+			 */
+			cm_set_next_eret_context(NON_SECURE);
+			break;
+		}
+
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+		SMC_RET1(handle, SMC_OK);
 
 		/*
 		 * Request from non secure world to resume the preempted
-		 * Standard SMC call.
+		 * Yielding SMC Call.
 		 */
 	case TSP_FID_RESUME:
 		/* RESUME should be invoked only by normal world */
@@ -640,7 +629,7 @@
 		assert(handle == cm_get_context(NON_SECURE));
 
 		/* Check if we are already preempted before resume */
-		if (!get_std_smc_active_flag(tsp_ctx->state))
+		if (!get_yield_smc_active_flag(tsp_ctx->state))
 			SMC_RET1(handle, SMC_UNK);
 
 		cm_el1_sysregs_context_save(NON_SECURE);
@@ -649,10 +638,10 @@
 		 * We are done stashing the non-secure context. Ask the
 		 * secure payload to do the work now.
 		 */
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 		/*
 		 * Enable the routing of NS interrupts to EL3 during resumption
-		 * of STD SMC call on this core.
+		 * of a Yielding SMC Call on this core.
 		 */
 		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
 #endif
@@ -712,13 +701,13 @@
 	tspd_smc_handler
 );
 
-/* Define a SPD runtime service descriptor for standard SMC calls */
+/* Define a SPD runtime service descriptor for Yielding SMC Calls */
 DECLARE_RT_SVC(
 	tspd_std,
 
 	OEN_TOS_START,
 	OEN_TOS_END,
-	SMC_TYPE_STD,
+	SMC_TYPE_YIELD,
 	NULL,
 	tspd_smc_handler
 );
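
The yielding-call life cycle that tspd_main.c now enforces can be summed up as: a fresh yielding FID is rejected while another one is active, a preempted call returns SMC_PREEMPTED to the normal world, and the caller then either resumes it with TSP_FID_RESUME or cancels it with TSP_FID_ABORT. The snippet below is a host-side toy model of those transitions with hypothetical names and no TF APIs; it is not part of the patch.

#include <stdbool.h>
#include <stdio.h>

enum result { OK, UNK, PREEMPTED };	/* stand-ins for SMC_OK/SMC_UNK/SMC_PREEMPTED */

static bool yield_active;

static enum result yield_request(bool preempted_by_interrupt)
{
	if (yield_active)
		return UNK;		/* already servicing a yielding call */
	yield_active = true;
	if (preempted_by_interrupt)
		return PREEMPTED;	/* control handed back to the normal world */
	yield_active = false;		/* the call ran to completion */
	return OK;
}

static enum result yield_resume(void)
{
	if (!yield_active)
		return UNK;		/* nothing was preempted */
	yield_active = false;		/* assume the resumed call now completes */
	return OK;
}

static enum result yield_abort(void)
{
	if (!yield_active)
		return UNK;		/* nothing to abort */
	yield_active = false;
	return OK;
}

int main(void)
{
	printf("request: %d\n", yield_request(true));	/* PREEMPTED */
	printf("abort:   %d\n", yield_abort());		/* OK */
	printf("resume:  %d\n", yield_resume());	/* UNK: nothing active */
	return 0;
}
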
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
index 009ff5f..5fdd3dc 100644
--- a/services/spd/tspd/tspd_pm.c
+++ b/services/spd/tspd/tspd_pm.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch_helpers.h>
@@ -52,13 +28,18 @@
 static int32_t tspd_cpu_off_handler(uint64_t unused)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point and enter the TSP */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry);
 	rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -76,23 +57,28 @@
 	 */
 	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
 
-	 return 0;
+	return 0;
 }
 
 /*******************************************************************************
  * This cpu is being suspended. S-EL1 state must have been saved in the
  * resident cpu (mpidr format) if it is a UP/UP migratable TSP.
  ******************************************************************************/
-static void tspd_cpu_suspend_handler(uint64_t unused)
+static void tspd_cpu_suspend_handler(uint64_t max_off_pwrlvl)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point and enter the TSP */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry);
 	rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -101,7 +87,7 @@
 	 * Read the response from the TSP. A non-zero return means that
 	 * something went wrong while communicating with the TSP.
 	 */
-	if (rc != 0)
+	if (rc)
 		panic();
 
 	/* Update its context to reflect the state the TSP is in */
@@ -110,15 +96,14 @@
 
 /*******************************************************************************
  * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits
- * before passing control back to the Secure Monitor. Entry in S-El1 is done
+ * before passing control back to the Secure Monitor. Entry in S-EL1 is done
  * after initialising minimal architectural state that guarantees safe
  * execution.
  ******************************************************************************/
 static void tspd_cpu_on_finish_handler(uint64_t unused)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 	entry_point_info_t tsp_on_entrypoint;
 
@@ -131,12 +116,12 @@
 				tsp_ctx);
 
 	/* Initialise this cpu's secure context */
-	cm_init_context(mpidr, &tsp_on_entrypoint);
+	cm_init_my_context(&tsp_on_entrypoint);
 
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 	/*
 	 * Disable the NS interrupt locally since it will be enabled globally
-	 * within cm_init_context.
+	 * within cm_init_my_context.
 	 */
 	disable_intr_rm_local(INTR_TYPE_NS, SECURE);
 #endif
@@ -160,20 +145,19 @@
  * completed the preceding suspend call. Use that context to program an entry
  * into the TSP to allow it to do any remaining book keeping
  ******************************************************************************/
-static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level)
+static void tspd_cpu_suspend_finish_handler(uint64_t max_off_pwrlvl)
 {
 	int32_t rc = 0;
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);
 
-	/* Program the entry point, suspend_level and enter the SP */
+	/* Program the entry point, max_off_pwrlvl and enter the SP */
 	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
 		      CTX_GPREG_X0,
-		      suspend_level);
+		      max_off_pwrlvl);
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_resume_entry);
 	rc = tspd_synchronous_sp_entry(tsp_ctx);
 
@@ -203,13 +187,18 @@
  ******************************************************************************/
 static void tspd_system_off(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_off_entry);
 
@@ -224,18 +213,25 @@
  ******************************************************************************/
 static void tspd_system_reset(void)
 {
-	uint64_t mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
+	uint32_t linear_id = plat_my_core_pos();
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 
 	assert(tsp_vectors);
 	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
 
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
 	/* Program the entry point */
 	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_reset_entry);
 
-	/* Enter the TSP. We do not care about the return value because we
-	 * must continue the reset anyway */
+	/*
+	 * Enter the TSP. We do not care about the return value because we
+	 * must continue the reset anyway
+	 */
 	tspd_synchronous_sp_entry(tsp_ctx);
 }
 
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index 5f6fb2b..8fadb7a 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __TSPD_PRIVATE_H__
@@ -57,7 +33,7 @@
 
 
 /*
- * This flag is used by the TSPD to determine if the TSP is servicing a standard
+ * This flag is used by the TSPD to determine if the TSP is servicing a yielding
  * SMC request prior to programming the next entry into the TSP e.g. if TSP
  * execution is preempted by a non-secure interrupt and handed control to the
  * normal world. If another request which is distinct from what the TSP was
@@ -65,15 +41,16 @@
  * reject the new request or service it while ensuring that the previous context
  * is not corrupted.
  */
-#define STD_SMC_ACTIVE_FLAG_SHIFT	2
-#define STD_SMC_ACTIVE_FLAG_MASK	1
-#define get_std_smc_active_flag(state)	((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \
-					 & STD_SMC_ACTIVE_FLAG_MASK)
-#define set_std_smc_active_flag(state)	(state |=                             \
-					 1 << STD_SMC_ACTIVE_FLAG_SHIFT)
-#define clr_std_smc_active_flag(state)	(state &=                             \
-					 ~(STD_SMC_ACTIVE_FLAG_MASK           \
-					   << STD_SMC_ACTIVE_FLAG_SHIFT))
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
+#define YIELD_SMC_ACTIVE_FLAG_MASK	1
+#define get_yield_smc_active_flag(state)				\
+				((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
+				& YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state)	(state |=		\
+					1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
+#define clr_yield_smc_active_flag(state)	(state &=		\
+					~(YIELD_SMC_ACTIVE_FLAG_MASK	\
+					<< YIELD_SMC_ACTIVE_FLAG_SHIFT))
 
 /*******************************************************************************
  * Secure Payload execution state information i.e. aarch32 or aarch64
@@ -183,10 +160,10 @@
 
 /*******************************************************************************
  * Structure which helps the SPD to maintain the per-cpu state of the SP.
- * 'saved_spsr_el3' - temporary copy to allow FIQ handling when the TSP has been
- *                    preempted.
- * 'saved_elr_el3'  - temporary copy to allow FIQ handling when the TSP has been
- *                    preempted.
+ * 'saved_spsr_el3' - temporary copy to allow S-EL1 interrupt handling when
+ *                    the TSP has been preempted.
+ * 'saved_elr_el3'  - temporary copy to allow S-EL1 interrupt handling when
+ *                    the TSP has been preempted.
  * 'state'          - collection of flags to track SP state e.g. on/off
  * 'mpidr'          - mpidr to associate a context with a cpu
  * 'c_rt_ctx'       - stack address to restore C runtime context from after
@@ -207,7 +184,7 @@
 	uint64_t c_rt_ctx;
 	cpu_context_t cpu_ctx;
 	uint64_t saved_tsp_args[TSP_NUM_ARGS];
-#if TSPD_ROUTE_IRQ_TO_EL3
+#if TSP_NS_INTR_ASYNC_PREEMPT
 	sp_ctx_regs_t sp_ctx;
 #endif
 } tsp_context_t;
@@ -242,6 +219,7 @@
 				uint32_t rw,
 				uint64_t pc,
 				tsp_context_t *tsp_ctx);
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);
 
 extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
 extern struct tsp_vectors *tsp_vectors;
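
The renamed yield-active flag is still just bit 2 of the per-cpu 'state' word. The standalone check below duplicates the macros purely for illustration (the authoritative definitions are the ones in this header) and verifies the set/get/clear semantics on a host build:

#include <assert.h>
#include <stdint.h>

#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
#define YIELD_SMC_ACTIVE_FLAG_MASK	1
#define get_yield_smc_active_flag(state)				\
				((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
				& YIELD_SMC_ACTIVE_FLAG_MASK)
#define set_yield_smc_active_flag(state)	(state |=		\
					1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
#define clr_yield_smc_active_flag(state)	(state &=		\
					~(YIELD_SMC_ACTIVE_FLAG_MASK	\
					<< YIELD_SMC_ACTIVE_FLAG_SHIFT))

int main(void)
{
	uint32_t state = 0;

	set_yield_smc_active_flag(state);
	assert(get_yield_smc_active_flag(state) == 1);
	assert(state == (1U << YIELD_SMC_ACTIVE_FLAG_SHIFT));

	clr_yield_smc_active_flag(state);
	assert(get_yield_smc_active_flag(state) == 0);

	return 0;
}
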
diff --git a/services/std_svc/psci/psci_afflvl_off.c b/services/std_svc/psci/psci_afflvl_off.c
deleted file mode 100644
index 7eb9688..0000000
--- a/services/std_svc/psci/psci_afflvl_off.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <string.h>
-#include "psci_private.h"
-
-typedef void (*afflvl_off_handler_t)(aff_map_node_t *node);
-
-/*******************************************************************************
- * The next three functions implement a handler for each supported affinity
- * level which is called when that affinity level is turned off.
- ******************************************************************************/
-static void psci_afflvl0_off(aff_map_node_t *cpu_node)
-{
-	assert(cpu_node->level == MPIDR_AFFLVL0);
-
-	/*
-	 * Arch. management. Perform the necessary steps to flush all
-	 * cpu caches.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
-
-	/*
-	 * Plat. management: Perform platform specific actions to turn this
-	 * cpu off e.g. exit cpu coherency, program the power controller etc.
-	 */
-	psci_plat_pm_ops->affinst_off(cpu_node->level,
-				     psci_get_phys_state(cpu_node));
-}
-
-static void psci_afflvl1_off(aff_map_node_t *cluster_node)
-{
-	/* Sanity check the cluster level */
-	assert(cluster_node->level == MPIDR_AFFLVL1);
-
-	/*
-	 * Arch. Management. Flush all levels of caches to PoC if
-	 * the cluster is to be shutdown.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
-
-	/*
-	 * Plat. Management. Allow the platform to do its cluster
-	 * specific bookeeping e.g. turn off interconnect coherency,
-	 * program the power controller etc.
-	 */
-	psci_plat_pm_ops->affinst_off(cluster_node->level,
-					     psci_get_phys_state(cluster_node));
-}
-
-static void psci_afflvl2_off(aff_map_node_t *system_node)
-{
-	/* Cannot go beyond this level */
-	assert(system_node->level == MPIDR_AFFLVL2);
-
-	/*
-	 * Keep the physical state of the system handy to decide what
-	 * action needs to be taken
-	 */
-
-	/*
-	 * Arch. Management. Flush all levels of caches to PoC if
-	 * the system is to be shutdown.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
-
-	/*
-	 * Plat. Management : Allow the platform to do its bookeeping
-	 * at this affinity level
-	 */
-	psci_plat_pm_ops->affinst_off(system_node->level,
-					     psci_get_phys_state(system_node));
-}
-
-static const afflvl_off_handler_t psci_afflvl_off_handlers[] = {
-	psci_afflvl0_off,
-	psci_afflvl1_off,
-	psci_afflvl2_off,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the off handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[],
-				  int start_afflvl,
-				  int end_afflvl)
-{
-	int level;
-	aff_map_node_t *node;
-
-	for (level = start_afflvl; level <= end_afflvl; level++) {
-		node = mpidr_nodes[level];
-		if (node == NULL)
-			continue;
-
-		psci_afflvl_off_handlers[level](node);
-	}
-}
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to power itself down.
- * It's assumed that along with turning the cpu off, higher affinity levels will
- * be turned off as far as possible. It traverses through all the affinity
- * levels performing generic, architectural, platform setup and state management
- * e.g. for a cluster that's to be powered off, it will call the platform
- * specific code which will disable coherency at the interconnect level if the
- * cpu is the last in the cluster. For a cpu it could mean programming the power
- * the power controller etc.
- *
- * The state of all the relevant affinity levels is changed prior to calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is about to enter.
- *
- * The affinity level specific handlers are called in ascending order i.e. from
- * the lowest to the highest affinity level implemented by the platform because
- * to turn off affinity level X it is neccesary to turn off affinity level X - 1
- * first.
- ******************************************************************************/
-int psci_afflvl_off(int start_afflvl,
-		    int end_afflvl)
-{
-	int rc;
-	mpidr_aff_map_nodes_t mpidr_nodes;
-	unsigned int max_phys_off_afflvl;
-
-	/*
-	 * This function must only be called on platforms where the
-	 * CPU_OFF platform hooks have been implemented.
-	 */
-	assert(psci_plat_pm_ops->affinst_off);
-
-	/*
-	 * Collect the pointers to the nodes in the topology tree for
-	 * each affinity instance in the mpidr. If this function does
-	 * not return successfully then either the mpidr or the affinity
-	 * levels are incorrect. Either way, this an internal TF error
-	 * therefore assert.
-	 */
-	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
-				    start_afflvl,
-				    end_afflvl,
-				    mpidr_nodes);
-	assert(rc == PSCI_E_SUCCESS);
-
-	/*
-	 * This function acquires the lock corresponding to each affinity
-	 * level so that by the time all locks are taken, the system topology
-	 * is snapshot and state management can be done safely.
-	 */
-	psci_acquire_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-
-
-	/*
-	 * Call the cpu off handler registered by the Secure Payload Dispatcher
-	 * to let it do any bookkeeping. Assume that the SPD always reports an
-	 * E_DENIED error if SP refuse to power down
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_off) {
-		rc = psci_spd_pm->svc_off(0);
-		if (rc)
-			goto exit;
-	}
-
-	/*
-	 * This function updates the state of each affinity instance
-	 * corresponding to the mpidr in the range of affinity levels
-	 * specified.
-	 */
-	psci_do_afflvl_state_mgmt(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes,
-				  PSCI_STATE_OFF);
-
-	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
-							   end_afflvl,
-							   mpidr_nodes);
-	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
-
-	/* Stash the highest affinity level that will enter the OFF state. */
-	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
-
-	/* Perform generic, architecture and platform specific handling */
-	psci_call_off_handlers(mpidr_nodes,
-				    start_afflvl,
-				    end_afflvl);
-
-	/*
-	 * Invalidate the entry for the highest affinity level stashed earlier.
-	 * This ensures that any reads of this variable outside the power
-	 * up/down sequences return PSCI_INVALID_DATA.
-	 *
-	 */
-	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
-
-exit:
-	/*
-	 * Release the locks corresponding to each affinity level in the
-	 * reverse order to which they were acquired.
-	 */
-	psci_release_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-
-	/*
-	 * Check if all actions needed to safely power down this cpu have
-	 * successfully completed. Enter a wfi loop which will allow the
-	 * power controller to physically power down this cpu.
-	 */
-	if (rc == PSCI_E_SUCCESS)
-		psci_power_down_wfi();
-
-	return rc;
-}
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
deleted file mode 100644
index 0ee03cb..0000000
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <debug.h>
-#include <context_mgmt.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
-				 aff_map_node_t *node);
-
-/*******************************************************************************
- * This function checks whether a cpu which has been requested to be turned on
- * is OFF to begin with.
- ******************************************************************************/
-static int cpu_on_validate_state(unsigned int psci_state)
-{
-	if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
-		return PSCI_E_ALREADY_ON;
-
-	if (psci_state == PSCI_STATE_ON_PENDING)
-		return PSCI_E_ON_PENDING;
-
-	assert(psci_state == PSCI_STATE_OFF);
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * Handler routine to turn a cpu on. It takes care of any generic, architectural
- * or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl0_on(unsigned long target_cpu,
-			   aff_map_node_t *cpu_node)
-{
-	unsigned long psci_entrypoint;
-
-	/* Sanity check to safeguard against data corruption */
-	assert(cpu_node->level == MPIDR_AFFLVL0);
-
-	/* Set the secure world (EL3) re-entry point after BL1 */
-	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-
-	/*
-	 * Plat. management: Give the platform the current state
-	 * of the target cpu to allow it to perform the necessary
-	 * steps to power on.
-	 */
-	return psci_plat_pm_ops->affinst_on(target_cpu,
-					    psci_entrypoint,
-					    cpu_node->level,
-					    psci_get_phys_state(cpu_node));
-}
-
-/*******************************************************************************
- * Handler routine to turn a cluster on. It takes care of any generic, arch.
- * or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl1_on(unsigned long target_cpu,
-			   aff_map_node_t *cluster_node)
-{
-	unsigned long psci_entrypoint;
-
-	assert(cluster_node->level == MPIDR_AFFLVL1);
-
-	/*
-	 * There is no generic or arch. specific cluster
-	 * management required.
-	 */
-
-	/* State management: Is not required while turning a cluster on */
-
-	/*
-	 * Plat. management: Give the platform the current state
-	 * of the target cpu to allow it to perform the necessary
-	 * steps to power on.
-	 */
-	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-	return psci_plat_pm_ops->affinst_on(target_cpu,
-					    psci_entrypoint,
-					    cluster_node->level,
-					    psci_get_phys_state(cluster_node));
-}
-
-/*******************************************************************************
- * Handler routine to turn a cluster of clusters on. It takes care of any
- * generic, arch. or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl2_on(unsigned long target_cpu,
-			   aff_map_node_t *system_node)
-{
-	unsigned long psci_entrypoint;
-
-	/* Cannot go beyond affinity level 2 in this psci imp. */
-	assert(system_node->level == MPIDR_AFFLVL2);
-
-	/*
-	 * There is no generic or arch. specific system management
-	 * required.
-	 */
-
-	/* State management: Is not required while turning a system on */
-
-	/*
-	 * Plat. management: Give the platform the current state
-	 * of the target cpu to allow it to perform the necessary
-	 * steps to power on.
-	 */
-	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-	return psci_plat_pm_ops->affinst_on(target_cpu,
-					    psci_entrypoint,
-					    system_node->level,
-					    psci_get_phys_state(system_node));
-}
-
-/* Private array to make these handlers accessible through indexing */
-static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
-	psci_afflvl0_on,
-	psci_afflvl1_on,
-	psci_afflvl2_on,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the on handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
-				 int start_afflvl,
-				 int end_afflvl,
-				 unsigned long target_cpu)
-{
-	int rc = PSCI_E_INVALID_PARAMS, level;
-	aff_map_node_t *node;
-
-	for (level = end_afflvl; level >= start_afflvl; level--) {
-		node = target_cpu_nodes[level];
-		if (node == NULL)
-			continue;
-
-		/*
-		 * TODO: In case of an error, should there be a way
-		 * of undoing what we might have set up at higher
-		 * affinity levels?
-		 */
-		rc = psci_afflvl_on_handlers[level](target_cpu,
-						    node);
-		if (rc != PSCI_E_SUCCESS)
-			break;
-	}
-
-	return rc;
-}
-
-/*******************************************************************************
- * Generic handler which is called to physically power on a cpu identified by
- * its mpidr. It traverses through all the affinity levels performing generic,
- * architectural, platform setup and state management e.g. for a cpu that is
- * to be powered on, it will ensure that enough information is stashed for it
- * to resume execution in the non-secure security state.
- *
- * The state of all the relevant affinity levels is changed after calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is currently in.
- *
- * The affinity level specific handlers are called in descending order i.e. from
- * the highest to the lowest affinity level implemented by the platform because
- * to turn on affinity level X it is necessary to turn on affinity level X + 1
- * first.
- ******************************************************************************/
-int psci_afflvl_on(unsigned long target_cpu,
-		   entry_point_info_t *ep,
-		   int start_afflvl,
-		   int end_afflvl)
-{
-	int rc;
-	mpidr_aff_map_nodes_t target_cpu_nodes;
-
-	/*
-	 * This function must only be called on platforms where the
-	 * CPU_ON platform hooks have been implemented.
-	 */
-	assert(psci_plat_pm_ops->affinst_on &&
-			psci_plat_pm_ops->affinst_on_finish);
-
-	/*
-	 * Collect the pointers to the nodes in the topology tree for
-	 * each affinity instance in the mpidr. If this function does
-	 * not return successfully then either the mpidr or the affinity
-	 * levels are incorrect.
-	 */
-	rc = psci_get_aff_map_nodes(target_cpu,
-				    start_afflvl,
-				    end_afflvl,
-				    target_cpu_nodes);
-	assert(rc == PSCI_E_SUCCESS);
-
-	/*
-	 * This function acquires the lock corresponding to each affinity
-	 * level so that by the time all locks are taken, the system topology
-	 * is snapshotted and state management can be done safely.
-	 */
-	psci_acquire_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  target_cpu_nodes);
-
-	/*
-	 * Generic management: Ensure that the cpu is off to be
-	 * turned on.
-	 */
-	rc = cpu_on_validate_state(psci_get_state(
-				    target_cpu_nodes[MPIDR_AFFLVL0]));
-	if (rc != PSCI_E_SUCCESS)
-		goto exit;
-
-	/*
-	 * Call the cpu on handler registered by the Secure Payload Dispatcher
-	 * to let it do any bookkeeping. If the handler encounters an error, it's
-	 * expected to assert within.
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_on)
-		psci_spd_pm->svc_on(target_cpu);
-
-	/* Perform generic, architecture and platform specific handling. */
-	rc = psci_call_on_handlers(target_cpu_nodes,
-				   start_afflvl,
-				   end_afflvl,
-				   target_cpu);
-
-	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-	/*
-	 * This function updates the state of each affinity instance
-	 * corresponding to the mpidr in the range of affinity levels
-	 * specified.
-	 */
-	if (rc == PSCI_E_SUCCESS) {
-		psci_do_afflvl_state_mgmt(start_afflvl,
-					  end_afflvl,
-					  target_cpu_nodes,
-					  PSCI_STATE_ON_PENDING);
-
-		/*
-		 * Store the re-entry information for the non-secure world.
-		 */
-		cm_init_context(target_cpu, ep);
-	}
-
-exit:
-	/*
-	 * This loop releases the lock corresponding to each affinity level
-	 * in the reverse order to which they were acquired.
-	 */
-	psci_release_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  target_cpu_nodes);
-
-	return rc;
-}
-
-/*******************************************************************************
- * The following functions finish an earlier affinity power on request. They
- * are called by the common finisher routine in psci_common.c.
- ******************************************************************************/
-static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
-{
-	unsigned int plat_state, state;
-
-	assert(cpu_node->level == MPIDR_AFFLVL0);
-
-	/* Ensure we have been explicitly woken up by another cpu */
-	state = psci_get_state(cpu_node);
-	assert(state == PSCI_STATE_ON_PENDING);
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * for this cpu e.g. enabling the gic or zeroing the mailbox
-	 * register. The actual state of this cpu has already been
-	 * changed.
-	 */
-
-	/* Get the physical state of this cpu */
-	plat_state = get_phys_state(state);
-	psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
-							 plat_state);
-
-	/*
-	 * Arch. management: Enable data cache and manage stack memory
-	 */
-	psci_do_pwrup_cache_maintenance();
-
-	/*
-	 * All the platform specific actions for turning this cpu
-	 * on have completed. Perform enough arch. initialization
-	 * to run in the non-secure address space.
-	 */
-	bl31_arch_setup();
-
-	/*
-	 * Call the cpu on finish handler registered by the Secure Payload
-	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
-	 * error, it's expected to assert within.
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
-		psci_spd_pm->svc_on_finish(0);
-
-	/*
-	 * Generic management: Now we just need to retrieve the
-	 * information that we had stashed away during the cpu_on
-	 * call to set this cpu on its way.
-	 */
-	cm_prepare_el3_exit(NON_SECURE);
-
-	/* Clean caches before re-entering normal world */
-	dcsw_op_louis(DCCSW);
-}
-
-static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
-{
-	unsigned int plat_state;
-
-	assert(cluster_node->level == MPIDR_AFFLVL1);
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * as per the old state of the cluster e.g. enabling
-	 * coherency at the interconnect depends upon the state with
-	 * which this cluster was powered up. If anything goes wrong
-	 * then assert as there is no way to recover from this
-	 * situation.
-	 */
-	plat_state = psci_get_phys_state(cluster_node);
-	psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
-						 plat_state);
-}
-
-
-static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
-{
-	unsigned int plat_state;
-
-	/* Cannot go beyond this affinity level */
-	assert(system_node->level == MPIDR_AFFLVL2);
-
-	/*
-	 * Currently, there are no architectural actions to perform
-	 * at the system level.
-	 */
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * as per the old state of the cluster e.g. enabling
-	 * coherency at the interconnect depends upon the state with
-	 * which this cluster was powered up. If anything goes wrong
-	 * then assert as there is no way to recover from this
-	 * situation.
-	 */
-	plat_state = psci_get_phys_state(system_node);
-	psci_plat_pm_ops->affinst_on_finish(system_node->level,
-						   plat_state);
-}
-
-const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
-	psci_afflvl0_on_finish,
-	psci_afflvl1_on_finish,
-	psci_afflvl2_on_finish,
-};
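
For orientation, the CPU_ON path removed above reduces to the following self-contained sketch: stub state, a stub per-level handler and hypothetical names, not the firmware code itself, but the same validate / descend-through-levels / mark-ON_PENDING ordering.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the affinity map machinery (hypothetical names). */
enum { AFFLVL0, AFFLVL1, AFFLVL2 };
enum { STATE_OFF, STATE_ON_PENDING, STATE_ON };

static int target_state = STATE_OFF;

/* Mirrors cpu_on_validate_state(): only an OFF cpu may be turned on. */
static int validate_off(int state)
{
	if (state == STATE_ON)
		return -1;		/* already on */
	if (state == STATE_ON_PENDING)
		return -2;		/* on pending */
	return 0;
}

/* Stand-in for the per-level affinst_on() platform hook. */
static int on_handler(int level)
{
	printf("powering on affinity level %d\n", level);
	return 0;
}

static int afflvl_on(void)
{
	int rc, level;

	/* Locks for all affected levels would be taken here (omitted). */

	rc = validate_off(target_state);
	if (rc)
		return rc;

	/* Handlers run from the highest affinity level down to the cpu. */
	for (level = AFFLVL2; level >= AFFLVL0; level--) {
		rc = on_handler(level);
		if (rc)
			return rc;
	}

	/* Only on success is the target marked ON_PENDING. */
	target_state = STATE_ON_PENDING;

	/* Locks released in reverse order (omitted). */
	return 0;
}

int main(void)
{
	assert(afflvl_on() == 0);
	assert(afflvl_on() == -2);	/* a second request sees ON_PENDING */
	return 0;
}
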
diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c
deleted file mode 100644
index dad0cef..0000000
--- a/services/std_svc/psci/psci_afflvl_suspend.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <bl_common.h>
-#include <arch.h>
-#include <arch_helpers.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <cpu_data.h>
-#include <debug.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node);
-
-/*******************************************************************************
- * This function saves the power state parameter passed in the current PSCI
- * cpu_suspend call in the per-cpu data array.
- ******************************************************************************/
-void psci_set_suspend_power_state(unsigned int power_state)
-{
-	set_cpu_data(psci_svc_cpu_data.power_state, power_state);
-	flush_cpu_data(psci_svc_cpu_data.power_state);
-}
-
-/*******************************************************************************
- * This function gets the affinity level up to which the current cpu could be
- * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
- * power state is invalid.
- ******************************************************************************/
-int psci_get_suspend_afflvl()
-{
-	unsigned int power_state;
-
-	power_state = get_cpu_data(psci_svc_cpu_data.power_state);
-
-	return ((power_state == PSCI_INVALID_DATA) ?
-		power_state : psci_get_pstate_afflvl(power_state));
-}
-
-/*******************************************************************************
- * This function gets the state id of the current cpu from the power state
- * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
- * power state saved is invalid.
- ******************************************************************************/
-int psci_get_suspend_stateid()
-{
-	unsigned int power_state;
-
-	power_state = get_cpu_data(psci_svc_cpu_data.power_state);
-
-	return ((power_state == PSCI_INVALID_DATA) ?
-		power_state : psci_get_pstate_id(power_state));
-}
-
-/*******************************************************************************
- * This function gets the state id of the cpu specified by the 'mpidr' parameter
- * from the power state parameter saved in the per-cpu data array. Returns
- * PSCI_INVALID_DATA if the power state saved is invalid.
- ******************************************************************************/
-int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
-{
-	unsigned int power_state;
-
-	power_state = get_cpu_data_by_mpidr(mpidr,
-					    psci_svc_cpu_data.power_state);
-
-	return ((power_state == PSCI_INVALID_DATA) ?
-		power_state : psci_get_pstate_id(power_state));
-}
-
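
The three getters above rely on psci_get_pstate_afflvl()/psci_get_pstate_id() to unpack the power_state argument of CPU_SUSPEND. A minimal sketch of that unpacking, using the PSCI v0.2 field layout (StateID in bits[15:0], StateType in bit[16], AffinityLevel in bits[25:24]); the helper and macro names below are illustrative, not the firmware's.

#include <assert.h>
#include <stdint.h>

/* PSCI v0.2 power_state layout: StateID[15:0], StateType[16], AffinityLevel[25:24]. */
#define PSTATE_ID_MASK		0xffffu
#define PSTATE_TYPE_SHIFT	16
#define PSTATE_AFFLVL_SHIFT	24
#define PSTATE_AFFLVL_MASK	0x3u

static unsigned int pstate_id(uint32_t power_state)
{
	return power_state & PSTATE_ID_MASK;
}

static unsigned int pstate_type(uint32_t power_state)
{
	return (power_state >> PSTATE_TYPE_SHIFT) & 1u;	/* 0: standby, 1: powerdown */
}

static unsigned int pstate_afflvl(uint32_t power_state)
{
	return (power_state >> PSTATE_AFFLVL_SHIFT) & PSTATE_AFFLVL_MASK;
}

int main(void)
{
	/* Example: powerdown request targeting affinity level 1, state id 0. */
	uint32_t ps = (1u << PSTATE_AFFLVL_SHIFT) | (1u << PSTATE_TYPE_SHIFT);

	assert(pstate_afflvl(ps) == 1 && pstate_type(ps) == 1 && pstate_id(ps) == 0);
	return 0;
}
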
-/*******************************************************************************
- * The next three functions implement a handler for each supported affinity
- * level which is called when that affinity level is about to be suspended.
- ******************************************************************************/
-static void psci_afflvl0_suspend(aff_map_node_t *cpu_node)
-{
-	unsigned long psci_entrypoint;
-
-	/* Sanity check to safeguard against data corruption */
-	assert(cpu_node->level == MPIDR_AFFLVL0);
-
-	/* Set the secure world (EL3) re-entry point after BL1 */
-	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-
-	/*
-	 * Arch. management. Perform the necessary steps to flush all
-	 * cpu caches.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
-
-	/*
-	 * Plat. management: Allow the platform to perform the
-	 * necessary actions to turn off this cpu e.g. set the
-	 * platform defined mailbox with the psci entrypoint,
-	 * program the power controller etc.
-	 */
-	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
-						 cpu_node->level,
-						 psci_get_phys_state(cpu_node));
-}
-
-static void psci_afflvl1_suspend(aff_map_node_t *cluster_node)
-{
-	unsigned int plat_state;
-	unsigned long psci_entrypoint;
-
-	/* Sanity check the cluster level */
-	assert(cluster_node->level == MPIDR_AFFLVL1);
-
-	/*
-	 * Arch. management: Flush all levels of caches to PoC if the
-	 * cluster is to be shutdown.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
-
-	/*
-	 * Plat. management: Allow the platform to do its cluster specific
-	 * bookkeeping e.g. turn off interconnect coherency, program the power
-	 * controller etc. Sending the psci entrypoint is currently redundant
-	 * beyond affinity level 0 but one never knows what a platform might
-	 * do. It also allows us to keep the platform handler prototype the
-	 * same.
-	 */
-	plat_state = psci_get_phys_state(cluster_node);
-	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
-						 cluster_node->level,
-						 plat_state);
-}
-
-
-static void psci_afflvl2_suspend(aff_map_node_t *system_node)
-{
-	unsigned int plat_state;
-	unsigned long psci_entrypoint;
-
-	/* Cannot go beyond this */
-	assert(system_node->level == MPIDR_AFFLVL2);
-
-	/*
-	 * Keep the physical state of the system handy to decide what
-	 * action needs to be taken
-	 */
-	plat_state = psci_get_phys_state(system_node);
-
-	/*
-	 * Arch. management: Flush all levels of caches to PoC if the
-	 * system is to be shutdown.
-	 */
-	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
-
-	/*
-	 * Plat. management: Allow the platform to do its bookkeeping
-	 * at this affinity level.
-	 */
-
-	/*
-	 * Sending the psci entrypoint is currently redundant
-	 * beyond affinity level 0 but one never knows what a
-	 * platform might do. It also allows us to keep the
-	 * platform handler prototype the same.
-	 */
-	plat_state = psci_get_phys_state(system_node);
-	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
-						 system_node->level,
-						 plat_state);
-}
-
-static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
-	psci_afflvl0_suspend,
-	psci_afflvl1_suspend,
-	psci_afflvl2_suspend,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the suspend handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
-				      int start_afflvl,
-				      int end_afflvl)
-{
-	int level;
-	aff_map_node_t *node;
-
-	for (level = start_afflvl; level <= end_afflvl; level++) {
-		node = mpidr_nodes[level];
-		if (node == NULL)
-			continue;
-
-		psci_afflvl_suspend_handlers[level](node);
-	}
-}
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to suspend its execution.
- * It is assumed that along with turning the cpu off, higher affinity levels
- * up to the target affinity level will be turned off as well. It traverses
- * through all the affinity levels performing generic, architectural, platform
- * setup and state management e.g. for a cluster that's to be suspended, it will
- * call the platform specific code which will disable coherency at the
- * interconnect level if the cpu is the last in the cluster. For a cpu it could
- * mean programming the power controller etc.
- *
- * The state of all the relevant affinity levels is changed prior to calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is about to enter.
- *
- * The affinity level specific handlers are called in ascending order i.e. from
- * the lowest to the highest affinity level implemented by the platform because
- * to turn off affinity level X it is necessary to turn off affinity level X - 1
- * first.
- *
- * All the required parameter checks are performed at the beginning. After
- * the state transition has been done, no further error is expected and it
- * is not possible to undo any of the actions taken beyond that point.
- ******************************************************************************/
-void psci_afflvl_suspend(entry_point_info_t *ep,
-			int start_afflvl,
-			int end_afflvl)
-{
-	int skip_wfi = 0;
-	mpidr_aff_map_nodes_t mpidr_nodes;
-	unsigned int max_phys_off_afflvl;
-
-	/*
-	 * This function must only be called on platforms where the
-	 * CPU_SUSPEND platform hooks have been implemented.
-	 */
-	assert(psci_plat_pm_ops->affinst_suspend &&
-			psci_plat_pm_ops->affinst_suspend_finish);
-
-	/*
-	 * Collect the pointers to the nodes in the topology tree for
-	 * each affinity instance in the mpidr. If this function does
-	 * not return successfully then either the mpidr or the affinity
-	 * levels are incorrect. Either way, this is an internal TF error,
-	 * therefore assert.
-	 */
-	if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
-		   start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
-		assert(0);
-
-	/*
-	 * This function acquires the lock corresponding to each affinity
-	 * level so that by the time all locks are taken, the system topology
-	 * is snapshotted and state management can be done safely.
-	 */
-	psci_acquire_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-
-	/*
-	 * We check if there are any pending interrupts after the delay
-	 * introduced by lock contention to increase the chances of early
-	 * detection that a wake-up interrupt has fired.
-	 */
-	if (read_isr_el1()) {
-		skip_wfi = 1;
-		goto exit;
-	}
-
-	/*
-	 * Call the cpu suspend handler registered by the Secure Payload
-	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
-	 * error, it's expected to assert within.
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_suspend)
-		psci_spd_pm->svc_suspend(0);
-
-	/*
-	 * This function updates the state of each affinity instance
-	 * corresponding to the mpidr in the range of affinity levels
-	 * specified.
-	 */
-	psci_do_afflvl_state_mgmt(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes,
-				  PSCI_STATE_SUSPEND);
-
-	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
-							    end_afflvl,
-							    mpidr_nodes);
-	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
-
-	/* Stash the highest affinity level that will be turned off */
-	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
-
-	/*
-	 * Store the re-entry information for the non-secure world.
-	 */
-	cm_init_context(read_mpidr_el1(), ep);
-
-	/* Perform generic, architecture and platform specific handling */
-	psci_call_suspend_handlers(mpidr_nodes,
-					start_afflvl,
-					end_afflvl);
-
-	/*
-	 * Invalidate the entry for the highest affinity level stashed earlier.
-	 * This ensures that any reads of this variable outside the power
-	 * up/down sequences return PSCI_INVALID_DATA.
-	 */
-	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
-
-exit:
-	/*
-	 * Release the locks corresponding to each affinity level in the
-	 * reverse order to which they were acquired.
-	 */
-	psci_release_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-	if (!skip_wfi)
-		psci_power_down_wfi();
-}
-
-/*******************************************************************************
- * The following functions finish an earlier affinity suspend request. They
- * are called by the common finisher routine in psci_common.c.
- ******************************************************************************/
-static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
-{
-	unsigned int plat_state, state;
-	int32_t suspend_level;
-	uint64_t counter_freq;
-
-	assert(cpu_node->level == MPIDR_AFFLVL0);
-
-	/* Ensure we have been woken up from a suspended state */
-	state = psci_get_state(cpu_node);
-	assert(state == PSCI_STATE_SUSPEND);
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * before we change the state of the cpu e.g. enabling the
-	 * gic or zeroing the mailbox register. If anything goes
-	 * wrong then assert as there is no way to recover from this
-	 * situation.
-	 */
-
-	/* Get the physical state of this cpu */
-	plat_state = get_phys_state(state);
-	psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
-							      plat_state);
-
-	/*
-	 * Arch. management: Enable the data cache, manage stack memory and
-	 * restore the stashed EL3 architectural context from the 'cpu_context'
-	 * structure for this cpu.
-	 */
-	psci_do_pwrup_cache_maintenance();
-
-	/* Re-init the cntfrq_el0 register */
-	counter_freq = plat_get_syscnt_freq();
-	write_cntfrq_el0(counter_freq);
-
-	/*
-	 * Call the cpu suspend finish handler registered by the Secure Payload
-	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
-	 * error, it's expected to assert within.
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
-		suspend_level = psci_get_suspend_afflvl();
-		assert (suspend_level != PSCI_INVALID_DATA);
-		psci_spd_pm->svc_suspend_finish(suspend_level);
-	}
-
-	/* Invalidate the suspend context for the node */
-	psci_set_suspend_power_state(PSCI_INVALID_DATA);
-
-	/*
-	 * Generic management: Now we just need to retrieve the
-	 * information that we had stashed away during the suspend
-	 * call to set this cpu on its way.
-	 */
-	cm_prepare_el3_exit(NON_SECURE);
-
-	/* Clean caches before re-entering normal world */
-	dcsw_op_louis(DCCSW);
-}
-
-static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
-{
-	unsigned int plat_state;
-
-	assert(cluster_node->level == MPIDR_AFFLVL1);
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * as per the old state of the cluster e.g. enabling
-	 * coherency at the interconnect depends upon the state with
-	 * which this cluster was powered up. If anything goes wrong
-	 * then assert as there is no way to recover from this
-	 * situation.
-	 */
-
-	/* Get the physical state of this cpu */
-	plat_state = psci_get_phys_state(cluster_node);
-	psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
-						      plat_state);
-}
-
-
-static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
-{
-	unsigned int plat_state;
-
-	/* Cannot go beyond this affinity level */
-	assert(system_node->level == MPIDR_AFFLVL2);
-
-	/*
-	 * Currently, there are no architectural actions to perform
-	 * at the system level.
-	 */
-
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * as per the old state of the cluster e.g. enabling
-	 * coherency at the interconnect depends upon the state with
-	 * which this cluster was powered up. If anything goes wrong
-	 * then assert as there is no way to recover from this
-	 * situation.
-	 */
-
-	/* Get the physical state of the system */
-	plat_state = psci_get_phys_state(system_node);
-	psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
-						      plat_state);
-}
-
-const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
-	psci_afflvl0_suspend_finish,
-	psci_afflvl1_suspend_finish,
-	psci_afflvl2_suspend_finish,
-};
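
A recurring rule in the file above and in psci_afflvl_on.c is the traversal direction: power-down handlers walk the affinity levels upwards (cpu before cluster), while power-up/finish handlers walk them downwards (cluster before cpu). The two loop shapes in isolation, as an illustrative sketch:

#include <stdio.h>

#define MAX_AFFLVL 2

static void run(const char *op, int level)
{
	printf("%s handler for affinity level %d\n", op, level);
}

int main(void)
{
	int level;

	/* Suspend/off: level X - 1 must go down before level X, so ascend. */
	for (level = 0; level <= MAX_AFFLVL; level++)
		run("suspend", level);

	/* On/resume finish: level X + 1 must come up before level X, so descend. */
	for (level = MAX_AFFLVL; level >= 0; level--)
		run("on-finish", level);

	return 0;
}
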
diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c
deleted file mode 100644
index ad163ba..0000000
--- a/services/std_svc/psci/psci_common.c
+++ /dev/null
@@ -1,655 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <debug.h>
-#include <platform.h>
-#include <string.h>
-#include "psci_private.h"
-
-/*
- * SPD power management operations, expected to be supplied by the registered
- * SPD on successful SP initialization
- */
-const spd_pm_ops_t *psci_spd_pm;
-
-/*******************************************************************************
- * Grand array that holds the platform's topology information for state
- * management of affinity instances. Each node (aff_map_node) in the array
- * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
- ******************************************************************************/
-aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]
-#if USE_COHERENT_MEM
-__attribute__ ((section("tzfw_coherent_mem")))
-#endif
-;
-
-/*******************************************************************************
- * Pointer to functions exported by the platform to complete power mgmt. ops
- ******************************************************************************/
-const plat_pm_ops_t *psci_plat_pm_ops;
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It iterates through the nodes to find the highest
- * affinity level which is marked as physically powered off.
- ******************************************************************************/
-uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
-				       uint32_t end_afflvl,
-				       aff_map_node_t *mpidr_nodes[])
-{
-	uint32_t max_afflvl = PSCI_INVALID_DATA;
-
-	for (; start_afflvl <= end_afflvl; start_afflvl++) {
-		if (mpidr_nodes[start_afflvl] == NULL)
-			continue;
-
-		if (psci_get_phys_state(mpidr_nodes[start_afflvl]) ==
-		    PSCI_STATE_OFF)
-			max_afflvl = start_afflvl;
-	}
-
-	return max_afflvl;
-}
-
-/*******************************************************************************
- * This function verifies that all the other cores in the system have been
- * turned OFF and the current CPU is the last running CPU in the system.
- * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
- * otherwise.
- ******************************************************************************/
-unsigned int psci_is_last_on_cpu(void)
-{
-	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
-	unsigned int i;
-
-	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
-			i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {
-
-		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);
-
-		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
-			continue;
-
-		if (psci_aff_map[i].mpidr == mpidr) {
-			assert(psci_get_state(&psci_aff_map[i])
-					== PSCI_STATE_ON);
-			continue;
-		}
-
-		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
-			return 0;
-	}
-
-	return 1;
-}
-
-/*******************************************************************************
- * This function saves the highest affinity level which is in OFF state. The
- * affinity instance with which the level is associated is determined by the
- * caller.
- ******************************************************************************/
-void psci_set_max_phys_off_afflvl(uint32_t afflvl)
-{
-	set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl);
-
-	/*
-	 * Ensure that the saved value is flushed to main memory and any
-	 * speculatively pre-fetched stale copies are invalidated from the
-	 * caches of other cpus in the same coherency domain. This ensures that
-	 * the value can be safely read irrespective of the state of the data
-	 * cache.
-	 */
-	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
-}
-
-/*******************************************************************************
- * This function reads the saved highest affinity level which is in OFF
- * state. The affinity instance with which the level is associated is determined
- * by the caller.
- ******************************************************************************/
-uint32_t psci_get_max_phys_off_afflvl(void)
-{
-	/*
-	 * Ensure that the last update of this value in this cpu's cache is
-	 * flushed to main memory and any speculatively pre-fetched stale copies
-	 * are invalidated from the caches of other cpus in the same coherency
-	 * domain. This ensures that the value is read correctly from main
-	 * memory even if it was written before the data cache was enabled.
-	 */
-	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
-	return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
-}
-
-/*******************************************************************************
- * Routine to return the maximum affinity level to traverse to after a cpu has
- * been physically powered up. It is expected to be called immediately after
- * reset from assembler code.
- ******************************************************************************/
-int get_power_on_target_afflvl()
-{
-	int afflvl;
-
-#if DEBUG
-	unsigned int state;
-	aff_map_node_t *node;
-
-	/* Retrieve our node from the topology tree */
-	node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
-				     MPIDR_AFFLVL0);
-	assert(node);
-
-	/*
-	 * Sanity check the state of the cpu. It should be either suspend or "on
-	 * pending"
-	 */
-	state = psci_get_state(node);
-	assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
-#endif
-
-	/*
-	 * Assume that this cpu was suspended and retrieve its target affinity
-	 * level. If it is invalid then it could only have been turned off
-	 * earlier. get_max_afflvl() will return the highest affinity level a
-	 * cpu can be turned off to.
-	 */
-	afflvl = psci_get_suspend_afflvl();
-	if (afflvl == PSCI_INVALID_DATA)
-		afflvl = get_max_afflvl();
-	return afflvl;
-}
-
-/*******************************************************************************
- * Simple routine to retrieve the maximum affinity level supported by the
- * platform and check that it makes sense.
- ******************************************************************************/
-int get_max_afflvl(void)
-{
-	int aff_lvl;
-
-	aff_lvl = plat_get_max_afflvl();
-	assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
-
-	return aff_lvl;
-}
-
-/*******************************************************************************
- * Simple routine to set the id of an affinity instance at a given level in the
- * mpidr.
- ******************************************************************************/
-unsigned long mpidr_set_aff_inst(unsigned long mpidr,
-				 unsigned char aff_inst,
-				 int aff_lvl)
-{
-	unsigned long aff_shift;
-
-	assert(aff_lvl <= MPIDR_AFFLVL3);
-
-	/*
-	 * Decide the number of bits to shift by depending upon
-	 * the affinity level
-	 */
-	aff_shift = get_afflvl_shift(aff_lvl);
-
-	/* Clear the existing affinity instance & set the new one */
-	mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
-	mpidr |= aff_inst << aff_shift;
-
-	return mpidr;
-}
-
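
As a usage note for the routine above: with the architectural MPIDR layout (Aff0 in bits[7:0], Aff1 in [15:8], Aff2 in [23:16], Aff3 in [39:32]) setting affinity instances composes as in this sketch. The constants and helper names are spelled out locally for illustration, not the firmware macros.

#include <assert.h>

#define AFFLVL_MASK	0xffUL

/* Shift for each affinity field: Aff0 @0, Aff1 @8, Aff2 @16, Aff3 @32
 * (Aff3 assumes a 64-bit unsigned long). */
static unsigned long afflvl_shift(int lvl)
{
	return (lvl == 3) ? 32 : (unsigned long)lvl * 8;
}

static unsigned long set_aff_inst(unsigned long mpidr, unsigned char inst, int lvl)
{
	unsigned long shift = afflvl_shift(lvl);

	/* Clear the existing affinity instance, then set the new one. */
	mpidr &= ~(AFFLVL_MASK << shift);
	return mpidr | ((unsigned long)inst << shift);
}

int main(void)
{
	/* Build the MPIDR of cpu 2 in cluster 1: Aff1 = 1, Aff0 = 2. */
	unsigned long mpidr = set_aff_inst(set_aff_inst(0, 2, 0), 1, 1);

	assert(mpidr == 0x0102);
	return 0;
}
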
-/*******************************************************************************
- * This function sanity checks a range of affinity levels.
- ******************************************************************************/
-int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
-{
-	/* Sanity check the parameters passed */
-	if (end_afflvl > get_max_afflvl())
-		return PSCI_E_INVALID_PARAMS;
-
-	if (start_afflvl < MPIDR_AFFLVL0)
-		return PSCI_E_INVALID_PARAMS;
-
-	if (end_afflvl < start_afflvl)
-		return PSCI_E_INVALID_PARAMS;
-
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr and the state which each node should transition
- * to. It updates the state of each node between the specified affinity levels.
- ******************************************************************************/
-void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
-			       uint32_t end_afflvl,
-			       aff_map_node_t *mpidr_nodes[],
-			       uint32_t state)
-{
-	uint32_t level;
-
-	for (level = start_afflvl; level <= end_afflvl; level++) {
-		if (mpidr_nodes[level] == NULL)
-			continue;
-		psci_set_state(mpidr_nodes[level], state);
-	}
-}
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It picks up locks for each affinity level bottom
- * up in the range specified.
- ******************************************************************************/
-void psci_acquire_afflvl_locks(int start_afflvl,
-			       int end_afflvl,
-			       aff_map_node_t *mpidr_nodes[])
-{
-	int level;
-
-	for (level = start_afflvl; level <= end_afflvl; level++) {
-		if (mpidr_nodes[level] == NULL)
-			continue;
-
-		psci_lock_get(mpidr_nodes[level]);
-	}
-}
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It releases the lock for each affinity level top
- * down in the range specified.
- ******************************************************************************/
-void psci_release_afflvl_locks(int start_afflvl,
-			       int end_afflvl,
-			       aff_map_node_t *mpidr_nodes[])
-{
-	int level;
-
-	for (level = end_afflvl; level >= start_afflvl; level--) {
-		if (mpidr_nodes[level] == NULL)
-			continue;
-
-		psci_lock_release(mpidr_nodes[level]);
-	}
-}
-
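
The acquire/release pair above keeps a single global ordering: every caller locks bottom-up and unlocks top-down, so two cpus operating on overlapping affinity instances cannot deadlock. The same discipline, sketched with pthread mutexes standing in for psci_lock_get()/psci_lock_release() and one lock per level instead of per node (a simplification for illustration):

#include <pthread.h>
#include <stdio.h>

#define NUM_LEVELS 3

/* One lock per affinity level, shared by all cpus (simplified stand-in). */
static pthread_mutex_t level_lock[NUM_LEVELS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void acquire_afflvl_locks(int start, int end)
{
	/* Always bottom-up: every caller takes the locks in the same order. */
	for (int level = start; level <= end; level++)
		pthread_mutex_lock(&level_lock[level]);
}

static void release_afflvl_locks(int start, int end)
{
	/* Release in the reverse (top-down) order. */
	for (int level = end; level >= start; level--)
		pthread_mutex_unlock(&level_lock[level]);
}

static void *power_op(void *arg)
{
	acquire_afflvl_locks(0, NUM_LEVELS - 1);
	printf("cpu %ld: topology snapshot is stable here\n", (long)arg);
	release_afflvl_locks(0, NUM_LEVELS - 1);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, power_op, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
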
-/*******************************************************************************
- * Simple routine to determine whether an affinity instance at a given level
- * in an mpidr exists or not.
- ******************************************************************************/
-int psci_validate_mpidr(unsigned long mpidr, int level)
-{
-	aff_map_node_t *node;
-
-	node = psci_get_aff_map_node(mpidr, level);
-	if (node && (node->state & PSCI_AFF_PRESENT))
-		return PSCI_E_SUCCESS;
-	else
-		return PSCI_E_INVALID_PARAMS;
-}
-
-/*******************************************************************************
- * This function determines the full entrypoint information for the requested
- * PSCI entrypoint on power on/resume and returns it.
- ******************************************************************************/
-int psci_get_ns_ep_info(entry_point_info_t *ep,
-		       uint64_t entrypoint, uint64_t context_id)
-{
-	uint32_t ep_attr, mode, sctlr, daif, ee;
-	uint32_t ns_scr_el3 = read_scr_el3();
-	uint32_t ns_sctlr_el1 = read_sctlr_el1();
-
-	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
-	ee = 0;
-
-	ep_attr = NON_SECURE | EP_ST_DISABLE;
-	if (sctlr & SCTLR_EE_BIT) {
-		ep_attr |= EP_EE_BIG;
-		ee = 1;
-	}
-	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
-
-	ep->pc = entrypoint;
-	memset(&ep->args, 0, sizeof(ep->args));
-	ep->args.arg0 = context_id;
-
-	/*
-	 * Figure out whether the cpu enters the non-secure address space
-	 * in aarch32 or aarch64
-	 */
-	if (ns_scr_el3 & SCR_RW_BIT) {
-
-		/*
-		 * Check whether a Thumb entry point has been provided for an
-		 * aarch64 EL
-		 */
-		if (entrypoint & 0x1)
-			return PSCI_E_INVALID_PARAMS;
-
-		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
-
-		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-	} else {
-
-		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
-
-		/*
-		 * TODO: Choose async. exception bits if HYP mode is not
-		 * implemented according to the values of SCR.{AW, FW} bits
-		 */
-		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
-
-		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
-	}
-
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * This function takes a pointer to an affinity node in the topology tree and
- * returns its state. State of a non-leaf node needs to be calculated.
- ******************************************************************************/
-unsigned short psci_get_state(aff_map_node_t *node)
-{
-#if !USE_COHERENT_MEM
-	flush_dcache_range((uint64_t) node, sizeof(*node));
-#endif
-
-	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
-
-	/* A cpu node just contains the state which can be directly returned */
-	if (node->level == MPIDR_AFFLVL0)
-		return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
-
-	/*
-	 * For an affinity level higher than a cpu, the state has to be
-	 * calculated. It depends upon the value of the reference count
-	 * which is managed by each node at the next lower affinity level
-	 * e.g. for a cluster, each cpu increments/decrements the reference
-	 * count. If the reference count is 0 then the affinity level is
-	 * OFF else ON.
-	 */
-	if (node->ref_count)
-		return PSCI_STATE_ON;
-	else
-		return PSCI_STATE_OFF;
-}
-
-/*******************************************************************************
- * This function takes a pointer to an affinity node in the topology tree and
- * a target state. State of a non-leaf node needs to be converted to a reference
- * count. State of a leaf node can be set directly.
- ******************************************************************************/
-void psci_set_state(aff_map_node_t *node, unsigned short state)
-{
-	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
-
-	/*
-	 * For an affinity level higher than a cpu, the state is used
-	 * to decide whether the reference count is incremented or
-	 * decremented. Entry into the ON_PENDING state does not have
-	 * any effect.
-	 */
-	if (node->level > MPIDR_AFFLVL0) {
-		switch (state) {
-		case PSCI_STATE_ON:
-			node->ref_count++;
-			break;
-		case PSCI_STATE_OFF:
-		case PSCI_STATE_SUSPEND:
-			node->ref_count--;
-			break;
-		case PSCI_STATE_ON_PENDING:
-			/*
-			 * An affinity level higher than a cpu will not undergo
-			 * a state change when it is about to be turned on
-			 */
-			return;
-		default:
-			assert(0);
-		}
-	} else {
-		node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
-		node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
-	}
-
-#if !USE_COHERENT_MEM
-	flush_dcache_range((uint64_t) node, sizeof(*node));
-#endif
-}
-
-/*******************************************************************************
- * An affinity level could be on, on_pending, suspended or off. These are the
- * logical states it can be in. Physically, it is either off or on. When it is
- * in the on_pending state it is about to be turned on, but it is not possible
- * to tell whether that has actually happened yet. So we err on the side of
- * caution and treat the affinity level as being turned off.
- ******************************************************************************/
-unsigned short psci_get_phys_state(aff_map_node_t *node)
-{
-	unsigned int state;
-
-	state = psci_get_state(node);
-	return get_phys_state(state);
-}
-
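
To make the two functions above concrete: a leaf (cpu) node stores its state directly, a non-leaf node derives ON/OFF from the reference count managed by the nodes below it, and anything that is not definitely ON is reported as physically OFF. A reduced sketch with simplified types and hypothetical names:

#include <assert.h>

enum state { ST_ON, ST_OFF, ST_ON_PENDING, ST_SUSPEND };

struct node {
	int level;		/* 0 = cpu, >0 = cluster or higher */
	unsigned int ref_count;	/* number of powered-on children (non-leaf only) */
	enum state leaf_state;	/* stored state (leaf only) */
};

static enum state get_state(const struct node *n)
{
	if (n->level == 0)
		return n->leaf_state;
	/* Non-leaf: ON while at least one child holds a reference. */
	return n->ref_count ? ST_ON : ST_OFF;
}

static enum state get_phys_state(const struct node *n)
{
	enum state s = get_state(n);

	/* ON_PENDING and SUSPEND may not have taken physical effect,
	 * so err on the side of caution and report OFF. */
	return (s == ST_ON) ? ST_ON : ST_OFF;
}

int main(void)
{
	struct node cpu = { .level = 0, .leaf_state = ST_ON_PENDING };
	struct node cluster = { .level = 1, .ref_count = 0 };

	assert(get_phys_state(&cpu) == ST_OFF);		/* not physically on yet */
	assert(get_state(&cluster) == ST_OFF);		/* no powered-on children */
	return 0;
}
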
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the physical power on handler for the corresponding
- * affinity levels
- ******************************************************************************/
-static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[],
-				       int start_afflvl,
-				       int end_afflvl,
-				       afflvl_power_on_finisher_t *pon_handlers)
-{
-	int level;
-	aff_map_node_t *node;
-
-	for (level = end_afflvl; level >= start_afflvl; level--) {
-		node = mpidr_nodes[level];
-		if (node == NULL)
-			continue;
-
-		/*
-		 * If we run into any trouble while powering up an
-		 * affinity instance, there is no recovery path, so
-		 * the handler itself is expected to deal with the
-		 * error (e.g. by asserting).
-		 */
-		pon_handlers[level](node);
-	}
-}
-
-/*******************************************************************************
- * Generic handler which is called when a cpu is physically powered on. It
- * traverses through all the affinity levels performing generic, architectural,
- * platform setup and state management e.g. for a cluster that's been powered
- * on, it will call the platform specific code which will enable coherency at
- * the interconnect level. For a cpu it could mean turning on the MMU etc.
- *
- * The state of all the relevant affinity levels is changed after calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is exiting from.
- *
- * The affinity level specific handlers are called in descending order i.e. from
- * the highest to the lowest affinity level implemented by the platform because
- * to turn on affinity level X it is necessary to turn on affinity level X + 1
- * first.
- ******************************************************************************/
-void psci_afflvl_power_on_finish(int start_afflvl,
-				 int end_afflvl,
-				 afflvl_power_on_finisher_t *pon_handlers)
-{
-	mpidr_aff_map_nodes_t mpidr_nodes;
-	int rc;
-	unsigned int max_phys_off_afflvl;
-
-
-	/*
-	 * Collect the pointers to the nodes in the topology tree for
-	 * each affinity instance in the mpidr. If this function does
-	 * not return successfully then either the mpidr or the affinity
-	 * levels are incorrect. Either case is an irrecoverable error.
-	 */
-	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
-				    start_afflvl,
-				    end_afflvl,
-				    mpidr_nodes);
-	if (rc != PSCI_E_SUCCESS)
-		panic();
-
-	/*
-	 * This function acquires the lock corresponding to each affinity
-	 * level so that by the time all locks are taken, the system topology
-	 * is snapshotted and state management can be done safely.
-	 */
-	psci_acquire_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-
-	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
-							    end_afflvl,
-							    mpidr_nodes);
-	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
-
-	/*
-	 * Stash the highest affinity level that will come out of the OFF or
-	 * SUSPEND states.
-	 */
-	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
-
-	/* Perform generic, architecture and platform specific handling */
-	psci_call_power_on_handlers(mpidr_nodes,
-					 start_afflvl,
-					 end_afflvl,
-					 pon_handlers);
-
-	/*
-	 * This function updates the state of each affinity instance
-	 * corresponding to the mpidr in the range of affinity levels
-	 * specified.
-	 */
-	psci_do_afflvl_state_mgmt(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes,
-				  PSCI_STATE_ON);
-
-	/*
-	 * Invalidate the entry for the highest affinity level stashed earlier.
-	 * This ensures that any reads of this variable outside the power
-	 * up/down sequences return PSCI_INVALID_DATA
-	 */
-	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
-
-	/*
-	 * This loop releases the lock corresponding to each affinity level
-	 * in the reverse order to which they were acquired.
-	 */
-	psci_release_afflvl_locks(start_afflvl,
-				  end_afflvl,
-				  mpidr_nodes);
-}
-
-/*******************************************************************************
- * This function initializes the set of hooks that PSCI invokes as part of power
- * management operation. The power management hooks are expected to be provided
- * by the SPD, after it finishes all its initialization
- ******************************************************************************/
-void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
-{
-	assert(pm);
-	psci_spd_pm = pm;
-
-	if (pm->svc_migrate)
-		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
-
-	if (pm->svc_migrate_info)
-		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
-				| define_psci_cap(PSCI_MIG_INFO_TYPE);
-}
-
-/*******************************************************************************
- * This function invokes the migrate info hook in the spd_pm_ops. It performs
- * the necessary return value validation. If the Secure Payload is UP and
- * migrate capable, it returns the mpidr of the CPU on which the Secure Payload
- * is resident through the mpidr parameter. Otherwise the value of the parameter
- * on return is undefined.
- ******************************************************************************/
-int psci_spd_migrate_info(uint64_t *mpidr)
-{
-	int rc;
-
-	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
-		return PSCI_E_NOT_SUPPORTED;
-
-	rc = psci_spd_pm->svc_migrate_info(mpidr);
-
-	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
-		|| rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
-
-	return rc;
-}
-
-
-/*******************************************************************************
- * This function prints the state of all affinity instances present in the
- * system
- ******************************************************************************/
-void psci_print_affinity_map(void)
-{
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-	aff_map_node_t *node;
-	unsigned int idx;
-	/* This array maps to the PSCI_STATE_X definitions in psci.h */
-	static const char *psci_state_str[] = {
-		"ON",
-		"OFF",
-		"ON_PENDING",
-		"SUSPEND"
-	};
-
-	INFO("PSCI Affinity Map:\n");
-	for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) {
-		node = &psci_aff_map[idx];
-		if (!(node->state & PSCI_AFF_PRESENT)) {
-			continue;
-		}
-		INFO("  AffInst: Level %u, MPID 0x%lx, State %s\n",
-				node->level, node->mpidr,
-				psci_state_str[psci_get_state(node)]);
-	}
-#endif
-}
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
deleted file mode 100644
index 3e67d34..0000000
--- a/services/std_svc/psci/psci_entry.S
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <psci.h>
-#include <xlat_tables.h>
-
-	.globl	psci_aff_on_finish_entry
-	.globl	psci_aff_suspend_finish_entry
-	.globl	psci_power_down_wfi
-
-	/* -----------------------------------------------------
-	 * This cpu has been physically powered up. Depending
-	 * upon whether it was resumed from suspend or simply
-	 * turned on, call the common power on finisher with
-	 * the handlers (chosen depending upon original state).
-	 * -----------------------------------------------------
-	 */
-func psci_aff_on_finish_entry
-	adr	x23, psci_afflvl_on_finishers
-	b	psci_aff_common_finish_entry
-
-psci_aff_suspend_finish_entry:
-	adr	x23, psci_afflvl_suspend_finishers
-
-psci_aff_common_finish_entry:
-#if !RESET_TO_BL31
-	/* ---------------------------------------------
-	 * Perform any processor specific actions which
-	 * undo or are in addition to the actions
-	 * performed by the reset handler in the BootROM
-	 * (BL1) e.g. cache, tlb invalidations, errata
-	 * workarounds etc.
-	 * ---------------------------------------------
-	 */
-	bl      reset_handler
-
-	/* ---------------------------------------------
-	 * Enable the instruction cache, stack pointer
-	 * and data access alignment checks.
-	 * It can be assumed that BL3-1 entrypoint code
-	 * will do this when RESET_TO_BL31 is set. The
-	 * same assumption cannot be made when another
-	 * boot loader executes before BL3-1 in the warm
-	 * boot path e.g. BL1.
-	 * ---------------------------------------------
-	 */
-	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
-	mrs	x0, sctlr_el3
-	orr	x0, x0, x1
-	msr	sctlr_el3, x0
-	isb
-#endif
-
-	/* ---------------------------------------------
-	 * Initialise the pcpu cache pointer for the CPU
-	 * ---------------------------------------------
-	 */
-	bl	init_cpu_data_ptr
-
-	/* ---------------------------------------------
-	 * Initialize the cpu_ops pointer.
-	 * ---------------------------------------------
-	 */
-	bl	init_cpu_ops
-
-	/* ---------------------------------------------
-	 * Set the exception vectors
-	 * ---------------------------------------------
-	 */
-	adr	x0, runtime_exceptions
-	msr	vbar_el3, x0
-	isb
-
-	/* ---------------------------------------------
-	 * Enable the SError interrupt now that the
-	 * exception vectors have been setup.
-	 * ---------------------------------------------
-	 */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	/* ---------------------------------------------
-	 * Use SP_EL0 for the C runtime stack.
-	 * ---------------------------------------------
-	 */
-	msr	spsel, #0
-
-	/* --------------------------------------------
-	 * Give ourselves a stack whose memory will be
-	 * marked as Normal-IS-WBWA when the MMU is
-	 * enabled.
-	 * --------------------------------------------
-	 */
-	mrs	x0, mpidr_el1
-	bl	platform_set_stack
-
-	/* --------------------------------------------
-	 * Enable the MMU with the DCache disabled. It
-	 * is safe to use stacks allocated in normal
-	 * memory as a result. All memory accesses are
-	 * marked nGnRnE when the MMU is disabled. So
-	 * all the stack writes will make it to memory.
-	 * All memory accesses are marked Non-cacheable
-	 * when the MMU is enabled but D$ is disabled.
-	 * So used stack memory is guaranteed to be
-	 * visible immediately after the MMU is enabled.
-	 * Enabling the DCache at the same time as the
-	 * MMU can lead to speculatively fetched and
-	 * possibly stale stack memory being read from
-	 * other caches. This can lead to coherency
-	 * issues.
-	 * --------------------------------------------
-	 */
-	mov	x0, #DISABLE_DCACHE
-	bl	bl31_plat_enable_mmu
-
-	/* ---------------------------------------------
-	 * Call the finishers starting from affinity
-	 * level 0.
-	 * ---------------------------------------------
-	 */
-	bl	get_power_on_target_afflvl
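-	/* ---------------------------------------------
-	 * x0 holds the target affinity level. Marshal
-	 * the arguments: x0 = start level
-	 * (MPIDR_AFFLVL0), x1 = end level, x2 = the
-	 * finisher table stashed in x23 on entry.
-	 * ---------------------------------------------
-	 */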
-	mov	x2, x23
-	mov	x1, x0
-	mov	x0, #MPIDR_AFFLVL0
-	bl	psci_afflvl_power_on_finish
-
-	b	el3_exit
-
-	/* --------------------------------------------
-	 * This function is called to indicate to the
-	 * power controller that it is safe to power
-	 * down this cpu. The cpu should not exit the
-	 * wfi and will be released from reset upon power
-	 * up. 'wfi_spill' is used to catch erroneous
-	 * exits from wfi.
-	 * --------------------------------------------
-	 */
-func psci_power_down_wfi
-	dsb	sy		// ensure write buffer empty
-	wfi
-wfi_spill:
-	b	wfi_spill
-
diff --git a/services/std_svc/psci/psci_helpers.S b/services/std_svc/psci/psci_helpers.S
deleted file mode 100644
index 9a51d5c..0000000
--- a/services/std_svc/psci/psci_helpers.S
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <assert_macros.S>
-#include <platform_def.h>
-#include <psci.h>
-
-	.globl	psci_do_pwrdown_cache_maintenance
-	.globl	psci_do_pwrup_cache_maintenance
-
-/* -----------------------------------------------------------------------
- * void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
- *
- * This function performs cache maintenance if the specified affinity
- * level is equal to the level of the highest affinity instance which
- * will be/is physically powered off. The levels of cache affected are
- * determined by the affinity level which is passed as the argument i.e.
- * level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
- * are flushed for a higher affinity level.
- *
- * Additionally, this function ensures that stack memory is correctly
- * flushed out to avoid coherency issues due to a change in its memory
- * attributes after the data cache is disabled.
- * -----------------------------------------------------------------------
- */
-func psci_do_pwrdown_cache_maintenance
-	stp     x29, x30, [sp,#-16]!
-	stp     x19, x20, [sp,#-16]!
-
-	mov	x19, x0
-	bl	psci_get_max_phys_off_afflvl
-#if ASM_ASSERTION
-	cmp	x0, #PSCI_INVALID_DATA
-	ASM_ASSERT(ne)
-#endif
-	cmp	x0, x19
-	b.ne	1f
-
-	/* ---------------------------------------------
-	 * Determine how many levels of cache will be
-	 * subject to cache maintenance. Affinity level
-	 * 0 implies that only the cpu is being powered
-	 * down. Only the L1 data cache needs to be
-	 * flushed to the PoU in this case. For a higher
-	 * affinity level we are assuming that a flush
-	 * of L1 data and L2 unified cache is enough.
-	 * This information should be provided by the
-	 * platform.
-	 * ---------------------------------------------
-	 */
-	cmp	x0, #MPIDR_AFFLVL0
-	b.eq	do_core_pwr_dwn
-	bl	prepare_cluster_pwr_dwn
-	b	do_stack_maintenance
-
-do_core_pwr_dwn:
-	bl	prepare_core_pwr_dwn
-
-	/* ---------------------------------------------
-	 * Do stack maintenance by flushing the used
-	 * stack to the main memory and invalidating the
-	 * remainder.
-	 * ---------------------------------------------
-	 */
-do_stack_maintenance:
-	mrs	x0, mpidr_el1
-	bl	platform_get_stack
-
-	/* ---------------------------------------------
-	 * Calculate and store the size of the used
-	 * stack memory in x1.
-	 * ---------------------------------------------
-	 */
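-	/* x19 = stack top, x1 = stack top - sp (bytes in use), x0 = sp */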
-	mov	x19, x0
-	mov	x1, sp
-	sub	x1, x0, x1
-	mov	x0, sp
-	bl	flush_dcache_range
-
-	/* ---------------------------------------------
-	 * Calculate and store the size of the unused
-	 * stack memory in x1. Calculate and store the
-	 * stack base address in x0.
-	 * ---------------------------------------------
-	 */
-	sub	x0, x19, #PLATFORM_STACK_SIZE
-	sub	x1, sp, x0
-	bl	inv_dcache_range
-
-1:
-	ldp	x19, x20, [sp], #16
-	ldp	x29, x30, [sp], #16
-	ret
-
-
-/* -----------------------------------------------------------------------
- * void psci_do_pwrup_cache_maintenance(void);
- *
- * This function performs cache maintenance after this cpu is powered up.
- * Currently, this involves managing the used stack memory before turning
- * on the data cache.
- * -----------------------------------------------------------------------
- */
-func psci_do_pwrup_cache_maintenance
-	stp	x29, x30, [sp,#-16]!
-
-	/* ---------------------------------------------
-	 * Ensure any inflight stack writes have made it
-	 * to main memory.
-	 * ---------------------------------------------
-	 */
-	dmb	st
-
-	/* ---------------------------------------------
-	 * Calculate and store the size of the used
-	 * stack memory in x1. Calculate and store the
-	 * stack base address in x0.
-	 * ---------------------------------------------
-	 */
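-	/* End result: x0 = sp, x1 = stack top - sp (bytes in use) */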
-	mrs	x0, mpidr_el1
-	bl	platform_get_stack
-	mov	x1, sp
-	sub	x1, x0, x1
-	mov	x0, sp
-	bl	inv_dcache_range
-
-	/* ---------------------------------------------
-	 * Enable the data cache.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, sctlr_el3
-	orr	x0, x0, #SCTLR_C_BIT
-	msr	sctlr_el3, x0
-	isb
-
-	ldp	x29, x30, [sp], #16
-	ret
diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c
deleted file mode 100644
index 52d252c..0000000
--- a/services/std_svc/psci/psci_main.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <std_svc.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
- ******************************************************************************/
-int psci_cpu_on(unsigned long target_cpu,
-		unsigned long entrypoint,
-		unsigned long context_id)
-
-{
-	int rc;
-	unsigned int start_afflvl, end_afflvl;
-	entry_point_info_t ep;
-
-	/* Determine if the cpu exists or not */
-	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
-	if (rc != PSCI_E_SUCCESS) {
-		return PSCI_E_INVALID_PARAMS;
-	}
-
-	/* Validate the entrypoint using platform pm_ops */
-	if (psci_plat_pm_ops->validate_ns_entrypoint) {
-		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
-		if (rc != PSCI_E_SUCCESS) {
-			assert(rc == PSCI_E_INVALID_PARAMS);
-			return PSCI_E_INVALID_PARAMS;
-		}
-	}
-
-	/*
-	 * Verify and derive the re-entry information for
-	 * the non-secure world from the non-secure state from
-	 * where this call originated.
-	 */
-	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
-	if (rc != PSCI_E_SUCCESS)
-		return rc;
-
-
-	/*
-	 * To turn this cpu on, specify which affinity
-	 * levels need to be turned on
-	 */
-	start_afflvl = MPIDR_AFFLVL0;
-	end_afflvl = get_max_afflvl();
-	rc = psci_afflvl_on(target_cpu,
-			    &ep,
-			    start_afflvl,
-			    end_afflvl);
-
-	return rc;
-}
-
-unsigned int psci_version(void)
-{
-	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
-}
-
-int psci_cpu_suspend(unsigned int power_state,
-		     unsigned long entrypoint,
-		     unsigned long context_id)
-{
-	int rc;
-	unsigned int target_afflvl, pstate_type;
-	entry_point_info_t ep;
-
-	/* Check SBZ bits in power state are zero */
-	if (psci_validate_power_state(power_state))
-		return PSCI_E_INVALID_PARAMS;
-
-	/* Sanity check the requested state */
-	target_afflvl = psci_get_pstate_afflvl(power_state);
-	if (target_afflvl > get_max_afflvl())
-		return PSCI_E_INVALID_PARAMS;
-
-	/* Validate the power_state using platform pm_ops */
-	if (psci_plat_pm_ops->validate_power_state) {
-		rc = psci_plat_pm_ops->validate_power_state(power_state);
-		if (rc != PSCI_E_SUCCESS) {
-			assert(rc == PSCI_E_INVALID_PARAMS);
-			return PSCI_E_INVALID_PARAMS;
-		}
-	}
-
-	/* Validate the entrypoint using platform pm_ops */
-	if (psci_plat_pm_ops->validate_ns_entrypoint) {
-		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
-		if (rc != PSCI_E_SUCCESS) {
-			assert(rc == PSCI_E_INVALID_PARAMS);
-			return PSCI_E_INVALID_PARAMS;
-		}
-	}
-
-	/* Determine the 'state type' in the 'power_state' parameter */
-	pstate_type = psci_get_pstate_type(power_state);
-
-	/*
-	 * Ensure that we have a platform specific handler for entering
-	 * a standby state.
-	 */
-	if (pstate_type == PSTATE_TYPE_STANDBY) {
-		if  (!psci_plat_pm_ops->affinst_standby)
-			return PSCI_E_INVALID_PARAMS;
-
-		psci_plat_pm_ops->affinst_standby(power_state);
-		return PSCI_E_SUCCESS;
-	}
-
-	/*
-	 * Verify and derive the re-entry information for
-	 * the non-secure world from the non-secure state from
-	 * where this call originated.
-	 */
-	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
-	if (rc != PSCI_E_SUCCESS)
-		return rc;
-
-	/* Save PSCI power state parameter for the core in suspend context */
-	psci_set_suspend_power_state(power_state);
-
-	/*
-	 * Do what is needed to enter the power down state. Upon success,
-	 * enter the final wfi which will power down this CPU.
-	 */
-	psci_afflvl_suspend(&ep,
-			    MPIDR_AFFLVL0,
-			    target_afflvl);
-
-	/* Reset PSCI power state parameter for the core. */
-	psci_set_suspend_power_state(PSCI_INVALID_DATA);
-	return PSCI_E_SUCCESS;
-}
-
-int psci_system_suspend(unsigned long entrypoint,
-			unsigned long context_id)
-{
-	int rc;
-	unsigned int power_state;
-	entry_point_info_t ep;
-
-	/* Validate the entrypoint using platform pm_ops */
-	if (psci_plat_pm_ops->validate_ns_entrypoint) {
-		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
-		if (rc != PSCI_E_SUCCESS) {
-			assert(rc == PSCI_E_INVALID_PARAMS);
-			return PSCI_E_INVALID_PARAMS;
-		}
-	}
-
-	/* Check if the current CPU is the last ON CPU in the system */
-	if (!psci_is_last_on_cpu())
-		return PSCI_E_DENIED;
-
-	/*
-	 * Verify and derive the re-entry information for
-	 * the non-secure world from the non-secure state from
-	 * where this call originated.
-	 */
-	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
-	if (rc != PSCI_E_SUCCESS)
-		return rc;
-
-	/*
-	 * Assert that the required pm_ops hook is implemented to ensure that
-	 * the capability detected during psci_setup() is valid.
-	 */
-	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
-
-	/*
-	 * Query the platform for the power_state required for system suspend
-	 */
-	power_state = psci_plat_pm_ops->get_sys_suspend_power_state();
-
-	/* Save PSCI power state parameter for the core in suspend context */
-	psci_set_suspend_power_state(power_state);
-
-	/*
-	 * Do what is needed to enter the power down state. Upon success,
-	 * enter the final wfi which will power down this cpu.
-	 */
-	psci_afflvl_suspend(&ep,
-			    MPIDR_AFFLVL0,
-			    PLATFORM_MAX_AFFLVL);
-
-	/* Reset PSCI power state parameter for the core. */
-	psci_set_suspend_power_state(PSCI_INVALID_DATA);
-	return PSCI_E_SUCCESS;
-}
-
-int psci_cpu_off(void)
-{
-	int rc;
-	int target_afflvl = get_max_afflvl();
-
-	/*
-	 * Traverse from the highest to the lowest affinity level. When the
-	 * lowest affinity level is hit, all the locks are acquired. State
-	 * management is done immediately followed by cpu, cluster ...
-	 * ..target_afflvl specific actions as this function unwinds back.
-	 */
-	rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);
-
-	/*
-	 * The only error cpu_off can return is E_DENIED. So check if that's
-	 * indeed the case.
-	 */
-	assert (rc == PSCI_E_DENIED);
-
-	return rc;
-}
-
-int psci_affinity_info(unsigned long target_affinity,
-		       unsigned int lowest_affinity_level)
-{
-	int rc = PSCI_E_INVALID_PARAMS;
-	unsigned int aff_state;
-	aff_map_node_t *node;
-
-	if (lowest_affinity_level > get_max_afflvl())
-		return rc;
-
-	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
-	if (node && (node->state & PSCI_AFF_PRESENT)) {
-
-		/*
-		 * TODO: For affinity levels higher than 0 i.e. cpu, the
-		 * state will always be either ON or OFF. Need to investigate
-		 * how critical is it to support ON_PENDING here.
-		 */
-		aff_state = psci_get_state(node);
-
-		/* A suspended cpu is available & on for the OS */
-		if (aff_state == PSCI_STATE_SUSPEND) {
-			aff_state = PSCI_STATE_ON;
-		}
-
-		rc = aff_state;
-	}
-
-	return rc;
-}
-
-int psci_migrate(unsigned long target_cpu)
-{
-	int rc;
-	unsigned long resident_cpu_mpidr;
-
-	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-	if (rc != PSCI_TOS_UP_MIG_CAP)
-		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
-			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
-
-	/*
-	 * Migrate should only be invoked on the CPU where
-	 * the Secure OS is resident.
-	 */
-	if (resident_cpu_mpidr != read_mpidr_el1())
-		return PSCI_E_NOT_PRESENT;
-
-	/* Check the validity of the specified target cpu */
-	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
-	if (rc != PSCI_E_SUCCESS)
-		return PSCI_E_INVALID_PARAMS;
-
-	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
-
-	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
-	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-	return rc;
-}
-
-int psci_migrate_info_type(void)
-{
-	unsigned long resident_cpu_mpidr;
-
-	return psci_spd_migrate_info(&resident_cpu_mpidr);
-}
-
-long psci_migrate_info_up_cpu(void)
-{
-	unsigned long resident_cpu_mpidr;
-	int rc;
-
-	/*
-	 * Return value of this depends upon what
-	 * psci_spd_migrate_info() returns.
-	 */
-	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
-		return PSCI_E_INVALID_PARAMS;
-
-	return resident_cpu_mpidr;
-}
-
-int psci_features(unsigned int psci_fid)
-{
-	uint32_t local_caps = psci_caps;
-
-	/* Check if it is a 64 bit function */
-	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
-		local_caps &= PSCI_CAP_64BIT_MASK;
-
-	/* Check for invalid fid */
-	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
-			&& is_psci_fid(psci_fid)))
-		return PSCI_E_NOT_SUPPORTED;
-
-
-	/* Check if the psci fid is supported or not */
-	if (!(local_caps & define_psci_cap(psci_fid)))
-		return PSCI_E_NOT_SUPPORTED;
-
-	/* Format the feature flags */
-	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
-			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
-		/*
-		 * The trusted firmware uses the original power state format
-		 * and does not support OS Initiated Mode.
-		 */
-		return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
-			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
-	}
-
-	/* Return 0 for all other fids */
-	return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * PSCI top level handler for servicing SMCs.
- ******************************************************************************/
-uint64_t psci_smc_handler(uint32_t smc_fid,
-			  uint64_t x1,
-			  uint64_t x2,
-			  uint64_t x3,
-			  uint64_t x4,
-			  void *cookie,
-			  void *handle,
-			  uint64_t flags)
-{
-	if (is_caller_secure(flags))
-		SMC_RET1(handle, SMC_UNK);
-
-	/* Check the fid against the capabilities */
-	if (!(psci_caps & define_psci_cap(smc_fid)))
-		SMC_RET1(handle, SMC_UNK);
-
-	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
-		/* 32-bit PSCI function, clear top parameter bits */
-
-		x1 = (uint32_t)x1;
-		x2 = (uint32_t)x2;
-		x3 = (uint32_t)x3;
-
-		switch (smc_fid) {
-		case PSCI_VERSION:
-			SMC_RET1(handle, psci_version());
-
-		case PSCI_CPU_OFF:
-			SMC_RET1(handle, psci_cpu_off());
-
-		case PSCI_CPU_SUSPEND_AARCH32:
-			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
-
-		case PSCI_CPU_ON_AARCH32:
-			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
-
-		case PSCI_AFFINITY_INFO_AARCH32:
-			SMC_RET1(handle, psci_affinity_info(x1, x2));
-
-		case PSCI_MIG_AARCH32:
-			SMC_RET1(handle, psci_migrate(x1));
-
-		case PSCI_MIG_INFO_TYPE:
-			SMC_RET1(handle, psci_migrate_info_type());
-
-		case PSCI_MIG_INFO_UP_CPU_AARCH32:
-			SMC_RET1(handle, psci_migrate_info_up_cpu());
-
-		case PSCI_SYSTEM_SUSPEND_AARCH32:
-			SMC_RET1(handle, psci_system_suspend(x1, x2));
-
-		case PSCI_SYSTEM_OFF:
-			psci_system_off();
-			/* We should never return from psci_system_off() */
-
-		case PSCI_SYSTEM_RESET:
-			psci_system_reset();
-			/* We should never return from psci_system_reset() */
-
-		case PSCI_FEATURES:
-			SMC_RET1(handle, psci_features(x1));
-
-		default:
-			break;
-		}
-	} else {
-		/* 64-bit PSCI function */
-
-		switch (smc_fid) {
-		case PSCI_CPU_SUSPEND_AARCH64:
-			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
-
-		case PSCI_CPU_ON_AARCH64:
-			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
-
-		case PSCI_AFFINITY_INFO_AARCH64:
-			SMC_RET1(handle, psci_affinity_info(x1, x2));
-
-		case PSCI_MIG_AARCH64:
-			SMC_RET1(handle, psci_migrate(x1));
-
-		case PSCI_MIG_INFO_UP_CPU_AARCH64:
-			SMC_RET1(handle, psci_migrate_info_up_cpu());
-
-		case PSCI_SYSTEM_SUSPEND_AARCH64:
-			SMC_RET1(handle, psci_system_suspend(x1, x2));
-
-		default:
-			break;
-		}
-	}
-
-	WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
-	SMC_RET1(handle, SMC_UNK);
-}
diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h
deleted file mode 100644
index f23ed8a..0000000
--- a/services/std_svc/psci/psci_private.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_PRIVATE_H__
-#define __PSCI_PRIVATE_H__
-
-#include <arch.h>
-#include <bakery_lock.h>
-#include <bl_common.h>
-#include <psci.h>
-
-/*
- * The following helper macros abstract the interface to the Bakery
- * Lock API.
- */
-#if USE_COHERENT_MEM
-#define psci_lock_init(aff_map, idx)	bakery_lock_init(&(aff_map)[(idx)].lock)
-#define psci_lock_get(node)		bakery_lock_get(&((node)->lock))
-#define psci_lock_release(node)		bakery_lock_release(&((node)->lock))
-#else
-#define psci_lock_init(aff_map, idx)	((aff_map)[(idx)].aff_map_index = (idx))
-#define psci_lock_get(node)		bakery_lock_get((node)->aff_map_index,	  \
-						CPU_DATA_PSCI_LOCK_OFFSET)
-#define psci_lock_release(node)		bakery_lock_release((node)->aff_map_index,\
-						CPU_DATA_PSCI_LOCK_OFFSET)
-#endif
-
-/*
- * The PSCI capabilities which are provided by the generic code and do not
- * depend on the platform or SPD capabilities.
- */
-#define PSCI_GENERIC_CAP	\
-			(define_psci_cap(PSCI_VERSION) |		\
-			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
-			define_psci_cap(PSCI_FEATURES))
-
-/*
- * The PSCI capabilities mask for 64 bit functions.
- */
-#define PSCI_CAP_64BIT_MASK	\
-			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
-			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
-			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
-			define_psci_cap(PSCI_MIG_AARCH64) |		\
-			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
-			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64))
-
-
-/*******************************************************************************
- * The following two data structures hold the topology tree which in turn tracks
- * the state of the all the affinity instances supported by the platform.
- ******************************************************************************/
-typedef struct aff_map_node {
-	unsigned long mpidr;
-	unsigned char ref_count;
-	unsigned char state;
-	unsigned char level;
-#if USE_COHERENT_MEM
-	bakery_lock_t lock;
-#else
-	/* For indexing the bakery_info array in per CPU data */
-	unsigned char aff_map_index;
-#endif
-} aff_map_node_t;
-
-typedef struct aff_limits_node {
-	int min;
-	int max;
-} aff_limits_node_t;
-
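-/*
- * mpidr_aff_map_nodes_t is an array with one aff_map_node_t pointer per
- * affinity level of an mpidr. afflvl_power_on_finisher_t is the per-level
- * handler invoked on a node when it finishes powering on or resuming.
- */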
-typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
-typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *);
-
-/*******************************************************************************
- * Data prototypes
- ******************************************************************************/
-extern const plat_pm_ops_t *psci_plat_pm_ops;
-extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS];
-extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
-extern uint32_t psci_caps;
-
-/*******************************************************************************
- * SPD's power management hooks registered with PSCI
- ******************************************************************************/
-extern const spd_pm_ops_t *psci_spd_pm;
-
-/*******************************************************************************
- * Function prototypes
- ******************************************************************************/
-/* Private exported functions from psci_common.c */
-int get_max_afflvl(void);
-unsigned short psci_get_state(aff_map_node_t *node);
-unsigned short psci_get_phys_state(aff_map_node_t *node);
-void psci_set_state(aff_map_node_t *node, unsigned short state);
-unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
-int psci_validate_mpidr(unsigned long, int);
-int get_power_on_target_afflvl(void);
-void psci_afflvl_power_on_finish(int,
-				int,
-				afflvl_power_on_finisher_t *);
-int psci_get_ns_ep_info(entry_point_info_t *ep,
-		       uint64_t entrypoint, uint64_t context_id);
-int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
-void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
-			       uint32_t end_afflvl,
-			       aff_map_node_t *mpidr_nodes[],
-			       uint32_t state);
-void psci_acquire_afflvl_locks(int start_afflvl,
-			       int end_afflvl,
-			       aff_map_node_t *mpidr_nodes[]);
-void psci_release_afflvl_locks(int start_afflvl,
-				int end_afflvl,
-				mpidr_aff_map_nodes_t mpidr_nodes);
-void psci_print_affinity_map(void);
-void psci_set_max_phys_off_afflvl(uint32_t afflvl);
-uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
-				       uint32_t end_afflvl,
-				       aff_map_node_t *mpidr_nodes[]);
-unsigned int psci_is_last_on_cpu(void);
-int psci_spd_migrate_info(uint64_t *mpidr);
-
-/* Private exported functions from psci_setup.c */
-int psci_get_aff_map_nodes(unsigned long mpidr,
-				int start_afflvl,
-				int end_afflvl,
-				aff_map_node_t *mpidr_nodes[]);
-aff_map_node_t *psci_get_aff_map_node(unsigned long, int);
-
-/* Private exported functions from psci_affinity_on.c */
-int psci_afflvl_on(unsigned long target_cpu,
-		   entry_point_info_t *ep,
-		   int start_afflvl,
-		   int end_afflvl);
-
-/* Private exported functions from psci_affinity_off.c */
-int psci_afflvl_off(int, int);
-
-/* Private exported functions from psci_affinity_suspend.c */
-void psci_afflvl_suspend(entry_point_info_t *ep,
-			int start_afflvl,
-			int end_afflvl);
-
-unsigned int psci_afflvl_suspend_finish(int, int);
-void psci_set_suspend_power_state(unsigned int power_state);
-
-/* Private exported functions from psci_helpers.S */
-void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
-void psci_do_pwrup_cache_maintenance(void);
-
-/* Private exported functions from psci_system_off.c */
-void __dead2 psci_system_off(void);
-void __dead2 psci_system_reset(void);
-
-#endif /* __PSCI_PRIVATE_H__ */
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
deleted file mode 100644
index fbd76d0..0000000
--- a/services/std_svc/psci/psci_setup.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <platform.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * Per cpu non-secure contexts used to program the architectural state prior
- * to return to the normal world.
- * TODO: Use the memory allocator to set aside memory for the contexts instead
- * of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
- * overkill.
- ******************************************************************************/
-static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
-
-/*******************************************************************************
- * In a system, a certain number of affinity instances are present at an
- * affinity level. The cumulative number of instances across all levels is
- * stored in 'psci_aff_map'. The topology tree has been flattened into this
- * array. To retrieve nodes, information about the extents of each affinity
- * level i.e. start index and end index needs to be present. 'psci_aff_limits'
- * stores this information.
- ******************************************************************************/
-aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
-
-/******************************************************************************
- * Define the psci capability variable.
- *****************************************************************************/
-uint32_t psci_caps;
-
-
-/*******************************************************************************
- * Routines for retrieving the node corresponding to an affinity level instance
- * in the mpidr. The first one uses binary search to find the node corresponding
- * to the mpidr (key) at a particular affinity level. The second routine decides
- * extents of the binary search at each affinity level.
- ******************************************************************************/
-static int psci_aff_map_get_idx(unsigned long key,
-				int min_idx,
-				int max_idx)
-{
-	int mid;
-
-	/*
-	 * Terminating condition: If the max and min indices have crossed paths
-	 * during the binary search then the key has not been found.
-	 */
-	if (max_idx < min_idx)
-		return PSCI_E_INVALID_PARAMS;
-
-	/*
-	 * Make sure we are within array limits.
-	 */
-	assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);
-
-	/*
-	 * Bisect the array around 'mid' and then recurse into the array chunk
-	 * where the key is likely to be found. The mpidrs in each node in the
-	 * 'psci_aff_map' for a given affinity level are stored in an ascending
-	 * order which makes the binary search possible.
-	 */
-	mid = min_idx + ((max_idx - min_idx) >> 1);	/* Divide by 2 */
-
-	if (psci_aff_map[mid].mpidr > key)
-		return psci_aff_map_get_idx(key, min_idx, mid - 1);
-	else if (psci_aff_map[mid].mpidr < key)
-		return psci_aff_map_get_idx(key, mid + 1, max_idx);
-	else
-		return mid;
-}
-
-aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
-{
-	int rc;
-
-	if (aff_lvl > get_max_afflvl())
-		return NULL;
-
-	/* Right shift the mpidr to the required affinity level */
-	mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
-
-	rc = psci_aff_map_get_idx(mpidr,
-				  psci_aff_limits[aff_lvl].min,
-				  psci_aff_limits[aff_lvl].max);
-	if (rc >= 0)
-		return &psci_aff_map[rc];
-	else
-		return NULL;
-}
-
-/*******************************************************************************
- * This function populates an array with nodes corresponding to a given range of
- * affinity levels in an mpidr. It returns successfully only when the affinity
- * levels are correct, the mpidr is valid i.e. no affinity level is absent from
- * the topology tree & the affinity instance at level 0 is not absent.
- ******************************************************************************/
-int psci_get_aff_map_nodes(unsigned long mpidr,
-			   int start_afflvl,
-			   int end_afflvl,
-			   aff_map_node_t *mpidr_nodes[])
-{
-	int rc = PSCI_E_INVALID_PARAMS, level;
-	aff_map_node_t *node;
-
-	rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
-	if (rc != PSCI_E_SUCCESS)
-		return rc;
-
-	for (level = start_afflvl; level <= end_afflvl; level++) {
-
-		/*
-		 * Grab the node for each affinity level. No affinity level
-		 * can be missing as that would mean that the topology tree
-		 * is corrupted.
-		 */
-		node = psci_get_aff_map_node(mpidr, level);
-		if (node == NULL) {
-			rc = PSCI_E_INVALID_PARAMS;
-			break;
-		}
-
-		/*
-		 * Skip absent affinity levels unless it's affinity level 0.
-		 * An absent cpu means that the mpidr is invalid. Save the
-		 * pointer to the node for the present affinity level
-		 */
-		if (!(node->state & PSCI_AFF_PRESENT)) {
-			if (level == MPIDR_AFFLVL0) {
-				rc = PSCI_E_INVALID_PARAMS;
-				break;
-			}
-
-			mpidr_nodes[level] = NULL;
-		} else
-			mpidr_nodes[level] = node;
-	}
-
-	return rc;
-}
-
-/*******************************************************************************
- * Function which initializes the 'aff_map_node' corresponding to an affinity
- * level instance. Each node has a unique mpidr, level and bakery lock. The data
- * field is opaque and holds affinity level specific data e.g. for affinity
- * level 0 it contains the index into arrays that hold the secure/non-secure
- * state for a cpu that's been turned on/off
- ******************************************************************************/
-static void psci_init_aff_map_node(unsigned long mpidr,
-				   int level,
-				   unsigned int idx)
-{
-	unsigned char state;
-	uint32_t linear_id;
-	psci_aff_map[idx].mpidr = mpidr;
-	psci_aff_map[idx].level = level;
-	psci_lock_init(psci_aff_map, idx);
-
-	/*
-	 * If an affinity instance is present then mark it as OFF to begin with.
-	 */
-	state = plat_get_aff_state(level, mpidr);
-	psci_aff_map[idx].state = state;
-
-	if (level == MPIDR_AFFLVL0) {
-
-		/*
-		 * Mark the cpu as OFF. Higher affinity level reference counts
-		 * have already been memset to 0
-		 */
-		if (state & PSCI_AFF_PRESENT)
-			psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
-
-		/*
-		 * Associate a non-secure context with this affinity
-		 * instance through the context management library.
-		 */
-		linear_id = platform_get_core_pos(mpidr);
-		assert(linear_id < PLATFORM_CORE_COUNT);
-
-		/* Invalidate the suspend context for the node */
-		set_cpu_data_by_index(linear_id,
-				      psci_svc_cpu_data.power_state,
-				      PSCI_INVALID_DATA);
-
-		/*
-		 * There is no state associated with the current execution
-		 * context so ensure that any reads of the highest affinity
-		 * level in a powered down state return PSCI_INVALID_DATA.
-		 */
-		set_cpu_data_by_index(linear_id,
-				      psci_svc_cpu_data.max_phys_off_afflvl,
-				      PSCI_INVALID_DATA);
-
-		flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
-
-		cm_set_context_by_mpidr(mpidr,
-					(void *) &psci_ns_context[linear_id],
-					NON_SECURE);
-	}
-
-	return;
-}
-
-/*******************************************************************************
- * Core routine used by the Breadth-First-Search algorithm to populate the
- * affinity tree. Each level in the tree corresponds to an affinity level. This
- * routine's aim is to traverse to the target affinity level and populate nodes
- * in the 'psci_aff_map' for all the siblings at that level. It uses the current
- * affinity level to keep track of how many levels from the root of the tree
- * have been traversed. If the current affinity level != target affinity level,
- * then the platform is asked to return the number of children that each
- * affinity instance has at the current affinity level. Traversal is then done
- * for each child at the next lower level i.e. current affinity level - 1.
- *
- * CAUTION: This routine assumes that affinity instance ids are allocated in a
- * monotonically increasing manner at each affinity level in a mpidr starting
- * from 0. If the platform breaks this assumption then this code will have to
- * be reworked accordingly.
- ******************************************************************************/
-static unsigned int psci_init_aff_map(unsigned long mpidr,
-				      unsigned int affmap_idx,
-				      int cur_afflvl,
-				      int tgt_afflvl)
-{
-	unsigned int ctr, aff_count;
-
-	assert(cur_afflvl >= tgt_afflvl);
-
-	/*
-	 * Find the number of siblings at the current affinity level &
-	 * assert if there are none because then we have been invoked with
-	 * an invalid mpidr.
-	 */
-	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
-	assert(aff_count);
-
-	if (tgt_afflvl < cur_afflvl) {
-		for (ctr = 0; ctr < aff_count; ctr++) {
-			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
-			affmap_idx = psci_init_aff_map(mpidr,
-						       affmap_idx,
-						       cur_afflvl - 1,
-						       tgt_afflvl);
-		}
-	} else {
-		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
-			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
-			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
-		}
-
-		/* affmap_idx is 1 greater than the max index of cur_afflvl */
-		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
-	}
-
-	return affmap_idx;
-}
-
-/*******************************************************************************
- * This function initializes the topology tree by querying the platform. To do
- * so, it's helper routines implement a Breadth-First-Search. At each affinity
- * level the platform conveys the number of affinity instances that exist i.e.
- * the affinity count. The algorithm populates the psci_aff_map recursively
- * using this information. On a platform that implements two clusters of 4 cpus
- * each, the populated aff_map_array would look like this:
- *
- *            <- cpus cluster0 -><- cpus cluster1 ->
- * ---------------------------------------------------
- * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
- * ---------------------------------------------------
- *           ^                                       ^
- * cluster __|                                 cpu __|
- * limit                                      limit
- *
- * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
- * within cluster 0. The last 4 entries are of cpus within cluster 1.
- * The 'psci_aff_limits' array contains the max & min index of each affinity
- * level within the 'psci_aff_map' array. This allows restricting search of a
- * node at an affinity level between the indices in the limits array.
- ******************************************************************************/
-int32_t psci_setup(void)
-{
-	unsigned long mpidr = read_mpidr();
-	int afflvl, affmap_idx, max_afflvl;
-	aff_map_node_t *node;
-
-	psci_plat_pm_ops = NULL;
-
-	/* Find out the maximum affinity level that the platform implements */
-	max_afflvl = get_max_afflvl();
-	assert(max_afflvl <= MPIDR_MAX_AFFLVL);
-
-	/*
-	 * This call traverses the topology tree with help from the platform and
-	 * populates the affinity map using a breadth-first-search recursively.
-	 * We assume that the platform allocates affinity instance ids from 0
-	 * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
-	 */
-	affmap_idx = 0;
-	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
-		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
-					       affmap_idx,
-					       max_afflvl,
-					       afflvl);
-	}
-
-#if !USE_COHERENT_MEM
-	/*
-	 * The psci_aff_map only needs flushing when it's not allocated in
-	 * coherent memory.
-	 */
-	flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
-#endif
-
-	/*
-	 * Set the bounds for the affinity counts of each level in the map. Also
-	 * flush out the entire array so that it's visible to subsequent power
-	 * management operations. The 'psci_aff_limits' array is allocated in
-	 * normal memory. It will be accessed when the mmu is off e.g. after
-	 * reset. Hence it needs to be flushed.
-	 */
-	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
-		psci_aff_limits[afflvl].min =
-			psci_aff_limits[afflvl + 1].max + 1;
-	}
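-	/*
-	 * For the two cluster/four cpu example above this yields limits of
-	 * {min 0, max 1} for the cluster level and {min 2, max 9} for the
-	 * cpu level.
-	 */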
-
-	flush_dcache_range((unsigned long) psci_aff_limits,
-			   sizeof(psci_aff_limits));
-
-	/*
-	 * Mark the affinity instances in our mpidr as ON. No need to lock as
-	 * this is the primary cpu.
-	 */
-	mpidr &= MPIDR_AFFINITY_MASK;
-	for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
-
-		node = psci_get_aff_map_node(mpidr, afflvl);
-		assert(node);
-
-		/* Mark each present node as ON. */
-		if (node->state & PSCI_AFF_PRESENT)
-			psci_set_state(node, PSCI_STATE_ON);
-	}
-
-	platform_setup_pm(&psci_plat_pm_ops);
-	assert(psci_plat_pm_ops);
-
-	/* Initialize the psci capability */
-	psci_caps = PSCI_GENERIC_CAP;
-
-	if (psci_plat_pm_ops->affinst_off)
-		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
-	if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish)
-		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
-	if (psci_plat_pm_ops->affinst_suspend &&
-			psci_plat_pm_ops->affinst_suspend_finish) {
-		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
-		if (psci_plat_pm_ops->get_sys_suspend_power_state)
-			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
-	}
-	if (psci_plat_pm_ops->system_off)
-		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
-	if (psci_plat_pm_ops->system_reset)
-		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
-
-	return 0;
-}
diff --git a/services/std_svc/psci/psci_system_off.c b/services/std_svc/psci/psci_system_off.c
deleted file mode 100644
index 970d4bb..0000000
--- a/services/std_svc/psci/psci_system_off.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include "psci_private.h"
-
-void psci_system_off(void)
-{
-	psci_print_affinity_map();
-
-	assert(psci_plat_pm_ops->system_off);
-
-	/* Notify the Secure Payload Dispatcher */
-	if (psci_spd_pm && psci_spd_pm->svc_system_off) {
-		psci_spd_pm->svc_system_off();
-	}
-
-	/* Call the platform specific hook */
-	psci_plat_pm_ops->system_off();
-
-	/* This function does not return. We should never get here */
-}
-
-void psci_system_reset(void)
-{
-	psci_print_affinity_map();
-
-	assert(psci_plat_pm_ops->system_reset);
-
-	/* Notify the Secure Payload Dispatcher */
-	if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
-		psci_spd_pm->svc_system_reset();
-	}
-
-	/* Call the platform specific hook */
-	psci_plat_pm_ops->system_reset();
-
-	/* This function does not return. We should never get here */
-}
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 6cb0319..8e69046 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -1,36 +1,17 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
+#include <cpu_data.h>
 #include <debug.h>
+#include <pmf.h>
 #include <psci.h>
+#include <runtime_instr.h>
 #include <runtime_svc.h>
+#include <smcc_helpers.h>
 #include <std_svc.h>
 #include <stdint.h>
 #include <uuid.h>
@@ -43,33 +24,60 @@
 /* Setup Standard Services */
 static int32_t std_svc_setup(void)
 {
+	uintptr_t svc_arg;
+
+	svc_arg = get_arm_std_svc_args(PSCI_FID_MASK);
+	assert(svc_arg);
+
 	/*
 	 * PSCI is the only specification implemented as a Standard Service.
-	 * Invoke PSCI setup from here
+	 * The `psci_setup()` also does EL3 architectural setup.
 	 */
-	return psci_setup();
+	return psci_setup((const psci_lib_args_t *)svc_arg);
 }
 
 /*
  * Top-level Standard Service SMC handler. This handler will in turn dispatch
  * calls to PSCI SMC handler
  */
-uint64_t std_svc_smc_handler(uint32_t smc_fid,
-			     uint64_t x1,
-			     uint64_t x2,
-			     uint64_t x3,
-			     uint64_t x4,
+uintptr_t std_svc_smc_handler(uint32_t smc_fid,
+			     u_register_t x1,
+			     u_register_t x2,
+			     u_register_t x3,
+			     u_register_t x4,
 			     void *cookie,
 			     void *handle,
-			     uint64_t flags)
+			     u_register_t flags)
 {
 	/*
 	 * Dispatch PSCI calls to PSCI SMC handler and return its return
 	 * value
 	 */
 	if (is_psci_fid(smc_fid)) {
-		return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
-				handle, flags);
+		uint64_t ret;
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+		/*
+		 * Flush cache line so that even if CPU power down happens
+		 * the timestamp update is reflected in memory.
+		 */
+		PMF_WRITE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_ENTER_PSCI,
+		    PMF_CACHE_MAINT,
+		    get_cpu_data(cpu_data_pmf_ts[CPU_DATA_PMF_TS0_IDX]));
+#endif
+
+		ret = psci_smc_handler(smc_fid, x1, x2, x3, x4,
+		    cookie, handle, flags);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		    RT_INSTR_EXIT_PSCI,
+		    PMF_NO_CACHE_MAINT);
+#endif
+
+		SMC_RET1(handle, ret);
 	}
 
 	switch (smc_fid) {
diff --git a/tools/cert_create/Makefile b/tools/cert_create/Makefile
index f1aa797..437b692 100644
--- a/tools/cert_create/Makefile
+++ b/tools/cert_create/Makefile
@@ -1,53 +1,48 @@
 #
-# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 PROJECT		:= cert_create
 PLAT		:= none
-V		:= 0
+V		?= 0
 DEBUG		:= 0
-BINARY		:= ${PROJECT}
+BINARY		:= ${PROJECT}${BIN_EXT}
+OPENSSL_DIR	:= /usr
+USE_TBBR_DEFS   := 1
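+# Example invocations (illustrative): the default "make" builds the TBBR
+# generic, platform-independent tool; "make USE_TBBR_DEFS=0 PLAT=<platform>"
+# builds against the named platform's include directory instead.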
 
 OBJECTS := src/cert.o \
+           src/cmd_opt.o \
            src/ext.o \
            src/key.o \
            src/main.o \
-           src/tbb_cert.o \
-           src/tbb_ext.o \
-           src/tbb_key.o \
-           src/sha.o
+           src/sha.o \
+           src/tbbr/tbb_cert.o \
+           src/tbbr/tbb_ext.o \
+           src/tbbr/tbb_key.o
 
 CFLAGS := -Wall -std=c99
 
-# Check the platform
-ifeq (${PLAT},none)
-  $(error Error: No platform defined. Use PLAT=<platform>.)
+MAKE_HELPERS_DIRECTORY := ../../make_helpers/
+include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
+include ${MAKE_HELPERS_DIRECTORY}build_env.mk
+
+ifeq (${USE_TBBR_DEFS},1)
+# In this case, cert_tool is platform-independent
+PLAT_MSG		:=	TBBR Generic
+PLAT_INCLUDE		:=	../../include/tools_share
+else
+PLAT_MSG		:=	${PLAT}
+
+PLATFORM_ROOT		:=	../../plat/
+include ${MAKE_HELPERS_DIRECTORY}plat_helpers.mk
+
+PLAT_INCLUDE		:=	$(wildcard ${PLAT_DIR}include)
+
+ifeq ($(PLAT_INCLUDE),)
+  $(error "Error: Invalid platform '${PLAT}' has no include directory.")
+endif
 endif
 
 ifeq (${DEBUG},1)
@@ -56,37 +51,40 @@
   CFLAGS += -O2 -DLOG_LEVEL=20
 endif
 ifeq (${V},0)
-	Q := @
+  Q := @
 else
-	Q :=
+  Q :=
 endif
 
+$(eval $(call add_define,USE_TBBR_DEFS))
+CFLAGS += ${DEFINES}
+
 # Make soft links and include from local directory otherwise wrong headers
 # could get pulled in from firmware tree.
-INC_DIR := -I ./include -I ../../plat/${PLAT}/include
-LIB_DIR :=
+INC_DIR := -I ./include -I ${PLAT_INCLUDE} -I ${OPENSSL_DIR}/include
+LIB_DIR := -L ${OPENSSL_DIR}/lib
 LIB := -lssl -lcrypto
 
-CC := gcc
-RM := rm -rf
+HOSTCC ?= gcc
 
-.PHONY: all clean
+.PHONY: all clean realclean
 
 all: clean ${BINARY}
 
 ${BINARY}: ${OBJECTS} Makefile
 	@echo "  LD      $@"
 	@echo 'const char build_msg[] = "Built : "__TIME__", "__DATE__; \
-                const char platform_msg[] = "${PLAT}";' | \
+                const char platform_msg[] = "${PLAT_MSG}";' | \
                 ${CC} -c ${CFLAGS} -xc - -o src/build_msg.o
-	${Q}${CC} src/build_msg.o ${OBJECTS} ${LIB_DIR} ${LIB} -o $@
+	${Q}${HOSTCC} src/build_msg.o ${OBJECTS} ${LIB_DIR} ${LIB} -o $@
 
 %.o: %.c
 	@echo "  CC      $<"
-	${Q}${CC} -c ${CFLAGS} ${INC_DIR} $< -o $@
+	${Q}${HOSTCC} -c ${CFLAGS} ${INC_DIR} $< -o $@
 
 clean:
-	${Q}${RM} -f src/build_msg.o ${OBJECTS}
+	$(call SHELL_DELETE_ALL, src/build_msg.o ${OBJECTS})
 
 realclean: clean
-	${Q}${RM} -f ${BINARY}
+	$(call SHELL_DELETE, ${BINARY})
+
diff --git a/tools/cert_create/include/cert.h b/tools/cert_create/include/cert.h
index 48a4146..256e7af 100644
--- a/tools/cert_create/include/cert.h
+++ b/tools/cert_create/include/cert.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef CERT_H_
@@ -33,8 +9,11 @@
 
 #include <openssl/ossl_typ.h>
 #include <openssl/x509.h>
+#include "ext.h"
 #include "key.h"
 
+#define CERT_MAX_EXT			4
+
 /*
  * This structure contains information related to the generation of the
  * certificates. All these fields must be known and specified at build time
@@ -51,19 +30,33 @@
 struct cert_s {
 	int id;			/* Unique identifier */
 
+	const char *opt;	/* Command line option to pass filename */
 	const char *fn;		/* Filename to save the certificate */
-	const char *bin;	/* Image associated to this certificate */
-
 	const char *cn;		/* Subject CN (Common Name) */
+	const char *help_msg;	/* Help message */
+
+	/* These fields must be defined statically */
+	int key;		/* Key to be signed */
+	int issuer;		/* Issuer certificate */
+	int ext[CERT_MAX_EXT];	/* Certificate extensions */
+	int num_ext;		/* Number of extensions in the certificate */
 
 	X509 *x;		/* X509 certificate container */
-	key_t *key;		/* Key to be signed */
-
-	cert_t *issuer;		/* Issuer certificate */
 };
 
+/* Exported API */
+int cert_init(void);
+cert_t *cert_get_by_opt(const char *opt);
 int cert_add_ext(X509 *issuer, X509 *subject, int nid, char *value);
+int cert_new(int key_alg, cert_t *cert, int days, int ca, STACK_OF(X509_EXTENSION) * sk);
 
-int cert_new(cert_t *cert, int days, int ca, STACK_OF(X509_EXTENSION) * sk);
+/* Macro to register the certificates used in the CoT */
+#define REGISTER_COT(_certs) \
+	cert_t *certs = &_certs[0]; \
+	const unsigned int num_certs = sizeof(_certs)/sizeof(_certs[0])
+
+/* Exported variables */
+extern cert_t *certs;
+extern const unsigned int num_certs;
 
 #endif /* CERT_H_ */
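The table that feeds REGISTER_COT lives in the TBBR-specific sources, which are outside this hunk. As a rough sketch only (the option string and help text below are invented for illustration), a platform could describe a self-signed certificate and register the whole array like this:

    static cert_t my_certs[] = {
            [TRUSTED_BOOT_FW_CERT] = {
                    .id = TRUSTED_BOOT_FW_CERT,
                    .opt = "tb-fw-cert",            /* illustrative --tb-fw-cert <file> option */
                    .help_msg = "Trusted Boot FW certificate (output file)",
                    .cn = "Trusted Boot FW Certificate",
                    .key = ROT_KEY,                 /* signed with the ROT key */
                    .issuer = TRUSTED_BOOT_FW_CERT, /* self-signed */
                    .ext = {
                            TRUSTED_FW_NVCOUNTER_EXT,
                            TRUSTED_BOOT_FW_HASH_EXT
                    },
                    .num_ext = 2
            },
    };

    REGISTER_COT(my_certs);

cert_init() then walks certs[]/num_certs and turns every entry into a long option, so adding a certificate no longer requires touching the option parser.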
diff --git a/tools/cert_create/include/cmd_opt.h b/tools/cert_create/include/cmd_opt.h
new file mode 100644
index 0000000..5095ed1
--- /dev/null
+++ b/tools/cert_create/include/cmd_opt.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CMD_OPT_H_
+#define CMD_OPT_H_
+
+#include <getopt.h>
+
+#define CMD_OPT_MAX_NUM			64
+
+/* Supported long command line option types */
+enum {
+	CMD_OPT_CERT,
+	CMD_OPT_KEY,
+	CMD_OPT_EXT
+};
+
+/* Structure to define a command line option */
+typedef struct cmd_opt_s {
+	struct option long_opt;
+	const char *help_msg;
+} cmd_opt_t;
+
+/* Exported API*/
+void cmd_opt_add(const cmd_opt_t *cmd_opt);
+const struct option *cmd_opt_get_array(void);
+const char *cmd_opt_get_name(int idx);
+const char *cmd_opt_get_help_msg(int idx);
+
+#endif /* CMD_OPT_H_ */
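A minimal sketch of the registration flow this header enables (the option name is made up): each module wraps a getopt long option plus its help string in a cmd_opt_t, and main() later retrieves the accumulated array for getopt_long().

    cmd_opt_t opt = {
            .long_opt = { "soc-fw", required_argument, NULL, CMD_OPT_EXT },
            .help_msg = "SoC firmware image file"
    };

    cmd_opt_add(&opt);

    /* Later, in the option parsing loop: */
    const struct option *long_opts = cmd_opt_get_array();
    int c = getopt_long(argc, argv, "a:hknp", long_opts, &opt_idx);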
diff --git a/tools/cert_create/include/debug.h b/tools/cert_create/include/debug.h
index dd0510a..6302b41 100644
--- a/tools/cert_create/include/debug.h
+++ b/tools/cert_create/include/debug.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef __DEBUG_H__
diff --git a/tools/cert_create/include/ext.h b/tools/cert_create/include/ext.h
index d73f573..d432e63 100644
--- a/tools/cert_create/include/ext.h
+++ b/tools/cert_create/include/ext.h
@@ -1,37 +1,27 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef EXT_H_
 #define EXT_H_
 
 #include <openssl/x509v3.h>
+#include "key.h"
+
+/* Extension types supported */
+enum ext_type_e {
+	EXT_TYPE_NVCOUNTER,
+	EXT_TYPE_PKEY,
+	EXT_TYPE_HASH
+};
+
+/* NV-Counter types */
+enum nvctr_type_e {
+	NVCTR_TYPE_TFW,
+	NVCTR_TYPE_NTFW
+};
 
 /*
  * This structure contains the relevant information to create the extensions
@@ -42,11 +32,22 @@
 	const char *oid;	/* OID of the extension */
 	const char *sn;		/* Short name */
 	const char *ln;		/* Long description */
-	int type;		/* OpenSSL ASN1 type of the extension data.
+	const char *opt;	/* Command line option to specify data */
+	const char *help_msg;	/* Help message */
+	const char *arg;	/* Argument passed from command line */
+	int asn1_type;		/* OpenSSL ASN1 type of the extension data.
 				 * Supported types are:
 				 *   - V_ASN1_INTEGER
 				 *   - V_ASN1_OCTET_STRING
 				 */
+	int type;		/* See ext_type_e */
+
+	/* Extension attributes (depends on extension type) */
+	union {
+		int nvctr_type;	/* See nvctr_type_e */
+		int key;	/* Index into array of registered public keys */
+	} attr;
+
 	int alias;		/* In case OpenSSL provides a standard
 				 * extension of the same type, add the new
 				 * extension as an alias of this one
@@ -55,6 +56,8 @@
 	X509V3_EXT_METHOD method; /* This field may be used to define a custom
 				   * function to print the contents of the
 				   * extension */
+
+	int optional;	/* May be set to mark the extension as optional, e.g. when the associated image or value may be absent */
 } ext_t;
 
 enum {
@@ -62,9 +65,21 @@
 	EXT_CRIT = !EXT_NON_CRIT,
 };
 
-int ext_init(ext_t *tbb_ext);
-X509_EXTENSION *ext_new_hash(int nid, int crit, unsigned char *buf, size_t len);
+/* Exported API */
+int ext_init(void);
+ext_t *ext_get_by_opt(const char *opt);
+X509_EXTENSION *ext_new_hash(int nid, int crit, const EVP_MD *md,
+		unsigned char *buf, size_t len);
 X509_EXTENSION *ext_new_nvcounter(int nid, int crit, int value);
 X509_EXTENSION *ext_new_key(int nid, int crit, EVP_PKEY *k);
 
+/* Macro to register the extensions used in the CoT */
+#define REGISTER_EXTENSIONS(_ext) \
+	ext_t *extensions = &_ext[0]; \
+	const unsigned int num_extensions = sizeof(_ext)/sizeof(_ext[0])
+
+/* Exported variables */
+extern ext_t *extensions;
+extern const unsigned int num_extensions;
+
 #endif /* EXT_H_ */
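To make the new fields concrete, this is roughly what a hash-type entry in the registered extension table could look like; the OID, option name and strings are placeholders rather than values defined by this patch.

    static ext_t my_extensions[] = {
            [TRUSTED_BOOT_FW_HASH_EXT] = {
                    .oid = "1.2.3.4.5",            /* placeholder OID */
                    .sn = "TrustedBootFirmwareHash",
                    .ln = "Trusted Boot Firmware hash (SHA256)",
                    .opt = "tb-fw",                /* illustrative --tb-fw <image> option */
                    .help_msg = "Trusted Boot Firmware image file",
                    .asn1_type = V_ASN1_OCTET_STRING,
                    .type = EXT_TYPE_HASH
            },
    };

    REGISTER_EXTENSIONS(my_extensions);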
diff --git a/tools/cert_create/include/key.h b/tools/cert_create/include/key.h
index 8819750..304fa61 100644
--- a/tools/cert_create/include/key.h
+++ b/tools/cert_create/include/key.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef KEY_H_
@@ -35,6 +11,25 @@
 
 #define RSA_KEY_BITS		2048
 
+/* Error codes */
+enum {
+	KEY_ERR_NONE,
+	KEY_ERR_MALLOC,
+	KEY_ERR_FILENAME,
+	KEY_ERR_OPEN,
+	KEY_ERR_LOAD
+};
+
+/* Supported key algorithms */
+enum {
+	KEY_ALG_RSA,		/* RSA PSS as defined by PKCS#1 v2.1 (default) */
+	KEY_ALG_RSA_1_5,	/* RSA as defined by PKCS#1 v1.5 */
+#ifndef OPENSSL_NO_EC
+	KEY_ALG_ECDSA,
+#endif /* OPENSSL_NO_EC */
+	KEY_ALG_MAX_NUM
+};
+
 /*
  * This structure contains the relevant information to create the keys
  * required to sign the certificates.
@@ -45,13 +40,28 @@
  */
 typedef struct key_s {
 	int id;			/* Key id */
+	const char *opt;	/* Command line option to specify a key */
+	const char *help_msg;	/* Help message */
 	const char *desc;	/* Key description (debug purposes) */
 	char *fn;		/* Filename to load/store the key */
 	EVP_PKEY *key;		/* Key container */
 } key_t;
 
+/* Exported API */
+int key_init(void);
+key_t *key_get_by_opt(const char *opt);
 int key_new(key_t *key);
-int key_load(key_t *key);
+int key_create(key_t *key, int type);
+int key_load(key_t *key, unsigned int *err_code);
 int key_store(key_t *key);
 
+/* Macro to register the keys used in the CoT */
+#define REGISTER_KEYS(_keys) \
+	key_t *keys = &_keys[0]; \
+	const unsigned int num_keys = sizeof(_keys)/sizeof(_keys[0])
+
+/* Exported variables */
+extern key_t *keys;
+extern const unsigned int num_keys;
+
 #endif /* KEY_H_ */
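The key table follows the same pattern; a single-entry sketch (option text invented for illustration) would be:

    static key_t my_keys[] = {
            [ROT_KEY] = {
                    .id = ROT_KEY,
                    .opt = "rot-key",        /* illustrative --rot-key <file> option */
                    .help_msg = "Root Of Trust key (input/output file)",
                    .desc = "Root Of Trust key"
            },
    };

    REGISTER_KEYS(my_keys);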
diff --git a/tools/cert_create/include/sha.h b/tools/cert_create/include/sha.h
index 466d668..6907fa1 100644
--- a/tools/cert_create/include/sha.h
+++ b/tools/cert_create/include/sha.h
@@ -1,31 +1,7 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef SHA_H_
diff --git a/tools/cert_create/include/tbb_cert.h b/tools/cert_create/include/tbb_cert.h
deleted file mode 100644
index 4e48125..0000000
--- a/tools/cert_create/include/tbb_cert.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TBB_CERT_H_
-#define TBB_CERT_H_
-
-#include "cert.h"
-
-/*
- * Enumerate the certificates that are used to establish the chain of trust
- */
-enum {
-	BL2_CERT,
-	TRUSTED_KEY_CERT,
-	BL30_KEY_CERT,
-	BL30_CERT,
-	BL31_KEY_CERT,
-	BL31_CERT,
-	BL32_KEY_CERT,
-	BL32_CERT,
-	BL33_KEY_CERT,
-	BL33_CERT,
-	NUM_CERTIFICATES,
-};
-
-/*
- * Array containing the certificate instances
- */
-extern cert_t certs[NUM_CERTIFICATES];
-
-#endif /* TBB_CERT_H_ */
diff --git a/tools/cert_create/include/tbb_ext.h b/tools/cert_create/include/tbb_ext.h
deleted file mode 100644
index 155d3cb..0000000
--- a/tools/cert_create/include/tbb_ext.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef TBB_EXT_H_
-#define TBB_EXT_H_
-
-#include "ext.h"
-
-/* Array containing the extensions used in the chain of trust */
-extern ext_t tbb_ext[];
-
-#endif /* TBB_EXT_H_ */
diff --git a/tools/cert_create/include/tbb_key.h b/tools/cert_create/include/tbb_key.h
deleted file mode 100644
index cc927d1..0000000
--- a/tools/cert_create/include/tbb_key.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TBB_KEY_H_
-#define TBB_KEY_H_
-
-#include "key.h"
-
-/*
- * Enumerate the keys that are used to establish the chain of trust
- */
-enum {
-	ROT_KEY,
-	TRUSTED_WORLD_KEY,
-	NON_TRUSTED_WORLD_KEY,
-	BL30_KEY,
-	BL31_KEY,
-	BL32_KEY,
-	BL33_KEY,
-	NUM_KEYS
-};
-
-/*
- * Array containing the key instances
- */
-extern key_t keys[];
-
-#endif /* TBB_KEY_H_ */
diff --git a/tools/cert_create/include/tbbr/tbb_cert.h b/tools/cert_create/include/tbbr/tbb_cert.h
new file mode 100644
index 0000000..716b570
--- /dev/null
+++ b/tools/cert_create/include/tbbr/tbb_cert.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TBB_CERT_H_
+#define TBB_CERT_H_
+
+#include "cert.h"
+
+/*
+ * Enumerate the certificates that are used to establish the chain of trust
+ */
+enum {
+	TRUSTED_BOOT_FW_CERT,
+	TRUSTED_KEY_CERT,
+	SCP_FW_KEY_CERT,
+	SCP_FW_CONTENT_CERT,
+	SOC_FW_KEY_CERT,
+	SOC_FW_CONTENT_CERT,
+	TRUSTED_OS_FW_KEY_CERT,
+	TRUSTED_OS_FW_CONTENT_CERT,
+	NON_TRUSTED_FW_KEY_CERT,
+	NON_TRUSTED_FW_CONTENT_CERT,
+	FWU_CERT
+};
+
+#endif /* TBB_CERT_H_ */
diff --git a/tools/cert_create/include/tbbr/tbb_ext.h b/tools/cert_create/include/tbbr/tbb_ext.h
new file mode 100644
index 0000000..85ad359
--- /dev/null
+++ b/tools/cert_create/include/tbbr/tbb_ext.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TBB_EXT_H_
+#define TBB_EXT_H_
+
+#include "ext.h"
+
+/* TBBR extensions */
+enum {
+	TRUSTED_FW_NVCOUNTER_EXT,
+	NON_TRUSTED_FW_NVCOUNTER_EXT,
+	TRUSTED_BOOT_FW_HASH_EXT,
+	TRUSTED_WORLD_PK_EXT,
+	NON_TRUSTED_WORLD_PK_EXT,
+	SCP_FW_CONTENT_CERT_PK_EXT,
+	SCP_FW_HASH_EXT,
+	SOC_FW_CONTENT_CERT_PK_EXT,
+	SOC_AP_FW_HASH_EXT,
+	TRUSTED_OS_FW_CONTENT_CERT_PK_EXT,
+	TRUSTED_OS_FW_HASH_EXT,
+	TRUSTED_OS_FW_EXTRA1_HASH_EXT,
+	TRUSTED_OS_FW_EXTRA2_HASH_EXT,
+	NON_TRUSTED_FW_CONTENT_CERT_PK_EXT,
+	NON_TRUSTED_WORLD_BOOTLOADER_HASH_EXT,
+	SCP_FWU_CFG_HASH_EXT,
+	AP_FWU_CFG_HASH_EXT,
+	FWU_HASH_EXT
+};
+
+#endif /* TBB_EXT_H_ */
diff --git a/tools/cert_create/include/tbbr/tbb_key.h b/tools/cert_create/include/tbbr/tbb_key.h
new file mode 100644
index 0000000..df634c6
--- /dev/null
+++ b/tools/cert_create/include/tbbr/tbb_key.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TBB_KEY_H_
+#define TBB_KEY_H_
+
+#include "key.h"
+
+/*
+ * Enumerate the keys that are used to establish the chain of trust
+ */
+enum {
+	ROT_KEY,
+	TRUSTED_WORLD_KEY,
+	NON_TRUSTED_WORLD_KEY,
+	SCP_FW_CONTENT_CERT_KEY,
+	SOC_FW_CONTENT_CERT_KEY,
+	TRUSTED_OS_FW_CONTENT_CERT_KEY,
+	NON_TRUSTED_FW_CONTENT_CERT_KEY
+};
+
+#endif /* TBB_KEY_H_ */
diff --git a/tools/cert_create/src/cert.c b/tools/cert_create/src/cert.c
index 9705643..3f0b4d3 100644
--- a/tools/cert_create/src/cert.c
+++ b/tools/cert_create/src/cert.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stdio.h>
@@ -34,17 +10,25 @@
 
 #include <openssl/conf.h>
 #include <openssl/err.h>
+#include <openssl/opensslv.h>
 #include <openssl/pem.h>
 #include <openssl/sha.h>
 #include <openssl/x509v3.h>
 
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
 #include "cert.h"
+#include "cmd_opt.h"
 #include "debug.h"
 #include "key.h"
-#include "platform_oid.h"
 #include "sha.h"
 
 #define SERIAL_RAND_BITS	64
+#define RSA_SALT_LEN		32
 
 int rand_serial(BIGNUM *b, ASN1_INTEGER *ai)
 {
@@ -95,17 +79,19 @@
 	return 1;
 }
 
-
-int cert_new(cert_t *cert, int days, int ca, STACK_OF(X509_EXTENSION) * sk)
+int cert_new(int key_alg, cert_t *cert, int days, int ca, STACK_OF(X509_EXTENSION) * sk)
 {
-	EVP_PKEY *pkey = cert->key->key;
-	EVP_PKEY *ikey = cert->issuer->key->key;
-	X509 *issuer = cert->issuer->x;
-	X509 *x = NULL;
-	X509_EXTENSION *ex = NULL;
-	X509_NAME *name = NULL;
-	ASN1_INTEGER *sno = NULL;
-	int i, num;
+	EVP_PKEY *pkey = keys[cert->key].key;
+	cert_t *issuer_cert = &certs[cert->issuer];
+	EVP_PKEY *ikey = keys[issuer_cert->key].key;
+	X509 *issuer = issuer_cert->x;
+	X509 *x;
+	X509_EXTENSION *ex;
+	X509_NAME *name;
+	ASN1_INTEGER *sno;
+	int i, num, rc = 0;
+	EVP_MD_CTX *mdCtx;
+	EVP_PKEY_CTX *pKeyCtx = NULL;
 
 	/* Create the certificate structure */
 	x = X509_new();
@@ -125,6 +111,39 @@
 		issuer = x;
 	}
 
+	mdCtx = EVP_MD_CTX_create();
+	if (mdCtx == NULL) {
+		ERR_print_errors_fp(stdout);
+		goto END;
+	}
+
+	/* Sign the certificate with the issuer key */
+	if (!EVP_DigestSignInit(mdCtx, &pKeyCtx, EVP_sha256(), NULL, ikey)) {
+		ERR_print_errors_fp(stdout);
+		goto END;
+	}
+
+	/*
+	 * Set additional parameters if algorithm is RSA PSS. This is not
+	 * required for RSA 1.5 or ECDSA.
+	 */
+	if (key_alg == KEY_ALG_RSA) {
+		if (!EVP_PKEY_CTX_set_rsa_padding(pKeyCtx, RSA_PKCS1_PSS_PADDING)) {
+			ERR_print_errors_fp(stdout);
+			goto END;
+		}
+
+		if (!EVP_PKEY_CTX_set_rsa_pss_saltlen(pKeyCtx, RSA_SALT_LEN)) {
+			ERR_print_errors_fp(stdout);
+			goto END;
+		}
+
+		if (!EVP_PKEY_CTX_set_rsa_mgf1_md(pKeyCtx, EVP_sha256())) {
+			ERR_print_errors_fp(stdout);
+			goto END;
+		}
+	}
+
 	/* x509.v3 */
 	X509_set_version(x, 2);
 
@@ -147,7 +166,7 @@
 	/* Issuer name */
 	name = X509_get_issuer_name(x);
 	X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
-			(const unsigned char *)cert->issuer->cn, -1, -1, 0);
+			(const unsigned char *)issuer_cert->cn, -1, -1, 0);
 	X509_set_issuer_name(x, name);
 
 	/* Add various extensions: standard extensions */
@@ -169,12 +188,50 @@
 		}
 	}
 
-	/* Sign the certificate with the issuer key */
-	if (!X509_sign(x, ikey, EVP_sha1())) {
+	if (!X509_sign_ctx(x, mdCtx)) {
 		ERR_print_errors_fp(stdout);
-		return 0;
+		goto END;
 	}
 
+	/* X509 certificate signed successfully */
+	rc = 1;
 	cert->x = x;
-	return 1;
+
+END:
+	EVP_MD_CTX_destroy(mdCtx);
+	return rc;
+}
+
+int cert_init(void)
+{
+	cmd_opt_t cmd_opt;
+	cert_t *cert;
+	unsigned int i;
+
+	for (i = 0; i < num_certs; i++) {
+		cert = &certs[i];
+		cmd_opt.long_opt.name = cert->opt;
+		cmd_opt.long_opt.has_arg = required_argument;
+		cmd_opt.long_opt.flag = NULL;
+		cmd_opt.long_opt.val = CMD_OPT_CERT;
+		cmd_opt.help_msg = cert->help_msg;
+		cmd_opt_add(&cmd_opt);
+	}
+
+	return 0;
+}
+
+cert_t *cert_get_by_opt(const char *opt)
+{
+	cert_t *cert;
+	unsigned int i;
+
+	for (i = 0; i < num_certs; i++) {
+		cert = &certs[i];
+		if (0 == strcmp(cert->opt, opt)) {
+			return cert;
+		}
+	}
+
+	return NULL;
 }
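The call site for the reworked cert_new() sits further down in main.c and is truncated in this diff; in outline, and assuming cert points at one entry of the registered certs[] array, the invocation is expected to look roughly like:

    STACK_OF(X509_EXTENSION) *sk = sk_X509_EXTENSION_new_null();
    /* ... push the X509_EXTENSIONs listed in cert->ext[] ... */

    if (!cert_new(key_alg, cert, VAL_DAYS, 0, sk)) {
            ERROR("Cannot create %s\n", cert->cn);
            exit(1);
    }
    sk_X509_EXTENSION_free(sk);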
diff --git a/tools/cert_create/src/cmd_opt.c b/tools/cert_create/src/cmd_opt.c
new file mode 100644
index 0000000..64180d1
--- /dev/null
+++ b/tools/cert_create/src/cmd_opt.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cmd_opt.h>
+#include <getopt.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include "debug.h"
+
+/* Command line options */
+static struct option long_opt[CMD_OPT_MAX_NUM+1];
+static const char *help_msg[CMD_OPT_MAX_NUM+1];
+static int num_reg_opt;
+
+void cmd_opt_add(const cmd_opt_t *cmd_opt)
+{
+	assert(cmd_opt != NULL);
+
+	if (num_reg_opt >= CMD_OPT_MAX_NUM) {
+		ERROR("Out of memory. Please increase CMD_OPT_MAX_NUM\n");
+		exit(1);
+	}
+
+	long_opt[num_reg_opt].name = cmd_opt->long_opt.name;
+	long_opt[num_reg_opt].has_arg = cmd_opt->long_opt.has_arg;
+	long_opt[num_reg_opt].flag = 0;
+	long_opt[num_reg_opt].val = cmd_opt->long_opt.val;
+
+	help_msg[num_reg_opt] = cmd_opt->help_msg;
+
+	num_reg_opt++;
+}
+
+const struct option *cmd_opt_get_array(void)
+{
+	return long_opt;
+}
+
+const char *cmd_opt_get_name(int idx)
+{
+	if (idx >= num_reg_opt) {
+		return NULL;
+	}
+
+	return long_opt[idx].name;
+}
+
+const char *cmd_opt_get_help_msg(int idx)
+{
+	if (idx >= num_reg_opt) {
+		return NULL;
+	}
+
+	return help_msg[idx];
+}
diff --git a/tools/cert_create/src/ext.c b/tools/cert_create/src/ext.c
index 31f84a8..055ddbf 100644
--- a/tools/cert_create/src/ext.c
+++ b/tools/cert_create/src/ext.c
@@ -1,43 +1,37 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <stddef.h>
 #include <stdio.h>
 #include <string.h>
+#include <openssl/asn1.h>
+#include <openssl/asn1t.h>
 #include <openssl/err.h>
 #include <openssl/x509v3.h>
+
+#include "cmd_opt.h"
 #include "ext.h"
 
 DECLARE_ASN1_ITEM(ASN1_INTEGER)
+DECLARE_ASN1_ITEM(X509_ALGOR)
 DECLARE_ASN1_ITEM(ASN1_OCTET_STRING)
 
+typedef struct {
+	X509_ALGOR *hashAlgorithm;
+	ASN1_OCTET_STRING *dataHash;
+} HASH;
+
+ASN1_SEQUENCE(HASH) = {
+	ASN1_SIMPLE(HASH, hashAlgorithm, X509_ALGOR),
+	ASN1_SIMPLE(HASH, dataHash, ASN1_OCTET_STRING),
+} ASN1_SEQUENCE_END(HASH)
+
+DECLARE_ASN1_FUNCTIONS(HASH)
+IMPLEMENT_ASN1_FUNCTIONS(HASH)
+
 /*
  * This function adds the TBB extensions to the internal extension list
  * maintained by OpenSSL so they can be used later.
@@ -49,20 +43,36 @@
  *
  * Return: 0 = success, Otherwise: error
  */
-int ext_init(ext_t *tbb_ext)
+int ext_init(void)
 {
+	cmd_opt_t cmd_opt;
 	ext_t *ext;
 	X509V3_EXT_METHOD *m;
-	int i = 0, nid, ret;
+	int nid, ret;
+	unsigned int i;
 
-	while ((ext = &tbb_ext[i++]) && ext->oid) {
+	for (i = 0; i < num_extensions; i++) {
+		ext = &extensions[i];
+		/* Register command line option */
+		if (ext->opt) {
+			cmd_opt.long_opt.name = ext->opt;
+			cmd_opt.long_opt.has_arg = required_argument;
+			cmd_opt.long_opt.flag = NULL;
+			cmd_opt.long_opt.val = CMD_OPT_EXT;
+			cmd_opt.help_msg = ext->help_msg;
+			cmd_opt_add(&cmd_opt);
+		}
+		/* Register the extension OID in OpenSSL */
+		if (ext->oid == NULL) {
+			continue;
+		}
 		nid = OBJ_create(ext->oid, ext->sn, ext->ln);
 		if (ext->alias) {
 			X509V3_EXT_add_alias(nid, ext->alias);
 		} else {
 			m = &ext->method;
 			memset(m, 0x0, sizeof(X509V3_EXT_METHOD));
-			switch (ext->type) {
+			switch (ext->asn1_type) {
 			case V_ASN1_INTEGER:
 				m->it = ASN1_ITEM_ref(ASN1_INTEGER);
 				m->i2s = (X509V3_EXT_I2S)i2s_ASN1_INTEGER;
@@ -123,37 +133,85 @@
 }
 
 /*
- * Creates a x509v3 extension containing a hash encapsulated in an ASN1 Octet
- * String
+ * Creates a x509v3 extension containing a hash
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm  AlgorithmIdentifier,
+ *     digest           OCTET STRING
+ * }
+ *
+ * AlgorithmIdentifier ::=  SEQUENCE  {
+ *     algorithm        OBJECT IDENTIFIER,
+ *     parameters       ANY DEFINED BY algorithm OPTIONAL
+ * }
  *
  * Parameters:
- *   pex: OpenSSL extension pointer (output parameter)
  *   nid: extension identifier
  *   crit: extension critical (EXT_NON_CRIT, EXT_CRIT)
+ *   md: hash algorithm
  *   buf: pointer to the buffer that contains the hash
  *   len: size of the hash in bytes
  *
  * Return: Extension address, NULL if error
  */
-X509_EXTENSION *ext_new_hash(int nid, int crit, unsigned char *buf, size_t len)
+X509_EXTENSION *ext_new_hash(int nid, int crit, const EVP_MD *md,
+		unsigned char *buf, size_t len)
 {
-	X509_EXTENSION *ex = NULL;
-	ASN1_OCTET_STRING *hash = NULL;
+	X509_EXTENSION *ex;
+	ASN1_OCTET_STRING *octet;
+	HASH *hash;
+	ASN1_OBJECT *algorithm;
+	X509_ALGOR *x509_algor;
 	unsigned char *p = NULL;
-	int sz = -1;
+	int sz;
 
-	/* Encode Hash */
-	hash = ASN1_OCTET_STRING_new();
-	ASN1_OCTET_STRING_set(hash, buf, len);
-	sz = i2d_ASN1_OCTET_STRING(hash, NULL);
-	i2d_ASN1_OCTET_STRING(hash, &p);
+	/* OBJECT_IDENTIFIER with hash algorithm */
+	algorithm = OBJ_nid2obj(EVP_MD_type(md));
+	if (algorithm == NULL) {
+		return NULL;
+	}
+
+	/* Create X509_ALGOR */
+	x509_algor = X509_ALGOR_new();
+	if (x509_algor == NULL) {
+		return NULL;
+	}
+	x509_algor->algorithm = algorithm;
+	x509_algor->parameter = ASN1_TYPE_new();
+	ASN1_TYPE_set(x509_algor->parameter, V_ASN1_NULL, NULL);
+
+	/* OCTET_STRING with the actual hash */
+	octet = ASN1_OCTET_STRING_new();
+	if (octet == NULL) {
+		X509_ALGOR_free(x509_algor);
+		return NULL;
+	}
+	ASN1_OCTET_STRING_set(octet, buf, len);
+
+	/* HASH structure containing algorithm + hash */
+	hash = HASH_new();
+	if (hash == NULL) {
+		ASN1_OCTET_STRING_free(octet);
+		X509_ALGOR_free(x509_algor);
+		return NULL;
+	}
+	hash->hashAlgorithm = x509_algor;
+	hash->dataHash = octet;
+
+	/* DER encoded HASH */
+	sz = i2d_HASH(hash, &p);
+	if ((sz <= 0) || (p == NULL)) {
+		HASH_free(hash);
+		X509_ALGOR_free(x509_algor);
+		return NULL;
+	}
 
 	/* Create the extension */
 	ex = ext_new(nid, crit, p, sz);
 
 	/* Clean up */
 	OPENSSL_free(p);
-	ASN1_OCTET_STRING_free(hash);
+	HASH_free(hash);
 
 	return ex;
 }
@@ -172,16 +230,15 @@
  */
 X509_EXTENSION *ext_new_nvcounter(int nid, int crit, int value)
 {
-	X509_EXTENSION *ex = NULL;
-	ASN1_INTEGER *counter = NULL;
+	X509_EXTENSION *ex;
+	ASN1_INTEGER *counter;
 	unsigned char *p = NULL;
-	int sz = -1;
+	int sz;
 
 	/* Encode counter */
 	counter = ASN1_INTEGER_new();
 	ASN1_INTEGER_set(counter, value);
-	sz = i2d_ASN1_INTEGER(counter, NULL);
-	i2d_ASN1_INTEGER(counter, &p);
+	sz = i2d_ASN1_INTEGER(counter, &p);
 
 	/* Create the extension */
 	ex = ext_new(nid, crit, p, sz);
@@ -210,9 +267,9 @@
  */
 X509_EXTENSION *ext_new_key(int nid, int crit, EVP_PKEY *k)
 {
-	X509_EXTENSION *ex = NULL;
-	unsigned char *p = NULL;
-	int sz = -1;
+	X509_EXTENSION *ex;
+	unsigned char *p;
+	int sz;
 
 	/* Encode key */
 	BIO *mem = BIO_new(BIO_s_mem());
@@ -231,3 +288,20 @@
 
 	return ex;
 }
+
+ext_t *ext_get_by_opt(const char *opt)
+{
+	ext_t *ext;
+	unsigned int i;
+
+	/* Sequential search. This is not a performance concern since the number
+	 * of extensions is bounded and the code runs on a host machine */
+	for (i = 0; i < num_extensions; i++) {
+		ext = &extensions[i];
+		if (ext->opt && !strcmp(ext->opt, opt)) {
+			return ext;
+		}
+	}
+
+	return NULL;
+}
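A short usage sketch of the new ext_new_hash() contract (ext_nid is assumed to have been resolved from the extension OID, md to already hold the SHA-256 of the image, and sk to be the certificate's extension stack): the digest algorithm is now passed explicitly so that the DER-encoded structure records it next to the hash bytes.

    unsigned char md[SHA256_DIGEST_LENGTH];
    X509_EXTENSION *ex;

    /* md filled elsewhere with the SHA-256 of the image */
    ex = ext_new_hash(ext_nid, EXT_CRIT, EVP_sha256(), md, sizeof(md));
    if (ex == NULL) {
            ERROR("Cannot create hash extension\n");
            exit(1);
    }
    sk_X509_EXTENSION_push(sk, ex);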
diff --git a/tools/cert_create/src/key.c b/tools/cert_create/src/key.c
index b5737d9..871f9ee 100644
--- a/tools/cert_create/src/key.c
+++ b/tools/cert_create/src/key.c
@@ -1,31 +1,7 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <getopt.h>
@@ -37,81 +13,157 @@
 #include <openssl/evp.h>
 #include <openssl/pem.h>
 
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
 #include "cert.h"
+#include "cmd_opt.h"
 #include "debug.h"
 #include "key.h"
-#include "platform_oid.h"
 #include "sha.h"
 
 #define MAX_FILENAME_LEN		1024
 
 /*
- * Create a new key
+ * Create a new key container
  */
 int key_new(key_t *key)
 {
-	RSA *rsa = NULL;
-	EVP_PKEY *k = NULL;
-
 	/* Create key pair container */
-	k = EVP_PKEY_new();
-	if (k == NULL) {
+	key->key = EVP_PKEY_new();
+	if (key->key == NULL) {
 		return 0;
 	}
 
-	/* Generate a new RSA key */
-	rsa = RSA_generate_key(RSA_KEY_BITS, RSA_F4, NULL, NULL);
-	if (EVP_PKEY_assign_RSA(k, rsa)) {
-		key->key = k;
-		return 1;
-	} else {
-		printf("Cannot assign RSA key\n");
+	return 1;
+}
+
+static int key_create_rsa(key_t *key)
+{
+	BIGNUM *e;
+	RSA *rsa = NULL;
+
+	e = BN_new();
+	if (e == NULL) {
+		printf("Cannot create RSA exponent\n");
+		goto err;
 	}
 
-	if (k)
-		EVP_PKEY_free(k);
+	if (!BN_set_word(e, RSA_F4)) {
+		printf("Cannot assign RSA exponent\n");
+		goto err;
+	}
+
+	rsa = RSA_new();
+	if (rsa == NULL) {
+		printf("Cannot create RSA key\n");
+		goto err;
+	}
+
+	if (!RSA_generate_key_ex(rsa, RSA_KEY_BITS, e, NULL)) {
+		printf("Cannot generate RSA key\n");
+		goto err;
+	}
+
+	if (!EVP_PKEY_assign_RSA(key->key, rsa)) {
+		printf("Cannot assign RSA key\n");
+		goto err;
+	}
+
+	return 1;
+err:
+	RSA_free(rsa);
+	BN_free(e);
 	return 0;
 }
 
-int key_load(key_t *key)
+#ifndef OPENSSL_NO_EC
+static int key_create_ecdsa(key_t *key)
 {
-	FILE *fp = NULL;
-	EVP_PKEY *k = NULL;
+	EC_KEY *ec;
 
-	/* Create key pair container */
-	k = EVP_PKEY_new();
-	if (k == NULL) {
+	ec = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+	if (ec == NULL) {
+		printf("Cannot create EC key\n");
+		goto err;
+	}
+	if (!EC_KEY_generate_key(ec)) {
+		printf("Cannot generate EC key\n");
+		goto err;
+	}
+	EC_KEY_set_flags(ec, EC_PKEY_NO_PARAMETERS);
+	EC_KEY_set_asn1_flag(ec, OPENSSL_EC_NAMED_CURVE);
+	if (!EVP_PKEY_assign_EC_KEY(key->key, ec)) {
+		printf("Cannot assign EC key\n");
+		goto err;
+	}
+
+	return 1;
+err:
+	EC_KEY_free(ec);
+	return 0;
+}
+#endif /* OPENSSL_NO_EC */
+
+typedef int (*key_create_fn_t)(key_t *key);
+static const key_create_fn_t key_create_fn[KEY_ALG_MAX_NUM] = {
+	key_create_rsa, 	/* KEY_ALG_RSA */
+	key_create_rsa, 	/* KEY_ALG_RSA_1_5 */
+#ifndef OPENSSL_NO_EC
+	key_create_ecdsa, 	/* KEY_ALG_ECDSA */
+#endif /* OPENSSL_NO_EC */
+};
+
+int key_create(key_t *key, int type)
+{
+	if (type >= KEY_ALG_MAX_NUM) {
+		printf("Invalid key type\n");
 		return 0;
 	}
 
+	if (key_create_fn[type]) {
+		return key_create_fn[type](key);
+	}
+
+	return 0;
+}
+
+int key_load(key_t *key, unsigned int *err_code)
+{
+	FILE *fp;
+	EVP_PKEY *k;
+
 	if (key->fn) {
 		/* Load key from file */
 		fp = fopen(key->fn, "r");
 		if (fp) {
-			k = PEM_read_PrivateKey(fp, &k, NULL, NULL);
+			k = PEM_read_PrivateKey(fp, &key->key, NULL, NULL);
 			fclose(fp);
 			if (k) {
-				key->key = k;
+				*err_code = KEY_ERR_NONE;
 				return 1;
 			} else {
-				ERROR("Cannot read key from %s\n", key->fn);
+				ERROR("Cannot load key from %s\n", key->fn);
+				*err_code = KEY_ERR_LOAD;
 			}
 		} else {
-			ERROR("Cannot open file %s\n", key->fn);
+			WARN("Cannot open file %s\n", key->fn);
+			*err_code = KEY_ERR_OPEN;
 		}
 	} else {
-		ERROR("Key filename not specified\n");
+		WARN("Key filename not specified\n");
+		*err_code = KEY_ERR_FILENAME;
 	}
 
-	if (k)
-		EVP_PKEY_free(k);
-
 	return 0;
 }
 
 int key_store(key_t *key)
 {
-	FILE *fp = NULL;
+	FILE *fp;
 
 	if (key->fn) {
 		fp = fopen(key->fn, "w");
@@ -129,3 +181,41 @@
 
 	return 0;
 }
+
+int key_init(void)
+{
+	cmd_opt_t cmd_opt;
+	key_t *key;
+	unsigned int i;
+
+	for (i = 0; i < num_keys; i++) {
+		key = &keys[i];
+		if (key->opt != NULL) {
+			cmd_opt.long_opt.name = key->opt;
+			cmd_opt.long_opt.has_arg = required_argument;
+			cmd_opt.long_opt.flag = NULL;
+			cmd_opt.long_opt.val = CMD_OPT_KEY;
+			cmd_opt.help_msg = key->help_msg;
+			cmd_opt_add(&cmd_opt);
+		}
+	}
+
+	return 0;
+}
+
+key_t *key_get_by_opt(const char *opt)
+{
+	key_t *key;
+	unsigned int i;
+
+	/* Sequential search. This is not a performance concern since the number
+	 * of keys is bounded and the code runs on a host machine */
+	for (i = 0; i < num_keys; i++) {
+		key = &keys[i];
+		if (0 == strcmp(key->opt, opt)) {
+			return key;
+		}
+	}
+
+	return NULL;
+}
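ECDSA generation is new in this file; going through the dispatch table is just a matter of passing the right algorithm constant, assuming OpenSSL was built with EC support. A minimal, hypothetical sequence (filename invented) could be:

    key_t k = { .desc = "example key" };

    k.fn = strdup("example_key.pem");        /* illustrative path */
    if (!key_new(&k) || !key_create(&k, KEY_ALG_ECDSA)) {
            ERROR("Cannot generate ECDSA key\n");
            exit(1);
    }
    if (!key_store(&k)) {
            ERROR("Cannot save key to %s\n", k.fn);
            exit(1);
    }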
diff --git a/tools/cert_create/src/main.c b/tools/cert_create/src/main.c
index 6df367a..741242f 100644
--- a/tools/cert_create/src/main.c
+++ b/tools/cert_create/src/main.c
@@ -1,33 +1,11 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
+#include <ctype.h>
 #include <getopt.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -40,15 +18,21 @@
 #include <openssl/sha.h>
 #include <openssl/x509v3.h>
 
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
 #include "cert.h"
+#include "cmd_opt.h"
 #include "debug.h"
 #include "ext.h"
 #include "key.h"
-#include "platform_oid.h"
 #include "sha.h"
-#include "tbb_ext.h"
-#include "tbb_cert.h"
-#include "tbb_key.h"
+#include "tbbr/tbb_cert.h"
+#include "tbbr/tbb_ext.h"
+#include "tbbr/tbb_key.h"
 
 /*
  * Helper macros to simplify the code. This macro assigns the return value of
@@ -79,49 +63,14 @@
 #define MAX_FILENAME_LEN		1024
 #define VAL_DAYS			7300
 #define ID_TO_BIT_MASK(id)		(1 << id)
-#define NVCOUNTER_VALUE			0
-
-/* Files */
-enum {
-	/* Image file names (inputs) */
-	BL2_ID = 0,
-	BL30_ID,
-	BL31_ID,
-	BL32_ID,
-	BL33_ID,
-	/* Certificate file names (outputs) */
-	BL2_CERT_ID,
-	TRUSTED_KEY_CERT_ID,
-	BL30_KEY_CERT_ID,
-	BL30_CERT_ID,
-	BL31_KEY_CERT_ID,
-	BL31_CERT_ID,
-	BL32_KEY_CERT_ID,
-	BL32_CERT_ID,
-	BL33_KEY_CERT_ID,
-	BL33_CERT_ID,
-	/* Key file names (input/output) */
-	ROT_KEY_ID,
-	TRUSTED_WORLD_KEY_ID,
-	NON_TRUSTED_WORLD_KEY_ID,
-	BL30_KEY_ID,
-	BL31_KEY_ID,
-	BL32_KEY_ID,
-	BL33_KEY_ID,
-	NUM_OPTS
-};
+#define NUM_ELEM(x)			((sizeof(x)) / (sizeof(x[0])))
+#define HELP_OPT_MAX_LEN		128
 
 /* Global options */
+static int key_alg;
 static int new_keys;
 static int save_keys;
 static int print_cert;
-static int bl30_present;
-static int bl32_present;
-
-/* We are not checking nvcounters in TF. Include them in the certificates but
- * the value will be set to 0 */
-static int tf_nvcounter;
-static int non_tf_nvcounter;
 
 /* Info messages created in the Makefile */
 extern const char build_msg[];
@@ -138,44 +87,24 @@
 	return dup;
 }
 
-/* Command line options */
-static const struct option long_opt[] = {
-	/* Binary images */
-	{"bl2", required_argument, 0, BL2_ID},
-	{"bl30", required_argument, 0, BL30_ID},
-	{"bl31", required_argument, 0, BL31_ID},
-	{"bl32", required_argument, 0, BL32_ID},
-	{"bl33", required_argument, 0, BL33_ID},
-	/* Certificate files */
-	{"bl2-cert", required_argument, 0, BL2_CERT_ID},
-	{"trusted-key-cert", required_argument, 0, TRUSTED_KEY_CERT_ID},
-	{"bl30-key-cert", required_argument, 0, BL30_KEY_CERT_ID},
-	{"bl30-cert", required_argument, 0, BL30_CERT_ID},
-	{"bl31-key-cert", required_argument, 0, BL31_KEY_CERT_ID},
-	{"bl31-cert", required_argument, 0, BL31_CERT_ID},
-	{"bl32-key-cert", required_argument, 0, BL32_KEY_CERT_ID},
-	{"bl32-cert", required_argument, 0, BL32_CERT_ID},
-	{"bl33-key-cert", required_argument, 0, BL33_KEY_CERT_ID},
-	{"bl33-cert", required_argument, 0, BL33_CERT_ID},
-	/* Private key files */
-	{"rot-key", required_argument, 0, ROT_KEY_ID},
-	{"trusted-world-key", required_argument, 0, TRUSTED_WORLD_KEY_ID},
-	{"non-trusted-world-key", required_argument, 0, NON_TRUSTED_WORLD_KEY_ID},
-	{"bl30-key", required_argument, 0, BL30_KEY_ID},
-	{"bl31-key", required_argument, 0, BL31_KEY_ID},
-	{"bl32-key", required_argument, 0, BL32_KEY_ID},
-	{"bl33-key", required_argument, 0, BL33_KEY_ID},
-	/* Common options */
-	{"help", no_argument, 0, 'h'},
-	{"save-keys", no_argument, 0, 'k'},
-	{"new-chain", no_argument, 0, 'n'},
-	{"print-cert", no_argument, 0, 'p'},
-	{0, 0, 0, 0}
+static const char *key_algs_str[] = {
+	[KEY_ALG_RSA] = "rsa",
+	[KEY_ALG_RSA_1_5] = "rsa_1_5",
+#ifndef OPENSSL_NO_EC
+	[KEY_ALG_ECDSA] = "ecdsa"
+#endif /* OPENSSL_NO_EC */
 };
 
-static void print_help(const char *cmd)
+static void print_help(const char *cmd, const struct option *long_opt)
 {
-	int i = 0;
+	int rem, i = 0;
+	const struct option *opt;
+	char line[HELP_OPT_MAX_LEN];
+	char *p;
+
+	assert(cmd != NULL);
+	assert(long_opt != NULL);
+
 	printf("\n\n");
 	printf("The certificate generation tool loads the binary images and\n"
 	       "optionally the RSA keys, and outputs the key and content\n"
@@ -183,107 +112,186 @@
 	       "If keys are provided, they must be in PEM format.\n"
 	       "Certificates are generated in DER format.\n");
 	printf("\n");
-	printf("Usage:\n\n");
-	printf("    %s [-hknp] \\\n", cmd);
-	for (i = 0; i < NUM_OPTS; i++) {
-		printf("        --%s <file>  \\\n", long_opt[i].name);
+	printf("Usage:\n");
+	printf("\t%s [OPTIONS]\n\n", cmd);
+
+	printf("Available options:\n");
+	opt = long_opt;
+	while (opt->name) {
+		p = line;
+		rem = HELP_OPT_MAX_LEN;
+		if (isalpha(opt->val)) {
+			/* Short format */
+			sprintf(p, "-%c,", (char)opt->val);
+			p += 3;
+			rem -= 3;
+		}
+		snprintf(p, rem, "--%s %s", opt->name,
+			 (opt->has_arg == required_argument) ? "<arg>" : "");
+		printf("\t%-32s %s\n", line, cmd_opt_get_help_msg(i));
+		opt++;
+		i++;
 	}
 	printf("\n");
-	printf("-h    Print help and exit\n");
-	printf("-k    Save key pairs into files. Filenames must be provided\n");
-	printf("-n    Generate new key pairs if no key files are provided\n");
-	printf("-p    Print the certificates in the standard output\n");
-	printf("\n");
 
 	exit(0);
 }
 
+static int get_key_alg(const char *key_alg_str)
+{
+	int i;
+
+	for (i = 0 ; i < NUM_ELEM(key_algs_str) ; i++) {
+		if (0 == strcmp(key_alg_str, key_algs_str[i])) {
+			return i;
+		}
+	}
+
+	return -1;
+}
+
 static void check_cmd_params(void)
 {
-	/* BL2, BL31 and BL33 are mandatory */
-	if (certs[BL2_CERT].bin == NULL) {
-		ERROR("BL2 image not specified\n");
+	cert_t *cert;
+	ext_t *ext;
+	key_t *key;
+	int i, j;
+
+	/* Only save new keys */
+	if (save_keys && !new_keys) {
+		ERROR("Only new keys can be saved to disk\n");
 		exit(1);
 	}
 
-	if (certs[BL31_CERT].bin == NULL) {
-		ERROR("BL31 image not specified\n");
-		exit(1);
-	}
-
-	if (certs[BL33_CERT].bin == NULL) {
-		ERROR("BL33 image not specified\n");
-		exit(1);
-	}
-
-	/* BL30 and BL32 are optional */
-	if (certs[BL30_CERT].bin != NULL) {
-		bl30_present = 1;
-	}
-
-	if (certs[BL32_CERT].bin != NULL) {
-		bl32_present = 1;
-	}
-
-	/* TODO: Certificate filenames */
-
-	/* Filenames to store keys must be specified */
-	if (save_keys || !new_keys) {
-		if (keys[ROT_KEY].fn == NULL) {
-			ERROR("ROT key not specified\n");
-			exit(1);
+	/* Check that all required options have been specified in the
+	 * command line */
+	for (i = 0; i < num_certs; i++) {
+		cert = &certs[i];
+		if (cert->fn == NULL) {
+			/* Certificate not requested. Skip to the next one */
+			continue;
 		}
 
-		if (keys[TRUSTED_WORLD_KEY].fn == NULL) {
-			ERROR("Trusted World key not specified\n");
-			exit(1);
-		}
-
-		if (keys[NON_TRUSTED_WORLD_KEY].fn == NULL) {
-			ERROR("Non-trusted World key not specified\n");
-			exit(1);
-		}
-
-		if (keys[BL31_KEY].fn == NULL) {
-			ERROR("BL31 key not specified\n");
-			exit(1);
-		}
-
-		if (keys[BL33_KEY].fn == NULL) {
-			ERROR("BL33 key not specified\n");
-			exit(1);
-		}
-
-		if (bl30_present && (keys[BL30_KEY].fn == NULL)) {
-			ERROR("BL30 key not specified\n");
-			exit(1);
-		}
-
-		if (bl32_present && (keys[BL32_KEY].fn == NULL)) {
-			ERROR("BL32 key not specified\n");
-			exit(1);
+		/* Check that all parameters required to create this certificate
+		 * have been specified in the command line */
+		for (j = 0; j < cert->num_ext; j++) {
+			ext = &extensions[cert->ext[j]];
+			switch (ext->type) {
+			case EXT_TYPE_NVCOUNTER:
+				/* Counter value must be specified */
+				if ((!ext->optional) && (ext->arg == NULL)) {
+					ERROR("Value for '%s' not specified\n",
+					      ext->ln);
+					exit(1);
+				}
+				break;
+			case EXT_TYPE_PKEY:
+				/* Key filename must be specified */
+				key = &keys[ext->attr.key];
+				if (!new_keys && key->fn == NULL) {
+					ERROR("Key '%s' required by '%s' not "
+					      "specified\n", key->desc,
+					      cert->cn);
+					exit(1);
+				}
+				break;
+			case EXT_TYPE_HASH:
+				/*
+				 * Binary image must be specified
+				 * unless it is explicitly made optional.
+				 */
+				if ((!ext->optional) && (ext->arg == NULL)) {
+					ERROR("Image for '%s' not specified\n",
+					      ext->ln);
+					exit(1);
+				}
+				break;
+			default:
+				ERROR("Unknown extension type '%d' in '%s'\n",
+				      ext->type, ext->ln);
+				exit(1);
+				break;
+			}
 		}
 	}
 }
 
+/* Common command line options */
+static const cmd_opt_t common_cmd_opt[] = {
+	{
+		{ "help", no_argument, NULL, 'h' },
+		"Print this message and exit"
+	},
+	{
+		{ "key-alg", required_argument, NULL, 'a' },
+		"Key algorithm: 'rsa' (default) - RSA-PSS scheme as per \
+PKCS#1 v2.1, 'rsa_1_5' - RSA PKCS#1 v1.5, 'ecdsa'"
+	},
+	{
+		{ "save-keys", no_argument, NULL, 'k' },
+		"Save key pairs into files. Filenames must be provided"
+	},
+	{
+		{ "new-keys", no_argument, NULL, 'n' },
+		"Generate new key pairs if no key files are provided"
+	},
+	{
+		{ "print-cert", no_argument, NULL, 'p' },
+		"Print the certificates in the standard output"
+	}
+};
+
 int main(int argc, char *argv[])
 {
-	STACK_OF(X509_EXTENSION) * sk = NULL;
-	X509_EXTENSION *hash_ext = NULL;
-	X509_EXTENSION *nvctr_ext = NULL;
-	X509_EXTENSION *trusted_key_ext = NULL;
-	X509_EXTENSION *non_trusted_key_ext = NULL;
-	FILE *file = NULL;
-	int i, tz_nvctr_nid, ntz_nvctr_nid, hash_nid, pk_nid;
+	STACK_OF(X509_EXTENSION) * sk;
+	X509_EXTENSION *cert_ext = NULL;
+	ext_t *ext;
+	key_t *key;
+	cert_t *cert;
+	FILE *file;
+	int i, j, ext_nid, nvctr;
 	int c, opt_idx = 0;
+	const struct option *cmd_opt;
+	const char *cur_opt;
+	unsigned int err_code;
 	unsigned char md[SHA256_DIGEST_LENGTH];
+	const EVP_MD *md_info;
 
 	NOTICE("CoT Generation Tool: %s\n", build_msg);
 	NOTICE("Target platform: %s\n", platform_msg);
 
+	/* Set default options */
+	key_alg = KEY_ALG_RSA;
+
+	/* Add common command line options */
+	for (i = 0; i < NUM_ELEM(common_cmd_opt); i++) {
+		cmd_opt_add(&common_cmd_opt[i]);
+	}
+
+	/* Initialize the certificates */
+	if (cert_init() != 0) {
+		ERROR("Cannot initialize certificates\n");
+		exit(1);
+	}
+
+	/* Initialize the keys */
+	if (key_init() != 0) {
+		ERROR("Cannot initialize keys\n");
+		exit(1);
+	}
+
+	/* Initialize the new types and register OIDs for the extensions */
+	if (ext_init() != 0) {
+		ERROR("Cannot initialize TBB extensions\n");
+		exit(1);
+	}
+
+	/* Get the command line options populated during the initialization */
+	cmd_opt = cmd_opt_get_array();
+
 	while (1) {
 		/* getopt_long stores the option index here. */
-		c = getopt_long(argc, argv, "hknp", long_opt, &opt_idx);
+		c = getopt_long(argc, argv, "a:hknp", cmd_opt, &opt_idx);
 
 		/* Detect the end of the options. */
 		if (c == -1) {
@@ -291,8 +299,15 @@
 		}
 
 		switch (c) {
+		case 'a':
+			key_alg = get_key_alg(optarg);
+			if (key_alg < 0) {
+				ERROR("Invalid key algorithm '%s'\n", optarg);
+				exit(1);
+			}
+			break;
 		case 'h':
-			print_help(argv[0]);
+			print_help(argv[0], cmd_opt);
 			break;
 		case 'k':
 			save_keys = 1;
@@ -303,378 +318,153 @@
 		case 'p':
 			print_cert = 1;
 			break;
-		case BL2_ID:
-			certs[BL2_CERT].bin = strdup(optarg);
+		case CMD_OPT_EXT:
+			cur_opt = cmd_opt_get_name(opt_idx);
+			ext = ext_get_by_opt(cur_opt);
+			ext->arg = strdup(optarg);
 			break;
-		case BL30_ID:
-			certs[BL30_CERT].bin = strdup(optarg);
+		case CMD_OPT_KEY:
+			cur_opt = cmd_opt_get_name(opt_idx);
+			key = key_get_by_opt(cur_opt);
+			key->fn = strdup(optarg);
 			break;
-		case BL31_ID:
-			certs[BL31_CERT].bin = strdup(optarg);
-			break;
-		case BL32_ID:
-			certs[BL32_CERT].bin = strdup(optarg);
-			break;
-		case BL33_ID:
-			certs[BL33_CERT].bin = strdup(optarg);
-			break;
-		case BL2_CERT_ID:
-			certs[BL2_CERT].fn = strdup(optarg);
-			break;
-		case TRUSTED_KEY_CERT_ID:
-			certs[TRUSTED_KEY_CERT].fn = strdup(optarg);
-			break;
-		case BL30_KEY_CERT_ID:
-			certs[BL30_KEY_CERT].fn = strdup(optarg);
-			break;
-		case BL30_CERT_ID:
-			certs[BL30_CERT].fn = strdup(optarg);
-			break;
-		case BL31_KEY_CERT_ID:
-			certs[BL31_KEY_CERT].fn = strdup(optarg);
-			break;
-		case BL31_CERT_ID:
-			certs[BL31_CERT].fn = strdup(optarg);
-			break;
-		case BL32_KEY_CERT_ID:
-			certs[BL32_KEY_CERT].fn = strdup(optarg);
-			break;
-		case BL32_CERT_ID:
-			certs[BL32_CERT].fn = strdup(optarg);
-			break;
-		case BL33_KEY_CERT_ID:
-			certs[BL33_KEY_CERT].fn = strdup(optarg);
-			break;
-		case BL33_CERT_ID:
-			certs[BL33_CERT].fn = strdup(optarg);
-			break;
-		case ROT_KEY_ID:
-			keys[ROT_KEY].fn = strdup(optarg);
-			break;
-		case TRUSTED_WORLD_KEY_ID:
-			keys[TRUSTED_WORLD_KEY].fn = strdup(optarg);
-			break;
-		case NON_TRUSTED_WORLD_KEY_ID:
-			keys[NON_TRUSTED_WORLD_KEY].fn = strdup(optarg);
-			break;
-		case BL30_KEY_ID:
-			keys[BL30_KEY].fn = strdup(optarg);
-			break;
-		case BL31_KEY_ID:
-			keys[BL31_KEY].fn = strdup(optarg);
-			break;
-		case BL32_KEY_ID:
-			keys[BL32_KEY].fn = strdup(optarg);
-			break;
-		case BL33_KEY_ID:
-			keys[BL33_KEY].fn = strdup(optarg);
+		case CMD_OPT_CERT:
+			cur_opt = cmd_opt_get_name(opt_idx);
+			cert = cert_get_by_opt(cur_opt);
+			cert->fn = strdup(optarg);
 			break;
 		case '?':
 		default:
-			printf("%s\n", optarg);
+			print_help(argv[0], cmd_opt);
 			exit(1);
 		}
 	}
 
-	/* Set the value of the NVCounters */
-	tf_nvcounter = NVCOUNTER_VALUE;
-	non_tf_nvcounter = NVCOUNTER_VALUE;
-
 	/* Check command line arguments */
 	check_cmd_params();
 
-	/* Register the new types and OIDs for the extensions */
-	if (ext_init(tbb_ext) != 0) {
-		ERROR("Cannot initialize TBB extensions\n");
-		exit(1);
-	}
-
-	/* Get non-volatile counters NIDs */
-	CHECK_OID(tz_nvctr_nid, TZ_FW_NVCOUNTER_OID);
-	CHECK_OID(ntz_nvctr_nid, NTZ_FW_NVCOUNTER_OID);
+	/* Indicate SHA256 as image hash algorithm in the certificate
+	 * extension */
+	md_info = EVP_sha256();
 
 	/* Load private keys from files (or generate new ones) */
-	if (new_keys) {
-		for (i = 0 ; i < NUM_KEYS ; i++) {
-			if (!key_new(&keys[i])) {
-				ERROR("Error creating %s\n", keys[i].desc);
+	for (i = 0 ; i < num_keys ; i++) {
+		if (!key_new(&keys[i])) {
+			ERROR("Failed to allocate key container\n");
+			exit(1);
+		}
+
+		/* First try to load the key from disk */
+		if (key_load(&keys[i], &err_code)) {
+			/* Key loaded successfully */
+			continue;
+		}
+
+		/* Key not loaded. Check the error code */
+		if (err_code == KEY_ERR_LOAD) {
+			/* File exists, but it does not contain a valid private
+			 * key. Abort. */
+			ERROR("Error loading '%s'\n", keys[i].fn);
+			exit(1);
+		}
+
+		/* File does not exist, could not be opened or no filename was
+		 * given */
+		if (new_keys) {
+			/* Try to create a new key */
+			NOTICE("Creating new key for '%s'\n", keys[i].desc);
+			if (!key_create(&keys[i], key_alg)) {
+				ERROR("Error creating key '%s'\n", keys[i].desc);
 				exit(1);
 			}
+		} else {
+			if (err_code == KEY_ERR_OPEN) {
+				ERROR("Error opening '%s'\n", keys[i].fn);
+			} else {
+				ERROR("Key '%s' not specified\n", keys[i].desc);
+			}
+			exit(1);
 		}
-	} else {
-		for (i = 0 ; i < NUM_KEYS ; i++) {
-			if (!key_load(&keys[i])) {
-				ERROR("Error loading %s\n", keys[i].desc);
+	}
+
+	/* Create the certificates */
+	for (i = 0 ; i < num_certs ; i++) {
+
+		cert = &certs[i];
+
+		/* Create a new stack of extensions. This stack will be used
+		 * to create the certificate */
+		CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
+
+		for (j = 0 ; j < cert->num_ext ; j++) {
+
+			ext = &extensions[cert->ext[j]];
+
+			/* Get OpenSSL internal ID for this extension */
+			CHECK_OID(ext_nid, ext->oid);
+
+			/*
+			 * Three types of extensions are currently supported:
+			 *     - EXT_TYPE_NVCOUNTER
+			 *     - EXT_TYPE_HASH
+			 *     - EXT_TYPE_PKEY
+			 */
+			switch (ext->type) {
+			case EXT_TYPE_NVCOUNTER:
+				if (ext->arg) {
+					nvctr = atoi(ext->arg);
+					CHECK_NULL(cert_ext, ext_new_nvcounter(ext_nid,
+						EXT_CRIT, nvctr));
+				}
+				break;
+			case EXT_TYPE_HASH:
+				if (ext->arg == NULL) {
+					if (ext->optional) {
+						/* Include a hash filled with zeros */
+						memset(md, 0x0, SHA256_DIGEST_LENGTH);
+					} else {
+						/* Do not include this hash in the certificate */
+						break;
+					}
+				} else {
+					/* Calculate the hash of the file */
+					if (!sha_file(ext->arg, md)) {
+						ERROR("Cannot calculate hash of %s\n",
+							ext->arg);
+						exit(1);
+					}
+				}
+				CHECK_NULL(cert_ext, ext_new_hash(ext_nid,
+						EXT_CRIT, md_info, md,
+						SHA256_DIGEST_LENGTH));
+				break;
+			case EXT_TYPE_PKEY:
+				CHECK_NULL(cert_ext, ext_new_key(ext_nid,
+					EXT_CRIT, keys[ext->attr.key].key));
+				break;
+			default:
+				ERROR("Unknown extension type '%d' in %s\n",
+						ext->type, cert->cn);
 				exit(1);
 			}
+
+			/* Push the extension into the stack */
+			sk_X509_EXTENSION_push(sk, cert_ext);
 		}
-	}
 
-	/* *********************************************************************
-	 * BL2 certificate (Trusted Boot Firmware certificate):
-	 *     - Self-signed with OEM ROT private key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - BL2 hash
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-
-	/* Add the NVCounter as a critical extension */
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-			tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-
-	/* Add hash of BL2 as an extension */
-	if (!sha_file(certs[BL2_CERT].bin, md)) {
-		ERROR("Cannot calculate the hash of %s\n", certs[BL2_CERT].bin);
-		exit(1);
-	}
-	CHECK_OID(hash_nid, BL2_HASH_OID);
-	CHECK_NULL(hash_ext, ext_new_hash(hash_nid, EXT_CRIT, md,
-			SHA256_DIGEST_LENGTH));
-	sk_X509_EXTENSION_push(sk, hash_ext);
-
-	/* Create certificate. Signed with ROT key */
-	if (!cert_new(&certs[BL2_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[BL2_CERT].cn);
-		exit(1);
-	}
-	sk_X509_EXTENSION_free(sk);
-
-	/* *********************************************************************
-	 * Trusted Key certificate:
-	 *     - Self-signed with OEM ROT private key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - TrustedWorldPK
-	 *         - NonTrustedWorldPK
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-			tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-	CHECK_OID(pk_nid, TZ_WORLD_PK_OID);
-	CHECK_NULL(trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-			keys[TRUSTED_WORLD_KEY].key));
-	sk_X509_EXTENSION_push(sk, trusted_key_ext);
-	CHECK_OID(pk_nid, NTZ_WORLD_PK_OID);
-	CHECK_NULL(non_trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-			keys[NON_TRUSTED_WORLD_KEY].key));
-	sk_X509_EXTENSION_push(sk, non_trusted_key_ext);
-	if (!cert_new(&certs[TRUSTED_KEY_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[TRUSTED_KEY_CERT].cn);
-		exit(1);
-	}
-	sk_X509_EXTENSION_free(sk);
-
-	/* *********************************************************************
-	 * BL30 Key certificate (Trusted SCP Firmware Key certificate):
-	 *     - Self-signed with Trusted World key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - SCPFirmwareContentCertPK
-	 **********************************************************************/
-	if (bl30_present) {
-		CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-		CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-				tf_nvcounter));
-		sk_X509_EXTENSION_push(sk, nvctr_ext);
-		CHECK_OID(pk_nid, BL30_CONTENT_CERT_PK_OID);
-		CHECK_NULL(trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-				keys[BL30_KEY].key));
-		sk_X509_EXTENSION_push(sk, trusted_key_ext);
-		if (!cert_new(&certs[BL30_KEY_CERT], VAL_DAYS, 0, sk)) {
-			ERROR("Cannot create %s\n", certs[BL30_KEY_CERT].cn);
-			exit(1);
-		}
-		sk_X509_EXTENSION_free(sk);
-	}
-
-	/* *********************************************************************
-	 * BL30 certificate (SCP Firmware Content certificate):
-	 *     - Signed with Trusted World Key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - SCPFirmwareHash
-	 **********************************************************************/
-	if (bl30_present) {
-		CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-		CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-				tf_nvcounter));
-		sk_X509_EXTENSION_push(sk, nvctr_ext);
-
-		if (!sha_file(certs[BL30_CERT].bin, md)) {
-			ERROR("Cannot calculate the hash of %s\n",
-					certs[BL30_CERT].bin);
-			exit(1);
-		}
-		CHECK_OID(hash_nid, BL30_HASH_OID);
-		CHECK_NULL(hash_ext, ext_new_hash(hash_nid, EXT_CRIT, md,
-				SHA256_DIGEST_LENGTH));
-		sk_X509_EXTENSION_push(sk, hash_ext);
-
-		if (!cert_new(&certs[BL30_CERT], VAL_DAYS, 0, sk)) {
-			ERROR("Cannot create %s\n", certs[BL30_CERT].cn);
+		/* Create certificate. Signed with corresponding key */
+		if (cert->fn && !cert_new(key_alg, cert, VAL_DAYS, 0, sk)) {
+			ERROR("Cannot create %s\n", cert->cn);
 			exit(1);
 		}
 
 		sk_X509_EXTENSION_free(sk);
 	}
 
-	/* *********************************************************************
-	 * BL31 Key certificate (Trusted SoC Firmware Key certificate):
-	 *     - Self-signed with Trusted World key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - SoCFirmwareContentCertPK
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-			tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-	CHECK_OID(pk_nid, BL31_CONTENT_CERT_PK_OID);
-	CHECK_NULL(trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-			keys[BL31_KEY].key));
-	sk_X509_EXTENSION_push(sk, trusted_key_ext);
-	if (!cert_new(&certs[BL31_KEY_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[BL31_KEY_CERT].cn);
-		exit(1);
-	}
-	sk_X509_EXTENSION_free(sk);
-
-	/* *********************************************************************
-	 * BL31 certificate (SOC Firmware Content certificate):
-	 *     - Signed with Trusted World Key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - BL31 hash
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-			tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-
-	if (!sha_file(certs[BL31_CERT].bin, md)) {
-		ERROR("Cannot calculate the hash of %s\n", certs[BL31_CERT].bin);
-		exit(1);
-	}
-	CHECK_OID(hash_nid, BL31_HASH_OID);
-	CHECK_NULL(hash_ext, ext_new_hash(hash_nid, EXT_CRIT, md,
-			SHA256_DIGEST_LENGTH));
-	sk_X509_EXTENSION_push(sk, hash_ext);
-
-	if (!cert_new(&certs[BL31_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[BL31_CERT].cn);
-		exit(1);
-	}
-
-	sk_X509_EXTENSION_free(sk);
-
-	/* *********************************************************************
-	 * BL32 Key certificate (Trusted OS Firmware Key certificate):
-	 *     - Self-signed with Trusted World key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - TrustedOSFirmwareContentCertPK
-	 **********************************************************************/
-	if (bl32_present) {
-		CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-		CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-				tf_nvcounter));
-		sk_X509_EXTENSION_push(sk, nvctr_ext);
-		CHECK_OID(pk_nid, BL32_CONTENT_CERT_PK_OID);
-		CHECK_NULL(trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-				keys[BL32_KEY].key));
-		sk_X509_EXTENSION_push(sk, trusted_key_ext);
-		if (!cert_new(&certs[BL32_KEY_CERT], VAL_DAYS, 0, sk)) {
-			ERROR("Cannot create %s\n", certs[BL32_KEY_CERT].cn);
-			exit(1);
-		}
-		sk_X509_EXTENSION_free(sk);
-	}
-
-	/* *********************************************************************
-	 * BL32 certificate (TrustedOS Firmware Content certificate):
-	 *     - Signed with Trusted World Key
-	 *     - Extensions:
-	 *         - TrustedFirmwareNVCounter (TODO)
-	 *         - BL32 hash
-	 **********************************************************************/
-	if (bl32_present) {
-		CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-		CHECK_NULL(nvctr_ext, ext_new_nvcounter(tz_nvctr_nid, EXT_CRIT,
-				tf_nvcounter));
-		sk_X509_EXTENSION_push(sk, nvctr_ext);
-
-		if (!sha_file(certs[BL32_CERT].bin, md)) {
-			ERROR("Cannot calculate the hash of %s\n",
-					certs[BL32_CERT].bin);
-			exit(1);
-		}
-		CHECK_OID(hash_nid, BL32_HASH_OID);
-		CHECK_NULL(hash_ext, ext_new_hash(hash_nid, EXT_CRIT, md,
-				SHA256_DIGEST_LENGTH));
-		sk_X509_EXTENSION_push(sk, hash_ext);
-
-		if (!cert_new(&certs[BL32_CERT], VAL_DAYS, 0, sk)) {
-			ERROR("Cannot create %s\n", certs[BL32_CERT].cn);
-			exit(1);
-		}
-
-		sk_X509_EXTENSION_free(sk);
-	}
-
-	/* *********************************************************************
-	 * BL33 Key certificate (Non Trusted Firmware Key certificate):
-	 *     - Self-signed with Non Trusted World key
-	 *     - Extensions:
-	 *         - NonTrustedFirmwareNVCounter (TODO)
-	 *         - NonTrustedFirmwareContentCertPK
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(ntz_nvctr_nid, EXT_CRIT,
-			non_tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-	CHECK_OID(pk_nid, BL33_CONTENT_CERT_PK_OID);
-	CHECK_NULL(non_trusted_key_ext, ext_new_key(pk_nid, EXT_CRIT,
-			keys[BL33_KEY].key));
-	sk_X509_EXTENSION_push(sk, non_trusted_key_ext);
-	if (!cert_new(&certs[BL33_KEY_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[BL33_KEY_CERT].cn);
-		exit(1);
-	}
-	sk_X509_EXTENSION_free(sk);
-
-	/* *********************************************************************
-	 * BL33 certificate (Non-Trusted World Content certificate):
-	 *     - Signed with Non-Trusted World Key
-	 *     - Extensions:
-	 *         - NonTrustedFirmwareNVCounter (TODO)
-	 *         - BL33 hash
-	 **********************************************************************/
-	CHECK_NULL(sk, sk_X509_EXTENSION_new_null());
-	CHECK_NULL(nvctr_ext, ext_new_nvcounter(ntz_nvctr_nid, EXT_CRIT,
-			non_tf_nvcounter));
-	sk_X509_EXTENSION_push(sk, nvctr_ext);
-
-	if (!sha_file(certs[BL33_CERT].bin, md)) {
-		ERROR("Cannot calculate the hash of %s\n", certs[BL33_CERT].bin);
-		exit(1);
-	}
-	CHECK_OID(hash_nid, BL33_HASH_OID);
-	CHECK_NULL(hash_ext, ext_new_hash(hash_nid, EXT_CRIT, md,
-			SHA256_DIGEST_LENGTH));
-	sk_X509_EXTENSION_push(sk, hash_ext);
-
-	if (!cert_new(&certs[BL33_CERT], VAL_DAYS, 0, sk)) {
-		ERROR("Cannot create %s\n", certs[BL33_CERT].cn);
-		exit(1);
-	}
-	sk_X509_EXTENSION_free(sk);
 
 	/* Print the certificates */
 	if (print_cert) {
-		for (i = 0 ; i < NUM_CERTIFICATES ; i++) {
+		for (i = 0 ; i < num_certs ; i++) {
 			if (!certs[i].x) {
 				continue;
 			}
@@ -684,7 +474,7 @@
 	}
 
 	/* Save created certificates to files */
-	for (i = 0 ; i < NUM_CERTIFICATES ; i++) {
+	for (i = 0 ; i < num_certs ; i++) {
 		if (certs[i].x && certs[i].fn) {
 			file = fopen(certs[i].fn, "w");
 			if (file != NULL) {
@@ -698,18 +488,13 @@
 
 	/* Save keys */
 	if (save_keys) {
-		for (i = 0 ; i < NUM_KEYS ; i++) {
+		for (i = 0 ; i < num_keys ; i++) {
 			if (!key_store(&keys[i])) {
 				ERROR("Cannot save %s\n", keys[i].desc);
 			}
 		}
 	}
 
-	X509_EXTENSION_free(hash_ext);
-	X509_EXTENSION_free(nvctr_ext);
-	X509_EXTENSION_free(trusted_key_ext);
-	X509_EXTENSION_free(non_trusted_key_ext);
-
 #ifndef OPENSSL_NO_ENGINE
 	ENGINE_cleanup();
 #endif
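
The rewritten main() above replaces the long chain of per-certificate, per-extension blocks with a single loop over data: each registered certificate lists which extensions it carries, and the loop dispatches on the extension type (NV counter, hash or public key). The stand-alone sketch below illustrates that table-driven pattern; the type names, fields and certificate entries are simplified stand-ins for illustration, not the tool's actual definitions.

/*
 * Minimal sketch of the table-driven pattern used above: certificates are
 * described by data (which extensions they carry) and one generic loop
 * dispatches on the extension type. Illustrative stand-ins only.
 */
#include <stdio.h>

enum ext_type { EXT_NVCOUNTER, EXT_HASH, EXT_PKEY };

struct ext_desc {
	const char *name;
	enum ext_type type;
};

struct cert_desc {
	const char *cn;
	int ext[4];	/* indices into the extension table */
	int num_ext;
};

static const struct ext_desc extensions[] = {
	{ "TrustedWorldNVCounter", EXT_NVCOUNTER },
	{ "TrustedBootFirmwareHash", EXT_HASH },
	{ "TrustedWorldPublicKey", EXT_PKEY },
};

static const struct cert_desc certs[] = {
	{ "Trusted Boot FW Certificate", { 0, 1 }, 2 },
	{ "Trusted Key Certificate", { 0, 2 }, 2 },
};

int main(void)
{
	/* One generic loop replaces the old per-certificate code blocks */
	for (unsigned i = 0; i < sizeof(certs) / sizeof(certs[0]); i++) {
		printf("Creating %s\n", certs[i].cn);
		for (int j = 0; j < certs[i].num_ext; j++) {
			const struct ext_desc *e = &extensions[certs[i].ext[j]];
			switch (e->type) {
			case EXT_NVCOUNTER:
				printf("  add NV counter extension %s\n", e->name);
				break;
			case EXT_HASH:
				printf("  add hash extension %s\n", e->name);
				break;
			case EXT_PKEY:
				printf("  add public key extension %s\n", e->name);
				break;
			}
		}
	}
	return 0;
}

In the real loop each certificate is additionally signed with the key named by its descriptor (cert_new()) and the extension stack is freed before the next iteration, as shown in the hunk above.
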
diff --git a/tools/cert_create/src/sha.c b/tools/cert_create/src/sha.c
index 57026b5..2971593 100644
--- a/tools/cert_create/src/sha.c
+++ b/tools/cert_create/src/sha.c
@@ -1,35 +1,11 @@
 /*
  * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <stdio.h>
 #include <openssl/sha.h>
+#include <stdio.h>
 
 #include "debug.h"
 
diff --git a/tools/cert_create/src/tbb_cert.c b/tools/cert_create/src/tbb_cert.c
deleted file mode 100644
index 8dfda60..0000000
--- a/tools/cert_create/src/tbb_cert.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "tbb_cert.h"
-#include "tbb_key.h"
-
-/*
- * Certificates used in the chain of trust
- *
- * The order of the certificates must follow the enumeration specified in
- * tbb_cert.h. All certificates are self-signed.
- */
-cert_t certs[NUM_CERTIFICATES] = {
-	{
-		.id = BL2_CERT,
-		.fn = NULL,
-		.cn = "BL2 Certificate",
-		.key = &keys[ROT_KEY],
-		.issuer = &certs[BL2_CERT],
-	},
-	{
-		.id = TRUSTED_KEY_CERT,
-		.fn = NULL,
-		.cn = "Trusted Key Certificate",
-		.key = &keys[ROT_KEY],
-		.issuer = &certs[TRUSTED_KEY_CERT],
-	},
-	{
-		.id = BL30_KEY_CERT,
-		.fn = NULL,
-		.cn = "BL3-0 Key Certificate",
-		.key = &keys[TRUSTED_WORLD_KEY],
-		.issuer = &certs[BL30_KEY_CERT],
-	},
-	{
-		.id = BL30_CERT,
-		.fn = NULL,
-		.cn = "BL3-0 Content Certificate",
-		.key = &keys[BL30_KEY],
-		.issuer = &certs[BL30_CERT],
-	},
-	{
-		.id = BL31_KEY_CERT,
-		.fn = NULL,
-		.cn = "BL3-1 Key Certificate",
-		.key = &keys[TRUSTED_WORLD_KEY],
-		.issuer = &certs[BL31_KEY_CERT],
-	},
-	{
-		.id = BL31_CERT,
-		.fn = NULL,
-		.cn = "BL3-1 Content Certificate",
-		.key = &keys[BL31_KEY],
-		.issuer = &certs[BL31_CERT],
-	},
-	{
-		.id = BL32_KEY_CERT,
-		.fn = NULL,
-		.cn = "BL3-2 Key Certificate",
-		.key = &keys[TRUSTED_WORLD_KEY],
-		.issuer = &certs[BL32_KEY_CERT],
-	},
-	{
-		.id = BL32_CERT,
-		.fn = NULL,
-		.cn = "BL3-2 Content Certificate",
-		.key = &keys[BL32_KEY],
-		.issuer = &certs[BL32_CERT],
-	},
-	{
-		.id = BL33_KEY_CERT,
-		.fn = NULL,
-		.cn = "BL3-3 Key Certificate",
-		.key = &keys[NON_TRUSTED_WORLD_KEY],
-		.issuer = &certs[BL33_KEY_CERT],
-	},
-	{
-		.id = BL33_CERT,
-		.fn = NULL,
-		.cn = "BL3-3 Content Certificate",
-		.key = &keys[BL33_KEY],
-		.issuer = &certs[BL33_CERT],
-	}
-};
diff --git a/tools/cert_create/src/tbb_ext.c b/tools/cert_create/src/tbb_ext.c
deleted file mode 100644
index 0022611..0000000
--- a/tools/cert_create/src/tbb_ext.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <openssl/err.h>
-#include <openssl/x509v3.h>
-#include "ext.h"
-#include "platform_oid.h"
-
-ext_t tbb_ext[] = {
-	{
-		.oid = TZ_FW_NVCOUNTER_OID,
-		.sn = "TrustedNvCounter",
-		.ln = "Non-volatile trusted counter",
-		.type = V_ASN1_INTEGER
-	},
-	{
-		.oid = NTZ_FW_NVCOUNTER_OID,
-		.sn = "NonTrustedNvCounter",
-		.ln = "Non-volatile non-trusted counter",
-		.type = V_ASN1_INTEGER
-	},
-	{
-		.oid = BL2_HASH_OID,
-		.sn = "TrustedBootFirmwareHash",
-		.ln = "Trusted Boot Firmware (BL2) hash (SHA256)",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = TZ_WORLD_PK_OID,
-		.sn = "TrustedWorldPublicKey",
-		.ln = "Trusted World Public Key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = NTZ_WORLD_PK_OID,
-		.sn = "NonTrustedWorldPublicKey",
-		.ln = "Non-Trusted World Public Key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL31_CONTENT_CERT_PK_OID,
-		.sn = "SoCFirmwareContentCertPK",
-		.ln = "SoC Firmware content certificate public key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL31_HASH_OID,
-		.sn = "APROMPatchHash",
-		.ln = "AP ROM patch hash",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL30_CONTENT_CERT_PK_OID,
-		.sn = "SCPFirmwareContentCertPK",
-		.ln = "SCP Firmware content certificate public key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL30_HASH_OID,
-		.sn = "SCPFirmwareHash",
-		.ln = "SCP Firmware (BL30) hash (SHA256)",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL32_CONTENT_CERT_PK_OID,
-		.sn = "TrustedOSFirmwareContentCertPK",
-		.ln = "Trusted OS Firmware content certificate public key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL32_HASH_OID,
-		.sn = "TrustedOSHash",
-		.ln = "Trusted OS (BL32) hash (SHA256)",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL33_CONTENT_CERT_PK_OID,
-		.sn = "NonTrustedFirmwareContentCertPK",
-		.ln = "Non-Trusted Firmware content certificate public key",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{
-		.oid = BL33_HASH_OID,
-		.sn = "NonTrustedWorldBootloaderHash",
-		.ln = "Non-Trusted World (BL33) hash (SHA256)",
-		.type = V_ASN1_OCTET_STRING
-	},
-	{ 0, 0, 0, 0 }
-};
diff --git a/tools/cert_create/src/tbb_key.c b/tools/cert_create/src/tbb_key.c
deleted file mode 100644
index 140aeda..0000000
--- a/tools/cert_create/src/tbb_key.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "tbb_key.h"
-
-/*
- * Keys used to establish the chain of trust
- *
- * The order of the keys must follow the enumeration specified in tbb_key.h
- */
-key_t keys[NUM_KEYS] = {
-	{
-		.id = ROT_KEY,
-		.desc = "Root Of Trust key"
-	},
-	{
-		.id = TRUSTED_WORLD_KEY,
-		.desc = "Trusted World key"
-	},
-	{
-		.id = NON_TRUSTED_WORLD_KEY,
-		.desc = "Non Trusted World key"
-	},
-	{
-		.id = BL30_KEY,
-		.desc = "BL30 key"
-	},
-	{
-		.id = BL31_KEY,
-		.desc = "BL31 key"
-	},
-	{
-		.id = BL32_KEY,
-		.desc = "BL32 key"
-	},
-	{
-		.id = BL33_KEY,
-		.desc = "BL33 key"
-	}
-};
diff --git a/tools/cert_create/src/tbbr/tbb_cert.c b/tools/cert_create/src/tbbr/tbb_cert.c
new file mode 100644
index 0000000..c815178
--- /dev/null
+++ b/tools/cert_create/src/tbbr/tbb_cert.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "tbbr/tbb_cert.h"
+#include "tbbr/tbb_ext.h"
+#include "tbbr/tbb_key.h"
+
+/*
+ * Certificates used in the chain of trust
+ *
+ * The order of the certificates must follow the enumeration specified in
+ * tbb_cert.h. All certificates are self-signed, so the issuer certificate
+ * field points to itself.
+ */
+static cert_t tbb_certs[] = {
+	[TRUSTED_BOOT_FW_CERT] = {
+		.id = TRUSTED_BOOT_FW_CERT,
+		.opt = "tb-fw-cert",
+		.help_msg = "Trusted Boot FW Certificate (output file)",
+		.fn = NULL,
+		.cn = "Trusted Boot FW Certificate",
+		.key = ROT_KEY,
+		.issuer = TRUSTED_BOOT_FW_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			TRUSTED_BOOT_FW_HASH_EXT
+		},
+		.num_ext = 2
+	},
+	[TRUSTED_KEY_CERT] = {
+		.id = TRUSTED_KEY_CERT,
+		.opt = "trusted-key-cert",
+		.help_msg = "Trusted Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "Trusted Key Certificate",
+		.key = ROT_KEY,
+		.issuer = TRUSTED_KEY_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			TRUSTED_WORLD_PK_EXT,
+			NON_TRUSTED_WORLD_PK_EXT
+		},
+		.num_ext = 3
+	},
+	[SCP_FW_KEY_CERT] = {
+		.id = SCP_FW_KEY_CERT,
+		.opt = "scp-fw-key-cert",
+		.help_msg = "SCP Firmware Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "SCP Firmware Key Certificate",
+		.key = TRUSTED_WORLD_KEY,
+		.issuer = SCP_FW_KEY_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			SCP_FW_CONTENT_CERT_PK_EXT
+		},
+		.num_ext = 2
+	},
+	[SCP_FW_CONTENT_CERT] = {
+		.id = SCP_FW_CONTENT_CERT,
+		.opt = "scp-fw-cert",
+		.help_msg = "SCP Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "SCP Firmware Content Certificate",
+		.key = SCP_FW_CONTENT_CERT_KEY,
+		.issuer = SCP_FW_CONTENT_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			SCP_FW_HASH_EXT
+		},
+		.num_ext = 2
+	},
+	[SOC_FW_KEY_CERT] = {
+		.id = SOC_FW_KEY_CERT,
+		.opt = "soc-fw-key-cert",
+		.help_msg = "SoC Firmware Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "SoC Firmware Key Certificate",
+		.key = TRUSTED_WORLD_KEY,
+		.issuer = SOC_FW_KEY_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			SOC_FW_CONTENT_CERT_PK_EXT
+		},
+		.num_ext = 2
+	},
+	[SOC_FW_CONTENT_CERT] = {
+		.id = SOC_FW_CONTENT_CERT,
+		.opt = "soc-fw-cert",
+		.help_msg = "SoC Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "SoC Firmware Content Certificate",
+		.key = SOC_FW_CONTENT_CERT_KEY,
+		.issuer = SOC_FW_CONTENT_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			SOC_AP_FW_HASH_EXT
+		},
+		.num_ext = 2
+	},
+	[TRUSTED_OS_FW_KEY_CERT] = {
+		.id = TRUSTED_OS_FW_KEY_CERT,
+		.opt = "tos-fw-key-cert",
+		.help_msg = "Trusted OS Firmware Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "Trusted OS Firmware Key Certificate",
+		.key = TRUSTED_WORLD_KEY,
+		.issuer = TRUSTED_OS_FW_KEY_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			TRUSTED_OS_FW_CONTENT_CERT_PK_EXT
+		},
+		.num_ext = 2
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT] = {
+		.id = TRUSTED_OS_FW_CONTENT_CERT,
+		.opt = "tos-fw-cert",
+		.help_msg = "Trusted OS Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "Trusted OS Firmware Content Certificate",
+		.key = TRUSTED_OS_FW_CONTENT_CERT_KEY,
+		.issuer = TRUSTED_OS_FW_CONTENT_CERT,
+		.ext = {
+			TRUSTED_FW_NVCOUNTER_EXT,
+			TRUSTED_OS_FW_HASH_EXT,
+			TRUSTED_OS_FW_EXTRA1_HASH_EXT,
+			TRUSTED_OS_FW_EXTRA2_HASH_EXT
+		},
+		.num_ext = 4
+	},
+	[NON_TRUSTED_FW_KEY_CERT] = {
+		.id = NON_TRUSTED_FW_KEY_CERT,
+		.opt = "nt-fw-key-cert",
+		.help_msg = "Non-Trusted Firmware Key Certificate (output file)",
+		.fn = NULL,
+		.cn = "Non-Trusted Firmware Key Certificate",
+		.key = NON_TRUSTED_WORLD_KEY,
+		.issuer = NON_TRUSTED_FW_KEY_CERT,
+		.ext = {
+			NON_TRUSTED_FW_NVCOUNTER_EXT,
+			NON_TRUSTED_FW_CONTENT_CERT_PK_EXT
+		},
+		.num_ext = 2
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT] = {
+		.id = NON_TRUSTED_FW_CONTENT_CERT,
+		.opt = "nt-fw-cert",
+		.help_msg = "Non-Trusted Firmware Content Certificate (output file)",
+		.fn = NULL,
+		.cn = "Non-Trusted Firmware Content Certificate",
+		.key = NON_TRUSTED_FW_CONTENT_CERT_KEY,
+		.issuer = NON_TRUSTED_FW_CONTENT_CERT,
+		.ext = {
+			NON_TRUSTED_FW_NVCOUNTER_EXT,
+			NON_TRUSTED_WORLD_BOOTLOADER_HASH_EXT
+		},
+		.num_ext = 2
+	},
+	[FWU_CERT] = {
+		.id = FWU_CERT,
+		.opt = "fwu-cert",
+		.help_msg = "Firmware Update Certificate (output file)",
+		.fn = NULL,
+		.cn = "Firmware Update Certificate",
+		.key = ROT_KEY,
+		.issuer = FWU_CERT,
+		.ext = {
+			SCP_FWU_CFG_HASH_EXT,
+			AP_FWU_CFG_HASH_EXT,
+			FWU_HASH_EXT
+		},
+		.num_ext = 3
+	}
+};
+
+REGISTER_COT(tbb_certs);
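
The table above is published to the generic code through REGISTER_COT(), and main() later reaches it via cert_init(), cert_get_by_opt() and the certs/num_certs variables. The macro itself is defined elsewhere and is not shown in this diff; the sketch below illustrates one simple way such a registration mechanism could work, using a GCC/Clang constructor function to fill a global pointer and element count before main() runs. It is a hypothetical illustration of the idea, not the tool's actual implementation.

#include <stdio.h>

/* Stand-in for the tool's cert_t; only the field needed here. */
typedef struct cert_s {
	const char *cn;
} cert_t;

/* Globals consumed by the generic code (cert_init(), the main loop, ...) */
static cert_t *certs;
static unsigned int num_certs;

/*
 * Hypothetical registration macro: publish the platform table and its size
 * before main() runs, so generic code never needs the table's name.
 */
#define REGISTER_COT_SKETCH(_certs)					\
	static void __attribute__((constructor)) register_cot(void)	\
	{								\
		certs = (_certs);					\
		num_certs = sizeof(_certs) / sizeof((_certs)[0]);	\
	}

static cert_t my_certs[] = {
	{ .cn = "Trusted Boot FW Certificate" },
	{ .cn = "Trusted Key Certificate" },
};

REGISTER_COT_SKETCH(my_certs)

int main(void)
{
	for (unsigned int i = 0; i < num_certs; i++)
		printf("registered: %s\n", certs[i].cn);
	return 0;
}
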
diff --git a/tools/cert_create/src/tbbr/tbb_ext.c b/tools/cert_create/src/tbbr/tbb_ext.c
new file mode 100644
index 0000000..504b0fc
--- /dev/null
+++ b/tools/cert_create/src/tbbr/tbb_ext.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <openssl/err.h>
+#include <openssl/x509v3.h>
+
+#if USE_TBBR_DEFS
+#include <tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+#include "ext.h"
+#include "tbbr/tbb_ext.h"
+#include "tbbr/tbb_key.h"
+
+/* TODO: get these values from the command line */
+#define TRUSTED_WORLD_NVCTR_VALUE	0
+#define NORMAL_WORLD_NVCTR_VALUE	0
+
+static ext_t tbb_ext[] = {
+	[TRUSTED_FW_NVCOUNTER_EXT] = {
+		.oid = TRUSTED_FW_NVCOUNTER_OID,
+		.opt = "tfw-nvctr",
+		.help_msg = "Trusted Firmware Non-Volatile counter value",
+		.sn = "TrustedWorldNVCounter",
+		.ln = "Trusted World Non-Volatile counter",
+		.asn1_type = V_ASN1_INTEGER,
+		.type = EXT_TYPE_NVCOUNTER,
+		.attr.nvctr_type = NVCTR_TYPE_TFW
+	},
+	[NON_TRUSTED_FW_NVCOUNTER_EXT] = {
+		.oid = NON_TRUSTED_FW_NVCOUNTER_OID,
+		.opt = "ntfw-nvctr",
+		.help_msg = "Non-Trusted Firmware Non-Volatile counter value",
+		.sn = "NormalWorldNVCounter",
+		.ln = "Non-Trusted Firmware Non-Volatile counter",
+		.asn1_type = V_ASN1_INTEGER,
+		.type = EXT_TYPE_NVCOUNTER,
+		.attr.nvctr_type = NVCTR_TYPE_NTFW
+	},
+	[TRUSTED_BOOT_FW_HASH_EXT] = {
+		.oid = TRUSTED_BOOT_FW_HASH_OID,
+		.opt = "tb-fw",
+		.help_msg = "Trusted Boot Firmware image file",
+		.sn = "TrustedBootFirmwareHash",
+		.ln = "Trusted Boot Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[TRUSTED_WORLD_PK_EXT] = {
+		.oid = TRUSTED_WORLD_PK_OID,
+		.sn = "TrustedWorldPublicKey",
+		.ln = "Trusted World Public Key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = TRUSTED_WORLD_KEY
+	},
+	[NON_TRUSTED_WORLD_PK_EXT] = {
+		.oid = NON_TRUSTED_WORLD_PK_OID,
+		.sn = "NonTrustedWorldPublicKey",
+		.ln = "Non-Trusted World Public Key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = NON_TRUSTED_WORLD_KEY
+	},
+	[SCP_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = SCP_FW_CONTENT_CERT_PK_OID,
+		.sn = "SCPFirmwareContentCertPK",
+		.ln = "SCP Firmware content certificate public key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = SCP_FW_CONTENT_CERT_KEY
+	},
+	[SCP_FW_HASH_EXT] = {
+		.oid = SCP_FW_HASH_OID,
+		.opt = "scp-fw",
+		.help_msg = "SCP Firmware image file",
+		.sn = "SCPFirmwareHash",
+		.ln = "SCP Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[SOC_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = SOC_FW_CONTENT_CERT_PK_OID,
+		.sn = "SoCFirmwareContentCertPK",
+		.ln = "SoC Firmware content certificate public key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = SOC_FW_CONTENT_CERT_KEY
+	},
+	[SOC_AP_FW_HASH_EXT] = {
+		.oid = SOC_AP_FW_HASH_OID,
+		.opt = "soc-fw",
+		.help_msg = "SoC AP Firmware image file",
+		.sn = "SoCAPFirmwareHash",
+		.ln = "SoC AP Firmware hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = TRUSTED_OS_FW_CONTENT_CERT_PK_OID,
+		.sn = "TrustedOSFirmwareContentCertPK",
+		.ln = "Trusted OS Firmware content certificate public key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = TRUSTED_OS_FW_CONTENT_CERT_KEY
+	},
+	[TRUSTED_OS_FW_HASH_EXT] = {
+		.oid = TRUSTED_OS_FW_HASH_OID,
+		.opt = "tos-fw",
+		.help_msg = "Trusted OS image file",
+		.sn = "TrustedOSHash",
+		.ln = "Trusted OS hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[TRUSTED_OS_FW_EXTRA1_HASH_EXT] = {
+		.oid = TRUSTED_OS_FW_EXTRA1_HASH_OID,
+		.opt = "tos-fw-extra1",
+		.help_msg = "Trusted OS Extra1 image file",
+		.sn = "TrustedOSExtra1Hash",
+		.ln = "Trusted OS Extra1 hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH,
+		.optional = 1
+	},
+	[TRUSTED_OS_FW_EXTRA2_HASH_EXT] = {
+		.oid = TRUSTED_OS_FW_EXTRA2_HASH_OID,
+		.opt = "tos-fw-extra2",
+		.help_msg = "Trusted OS Extra2 image file",
+		.sn = "TrustedOSExtra2Hash",
+		.ln = "Trusted OS Extra2 hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH,
+		.optional = 1
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_PK_EXT] = {
+		.oid = NON_TRUSTED_FW_CONTENT_CERT_PK_OID,
+		.sn = "NonTrustedFirmwareContentCertPK",
+		.ln = "Non-Trusted Firmware content certificate public key",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_PKEY,
+		.attr.key = NON_TRUSTED_FW_CONTENT_CERT_KEY
+	},
+	[NON_TRUSTED_WORLD_BOOTLOADER_HASH_EXT] = {
+		.oid = NON_TRUSTED_WORLD_BOOTLOADER_HASH_OID,
+		.opt = "nt-fw",
+		.help_msg = "Non-Trusted World Bootloader image file",
+		.sn = "NonTrustedWorldBootloaderHash",
+		.ln = "Non-Trusted World hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH
+	},
+	[SCP_FWU_CFG_HASH_EXT] = {
+		.oid = SCP_FWU_CFG_HASH_OID,
+		.opt = "scp-fwu-cfg",
+		.help_msg = "SCP Firmware Update Config image file",
+		.sn = "SCPFWUpdateConfig",
+		.ln = "SCP Firmware Update Config hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH,
+		.optional = 1
+	},
+	[AP_FWU_CFG_HASH_EXT] = {
+		.oid = AP_FWU_CFG_HASH_OID,
+		.opt = "ap-fwu-cfg",
+		.help_msg = "AP Firmware Update Config image file",
+		.sn = "APFWUpdateConfig",
+		.ln = "AP Firmware Update Config hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH,
+		.optional = 1
+	},
+	[FWU_HASH_EXT] = {
+		.oid = FWU_HASH_OID,
+		.opt = "fwu",
+		.help_msg = "Firmware Updater image file",
+		.sn = "FWUpdaterHash",
+		.ln = "Firmware Updater hash (SHA256)",
+		.asn1_type = V_ASN1_OCTET_STRING,
+		.type = EXT_TYPE_HASH,
+		.optional = 1
+	}
+};
+
+REGISTER_EXTENSIONS(tbb_ext);
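
Every EXT_TYPE_HASH entry above ends up in a certificate as a custom, critical extension whose value is the image digest (built by ext_new_hash() in main()). That helper lives in ext.c, which is not part of this hunk; the sketch below shows one way such an extension could be assembled with standard OpenSSL calls, by DER-encoding the digest as an OCTET STRING and wrapping it with X509_EXTENSION_create_by_NID(). It illustrates the mechanism only and omits details such as the hash algorithm identifier that the real helper receives via md_info.

#include <openssl/asn1.h>
#include <openssl/x509v3.h>

/*
 * Illustrative helper: wrap a message digest in a (critical) X.509v3
 * extension identified by 'nid' (previously registered with OBJ_create()).
 * Returns NULL on failure.
 */
static X509_EXTENSION *hash_extension_sketch(int nid, int critical,
					     const unsigned char *md,
					     int md_len)
{
	ASN1_OCTET_STRING *hash = NULL;
	ASN1_OCTET_STRING *ext_data = NULL;
	X509_EXTENSION *ext = NULL;
	unsigned char *der = NULL;
	int der_len;

	/* The extension value is the digest as a DER-encoded OCTET STRING */
	hash = ASN1_OCTET_STRING_new();
	if (hash == NULL || !ASN1_OCTET_STRING_set(hash, md, md_len))
		goto out;
	der_len = i2d_ASN1_OCTET_STRING(hash, &der);
	if (der_len <= 0)
		goto out;

	ext_data = ASN1_OCTET_STRING_new();
	if (ext_data == NULL || !ASN1_OCTET_STRING_set(ext_data, der, der_len))
		goto out;

	/* Build the extension; the data is copied, so locals can be freed */
	ext = X509_EXTENSION_create_by_NID(NULL, nid, critical, ext_data);
out:
	ASN1_OCTET_STRING_free(hash);
	ASN1_OCTET_STRING_free(ext_data);
	OPENSSL_free(der);
	return ext;
}
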
diff --git a/tools/cert_create/src/tbbr/tbb_key.c b/tools/cert_create/src/tbbr/tbb_key.c
new file mode 100644
index 0000000..a81f0e4
--- /dev/null
+++ b/tools/cert_create/src/tbbr/tbb_key.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "tbbr/tbb_key.h"
+
+/*
+ * Keys used to establish the chain of trust
+ *
+ * The order of the keys must follow the enumeration specified in tbb_key.h
+ */
+static key_t tbb_keys[] = {
+	[ROT_KEY] = {
+		.id = ROT_KEY,
+		.opt = "rot-key",
+		.help_msg = "Root Of Trust key (input/output file)",
+		.desc = "Root Of Trust key"
+	},
+	[TRUSTED_WORLD_KEY] = {
+		.id = TRUSTED_WORLD_KEY,
+		.opt = "trusted-world-key",
+		.help_msg = "Trusted World key (input/output file)",
+		.desc = "Trusted World key"
+	},
+	[NON_TRUSTED_WORLD_KEY] = {
+		.id = NON_TRUSTED_WORLD_KEY,
+		.opt = "non-trusted-world-key",
+		.help_msg = "Non Trusted World key (input/output file)",
+		.desc = "Non Trusted World key"
+	},
+	[SCP_FW_CONTENT_CERT_KEY] = {
+		.id = SCP_FW_CONTENT_CERT_KEY,
+		.opt = "scp-fw-key",
+		.help_msg = "SCP Firmware Content Certificate key (input/output file)",
+		.desc = "SCP Firmware Content Certificate key"
+	},
+	[SOC_FW_CONTENT_CERT_KEY] = {
+		.id = SOC_FW_CONTENT_CERT_KEY,
+		.opt = "soc-fw-key",
+		.help_msg = "SoC Firmware Content Certificate key (input/output file)",
+		.desc = "SoC Firmware Content Certificate key"
+	},
+	[TRUSTED_OS_FW_CONTENT_CERT_KEY] = {
+		.id = TRUSTED_OS_FW_CONTENT_CERT_KEY,
+		.opt = "tos-fw-key",
+		.help_msg = "Trusted OS Firmware Content Certificate key (input/output file)",
+		.desc = "Trusted OS Firmware Content Certificate key"
+	},
+	[NON_TRUSTED_FW_CONTENT_CERT_KEY] = {
+		.id = NON_TRUSTED_FW_CONTENT_CERT_KEY,
+		.opt = "nt-fw-key",
+		.help_msg = "Non Trusted Firmware Content Certificate key (input/output file)",
+		.desc = "Non Trusted Firmware Content Certificate key"
+	}
+};
+
+REGISTER_KEYS(tbb_keys);
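
When a key file is missing and new keys were requested, main() falls back to key_create() with the algorithm chosen by -a (RSA by default). key.c is not shown in this diff; the sketch below is one conventional way to generate an RSA key pair with OpenSSL's EVP interface. The function name, the 2048-bit size and the return convention are illustrative assumptions, not the tool's actual code.

#include <openssl/evp.h>
#include <openssl/rsa.h>

/*
 * Illustrative helper: generate a 2048-bit RSA key pair and return it as
 * an EVP_PKEY, or NULL on failure. The key size is an assumption here.
 */
static EVP_PKEY *rsa_key_create_sketch(void)
{
	EVP_PKEY_CTX *ctx;
	EVP_PKEY *pkey = NULL;

	ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
	if (ctx == NULL)
		return NULL;

	if (EVP_PKEY_keygen_init(ctx) <= 0 ||
	    EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, 2048) <= 0 ||
	    EVP_PKEY_keygen(ctx, &pkey) <= 0) {
		EVP_PKEY_free(pkey);	/* safe on NULL */
		pkey = NULL;
	}

	EVP_PKEY_CTX_free(ctx);
	return pkey;
}
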
diff --git a/tools/fip_create/Makefile b/tools/fip_create/Makefile
deleted file mode 100644
index c72bae5..0000000
--- a/tools/fip_create/Makefile
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-# Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# Neither the name of ARM nor the names of its contributors may be used
-# to endorse or promote products derived from this software without specific
-# prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-
-PROJECT = fip_create
-OBJECTS = fip_create.o
-
-CFLAGS = -Wall -Werror -pedantic -std=c99
-ifeq (${DEBUG},1)
-  CFLAGS += -g -O0 -DDEBUG
-else
-  CFLAGS += -O2
-endif
-
-# Make soft links and include from local directory otherwise wrong headers
-# could get pulled in from firmware tree.
-INCLUDE_PATHS = -I.
-
-CC := gcc
-RM := rm -rf
-
-.PHONY: all clean
-
-all: ${PROJECT}
-
-${PROJECT}: ${OBJECTS} Makefile
-	@echo "  LD      $@"
-	${Q}${CC} ${OBJECTS} -o $@
-	@echo
-	@echo "Built $@ successfully"
-	@echo
-
-%.o: %.c %.h Makefile
-	@echo "  CC      $<"
-	${Q}${CC} -c ${CFLAGS} ${INCLUDE_PATHS} $< -o $@
-
-clean:
-	${Q}${RM} ${PROJECT}
-	${Q}${RM} ${OBJECTS}
diff --git a/tools/fip_create/fip_create.c b/tools/fip_create/fip_create.c
deleted file mode 100644
index c6869f9..0000000
--- a/tools/fip_create/fip_create.c
+++ /dev/null
@@ -1,695 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <getopt.h> /* getopt_long() is a GNU extension */
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include "fip_create.h"
-#include "firmware_image_package.h"
-
-/* Values returned by getopt() as part of the command line parsing */
-#define OPT_TOC_ENTRY 0
-#define OPT_DUMP 1
-#define OPT_HELP 2
-
-file_info_t files[MAX_FILES];
-unsigned file_info_count = 0;
-uuid_t uuid_null = {0};
-
-/*
- * TODO: Add ability to specify and flag different file types.
- * Add flags to the toc_entry?
- * const char* format_type_str[] = { "RAW", "ELF", "PIC" };
- */
-
-/* The images used depends on the platform. */
-static entry_lookup_list_t toc_entry_lookup_list[] = {
-	{ "Trusted Boot Firmware BL2", UUID_TRUSTED_BOOT_FIRMWARE_BL2,
-	  "bl2", NULL, FLAG_FILENAME },
-	{ "SCP Firmware BL3-0", UUID_SCP_FIRMWARE_BL30,
-	  "bl30", NULL, FLAG_FILENAME},
-	{ "EL3 Runtime Firmware BL3-1", UUID_EL3_RUNTIME_FIRMWARE_BL31,
-	  "bl31", NULL, FLAG_FILENAME},
-	{ "Secure Payload BL3-2 (Trusted OS)", UUID_SECURE_PAYLOAD_BL32,
-	  "bl32", NULL, FLAG_FILENAME},
-	{ "Non-Trusted Firmware BL3-3", UUID_NON_TRUSTED_FIRMWARE_BL33,
-	  "bl33", NULL, FLAG_FILENAME},
-	/* Key Certificates */
-	{ "Root Of Trust key certificate", UUID_ROT_KEY_CERT,
-	  "rot-cert", NULL, FLAG_FILENAME },
-	{ "Trusted key certificate", UUID_TRUSTED_KEY_CERT,
-	  "trusted-key-cert", NULL, FLAG_FILENAME},
-	{ "SCP Firmware BL3-0 key certificate", UUID_SCP_FIRMWARE_BL30_KEY_CERT,
-	  "bl30-key-cert", NULL, FLAG_FILENAME},
-	{ "EL3 Runtime Firmware BL3-1 key certificate", UUID_EL3_RUNTIME_FIRMWARE_BL31_KEY_CERT,
-	  "bl31-key-cert", NULL, FLAG_FILENAME},
-	{ "Secure Payload BL3-2 (Trusted OS) key certificate", UUID_SECURE_PAYLOAD_BL32_KEY_CERT,
-	  "bl32-key-cert", NULL, FLAG_FILENAME},
-	{ "Non-Trusted Firmware BL3-3 key certificate", UUID_NON_TRUSTED_FIRMWARE_BL33_KEY_CERT,
-	  "bl33-key-cert", NULL, FLAG_FILENAME},
-	/* Content certificates */
-	{ "Trusted Boot Firmware BL2 certificate", UUID_TRUSTED_BOOT_FIRMWARE_BL2_CERT,
-	  "bl2-cert", NULL, FLAG_FILENAME },
-	{ "SCP Firmware BL3-0 certificate", UUID_SCP_FIRMWARE_BL30_CERT,
-	  "bl30-cert", NULL, FLAG_FILENAME},
-	{ "EL3 Runtime Firmware BL3-1 certificate", UUID_EL3_RUNTIME_FIRMWARE_BL31_CERT,
-	  "bl31-cert", NULL, FLAG_FILENAME},
-	{ "Secure Payload BL3-2 (Trusted OS) certificate", UUID_SECURE_PAYLOAD_BL32_CERT,
-	  "bl32-cert", NULL, FLAG_FILENAME},
-	{ "Non-Trusted Firmware BL3-3 certificate", UUID_NON_TRUSTED_FIRMWARE_BL33_CERT,
-	  "bl33-cert", NULL, FLAG_FILENAME},
-	{ NULL, {0}, 0 }
-};
-
-
-/* Return 0 for equal uuids */
-static inline int compare_uuids(const uuid_t *uuid1, const uuid_t *uuid2)
-{
-	return memcmp(uuid1, uuid2, sizeof(uuid_t));
-}
-
-
-static inline void copy_uuid(uuid_t *to_uuid, const uuid_t *from_uuid)
-{
-	memcpy(to_uuid, from_uuid, sizeof(uuid_t));
-}
-
-
-static void print_usage(void)
-{
-	entry_lookup_list_t *entry = toc_entry_lookup_list;
-
-	printf("Usage: fip_create [options] FIP_FILENAME\n\n");
-	printf("\tThis tool is used to create a Firmware Image Package.\n\n");
-	printf("Options:\n");
-	printf("\t--help: Print this help message and exit\n");
-	printf("\t--dump: Print contents of FIP\n\n");
-	printf("\tComponents that can be added/updated:\n");
-	for (; entry->command_line_name != NULL; entry++) {
-		printf("\t--%s%s\t\t%s",
-		       entry->command_line_name,
-		       (entry->flags & FLAG_FILENAME) ? " FILENAME" : "",
-		       entry->name);
-		printf("\n");
-	}
-}
-
-
-static entry_lookup_list_t *get_entry_lookup_from_uuid(const uuid_t *uuid)
-{
-	unsigned int lookup_index = 0;
-
-	while (toc_entry_lookup_list[lookup_index].command_line_name != NULL) {
-		if (compare_uuids(&toc_entry_lookup_list[lookup_index].name_uuid,
-		    uuid) == 0) {
-			return &toc_entry_lookup_list[lookup_index];
-		}
-		lookup_index++;
-	}
-	return NULL;
-}
-
-
-static file_info_t *find_file_info_from_uuid(const uuid_t *uuid)
-{
-	int index;
-
-	for (index = 0; index < file_info_count; index++) {
-		if (compare_uuids(&files[index].name_uuid, uuid) == 0) {
-			return &files[index];
-		}
-	}
-	return NULL;
-}
-
-
-static int add_file_info_entry(entry_lookup_list_t *lookup_entry, char *filename)
-{
-	file_info_t *file_info_entry;
-	int error;
-	struct stat file_status;
-	bool is_new_entry = false;
-
-	/* Check if the file already exists in the array */
-	file_info_entry = find_file_info_from_uuid(&lookup_entry->name_uuid);
-	if (file_info_entry == NULL) {
-		/* The file does not exist in the current list; take the next
-		 * one available in the file_info list. 'file_info_count' is
-		 * incremented in case of successful update at the end of the
-		 * function.
-		 */
-		file_info_entry = &files[file_info_count];
-		is_new_entry = true;
-
-		/* Copy the uuid for the new entry */
-		copy_uuid(&file_info_entry->name_uuid,
-			  &lookup_entry->name_uuid);
-	}
-
-	/* Get the file information for entry */
-	error = stat(filename, &file_status);
-	if (error != 0) {
-		printf("Error: Cannot get information for file \"%s\": %s\n",
-			filename, strerror(errno));
-		return errno;
-	}
-	file_info_entry->filename = filename;
-	file_info_entry->size = (unsigned int)file_status.st_size;
-	file_info_entry->entry = lookup_entry;
-
-	/* Increment the file_info counter on success if it is new file entry */
-	if (is_new_entry) {
-		file_info_count++;
-
-		/* Ensure we do not overflow */
-		if (file_info_count > MAX_FILES) {
-			printf("ERROR: Too many files in Package\n");
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-
-static int write_memory_to_file(const uint8_t *start, const char *filename,
-		unsigned int size)
-{
-	FILE *stream;
-	unsigned int bytes_written;
-
-	/* Write the packed file out to the filesystem */
-	stream = fopen(filename, "r+");
-	if (stream == NULL) {
-		stream = fopen(filename, "w");
-		if (stream == NULL) {
-			printf("Error: Cannot create output file \"%s\": %s\n",
-			       filename, strerror(errno));
-			return errno;
-		} else {
-			printf("Creating \"%s\"\n", filename);
-		}
-	} else {
-		printf("Updating \"%s\"\n", filename);
-	}
-
-	bytes_written = fwrite(start, sizeof(uint8_t), size, stream);
-	fclose(stream);
-
-	if (bytes_written != size) {
-		printf("Error: Incorrect write for file \"%s\": Size=%u,"
-			"Written=%u bytes.\n", filename, size, bytes_written);
-		return EIO;
-	}
-
-	return 0;
-}
-
-
-static int read_file_to_memory(void *memory, const file_info_t *info)
-{
-	FILE *stream;
-	unsigned int bytes_read;
-
-	/* If the file_info is defined by its filename we need to load it */
-	if (info->filename) {
-		/* Read image from filesystem */
-		stream = fopen(info->filename, "r");
-		if (stream == NULL) {
-			printf("Error: Cannot open file \"%s\": %s\n",
-				info->filename, strerror(errno));
-			return errno;
-		}
-
-		bytes_read = (unsigned int)fread(memory, sizeof(uint8_t),
-						 info->size, stream);
-		fclose(stream);
-		if (bytes_read != info->size) {
-			printf("Error: Incomplete read for file \"%s\":"
-				"Size=%u, Read=%u bytes.\n", info->filename,
-				info->size, bytes_read);
-			return EIO;
-		}
-	} else {
-		if (info->image_buffer == NULL) {
-			printf("ERROR: info->image_buffer = NULL\n");
-			return EIO;
-		}
-		/* Copy the file_info buffer (extracted from the existing
-		 * image package) into the new buffer.
-		 */
-		memcpy(memory, info->image_buffer, info->size);
-	}
-
-	return 0;
-}
-
-
-/* Create the image package file */
-static int pack_images(const char *fip_filename)
-{
-	int status;
-	uint8_t *fip_base_address;
-	void *entry_address;
-	fip_toc_header_t *toc_header;
-	fip_toc_entry_t *toc_entry;
-	unsigned int entry_index;
-	unsigned int toc_size;
-	unsigned int fip_size;
-	unsigned int entry_offset_address;
-	unsigned int payload_size = 0;
-
-	/* Validate filename */
-	if ((fip_filename == NULL) || (strcmp(fip_filename, "") == 0)) {
-		return EINVAL;
-	}
-
-	/* Payload size calculation */
-	for (entry_index = 0; entry_index < file_info_count; entry_index++) {
-		payload_size += files[entry_index].size;
-	}
-
-	/* Allocate memory for entire package, including the final null entry */
-	toc_size = (sizeof(fip_toc_header_t) +
-		    (sizeof(fip_toc_entry_t) * (file_info_count + 1)));
-	fip_size = toc_size + payload_size;
-	fip_base_address = malloc(fip_size);
-	if (fip_base_address == NULL) {
-		printf("Error: Can't allocate enough memory to create package."
-		       "Process aborted.\n");
-		return ENOMEM;
-	}
-	memset(fip_base_address, 0, fip_size);
-
-	/* Create ToC Header */
-	toc_header = (fip_toc_header_t *)fip_base_address;
-	toc_header->name = TOC_HEADER_NAME;
-	toc_header->serial_number = TOC_HEADER_SERIAL_NUMBER;
-	toc_header->flags = 0;
-
-	toc_entry = (fip_toc_entry_t *)(fip_base_address +
-				      sizeof(fip_toc_header_t));
-
-	/* Calculate the starting address of the first image, right after the
-	 * toc header.
-	 */
-	entry_offset_address = toc_size;
-	entry_index = 0;
-
-	/* Create the package in memory. */
-	for (entry_index = 0; entry_index < file_info_count; entry_index++) {
-		entry_address = (fip_base_address + entry_offset_address);
-		status = read_file_to_memory(entry_address,
-					     &files[entry_index]);
-		if (status != 0) {
-			printf("Error: While reading \"%s\" from filesystem.\n",
-				files[entry_index].filename);
-			return status;
-		}
-
-		copy_uuid(&toc_entry->uuid, &files[entry_index].name_uuid);
-		toc_entry->offset_address = entry_offset_address;
-		toc_entry->size = files[entry_index].size;
-		toc_entry->flags = 0;
-		entry_offset_address += toc_entry->size;
-		toc_entry++;
-	}
-
-	/* Add a null uuid entry to mark the end of toc entries */
-	copy_uuid(&toc_entry->uuid, &uuid_null);
-	toc_entry->offset_address = entry_offset_address;
-	toc_entry->size = 0;
-	toc_entry->flags = 0;
-
-	/* Save the package to file */
-	status = write_memory_to_file(fip_base_address, fip_filename, fip_size);
-	if (status != 0) {
-		printf("Error: Failed while writing package to file \"%s\" "
-			"with status=%d.\n", fip_filename, status);
-		return status;
-	}
-	return 0;
-}
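
pack_images() above lays the package out as a ToC header, one ToC entry per image, a terminating entry with a null UUID, and then the image payloads at the offsets recorded in the entries. The real structure definitions live in firmware_image_package.h, which is not part of this diff; the sketch below reconstructs plausible layouts from the fields the code accesses, and the exact field widths are assumptions.

#include <stdint.h>

/* 16-byte UUID used to identify each image in the package (assumed layout) */
typedef struct {
	uint8_t b[16];
} fip_uuid_sketch_t;

/* Package header: magic name, vendor serial number, flags (assumed widths) */
typedef struct {
	uint32_t name;			/* TOC_HEADER_NAME magic */
	uint32_t serial_number;		/* TOC_HEADER_SERIAL_NUMBER */
	uint64_t flags;			/* written as 0 above */
} fip_toc_header_sketch_t;

/* One entry per image; an entry with a null UUID terminates the table */
typedef struct {
	fip_uuid_sketch_t uuid;		/* identifies the image type */
	uint64_t offset_address;	/* payload offset from start of file */
	uint64_t size;			/* payload size in bytes */
	uint64_t flags;			/* written as 0 above */
} fip_toc_entry_sketch_t;
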
-
-
-static void dump_toc(void)
-{
-	unsigned int index = 0;
-	unsigned int image_offset;
-	unsigned int image_size = 0;
-
-	image_offset = sizeof(fip_toc_header_t) +
-		(sizeof(fip_toc_entry_t) * (file_info_count + 1));
-
-	printf("Firmware Image Package ToC:\n");
-	printf("---------------------------\n");
-	for (index = 0; index < file_info_count; index++) {
-		if (files[index].entry) {
-			printf("- %s: ", files[index].entry->name);
-		} else {
-			printf("- Unknown entry: ");
-		}
-		image_size = files[index].size;
-
-		printf("offset=0x%X, size=0x%X\n", image_offset, image_size);
-		image_offset += image_size;
-
-		if (files[index].filename) {
-			printf("  file: '%s'\n", files[index].filename);
-		}
-	}
-	printf("---------------------------\n");
-}
-
-
-/* Read and load existing package into memory. */
-static int parse_fip(const char *fip_filename)
-{
-	FILE *fip;
-	char *fip_buffer;
-	char *fip_buffer_end;
-	int fip_size, read_fip_size;
-	fip_toc_header_t *toc_header;
-	fip_toc_entry_t *toc_entry;
-	bool found_last_toc_entry = false;
-	file_info_t *file_info_entry;
-	int status = -1;
-	struct stat st;
-
-	fip = fopen(fip_filename, "r");
-	if (fip == NULL) {
-		/* If the fip does not exist just return, it should not be
-		 * considered as an error. The package will be created later
-		 */
-		status = 0;
-		goto parse_fip_return;
-	}
-
-	if (stat(fip_filename, &st) != 0) {
-		status = errno;
-		goto parse_fip_fclose;
-	} else {
-		fip_size = (int)st.st_size;
-	}
-
-	/* Allocate a buffer to read the package */
-	fip_buffer = (char *)malloc(fip_size);
-	if (fip_buffer == NULL) {
-		printf("ERROR: Cannot allocate %d bytes.\n", fip_size);
-		status = errno;
-		goto parse_fip_fclose;
-	}
-	fip_buffer_end = fip_buffer + fip_size;
-
-	/* Read the file */
-	read_fip_size = fread(fip_buffer, sizeof(char), fip_size, fip);
-	if (read_fip_size != fip_size) {
-		printf("ERROR: Cannot read the FIP.\n");
-		status = EIO;
-		goto parse_fip_free;
-	}
-	fclose(fip);
-	fip = NULL;
-
-	/* The package must at least contain the ToC Header */
-	if (fip_size < sizeof(fip_toc_header_t)) {
-		printf("ERROR: Given FIP is smaller than the ToC header.\n");
-		status = EINVAL;
-		goto parse_fip_free;
-	}
-	/* Set the ToC Header at the base of the buffer */
-	toc_header = (fip_toc_header_t *)fip_buffer;
-	/* The first toc entry should be just after the ToC header */
-	toc_entry = (fip_toc_entry_t *)(toc_header + 1);
-
-	/* While the ToC entry is contained into the buffer */
-	int cnt = 0;
-	while (((char *)toc_entry + sizeof(fip_toc_entry_t)) < fip_buffer_end) {
-		cnt++;
-		/* Check if the ToC Entry is the last one */
-		if (compare_uuids(&toc_entry->uuid, &uuid_null) == 0) {
-			found_last_toc_entry = true;
-			status = 0;
-			break;
-		}
-
-		/* Add the entry into file_info */
-
-		/* Get the new entry in the array and clear it */
-		file_info_entry = &files[file_info_count++];
-		memset(file_info_entry, 0, sizeof(file_info_t));
-
-		/* Copy the info from the ToC entry */
-		copy_uuid(&file_info_entry->name_uuid, &toc_entry->uuid);
-		file_info_entry->image_buffer = fip_buffer +
-		  toc_entry->offset_address;
-		file_info_entry->size = toc_entry->size;
-
-		/* Check if there is a corresponding entry in lookup table */
-		file_info_entry->entry =
-		  get_entry_lookup_from_uuid(&toc_entry->uuid);
-
-		/* Go to the next ToC entry */
-		toc_entry++;
-	}
-
-	if (!found_last_toc_entry) {
-		printf("ERROR: Given FIP does not have an end ToC entry.\n");
-		status = EINVAL;
-		goto parse_fip_free;
-	} else {
-		/* All is well, we should not free any of the loaded images */
-		goto parse_fip_fclose;
-	}
-
- parse_fip_free:
-	if (fip_buffer != NULL) {
-		free(fip_buffer);
-		fip_buffer = NULL;
-	}
-
- parse_fip_fclose:
-	if (fip != NULL) {
-		fclose(fip);
-	}
-
- parse_fip_return:
-	return status;
-}
-
-
-/* Parse all command-line options and return the FIP name if present. */
-static char *get_filename(int argc, char **argv, struct option *options)
-{
-	int c;
-	char *filename = NULL;
-
-	/* Reset option pointer so we parse all args. starts at 1.
-	 * The filename is the only argument that does not have an option flag.
-	 */
-	optind = 1;
-	while (1) {
-		c = getopt_long(argc, argv, "", options, NULL);
-		if (c == -1)
-			break;
-
-		if (c == '?') {
-			/* Failed to parse an option. Fail. */
-			return NULL;
-		}
-	}
-
-	/* Only one argument left then it is the filename.
-	 * We dont expect any other options
-	 */
-	if (optind + 1 == argc)
-		filename = argv[optind];
-
-	return filename;
-}
-
-
-/* Work through command-line options */
-static int parse_cmdline(int argc, char **argv, struct option *options,
-			 int *do_pack)
-{
-	int c;
-	int status = 0;
-	int option_index = 0;
-	entry_lookup_list_t *lookup_entry;
-	int do_dump = 0;
-
-	/* restart parse to process all options. starts at 1. */
-	optind = 1;
-	while (1) {
-		c = getopt_long(argc, argv, "", options, &option_index);
-		if (c == -1)
-			break;
-
-		switch (c) {
-		case OPT_TOC_ENTRY:
-			if (optarg) {
-				/* Does the option expect a filename. */
-				lookup_entry = &toc_entry_lookup_list[option_index];
-				if (lookup_entry->flags & FLAG_FILENAME) {
-					status = add_file_info_entry(lookup_entry, optarg);
-					if (status != 0) {
-						printf("Failed to process %s\n",
-						       options[option_index].name);
-						return status;
-					} else {
-						/* Update package */
-						*do_pack = 1;
-					}
-				}
-			}
-			break;
-
-		case OPT_DUMP:
-			do_dump = 1;
-			continue;
-
-		case OPT_HELP:
-			print_usage();
-			exit(0);
-
-		default:
-			/* Unrecognised options are caught in get_filename() */
-			break;
-		}
-	}
-
-
-	/* Do not dump toc if we have an error as it could hide the error */
-	if ((status == 0) && (do_dump)) {
-		dump_toc();
-	}
-
-	return status;
-
-}
-
-int main(int argc, char **argv)
-{
-	int i;
-	int status;
-	char *fip_filename;
-	int do_pack = 0;
-
-	/* Clear file list table. */
-	memset(files, 0, sizeof(files));
-
-	/* Initialise for getopt_long().
-	 * Use image table as defined at top of file to get options.
-	 * Add 'dump' option, 'help' option and end marker.
-	 */
-	static struct option long_options[(sizeof(toc_entry_lookup_list)/
-					   sizeof(entry_lookup_list_t)) + 2];
-
-	for (i = 0;
-	     /* -1 because we dont want to process end marker in toc table */
-	     i < sizeof(toc_entry_lookup_list)/sizeof(entry_lookup_list_t) - 1;
-	     i++) {
-		long_options[i].name = toc_entry_lookup_list[i].command_line_name;
-		/* The only flag defined at the moment is for a FILENAME */
-		long_options[i].has_arg = toc_entry_lookup_list[i].flags ? 1 : 0;
-		long_options[i].flag = 0;
-		long_options[i].val = OPT_TOC_ENTRY;
-	}
-
-	/* Add '--dump' option */
-	long_options[i].name = "dump";
-	long_options[i].has_arg = 0;
-	long_options[i].flag = 0;
-	long_options[i].val = OPT_DUMP;
-
-	/* Add '--help' option */
-	long_options[++i].name = "help";
-	long_options[i].has_arg = 0;
-	long_options[i].flag = 0;
-	long_options[i].val = OPT_HELP;
-
-	/* Zero the last entry (required) */
-	long_options[++i].name = 0;
-	long_options[i].has_arg = 0;
-	long_options[i].flag = 0;
-	long_options[i].val = 0;
-
-#ifdef DEBUG
-	/* Print all supported options */
-	for (i = 0; i < sizeof(long_options)/sizeof(struct option); i++) {
-		printf("long opt (%d) : name = %s\n", i, long_options[i].name);
-	}
-#endif /* DEBUG */
-
-	/* As the package may already exist and is to be updated we need to get
-	 * the filename from the arguments and load from it.
-	 * NOTE: As this is the first function to look at the program arguments
-	 * it causes a failure if bad options were provided.
-	 */
-	fip_filename = get_filename(argc, argv, long_options);
-
-	/* Try to open the file and load it into memory */
-	if (fip_filename != NULL) {
-		status = parse_fip(fip_filename);
-		if (status != 0) {
-			return status;
-		}
-	}
-
-	/* Work through provided program arguments and perform actions */
-	status = parse_cmdline(argc, argv, long_options, &do_pack);
-	if (status != 0) {
-		return status;
-	};
-
-	if (fip_filename == NULL) {
-		printf("ERROR: Missing FIP filename\n");
-		print_usage();
-		return 0;
-	}
-
-	/* Processed all command line options. Create/update the package if
-	 * required.
-	 */
-	if (do_pack) {
-		status = pack_images(fip_filename);
-		if (status != 0) {
-			printf("Failed to create package (status = %d).\n",
-			       status);
-		}
-	}
-
-	return status;
-}
diff --git a/tools/fip_create/fip_create.h b/tools/fip_create/fip_create.h
deleted file mode 100644
index 3258335..0000000
--- a/tools/fip_create/fip_create.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __FIP_CREATE_H__
-#define __FIP_CREATE_H__
-
-#include <stdint.h>
-#include <uuid.h>
-
-#define MAX_FILES			20
-
-/* TODO: Update this number as required */
-#define TOC_HEADER_SERIAL_NUMBER	0x12345678
-
-#define FLAG_FILENAME			(1 << 0)
-
-typedef struct entry_lookup_list {
-	const char		*name;
-	uuid_t			 name_uuid;
-	const char		*command_line_name;
-	struct file_info	*info;
-	unsigned int		 flags;
-} entry_lookup_list_t;
-
-typedef struct file_info {
-	uuid_t			 name_uuid;
-	const char		*filename;
-	unsigned int		 size;
-	void			*image_buffer;
-	entry_lookup_list_t	*entry;
-} file_info_t;
-
-#endif /* __FIP_CREATE_H__ */
diff --git a/tools/fip_create/firmware_image_package.h b/tools/fip_create/firmware_image_package.h
deleted file mode 120000
index cc61903..0000000
--- a/tools/fip_create/firmware_image_package.h
+++ /dev/null
@@ -1 +0,0 @@
-../../include/common/firmware_image_package.h
\ No newline at end of file
diff --git a/tools/fip_create/uuid.h b/tools/fip_create/uuid.h
deleted file mode 120000
index c77762f..0000000
--- a/tools/fip_create/uuid.h
+++ /dev/null
@@ -1 +0,0 @@
-../../include/stdlib/sys/uuid.h
\ No newline at end of file
diff --git a/tools/fiptool/Makefile b/tools/fiptool/Makefile
new file mode 100644
index 0000000..e0e3923
--- /dev/null
+++ b/tools/fiptool/Makefile
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+MAKE_HELPERS_DIRECTORY := ../../make_helpers/
+include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
+include ${MAKE_HELPERS_DIRECTORY}build_env.mk
+
+PROJECT := fiptool${BIN_EXT}
+OBJECTS := fiptool.o tbbr_config.o
+V ?= 0
+
+override CPPFLAGS += -D_GNU_SOURCE -D_XOPEN_SOURCE=700
+CFLAGS := -Wall -Werror -pedantic -std=c99
+ifeq (${DEBUG},1)
+  CFLAGS += -g -O0 -DDEBUG
+else
+  CFLAGS += -O2
+endif
+LDLIBS := -lcrypto
+
+ifeq (${V},0)
+  Q := @
+else
+  Q :=
+endif
+
+INCLUDE_PATHS := -I../../include/tools_share
+
+HOSTCC ?= gcc
+
+.PHONY: all clean distclean
+
+all: ${PROJECT} fip_create
+
+${PROJECT}: ${OBJECTS} Makefile
+	@echo "  LD      $@"
+	${Q}${HOSTCC} ${OBJECTS} -o $@ ${LDLIBS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+fip_create: fip_create.sh
+	${Q}mkdir -p ../fip_create
+	${Q}install -m 755 fip_create.sh ../fip_create/fip_create
+
+%.o: %.c %.h Makefile
+	@echo "  CC      $<"
+	${Q}${HOSTCC} -c ${CPPFLAGS} ${CFLAGS} ${INCLUDE_PATHS} $< -o $@
+
+clean:
+	$(call SHELL_DELETE_ALL, ${PROJECT} ${OBJECTS} fip_create)
diff --git a/tools/fiptool/fip_create.sh b/tools/fiptool/fip_create.sh
new file mode 100644
index 0000000..0e80199
--- /dev/null
+++ b/tools/fiptool/fip_create.sh
@@ -0,0 +1,127 @@
+#!/bin/sh
+#
+# This script implements the old fip_create tool on top of
+# the new fiptool.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+usage() {
+    cat << EOF
+This tool is used to create a Firmware Image Package.
+
+Usage:
+	fip_create [options] FIP_FILENAME
+
+Options:
+	-h,--help: Print this help message and exit
+	-d,--dump: Print contents of FIP after update
+	-u,--unpack: Unpack images from an existing FIP
+	-f,--force: Overwrite existing files when unpacking images
+
+Components that can be added/updated:
+	--scp-fwu-cfg FILENAME		SCP Firmware Updater Configuration FWU SCP_BL2U
+	--ap-fwu-cfg FILENAME		AP Firmware Updater Configuration BL2U
+	--fwu FILENAME			Firmware Updater NS_BL2U
+	--fwu-cert FILENAME		Non-Trusted Firmware Updater certificate
+	--tb-fw FILENAME		Trusted Boot Firmware BL2
+	--scp-fw FILENAME		SCP Firmware SCP_BL2
+	--soc-fw FILENAME		EL3 Runtime Firmware BL31
+	--tos-fw FILENAME		Secure Payload BL32 (Trusted OS)
+	--tos-fw-extra1 FILENAME	Secure Payload BL32 Extra1 (Trusted OS Extra1)
+	--tos-fw-extra2 FILENAME	Secure Payload BL32 Extra2 (Trusted OS Extra2)
+	--nt-fw FILENAME		Non-Trusted Firmware BL33
+	--rot-cert FILENAME		Root Of Trust key certificate
+	--trusted-key-cert FILENAME	Trusted key certificate
+	--scp-fw-key-cert FILENAME	SCP Firmware key certificate
+	--soc-fw-key-cert FILENAME	SoC Firmware key certificate
+	--tos-fw-key-cert FILENAME	Trusted OS Firmware key certificate
+	--nt-fw-key-cert FILENAME	Non-Trusted Firmware key certificate
+	--tb-fw-cert FILENAME		Trusted Boot Firmware BL2 certificate
+	--scp-fw-cert FILENAME		SCP Firmware content certificate
+	--soc-fw-cert FILENAME		SoC Firmware content certificate
+	--tos-fw-cert FILENAME		Trusted OS Firmware content certificate
+	--nt-fw-cert FILENAME		Non-Trusted Firmware content certificate
+EOF
+    exit
+}
+
+echo "!! The fip_create tool is deprecated.  Use the new fiptool. !!"
+basedir="$(dirname $0)/../fiptool"
+fiptool_args=
+while :; do
+    case "$1" in
+	-h | --help )
+	    usage
+	    break ;;
+	-d | --dump )
+	    fiptool_args="info $fiptool_args"
+	    shift ;;
+	-u | --unpack )
+	    fiptool_args="unpack $fiptool_args"
+	    shift ;;
+	-f | --force )
+	    fiptool_args="$fiptool_args --force"
+	    shift ;;
+	--scp-fwu-cfg | \
+	    --ap-fwu-cfg | \
+	    --fwu | \
+	    --fwu-cert | \
+	    --tb-fw | \
+	    --scp-fw | \
+	    --soc-fw | \
+	    --tos-fw | \
+	    --tos-fw-extra1 | \
+	    --tos-fw-extra2 | \
+	    --nt-fw | \
+	    --rot-cert | \
+	    --trusted-key-cert | \
+	    --scp-fw-key-cert | \
+	    --soc-fw-key-cert | \
+	    --tos-fw-key-cert | \
+	    --nt-fw-key-cert | \
+	    --tb-fw-cert | \
+	    --scp-fw-cert | \
+	    --soc-fw-cert | \
+	    --tos-fw-cert | \
+	    --nt-fw-cert )
+	    fiptool_args="$fiptool_args $1"
+	    shift
+	    if test -z "$1"; then
+		usage
+	    fi
+	    fiptool_args="$fiptool_args $1"
+	    shift ;;
+	* )
+	    break ;;
+    esac
+done
+
+# expect a FIP filename
+if test -z "$1"; then
+    usage
+fi
+
+is_pack_cmd=1
+for arg in $fiptool_args; do
+    case "$arg" in
+	unpack )
+	    is_pack_cmd=0
+	    break ;;
+	info )
+	    is_pack_cmd=0
+	    break ;;
+	* )
+    esac
+done
+
+# if --unpack and --dump were not specified
+# the default action is to pack
+if test "$is_pack_cmd" -eq 1; then
+    fiptool_args="update $fiptool_args"
+fi
+
+# append FIP filename
+fiptool_args="$fiptool_args $1"
+echo "Invoking fiptool with args: $fiptool_args"
+"$basedir/fiptool" $fiptool_args
diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c
new file mode 100644
index 0000000..1dcb7e8
--- /dev/null
+++ b/tools/fiptool/fiptool.c
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "fiptool.h"
+#include "tbbr_config.h"
+
+#define OPT_TOC_ENTRY 0
+#define OPT_PLAT_TOC_FLAGS 1
+#define OPT_ALIGN 2
+
+static int info_cmd(int argc, char *argv[]);
+static void info_usage(void);
+static int create_cmd(int argc, char *argv[]);
+static void create_usage(void);
+static int update_cmd(int argc, char *argv[]);
+static void update_usage(void);
+static int unpack_cmd(int argc, char *argv[]);
+static void unpack_usage(void);
+static int remove_cmd(int argc, char *argv[]);
+static void remove_usage(void);
+static int version_cmd(int argc, char *argv[]);
+static void version_usage(void);
+static int help_cmd(int argc, char *argv[]);
+static void usage(void);
+
+/* Available subcommands. */
+static cmd_t cmds[] = {
+	{ .name = "info",    .handler = info_cmd,    .usage = info_usage    },
+	{ .name = "create",  .handler = create_cmd,  .usage = create_usage  },
+	{ .name = "update",  .handler = update_cmd,  .usage = update_usage  },
+	{ .name = "unpack",  .handler = unpack_cmd,  .usage = unpack_usage  },
+	{ .name = "remove",  .handler = remove_cmd,  .usage = remove_usage  },
+	{ .name = "version", .handler = version_cmd, .usage = version_usage },
+	{ .name = "help",    .handler = help_cmd,    .usage = NULL          },
+};
+
+static image_desc_t *image_desc_head;
+static size_t nr_image_descs;
+static uuid_t uuid_null = { 0 };
+static int verbose;
+
+static void vlog(int prio, const char *msg, va_list ap)
+{
+	char *prefix[] = { "DEBUG", "WARN", "ERROR" };
+
+	fprintf(stderr, "%s: ", prefix[prio]);
+	vfprintf(stderr, msg, ap);
+	fputc('\n', stderr);
+}
+
+static void log_dbgx(const char *msg, ...)
+{
+	va_list ap;
+
+	va_start(ap, msg);
+	vlog(LOG_DBG, msg, ap);
+	va_end(ap);
+}
+
+static void log_warnx(const char *msg, ...)
+{
+	va_list ap;
+
+	va_start(ap, msg);
+	vlog(LOG_WARN, msg, ap);
+	va_end(ap);
+}
+
+static void log_err(const char *msg, ...)
+{
+	char buf[512];
+	va_list ap;
+
+	va_start(ap, msg);
+	snprintf(buf, sizeof(buf), "%s: %s", msg, strerror(errno));
+	vlog(LOG_ERR, buf, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static void log_errx(const char *msg, ...)
+{
+	va_list ap;
+
+	va_start(ap, msg);
+	vlog(LOG_ERR, msg, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static char *xstrdup(const char *s, const char *msg)
+{
+	char *d;
+
+	d = strdup(s);
+	if (d == NULL)
+		log_errx("strdup: %s", msg);
+	return d;
+}
+
+static void *xmalloc(size_t size, const char *msg)
+{
+	void *d;
+
+	d = malloc(size);
+	if (d == NULL)
+		log_errx("malloc: %s", msg);
+	return d;
+}
+
+static void *xzalloc(size_t size, const char *msg)
+{
+	return memset(xmalloc(size, msg), 0, size);
+}
+
+static void xfwrite(void *buf, size_t size, FILE *fp, const char *filename)
+{
+	if (fwrite(buf, 1, size, fp) != size)
+		log_errx("Failed to write %s", filename);
+}
+
+static image_desc_t *new_image_desc(const uuid_t *uuid,
+    const char *name, const char *cmdline_name)
+{
+	image_desc_t *desc;
+
+	desc = xzalloc(sizeof(*desc),
+	    "failed to allocate memory for image descriptor");
+	memcpy(&desc->uuid, uuid, sizeof(uuid_t));
+	desc->name = xstrdup(name,
+	    "failed to allocate memory for image name");
+	desc->cmdline_name = xstrdup(cmdline_name,
+	    "failed to allocate memory for image command line name");
+	desc->action = DO_UNSPEC;
+	return desc;
+}
+
+static void set_image_desc_action(image_desc_t *desc, int action,
+    const char *arg)
+{
+	assert(desc != NULL);
+
+	if (desc->action_arg != (char *)DO_UNSPEC)
+		free(desc->action_arg);
+	desc->action = action;
+	desc->action_arg = NULL;
+	if (arg != NULL)
+		desc->action_arg = xstrdup(arg,
+		    "failed to allocate memory for argument");
+}
+
+static void free_image_desc(image_desc_t *desc)
+{
+	free(desc->name);
+	free(desc->cmdline_name);
+	free(desc->action_arg);
+	free(desc->image);
+	free(desc);
+}
+
+static void add_image_desc(image_desc_t *desc)
+{
+	image_desc_t **p = &image_desc_head;
+
+	while (*p)
+		p = &(*p)->next;
+
+	assert(*p == NULL);
+	*p = desc;
+	nr_image_descs++;
+}
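
Editor's note: add_image_desc() above appends with the pointer-to-pointer idiom, so the empty list needs no special case. A minimal standalone sketch of the same idiom (illustrative only, not part of the patch; struct node and append() are hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int v; struct node *next; };

    static void append(struct node **head, int v)
    {
        struct node *n = calloc(1, sizeof(*n));  /* error handling omitted */
        struct node **p = head;

        n->v = v;
        while (*p != NULL)
            p = &(*p)->next;  /* walk to the terminating NULL pointer */
        *p = n;               /* the same store works for an empty list */
    }

    int main(void)
    {
        struct node *head = NULL;

        append(&head, 1);
        append(&head, 2);
        for (struct node *n = head; n != NULL; n = n->next)
            printf("%d\n", n->v);  /* prints 1 then 2 */
        return 0;
    }
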
+
+static void free_image_descs(void)
+{
+	image_desc_t *desc = image_desc_head, *tmp;
+
+	while (desc != NULL) {
+		tmp = desc->next;
+		free_image_desc(desc);
+		desc = tmp;
+		nr_image_descs--;
+	}
+	assert(nr_image_descs == 0);
+}
+
+static void fill_image_descs(void)
+{
+	toc_entry_t *toc_entry;
+
+	for (toc_entry = toc_entries;
+	     toc_entry->cmdline_name != NULL;
+	     toc_entry++) {
+		image_desc_t *desc;
+
+		desc = new_image_desc(&toc_entry->uuid,
+		    toc_entry->name,
+		    toc_entry->cmdline_name);
+		add_image_desc(desc);
+	}
+}
+
+static image_desc_t *lookup_image_desc_from_uuid(const uuid_t *uuid)
+{
+	image_desc_t *desc;
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next)
+		if (memcmp(&desc->uuid, uuid, sizeof(uuid_t)) == 0)
+			return desc;
+	return NULL;
+}
+
+static image_desc_t *lookup_image_desc_from_opt(const char *opt)
+{
+	image_desc_t *desc;
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next)
+		if (strcmp(desc->cmdline_name, opt) == 0)
+			return desc;
+	return NULL;
+}
+
+static void uuid_to_str(char *s, size_t len, const uuid_t *u)
+{
+	assert(len >= (_UUID_STR_LEN + 1));
+
+	snprintf(s, len, "%08X-%04X-%04X-%04X-%04X%04X%04X",
+	    u->time_low,
+	    u->time_mid,
+	    u->time_hi_and_version,
+	    ((uint16_t)u->clock_seq_hi_and_reserved << 8) | u->clock_seq_low,
+	    ((uint16_t)u->node[0] << 8) | u->node[1],
+	    ((uint16_t)u->node[2] << 8) | u->node[3],
+	    ((uint16_t)u->node[4] << 8) | u->node[5]);
+}
+
+static void uuid_from_str(uuid_t *u, const char *s)
+{
+	int n;
+
+	if (s == NULL)
+		log_errx("UUID cannot be NULL");
+	if (strlen(s) != _UUID_STR_LEN)
+		log_errx("Invalid UUID: %s", s);
+
+	n = sscanf(s,
+	    "%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
+	    &u->time_low, &u->time_mid, &u->time_hi_and_version,
+	    &u->clock_seq_hi_and_reserved, &u->clock_seq_low, &u->node[0],
+	    &u->node[1], &u->node[2], &u->node[3], &u->node[4], &u->node[5]);
+	/*
+	 * Given the format specifier above, we expect 11 items to be scanned
+	 * for a properly formatted UUID.
+	 */
+	if (n != 11)
+		log_errx("Invalid UUID: %s", s);
+}
+
+static int parse_fip(const char *filename, fip_toc_header_t *toc_header_out)
+{
+	struct BLD_PLAT_STAT st;
+	FILE *fp;
+	char *buf, *bufend;
+	fip_toc_header_t *toc_header;
+	fip_toc_entry_t *toc_entry;
+	int terminated = 0;
+
+	fp = fopen(filename, "rb");
+	if (fp == NULL)
+		log_err("fopen %s", filename);
+
+	if (fstat(fileno(fp), &st) == -1)
+		log_err("fstat %s", filename);
+
+	buf = xmalloc(st.st_size, "failed to load file into memory");
+	if (fread(buf, 1, st.st_size, fp) != st.st_size)
+		log_errx("Failed to read %s", filename);
+	bufend = buf + st.st_size;
+	fclose(fp);
+
+	if (st.st_size < sizeof(fip_toc_header_t))
+		log_errx("FIP %s is truncated", filename);
+
+	toc_header = (fip_toc_header_t *)buf;
+	toc_entry = (fip_toc_entry_t *)(toc_header + 1);
+
+	if (toc_header->name != TOC_HEADER_NAME)
+		log_errx("%s is not a FIP file", filename);
+
+	/* Return the ToC header if the caller wants it. */
+	if (toc_header_out != NULL)
+		*toc_header_out = *toc_header;
+
+	/* Walk through each ToC entry in the file. */
+	while ((char *)toc_entry + sizeof(*toc_entry) - 1 < bufend) {
+		image_t *image;
+		image_desc_t *desc;
+
+		/* Found the ToC terminator, we are done. */
+		if (memcmp(&toc_entry->uuid, &uuid_null, sizeof(uuid_t)) == 0) {
+			terminated = 1;
+			break;
+		}
+
+		/*
+		 * Build a new image out of the ToC entry and add it to the
+		 * table of images.
+		 */
+		image = xzalloc(sizeof(*image),
+		    "failed to allocate memory for image");
+		image->toc_e = *toc_entry;
+		image->buffer = xmalloc(toc_entry->size,
+		    "failed to allocate image buffer, is FIP file corrupted?");
+		/* Overflow checks before memory copy. */
+		if (toc_entry->size > (uint64_t)-1 - toc_entry->offset_address)
+			log_errx("FIP %s is corrupted", filename);
+		if (toc_entry->size + toc_entry->offset_address > st.st_size)
+			log_errx("FIP %s is corrupted", filename);
+
+		memcpy(image->buffer, buf + toc_entry->offset_address,
+		    toc_entry->size);
+
+		/* If this is an unknown image, create a descriptor for it. */
+		desc = lookup_image_desc_from_uuid(&toc_entry->uuid);
+		if (desc == NULL) {
+			char name[_UUID_STR_LEN + 1], filename[PATH_MAX];
+
+			uuid_to_str(name, sizeof(name), &toc_entry->uuid);
+			snprintf(filename, sizeof(filename), "%s%s",
+			    name, ".bin");
+			desc = new_image_desc(&toc_entry->uuid, name, "blob");
+			desc->action = DO_UNPACK;
+			desc->action_arg = xstrdup(filename,
+			    "failed to allocate memory for blob filename");
+			add_image_desc(desc);
+		}
+
+		assert(desc->image == NULL);
+		desc->image = image;
+
+		toc_entry++;
+	}
+
+	if (terminated == 0)
+		log_errx("FIP %s does not have a ToC terminator entry",
+		    filename);
+	free(buf);
+	return 0;
+}
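
Editor's note: the two checks in parse_fip() above reject a ToC entry whose offset plus size would wrap around, and one that would run past the end of the file, before anything is copied. A standalone sketch of that bounds check (illustrative only; entry_in_bounds() and the sample numbers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static int entry_in_bounds(uint64_t offset, uint64_t size, uint64_t file_size)
    {
        if (size > UINT64_MAX - offset)     /* offset + size would overflow */
            return 0;
        if (offset + size > file_size)      /* entry extends past end of file */
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", entry_in_bounds(0x100, 0x200, 0x400));       /* 1: fits */
        printf("%d\n", entry_in_bounds(0x100, 0x380, 0x400));       /* 0: too long */
        printf("%d\n", entry_in_bounds(0x100, UINT64_MAX, 0x400));  /* 0: wraps */
        return 0;
    }
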
+
+static image_t *read_image_from_file(const uuid_t *uuid, const char *filename)
+{
+	struct BLD_PLAT_STAT st;
+	image_t *image;
+	FILE *fp;
+
+	assert(uuid != NULL);
+	assert(filename != NULL);
+
+	fp = fopen(filename, "rb");
+	if (fp == NULL)
+		log_err("fopen %s", filename);
+
+	if (fstat(fileno(fp), &st) == -1)
+		log_errx("fstat %s", filename);
+
+	image = xzalloc(sizeof(*image), "failed to allocate memory for image");
+	image->toc_e.uuid = *uuid;
+	image->buffer = xmalloc(st.st_size, "failed to allocate image buffer");
+	if (fread(image->buffer, 1, st.st_size, fp) != st.st_size)
+		log_errx("Failed to read %s", filename);
+	image->toc_e.size = st.st_size;
+
+	fclose(fp);
+	return image;
+}
+
+static int write_image_to_file(const image_t *image, const char *filename)
+{
+	FILE *fp;
+
+	fp = fopen(filename, "wb");
+	if (fp == NULL)
+		log_err("fopen");
+	xfwrite(image->buffer, image->toc_e.size, fp, filename);
+	fclose(fp);
+	return 0;
+}
+
+static struct option *add_opt(struct option *opts, size_t *nr_opts,
+    const char *name, int has_arg, int val)
+{
+	opts = realloc(opts, (*nr_opts + 1) * sizeof(*opts));
+	if (opts == NULL)
+		log_err("realloc");
+	opts[*nr_opts].name = name;
+	opts[*nr_opts].has_arg = has_arg;
+	opts[*nr_opts].flag = NULL;
+	opts[*nr_opts].val = val;
+	++*nr_opts;
+	return opts;
+}
+
+static struct option *fill_common_opts(struct option *opts, size_t *nr_opts,
+    int has_arg)
+{
+	image_desc_t *desc;
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next)
+		opts = add_opt(opts, nr_opts, desc->cmdline_name, has_arg,
+		    OPT_TOC_ENTRY);
+	return opts;
+}
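
Editor's note: add_opt() and fill_common_opts() above build the getopt_long() table dynamically and rely on a zeroed terminator entry to end it. A minimal sketch of the same pattern (illustrative only, not part of the patch; the "soc-fw" option is just an example and realloc() error checking is omitted):

    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char *argv[])
    {
        struct option *opts = NULL;
        size_t nr_opts = 0;
        int c;

        /* Roughly what add_opt(opts, &nr_opts, "soc-fw", required_argument, 0) does. */
        opts = realloc(opts, (nr_opts + 1) * sizeof(*opts));
        opts[nr_opts++] = (struct option){ "soc-fw", required_argument, NULL, 0 };

        /* Zeroed terminator entry: getopt_long() stops scanning here. */
        opts = realloc(opts, (nr_opts + 1) * sizeof(*opts));
        opts[nr_opts++] = (struct option){ NULL, 0, NULL, 0 };

        while ((c = getopt_long(argc, argv, "", opts, NULL)) != -1)
            if (c == 0)  /* val was 0 and flag was NULL, so 0 comes back */
                printf("--soc-fw %s\n", optarg);

        free(opts);
        return 0;
    }
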
+
+static void md_print(const unsigned char *md, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		printf("%02x", md[i]);
+}
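
Editor's note: when --verbose is set, info_cmd() below hashes each image with OpenSSL's one-shot SHA256() and prints it through md_print() as lower-case hex. A self-contained sketch of that pair of calls (illustrative only; the buffer contents are invented, and it links with -lcrypto exactly as the Makefile above already does):

    #include <openssl/sha.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const unsigned char buf[] = "firmware image contents";
        unsigned char md[SHA256_DIGEST_LENGTH];

        SHA256(buf, strlen((const char *)buf), md);
        for (size_t i = 0; i < sizeof(md); i++)  /* same loop as md_print() */
            printf("%02x", md[i]);
        putchar('\n');
        return 0;
    }
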
+
+static int info_cmd(int argc, char *argv[])
+{
+	image_desc_t *desc;
+	fip_toc_header_t toc_header;
+
+	if (argc != 2)
+		info_usage();
+	argc--, argv++;
+
+	parse_fip(argv[0], &toc_header);
+
+	if (verbose) {
+		log_dbgx("toc_header[name]: 0x%llX",
+		    (unsigned long long)toc_header.name);
+		log_dbgx("toc_header[serial_number]: 0x%llX",
+		    (unsigned long long)toc_header.serial_number);
+		log_dbgx("toc_header[flags]: 0x%llX",
+		    (unsigned long long)toc_header.flags);
+	}
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
+
+		if (image == NULL)
+			continue;
+		printf("%s: offset=0x%llX, size=0x%llX, cmdline=\"--%s\"",
+		       desc->name,
+		       (unsigned long long)image->toc_e.offset_address,
+		       (unsigned long long)image->toc_e.size,
+		       desc->cmdline_name);
+#ifndef _MSC_VER	/* We don't have SHA256 for Visual Studio. */
+		if (verbose) {
+			unsigned char md[SHA256_DIGEST_LENGTH];
+
+			SHA256(image->buffer, image->toc_e.size, md);
+			printf(", sha256=");
+			md_print(md, sizeof(md));
+		}
+#endif
+		putchar('\n');
+	}
+
+	return 0;
+}
+
+static void info_usage(void)
+{
+	printf("fiptool info FIP_FILENAME\n");
+	exit(1);
+}
+
+static int pack_images(const char *filename, uint64_t toc_flags, unsigned long align)
+{
+	FILE *fp;
+	image_desc_t *desc;
+	fip_toc_header_t *toc_header;
+	fip_toc_entry_t *toc_entry;
+	char *buf;
+	uint64_t entry_offset, buf_size, payload_size = 0;
+	size_t nr_images = 0;
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next)
+		if (desc->image != NULL)
+			nr_images++;
+
+	buf_size = sizeof(fip_toc_header_t) +
+	    sizeof(fip_toc_entry_t) * (nr_images + 1);
+	buf = calloc(1, buf_size);
+	if (buf == NULL)
+		log_err("calloc");
+
+	/* Build up header and ToC entries from the image table. */
+	toc_header = (fip_toc_header_t *)buf;
+	toc_header->name = TOC_HEADER_NAME;
+	toc_header->serial_number = TOC_HEADER_SERIAL_NUMBER;
+	toc_header->flags = toc_flags;
+
+	toc_entry = (fip_toc_entry_t *)(toc_header + 1);
+
+	entry_offset = buf_size;
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
+
+		if (image == NULL)
+			continue;
+		payload_size += image->toc_e.size;
+		entry_offset = (entry_offset + align - 1) & ~(align - 1);
+		image->toc_e.offset_address = entry_offset;
+		*toc_entry++ = image->toc_e;
+		entry_offset += image->toc_e.size;
+	}
+
+	/* Append a null uuid entry to mark the end of ToC entries. */
+	memset(toc_entry, 0, sizeof(*toc_entry));
+	toc_entry->offset_address = entry_offset;
+
+	/* Generate the FIP file. */
+	fp = fopen(filename, "wb");
+	if (fp == NULL)
+		log_err("fopen %s", filename);
+
+	if (verbose)
+		log_dbgx("Metadata size: %zu bytes", buf_size);
+
+	xfwrite(buf, buf_size, fp, filename);
+	free(buf);
+
+	if (verbose)
+		log_dbgx("Payload size: %zu bytes", payload_size);
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image = desc->image;
+
+		if (image == NULL)
+			continue;
+		if (fseek(fp, image->toc_e.offset_address, SEEK_SET))
+			log_errx("Failed to set file position");
+
+		xfwrite(image->buffer, image->toc_e.size, fp, filename);
+	}
+
+	fclose(fp);
+	return 0;
+}
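
Editor's note: pack_images() above writes one ToC header, one entry per image plus the null terminator entry, and then each payload at the next offset rounded up to the requested alignment. A sketch of that offset arithmetic (illustrative only; the structs are simplified stand-ins for fip_toc_header_t/fip_toc_entry_t and the image sizes are invented):

    #include <stdint.h>
    #include <stdio.h>

    struct hdr   { uint32_t name; uint32_t serial_number; uint64_t flags; };
    struct entry { uint8_t uuid[16]; uint64_t offset_address; uint64_t size; uint64_t flags; };

    int main(void)
    {
        uint64_t sizes[] = { 0x9000, 0x4d0 };  /* two hypothetical images */
        size_t nr_images = sizeof(sizes) / sizeof(sizes[0]);
        uint64_t align = 0x1000;               /* as set by --align */
        uint64_t offset = sizeof(struct hdr) +
            sizeof(struct entry) * (nr_images + 1);  /* metadata size */

        for (size_t i = 0; i < nr_images; i++) {
            offset = (offset + align - 1) & ~(align - 1);  /* round up */
            printf("image %zu: offset=0x%llX size=0x%llX\n", i,
                (unsigned long long)offset, (unsigned long long)sizes[i]);
            offset += sizes[i];
        }
        return 0;
    }
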
+
+/*
+ * This function is shared between the create and update subcommands.
+ * The difference between the two subcommands is that when the FIP file
+ * is created, the parsing of an existing FIP is skipped.  This results
+ * in update_fip() creating the new FIP file from scratch because the
+ * internal image table is not populated.
+ */
+static void update_fip(void)
+{
+	image_desc_t *desc;
+
+	/* Add or replace images in the FIP file. */
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		image_t *image;
+
+		if (desc->action != DO_PACK)
+			continue;
+
+		image = read_image_from_file(&desc->uuid,
+		    desc->action_arg);
+		if (desc->image != NULL) {
+			if (verbose) {
+				log_dbgx("Replacing %s with %s",
+				    desc->cmdline_name,
+				    desc->action_arg);
+			}
+			free(desc->image);
+			desc->image = image;
+		} else {
+			if (verbose)
+				log_dbgx("Adding image %s",
+				    desc->action_arg);
+			desc->image = image;
+		}
+	}
+}
+
+static void parse_plat_toc_flags(const char *arg, unsigned long long *toc_flags)
+{
+	unsigned long long flags;
+	char *endptr;
+
+	errno = 0;
+	flags = strtoull(arg, &endptr, 16);
+	if (*endptr != '\0' || flags > UINT16_MAX || errno != 0)
+		log_errx("Invalid platform ToC flags: %s", arg);
+	/* Platform ToC flags is a 16-bit field occupying bits [32-47]. */
+	*toc_flags |= flags << 32;
+}
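
Editor's note: as the comment above notes, the platform ToC flags are a 16-bit field in bits [32:47] of the 64-bit ToC header flags word, so the parsed value is simply shifted left by 32 and OR-ed in. A tiny sketch (illustrative only; 0x123 is an arbitrary example value):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t toc_flags = 0;
        uint64_t plat_flags = 0x123;  /* e.g. --plat-toc-flags 0x123 */

        toc_flags |= plat_flags << 32;
        printf("0x%016" PRIx64 "\n", toc_flags);  /* 0x0000012300000000 */
        return 0;
    }
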
+
+static int is_power_of_2(unsigned long x)
+{
+	return x && !(x & (x - 1));
+}
+
+static unsigned long get_image_align(char *arg)
+{
+	char *endptr;
+	unsigned long align;
+
+	errno = 0;
+	align = strtoul(arg, &endptr, 0);
+	if (*endptr != '\0' || !is_power_of_2(align) || errno != 0)
+		log_errx("Invalid alignment: %s", arg);
+
+	return align;
+}
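
Editor's note: get_image_align() above accepts decimal, octal or hex (strtoul() with base 0) and only keeps values that pass the usual single-set-bit power-of-two test. A standalone sketch of that parse-and-validate step (illustrative only; the sample argument strings are invented):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int is_power_of_2(unsigned long x)
    {
        /* A power of two has one set bit; x & (x - 1) clears it. */
        return x && !(x & (x - 1));
    }

    int main(void)
    {
        const char *args[] = { "4096", "0x1000", "12ab", "3" };

        for (int i = 0; i < 4; i++) {
            char *endptr;
            unsigned long align;

            errno = 0;
            align = strtoul(args[i], &endptr, 0);  /* base 0 accepts 0x... too */
            if (*endptr != '\0' || errno != 0 || !is_power_of_2(align))
                printf("%-6s -> rejected\n", args[i]);
            else
                printf("%-6s -> align=%lu\n", args[i], align);
        }
        return 0;
    }
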
+
+static void parse_blob_opt(char *arg, uuid_t *uuid, char *filename, size_t len)
+{
+	char *p;
+
+	for (p = strtok(arg, ","); p != NULL; p = strtok(NULL, ",")) {
+		if (strncmp(p, "uuid=", strlen("uuid=")) == 0) {
+			p += strlen("uuid=");
+			uuid_from_str(uuid, p);
+		} else if (strncmp(p, "file=", strlen("file=")) == 0) {
+			p += strlen("file=");
+			snprintf(filename, len, "%s", p);
+		}
+	}
+}
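
Editor's note: parse_blob_opt() above splits a --blob argument of the form uuid=...,file=... on commas and keys off the two prefixes. A standalone sketch of what gets extracted (illustrative only; the UUID and filename are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char arg[] = "uuid=01234567-89ab-cdef-0123-456789abcdef,file=extra.bin";
        char *p;

        for (p = strtok(arg, ","); p != NULL; p = strtok(NULL, ",")) {
            if (strncmp(p, "uuid=", strlen("uuid=")) == 0)
                printf("uuid string: %s\n", p + strlen("uuid="));
            else if (strncmp(p, "file=", strlen("file=")) == 0)
                printf("filename:    %s\n", p + strlen("file="));
        }
        return 0;
    }
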
+
+static int create_cmd(int argc, char *argv[])
+{
+	struct option *opts = NULL;
+	size_t nr_opts = 0;
+	unsigned long long toc_flags = 0;
+	unsigned long align = 1;
+
+	if (argc < 2)
+		create_usage();
+
+	opts = fill_common_opts(opts, &nr_opts, required_argument);
+	opts = add_opt(opts, &nr_opts, "plat-toc-flags", required_argument,
+	    OPT_PLAT_TOC_FLAGS);
+	opts = add_opt(opts, &nr_opts, "align", required_argument, OPT_ALIGN);
+	opts = add_opt(opts, &nr_opts, "blob", required_argument, 'b');
+	opts = add_opt(opts, &nr_opts, NULL, 0, 0);
+
+	while (1) {
+		int c, opt_index = 0;
+
+		c = getopt_long(argc, argv, "b:", opts, &opt_index);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case OPT_TOC_ENTRY: {
+			image_desc_t *desc;
+
+			desc = lookup_image_desc_from_opt(opts[opt_index].name);
+			set_image_desc_action(desc, DO_PACK, optarg);
+			break;
+		}
+		case OPT_PLAT_TOC_FLAGS:
+			parse_plat_toc_flags(optarg, &toc_flags);
+			break;
+		case OPT_ALIGN:
+			align = get_image_align(optarg);
+			break;
+		case 'b': {
+			char name[_UUID_STR_LEN + 1];
+			char filename[PATH_MAX] = { 0 };
+			uuid_t uuid = { 0 };
+			image_desc_t *desc;
+
+			parse_blob_opt(optarg, &uuid,
+			    filename, sizeof(filename));
+
+			if (memcmp(&uuid, &uuid_null, sizeof(uuid_t)) == 0 ||
+			    filename[0] == '\0')
+				create_usage();
+
+			desc = lookup_image_desc_from_uuid(&uuid);
+			if (desc == NULL) {
+				uuid_to_str(name, sizeof(name), &uuid);
+				desc = new_image_desc(&uuid, name, "blob");
+				add_image_desc(desc);
+			}
+			set_image_desc_action(desc, DO_PACK, filename);
+			break;
+		}
+		default:
+			create_usage();
+		}
+	}
+	argc -= optind;
+	argv += optind;
+	free(opts);
+
+	if (argc == 0)
+		create_usage();
+
+	update_fip();
+
+	pack_images(argv[0], toc_flags, align);
+	return 0;
+}
+
+static void create_usage(void)
+{
+	toc_entry_t *toc_entry = toc_entries;
+
+	printf("fiptool create [opts] FIP_FILENAME\n");
+	printf("\n");
+	printf("Options:\n");
+	printf("  --align <value>\t\tEach image is aligned to <value> (default: 1).\n");
+	printf("  --blob uuid=...,file=...\tAdd an image with the given UUID pointed to by file.\n");
+	printf("  --plat-toc-flags <value>\t16-bit platform specific flag field occupying bits 32-47 in 64-bit ToC header.\n");
+	printf("\n");
+	printf("Specific images are packed with the following options:\n");
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+	exit(1);
+}
+
+static int update_cmd(int argc, char *argv[])
+{
+	struct option *opts = NULL;
+	size_t nr_opts = 0;
+	char outfile[PATH_MAX] = { 0 };
+	fip_toc_header_t toc_header = { 0 };
+	unsigned long long toc_flags = 0;
+	unsigned long align = 1;
+	int pflag = 0;
+
+	if (argc < 2)
+		update_usage();
+
+	opts = fill_common_opts(opts, &nr_opts, required_argument);
+	opts = add_opt(opts, &nr_opts, "align", required_argument, OPT_ALIGN);
+	opts = add_opt(opts, &nr_opts, "blob", required_argument, 'b');
+	opts = add_opt(opts, &nr_opts, "out", required_argument, 'o');
+	opts = add_opt(opts, &nr_opts, "plat-toc-flags", required_argument,
+	    OPT_PLAT_TOC_FLAGS);
+	opts = add_opt(opts, &nr_opts, NULL, 0, 0);
+
+	while (1) {
+		int c, opt_index = 0;
+
+		c = getopt_long(argc, argv, "b:o:", opts, &opt_index);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case OPT_TOC_ENTRY: {
+			image_desc_t *desc;
+
+			desc = lookup_image_desc_from_opt(opts[opt_index].name);
+			set_image_desc_action(desc, DO_PACK, optarg);
+			break;
+		}
+		case OPT_PLAT_TOC_FLAGS:
+			parse_plat_toc_flags(optarg, &toc_flags);
+			pflag = 1;
+			break;
+		case 'b': {
+			char name[_UUID_STR_LEN + 1];
+			char filename[PATH_MAX] = { 0 };
+			uuid_t uuid = { 0 };
+			image_desc_t *desc;
+
+			parse_blob_opt(optarg, &uuid,
+			    filename, sizeof(filename));
+
+			if (memcmp(&uuid, &uuid_null, sizeof(uuid_t)) == 0 ||
+			    filename[0] == '\0')
+				update_usage();
+
+			desc = lookup_image_desc_from_uuid(&uuid);
+			if (desc == NULL) {
+				uuid_to_str(name, sizeof(name), &uuid);
+				desc = new_image_desc(&uuid, name, "blob");
+				add_image_desc(desc);
+			}
+			set_image_desc_action(desc, DO_PACK, filename);
+			break;
+		}
+		case OPT_ALIGN:
+			align = get_image_align(optarg);
+			break;
+		case 'o':
+			snprintf(outfile, sizeof(outfile), "%s", optarg);
+			break;
+		default:
+			update_usage();
+		}
+	}
+	argc -= optind;
+	argv += optind;
+	free(opts);
+
+	if (argc == 0)
+		update_usage();
+
+	if (outfile[0] == '\0')
+		snprintf(outfile, sizeof(outfile), "%s", argv[0]);
+
+	if (access(argv[0], F_OK) == 0)
+		parse_fip(argv[0], &toc_header);
+
+	if (pflag)
+		toc_header.flags &= ~(0xffffULL << 32);
+	toc_flags = (toc_header.flags |= toc_flags);
+
+	update_fip();
+
+	pack_images(outfile, toc_flags, align);
+	return 0;
+}
+
+static void update_usage(void)
+{
+	toc_entry_t *toc_entry = toc_entries;
+
+	printf("fiptool update [opts] FIP_FILENAME\n");
+	printf("\n");
+	printf("Options:\n");
+	printf("  --align <value>\t\tEach image is aligned to <value> (default: 1).\n");
+	printf("  --blob uuid=...,file=...\tAdd or update an image with the given UUID pointed to by file.\n");
+	printf("  --out FIP_FILENAME\t\tSet an alternative output FIP file.\n");
+	printf("  --plat-toc-flags <value>\t16-bit platform specific flag field occupying bits 32-47 in 64-bit ToC header.\n");
+	printf("\n");
+	printf("Specific images are packed with the following options:\n");
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+	exit(1);
+}
+
+static int unpack_cmd(int argc, char *argv[])
+{
+	struct option *opts = NULL;
+	size_t nr_opts = 0;
+	char outdir[PATH_MAX] = { 0 };
+	image_desc_t *desc;
+	int fflag = 0;
+	int unpack_all = 1;
+
+	if (argc < 2)
+		unpack_usage();
+
+	opts = fill_common_opts(opts, &nr_opts, required_argument);
+	opts = add_opt(opts, &nr_opts, "blob", required_argument, 'b');
+	opts = add_opt(opts, &nr_opts, "force", no_argument, 'f');
+	opts = add_opt(opts, &nr_opts, "out", required_argument, 'o');
+	opts = add_opt(opts, &nr_opts, NULL, 0, 0);
+
+	while (1) {
+		int c, opt_index = 0;
+
+		c = getopt_long(argc, argv, "b:fo:", opts, &opt_index);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case OPT_TOC_ENTRY: {
+			image_desc_t *desc;
+
+			desc = lookup_image_desc_from_opt(opts[opt_index].name);
+			set_image_desc_action(desc, DO_UNPACK, optarg);
+			unpack_all = 0;
+			break;
+		}
+		case 'b': {
+			char name[_UUID_STR_LEN + 1];
+			char filename[PATH_MAX] = { 0 };
+			uuid_t uuid = { 0 };
+			image_desc_t *desc;
+
+			parse_blob_opt(optarg, &uuid,
+			    filename, sizeof(filename));
+
+			if (memcmp(&uuid, &uuid_null, sizeof(uuid_t)) == 0 ||
+			    filename[0] == '\0')
+				unpack_usage();
+
+			desc = lookup_image_desc_from_uuid(&uuid);
+			if (desc == NULL) {
+				uuid_to_str(name, sizeof(name), &uuid);
+				desc = new_image_desc(&uuid, name, "blob");
+				add_image_desc(desc);
+			}
+			set_image_desc_action(desc, DO_UNPACK, filename);
+			unpack_all = 0;
+			break;
+		}
+		case 'f':
+			fflag = 1;
+			break;
+		case 'o':
+			snprintf(outdir, sizeof(outdir), "%s", optarg);
+			break;
+		default:
+			unpack_usage();
+		}
+	}
+	argc -= optind;
+	argv += optind;
+	free(opts);
+
+	if (argc == 0)
+		unpack_usage();
+
+	parse_fip(argv[0], NULL);
+
+	if (outdir[0] != '\0')
+		if (chdir(outdir) == -1)
+			log_err("chdir %s", outdir);
+
+	/* Unpack all specified images. */
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		char file[PATH_MAX];
+		image_t *image = desc->image;
+
+		if (!unpack_all && desc->action != DO_UNPACK)
+			continue;
+
+		/* Build filename. */
+		if (desc->action_arg == NULL)
+			snprintf(file, sizeof(file), "%s.bin",
+			    desc->cmdline_name);
+		else
+			snprintf(file, sizeof(file), "%s",
+			    desc->action_arg);
+
+		if (image == NULL) {
+			if (!unpack_all)
+				log_warnx("%s does not exist in %s",
+				    file, argv[0]);
+			continue;
+		}
+
+		if (access(file, F_OK) != 0 || fflag) {
+			if (verbose)
+				log_dbgx("Unpacking %s", file);
+			write_image_to_file(image, file);
+		} else {
+			log_warnx("File %s already exists, use --force to overwrite it",
+			    file);
+		}
+	}
+
+	return 0;
+}
+
+static void unpack_usage(void)
+{
+	toc_entry_t *toc_entry = toc_entries;
+
+	printf("fiptool unpack [opts] FIP_FILENAME\n");
+	printf("\n");
+	printf("Options:\n");
+	printf("  --blob uuid=...,file=...\tUnpack an image with the given UUID to file.\n");
+	printf("  --force\t\t\tIf the output file already exists, use --force to overwrite it.\n");
+	printf("  --out path\t\t\tSet the output directory path.\n");
+	printf("\n");
+	printf("Specific images are unpacked with the following options:\n");
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s FILENAME\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+	printf("\n");
+	printf("If no options are provided, all images will be unpacked.\n");
+	exit(1);
+}
+
+static int remove_cmd(int argc, char *argv[])
+{
+	struct option *opts = NULL;
+	size_t nr_opts = 0;
+	char outfile[PATH_MAX] = { 0 };
+	fip_toc_header_t toc_header;
+	image_desc_t *desc;
+	unsigned long align = 1;
+	int fflag = 0;
+
+	if (argc < 2)
+		remove_usage();
+
+	opts = fill_common_opts(opts, &nr_opts, no_argument);
+	opts = add_opt(opts, &nr_opts, "align", required_argument, OPT_ALIGN);
+	opts = add_opt(opts, &nr_opts, "blob", required_argument, 'b');
+	opts = add_opt(opts, &nr_opts, "force", no_argument, 'f');
+	opts = add_opt(opts, &nr_opts, "out", required_argument, 'o');
+	opts = add_opt(opts, &nr_opts, NULL, 0, 0);
+
+	while (1) {
+		int c, opt_index = 0;
+
+		c = getopt_long(argc, argv, "b:fo:", opts, &opt_index);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case OPT_TOC_ENTRY: {
+			image_desc_t *desc;
+
+			desc = lookup_image_desc_from_opt(opts[opt_index].name);
+			set_image_desc_action(desc, DO_REMOVE, NULL);
+			break;
+		}
+		case OPT_ALIGN:
+			align = get_image_align(optarg);
+			break;
+		case 'b': {
+			char name[_UUID_STR_LEN + 1], filename[PATH_MAX];
+			uuid_t uuid = { 0 };
+			image_desc_t *desc;
+
+			parse_blob_opt(optarg, &uuid,
+			    filename, sizeof(filename));
+
+			if (memcmp(&uuid, &uuid_null, sizeof(uuid_t)) == 0)
+				remove_usage();
+
+			desc = lookup_image_desc_from_uuid(&uuid);
+			if (desc == NULL) {
+				uuid_to_str(name, sizeof(name), &uuid);
+				desc = new_image_desc(&uuid, name, "blob");
+				add_image_desc(desc);
+			}
+			set_image_desc_action(desc, DO_REMOVE, NULL);
+			break;
+		}
+		case 'f':
+			fflag = 1;
+			break;
+		case 'o':
+			snprintf(outfile, sizeof(outfile), "%s", optarg);
+			break;
+		default:
+			remove_usage();
+		}
+	}
+	argc -= optind;
+	argv += optind;
+	free(opts);
+
+	if (argc == 0)
+		remove_usage();
+
+	if (outfile[0] != '\0' && access(outfile, F_OK) == 0 && !fflag)
+		log_errx("File %s already exists, use --force to overwrite it",
+		    outfile);
+
+	if (outfile[0] == '\0')
+		snprintf(outfile, sizeof(outfile), "%s", argv[0]);
+
+	parse_fip(argv[0], &toc_header);
+
+	for (desc = image_desc_head; desc != NULL; desc = desc->next) {
+		if (desc->action != DO_REMOVE)
+			continue;
+
+		if (desc->image != NULL) {
+			if (verbose)
+				log_dbgx("Removing %s",
+				    desc->cmdline_name);
+			free(desc->image);
+			desc->image = NULL;
+		} else {
+			log_warnx("%s does not exist in %s",
+			    desc->cmdline_name, argv[0]);
+		}
+	}
+
+	pack_images(outfile, toc_header.flags, align);
+	return 0;
+}
+
+static void remove_usage(void)
+{
+	toc_entry_t *toc_entry = toc_entries;
+
+	printf("fiptool remove [opts] FIP_FILENAME\n");
+	printf("\n");
+	printf("Options:\n");
+	printf("  --align <value>\tEach image is aligned to <value> (default: 1).\n");
+	printf("  --blob uuid=...\tRemove an image with the given UUID.\n");
+	printf("  --force\t\tIf the output FIP file already exists, use --force to overwrite it.\n");
+	printf("  --out FIP_FILENAME\tSet an alternative output FIP file.\n");
+	printf("\n");
+	printf("Specific images are removed with the following options:\n");
+	for (; toc_entry->cmdline_name != NULL; toc_entry++)
+		printf("  --%-16s\t%s\n", toc_entry->cmdline_name,
+		    toc_entry->name);
+	exit(1);
+}
+
+static int version_cmd(int argc, char *argv[])
+{
+#ifdef VERSION
+	puts(VERSION);
+#else
+	/* If built from fiptool directory, VERSION is not set. */
+	puts("Unknown version");
+#endif
+	return 0;
+}
+
+static void version_usage(void)
+{
+	printf("fiptool version\n");
+	exit(1);
+}
+
+static int help_cmd(int argc, char *argv[])
+{
+	int i;
+
+	if (argc < 2)
+		usage();
+	argc--, argv++;
+
+	for (i = 0; i < NELEM(cmds); i++) {
+		if (strcmp(cmds[i].name, argv[0]) == 0 &&
+		    cmds[i].usage != NULL)
+			cmds[i].usage();
+	}
+	if (i == NELEM(cmds))
+		printf("No help for subcommand '%s'\n", argv[0]);
+	return 0;
+}
+
+static void usage(void)
+{
+	printf("usage: fiptool [--verbose] <command> [<args>]\n");
+	printf("Global options supported:\n");
+	printf("  --verbose\tEnable verbose output for all commands.\n");
+	printf("\n");
+	printf("Commands supported:\n");
+	printf("  info\t\tList images contained in FIP.\n");
+	printf("  create\tCreate a new FIP with the given images.\n");
+	printf("  update\tUpdate an existing FIP with the given images.\n");
+	printf("  unpack\tUnpack images from FIP.\n");
+	printf("  remove\tRemove images from FIP.\n");
+	printf("  version\tShow fiptool version.\n");
+	printf("  help\t\tShow help for given command.\n");
+	exit(1);
+}
+
+int main(int argc, char *argv[])
+{
+	int i, ret = 0;
+
+	while (1) {
+		int c, opt_index = 0;
+		static struct option opts[] = {
+			{ "verbose", no_argument, NULL, 'v' },
+			{ NULL, no_argument, NULL, 0 }
+		};
+
+		/*
+		 * Set POSIX mode so getopt stops at the first non-option
+		 * which is the subcommand.
+		 */
+		c = getopt_long(argc, argv, "+v", opts, &opt_index);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case 'v':
+			verbose = 1;
+			break;
+		default:
+			usage();
+		}
+	}
+	argc -= optind;
+	argv += optind;
+	/* Reset optind for subsequent getopt processing. */
+	optind = 0;
+
+	if (argc == 0)
+		usage();
+
+	fill_image_descs();
+	for (i = 0; i < NELEM(cmds); i++) {
+		if (strcmp(cmds[i].name, argv[0]) == 0) {
+			ret = cmds[i].handler(argc, argv);
+			break;
+		}
+	}
+	if (i == NELEM(cmds))
+		usage();
+	free_image_descs();
+	return ret;
+}
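
Editor's note: main() above passes "+v" as the short-option string; with a glibc-style getopt_long() the leading '+' stops option processing at the first non-option, which is what leaves the subcommand and its arguments intact for the handler. A minimal sketch of that behaviour (illustrative only; it assumes GNU getopt semantics, e.g. run as ./demo --verbose info fip.bin):

    #include <getopt.h>
    #include <stdio.h>

    int main(int argc, char *argv[])
    {
        static struct option opts[] = {
            { "verbose", no_argument, NULL, 'v' },
            { NULL, no_argument, NULL, 0 }
        };
        int c;

        while ((c = getopt_long(argc, argv, "+v", opts, NULL)) != -1)
            if (c == 'v')
                printf("verbose enabled\n");

        if (optind < argc)  /* first non-option, i.e. the subcommand */
            printf("subcommand: %s\n", argv[optind]);
        return 0;
    }
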
diff --git a/tools/fiptool/fiptool.h b/tools/fiptool/fiptool.h
new file mode 100644
index 0000000..d8a5d2c
--- /dev/null
+++ b/tools/fiptool/fiptool.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FIPTOOL_H__
+#define __FIPTOOL_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <firmware_image_package.h>
+#include <uuid.h>
+
+#include "fiptool_platform.h"
+
+#define NELEM(x) (sizeof (x) / sizeof *(x))
+
+enum {
+	DO_UNSPEC = 0,
+	DO_PACK   = 1,
+	DO_UNPACK = 2,
+	DO_REMOVE = 3
+};
+
+enum {
+	LOG_DBG,
+	LOG_WARN,
+	LOG_ERR
+};
+
+typedef struct image_desc {
+	uuid_t             uuid;
+	char              *name;
+	char              *cmdline_name;
+	int                action;
+	char              *action_arg;
+	struct image      *image;
+	struct image_desc *next;
+} image_desc_t;
+
+typedef struct image {
+	struct fip_toc_entry toc_e;
+	void                *buffer;
+} image_t;
+
+typedef struct cmd {
+	char              *name;
+	int              (*handler)(int, char **);
+	void             (*usage)(void);
+} cmd_t;
+
+#endif /* __FIPTOOL_H__ */
diff --git a/tools/fiptool/fiptool_platform.h b/tools/fiptool/fiptool_platform.h
new file mode 100644
index 0000000..fd0a120
--- /dev/null
+++ b/tools/fiptool/fiptool_platform.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Build platform specific handling.
+ * This allows for builds on non-Posix platforms
+ * e.g. Visual Studio on Windows
+ */
+
+#ifndef __FIPTOOL_PLATFORM_H__
+#	define __FIPTOOL_PLATFORM_H__
+
+#	ifndef _MSC_VER
+
+		/* Not Visual Studio, so include Posix Headers. */
+#		include <getopt.h>
+#		include <openssl/sha.h>
+#		include <unistd.h>
+
+#		define  BLD_PLAT_STAT stat
+
+#	else
+
+		/* Visual Studio. */
+#		include "win_posix.h"
+
+#	endif
+
+#endif /* __FIPTOOL_PLATFORM_H__ */
diff --git a/tools/fiptool/tbbr_config.c b/tools/fiptool/tbbr_config.c
new file mode 100644
index 0000000..827cab2
--- /dev/null
+++ b/tools/fiptool/tbbr_config.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <firmware_image_package.h>
+
+#include "tbbr_config.h"
+
+/* The images used depend on the platform. */
+toc_entry_t toc_entries[] = {
+	{
+		.name = "SCP Firmware Updater Configuration FWU SCP_BL2U",
+		.uuid = UUID_TRUSTED_UPDATE_FIRMWARE_SCP_BL2U,
+		.cmdline_name = "scp-fwu-cfg"
+	},
+	{
+		.name = "AP Firmware Updater Configuration BL2U",
+		.uuid = UUID_TRUSTED_UPDATE_FIRMWARE_BL2U,
+		.cmdline_name = "ap-fwu-cfg"
+	},
+	{
+		.name = "Firmware Updater NS_BL2U",
+		.uuid = UUID_TRUSTED_UPDATE_FIRMWARE_NS_BL2U,
+		.cmdline_name = "fwu"
+	},
+	{
+		.name = "Non-Trusted Firmware Updater certificate",
+		.uuid = UUID_TRUSTED_FWU_CERT,
+		.cmdline_name = "fwu-cert"
+	},
+	{
+		.name = "Trusted Boot Firmware BL2",
+		.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+		.cmdline_name = "tb-fw"
+	},
+	{
+		.name = "SCP Firmware SCP_BL2",
+		.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+		.cmdline_name = "scp-fw"
+	},
+	{
+		.name = "EL3 Runtime Firmware BL31",
+		.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+		.cmdline_name = "soc-fw"
+	},
+	{
+		.name = "Secure Payload BL32 (Trusted OS)",
+		.uuid = UUID_SECURE_PAYLOAD_BL32,
+		.cmdline_name = "tos-fw"
+	},
+	{
+		.name = "Secure Payload BL32 Extra1 (Trusted OS Extra1)",
+		.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+		.cmdline_name = "tos-fw-extra1"
+	},
+	{
+		.name = "Secure Payload BL32 Extra2 (Trusted OS Extra2)",
+		.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+		.cmdline_name = "tos-fw-extra2"
+	},
+	{
+		.name = "Non-Trusted Firmware BL33",
+		.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+		.cmdline_name = "nt-fw"
+	},
+
+	/* Key Certificates */
+	{
+		.name = "Root Of Trust key certificate",
+		.uuid = UUID_ROT_KEY_CERT,
+		.cmdline_name = "rot-cert"
+	},
+	{
+		.name = "Trusted key certificate",
+		.uuid = UUID_TRUSTED_KEY_CERT,
+		.cmdline_name = "trusted-key-cert"
+	},
+	{
+		.name = "SCP Firmware key certificate",
+		.uuid = UUID_SCP_FW_KEY_CERT,
+		.cmdline_name = "scp-fw-key-cert"
+	},
+	{
+		.name = "SoC Firmware key certificate",
+		.uuid = UUID_SOC_FW_KEY_CERT,
+		.cmdline_name = "soc-fw-key-cert"
+	},
+	{
+		.name = "Trusted OS Firmware key certificate",
+		.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
+		.cmdline_name = "tos-fw-key-cert"
+	},
+	{
+		.name = "Non-Trusted Firmware key certificate",
+		.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
+		.cmdline_name = "nt-fw-key-cert"
+	},
+
+	/* Content certificates */
+	{
+		.name = "Trusted Boot Firmware BL2 certificate",
+		.uuid = UUID_TRUSTED_BOOT_FW_CERT,
+		.cmdline_name = "tb-fw-cert"
+	},
+	{
+		.name = "SCP Firmware content certificate",
+		.uuid = UUID_SCP_FW_CONTENT_CERT,
+		.cmdline_name = "scp-fw-cert"
+	},
+	{
+		.name = "SoC Firmware content certificate",
+		.uuid = UUID_SOC_FW_CONTENT_CERT,
+		.cmdline_name = "soc-fw-cert"
+	},
+	{
+		.name = "Trusted OS Firmware content certificate",
+		.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
+		.cmdline_name = "tos-fw-cert"
+	},
+	{
+		.name = "Non-Trusted Firmware content certificate",
+		.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
+		.cmdline_name = "nt-fw-cert"
+	},
+	{
+		.name = NULL,
+		.uuid = { 0 },
+		.cmdline_name = NULL,
+	}
+};
diff --git a/tools/fiptool/tbbr_config.h b/tools/fiptool/tbbr_config.h
new file mode 100644
index 0000000..bad757d
--- /dev/null
+++ b/tools/fiptool/tbbr_config.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TBBR_CONFIG_H__
+#define __TBBR_CONFIG_H__
+
+#include <stdint.h>
+
+#include <uuid.h>
+
+/* TODO: Update this number as required */
+#define TOC_HEADER_SERIAL_NUMBER 0x12345678
+
+typedef struct toc_entry {
+	char         *name;
+	uuid_t        uuid;
+	char         *cmdline_name;
+} toc_entry_t;
+
+extern toc_entry_t toc_entries[];
+
+#endif /* __TBBR_CONFIG_H__ */
diff --git a/tools/fiptool/win_posix.c b/tools/fiptool/win_posix.c
new file mode 100644
index 0000000..48feb16
--- /dev/null
+++ b/tools/fiptool/win_posix.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include "win_posix.h"
+
+/*
+ * This variable is set by getopt to the index of the next element of the
+ * argv array to be processed. Once getopt has found all of the option
+ * arguments, you can use this variable to determine where the remaining
+ * non-option arguments begin. The initial value of this variable is 1.
+ */
+int optind = 1;
+
+/*
+ * If the value of this variable is nonzero, then getopt prints an error
+ * message to the standard error stream if it encounters an unknown option
+ * default character or an option with a missing required argument.
+ * If you set this variable to zero, getopt does not print any messages,
+ * but it still returns the character ? to indicate an error.
+ */
+const int opterr; /* = 0; */
+/* const because we do not implement error printing. */
+/* Not initialised to conform with the coding standard. */
+
+/*
+ * When getopt encounters an unknown option character or an option with a
+ * missing required argument, it stores that option character in this
+ * variable.
+ */
+int optopt;	/* = 0; */
+
+/*
+ * This variable is set by getopt to point at the value of the option
+ * argument, for those options that accept arguments.
+ */
+char *optarg;	/* = 0; */
+
+enum return_flags {
+	RET_ERROR = -1,
+	RET_END_OPT_LIST = -1,
+	RET_NO_PARAM = '?',
+	RET_NO_PARAM2 = ':',
+	RET_UNKNOWN_OPT = '?'
+};
+
+/*
+ * Common initialisation on entry.
+ */
+static
+void getopt_init(void)
+{
+	optarg = (char *)0;
+	optopt = 0;
+	/* optind may be zero with some POSIX uses.
+	 * For our purposes we just change it to 1.
+	 */
+	if (optind == 0)
+		optind = 1;
+}
+
+/*
+ * Common handling for a single letter option.
+ */
+static
+int getopt_1char(int argc,
+		 char *const argv[],
+		 const char *const opstring,
+		 const int optchar)
+{
+	size_t nlen = (opstring == 0) ? 0 : strlen(opstring);
+	size_t loptn;
+
+	for (loptn = 0; loptn < nlen; loptn++) {
+		if (optchar == opstring[loptn]) {
+			if (opstring[loptn + 1] == ':') {
+				/* Option has argument */
+				if (optind < argc) {
+					/* Found argument. */
+					assert(argv != 0);
+					optind++;
+					optarg = argv[optind++];
+					return optchar;
+				}
+				/* Missing argument. */
+				if (opstring[loptn + 2] == ':') {
+					/* OK if optional "x::". */
+					optind++;
+					return optchar;
+				}
+				/* Actual missing value. */
+				optopt = optchar;
+				return ((opstring[0] == ':')
+					? RET_NO_PARAM2
+					: RET_NO_PARAM);
+			}
+			/* No argument, just return option char */
+			optind++;
+			return optchar;
+		}
+	}
+	/*
+	 * If getopt finds an option character in argv that was not included in
+	 * options, ... it returns '?' and sets the external variable optopt to
+	 * the actual option character.
+	 */
+	optopt = optchar;
+	return RET_UNKNOWN_OPT;
+}
+
+int getopt(int argc,
+	   char *argv[],
+	   char *opstring)
+{
+	int result = RET_END_OPT_LIST;
+	size_t argn = 0;
+	size_t nlen = strlen(opstring);
+
+	getopt_init();
+	/* If we have an argument left to play with */
+	if ((argc > optind) && (argv != 0)) {
+		const char *arg = (const char *)argv[optind];
+
+		if ((arg != 0) && (arg[0] == '-'))
+			result = getopt_1char(argc, argv, opstring, arg[1]);
+	}
+
+	return result;
+}
+
+/*
+ * Match an argument value against an option name.
+ * Note that we only match over the shorter length of the pair, to allow
+ * for abbreviation or the --match=value form.
+ * Long option names may be abbreviated if the abbreviation is unique or an
+ * exact match for some defined option.
+ * A long option may take a parameter, of the form --opt=param or --opt param.
+ */
+static
+int optmatch(const char *argval, const char *optname)
+{
+	int result = 0;
+
+	while ((result == 0) && (*optname != 0) && (*argval != 0))
+		result = (*argval++) - (*optname++);
+	return result;
+}
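
Editor's note: because optmatch() above only compares up to the end of the shorter string, an abbreviated option name or a --name=value argument still matches its table entry (the match is purely prefix-based). A quick standalone check (illustrative only; the sample strings are invented):

    #include <stdio.h>

    static int optmatch(const char *argval, const char *optname)
    {
        int result = 0;

        while ((result == 0) && (*optname != 0) && (*argval != 0))
            result = (*argval++) - (*optname++);
        return result;
    }

    int main(void)
    {
        printf("%d\n", optmatch("soc", "soc-fw"));             /* 0: abbreviation */
        printf("%d\n", optmatch("soc-fw=bl31.bin", "soc-fw")); /* 0: --opt=value form */
        printf("%d\n", optmatch("nt-fw", "soc-fw") != 0);      /* 1: no match */
        return 0;
    }
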
+
+/* Handling for a single long option. */
+static
+int getopt_1long(const int argc,
+		 char *const argv[],
+		 const struct option *const longopts,
+		 const char *const optname,
+		 int *const indexptr)
+{
+	int result = RET_UNKNOWN_OPT;
+	size_t loptn = 0;
+
+	while (longopts[loptn].name != 0) {
+		if (optmatch(optname, longopts[loptn].name) == 0) {
+			/* We found a match. */
+			result = longopts[loptn].val;
+			if (indexptr != 0)
+				*indexptr = loptn;
+			switch (longopts[loptn].has_arg) {
+			case required_argument:
+				if ((optind + 1) >= argc) {
+					/* Missing argument. */
+					optopt = result;
+					return RET_NO_PARAM;
+				}
+				/* Fallthrough to get option value. */
+
+			case optional_argument:
+				if ((argc - optind) > 0) {
+					/* Found argument. */
+					optarg = argv[++optind];
+				}
+				/* Fallthrough to handle flag. */
+
+			case no_argument:
+				optind++;
+				if (longopts[loptn].flag != 0) {
+					*longopts[loptn].flag = result;
+					result = 0;
+				}
+				break;
+
+			}
+			return result;
+		}
+		++loptn;
+	}
+	/*
+	 * If getopt finds an option character in argv that was not included
+	 * in options, ... it returns '?' and sets the external variable
+	 * optopt to the actual option character.
+	 */
+	return RET_UNKNOWN_OPT;
+}
+
+/*
+ * getopt_long gets the next option argument from the argument list
+ * specified by the argv and argc arguments.  Options may be either short
+ * (single letter) as for getopt, or longer names (preceded by --).
+ */
+int getopt_long(int argc,
+		char *argv[],
+		const char *shortopts,
+		const struct option *longopts,
+		int *indexptr)
+{
+	int result = RET_END_OPT_LIST;
+
+	getopt_init();
+	/* If we have an argument left to play with */
+	if ((argc > optind) && (argv != 0)) {
+		const char *arg = argv[optind];
+
+		if ((arg != 0) && (arg[0] == '-')) {
+			if (arg[1] == '-') {
+				/* Looks like a long option. */
+				result = getopt_1long(argc,
+						      argv,
+						      longopts,
+						      &arg[2],
+						      indexptr);
+			} else {
+				result = getopt_1char(argc,
+						      argv,
+						      shortopts,
+						      arg[1]);
+			}
+		}
+	}
+	return result;
+}
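+
+/*
+ * Illustrative sketch (not built by default): driving getopt_long() with a
+ * small option table.  The guard macro WIN_POSIX_GETOPT_EXAMPLE, the option
+ * names "help"/"output" and the letters 'h'/'o' are hypothetical; they only
+ * show how the struct option fields map onto the parsing above.
+ */
+#ifdef WIN_POSIX_GETOPT_EXAMPLE
+static int example_parse_long(int argc, char *argv[])
+{
+	static const struct option opts[] = {
+		{ "help",   no_argument,       0, 'h' },
+		{ "output", required_argument, 0, 'o' },
+		{ 0,        0,                 0,  0  }	/* terminator */
+	};
+	int opt;
+	int optidx = 0;
+	const char *output = 0;
+
+	while ((opt = getopt_long(argc, argv, "ho:", opts, &optidx)) !=
+	       RET_END_OPT_LIST) {
+		switch (opt) {
+		case 'h':
+			return 0;
+		case 'o':
+			output = optarg;	/* set while parsing above */
+			break;
+		default:
+			return -1;	/* unknown option or missing value */
+		}
+	}
+	return (output != 0) ? 0 : -1;
+}
+#endif /* WIN_POSIX_GETOPT_EXAMPLE */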
+
+/*
+ * getopt_long_only gets the next option argument from the argument list
+ * specified by the argv and argc arguments.  Options may be either short
+ * or long as for getopt_long, but the long names may have a single '-'
+ * prefix too.
+ */
+int getopt_long_only(int argc,
+		     char *argv[],
+		     const char *shortopts,
+		     const struct option *longopts,
+		     int *indexptr)
+{
+	int result = RET_END_OPT_LIST;
+
+	getopt_init();
+	/* If we have an argument left to play with */
+	if ((argc > optind) && (argv != 0)) {
+		const char *arg = argv[optind];
+
+		if ((arg != 0) && (arg[0] == '-')) {
+			if (arg[1] == '-') {
+				/* Looks like a long option. */
+				result = getopt_1long(argc,
+						      argv,
+						      longopts,
+						      &arg[2],
+						      indexptr);
+			} else {
+				result = getopt_1long(argc,
+						      argv,
+						      longopts,
+						      &arg[1],
+						      indexptr);
+				if (result == RET_UNKNOWN_OPT) {
+					result = getopt_1char(argc,
+							      argv,
+							      shortopts,
+							      arg[1]);
+				}
+			}
+		}
+	}
+	return result;
+}
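+
+/*
+ * Illustrative sketch (not built by default): with getopt_long_only() a long
+ * name may also be introduced by a single '-', so "-output file" and
+ * "--output file" are both accepted, and an unmatched single-dash name falls
+ * back to the short options.  The guard macro and the option names are
+ * hypothetical.
+ */
+#ifdef WIN_POSIX_GETOPT_EXAMPLE
+static const char *example_parse_long_only(int argc, char *argv[])
+{
+	static const struct option opts[] = {
+		{ "output", required_argument, 0, 'o' },
+		{ 0,        0,                 0,  0  }	/* terminator */
+	};
+	int opt;
+	int optidx = 0;
+	const char *output = 0;
+
+	while ((opt = getopt_long_only(argc, argv, "o:", opts, &optidx)) !=
+	       RET_END_OPT_LIST) {
+		if (opt != 'o')
+			return 0;	/* unknown option or missing value */
+		output = optarg;	/* value of -output, --output or -o */
+	}
+	return output;
+}
+#endif /* WIN_POSIX_GETOPT_EXAMPLE */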
diff --git a/tools/fiptool/win_posix.h b/tools/fiptool/win_posix.h
new file mode 100644
index 0000000..c3fc399
--- /dev/null
+++ b/tools/fiptool/win_posix.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WINPOSIX_H__
+#	define __WINPOSIX_H__
+
+#	define _CRT_SECURE_NO_WARNINGS
+
+#	include <direct.h>
+#	include <io.h>
+#	include <stdint.h>
+#	include <stdlib.h>
+#	include <string.h>
+#	include <sys/stat.h>
+
+#	include "uuid.h"
+
+/* Derive or provide Windows equivalents of POSIX/GCC/Unix facilities. */
+#	ifndef PATH_MAX
+#		ifdef MAX_PATH
+#			define PATH_MAX MAX_PATH
+#		else
+#			ifdef _MAX_PATH
+#				define MAX_PATH _MAX_PATH
+#				define PATH_MAX _MAX_PATH
+#			else
+#				define PATH_MAX 260
+#			endif
+#		endif
+#	endif
+
+#	ifndef _CRT_SECURE_NO_WARNINGS
+#		define _CRT_SECURE_NO_WARNINGS 1
+#	endif
+
+/*
+ * Platform specific names.
+ *
+ * Visual Studio deprecates a number of POSIX functions and only provides
+ * ISO C++ compliant alternatives (distinguished by their '_' prefix).
+ * These macros help provide a stopgap for that.
+ */
+
+/* fileno cannot be an inline function, because _fileno is a macro. */
+#	define fileno(fileptr) _fileno(fileptr)
+
+/* _fstat uses the _stat structure, not stat. */
+#	define BLD_PLAT_STAT	_stat
+
+/* Define flag values for _access. */
+#	define F_OK	0
+
+/* getopt implementation for Windows: Data. */
+
+/* Legitimate values for option.has_arg. */
+enum has_arg_values {
+	no_argument,		/* No argument value is required. */
+	required_argument,	/* An argument value must be specified. */
+	optional_argument	/* An argument value may be specified. */
+};
+
+/* Long option table entry for getopt_long. */
+struct option {
+	/* The name of the option. */
+	const char *name;
+
+	/*
+	 * Indicates whether the option takes an argument.
+	 * Possible values: see has_arg_values above.
+	 */
+	int has_arg;
+
+	/*
+	 * If flag is not null, *flag is set to val when the option is present.
+	 */
+	int *flag;
+
+	/*
+	 * The value to return for this option
+	 * (and to store in *flag when flag is not null).
+	 */
+	int val;
+};
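+
+/*
+ * Example table (illustrative only; the option names, verbose_flag and the
+ * 'o' letter are hypothetical).  The list must end with an entry whose name
+ * is a null pointer:
+ *
+ *	static int verbose_flag;
+ *	static const struct option opts[] = {
+ *		{ "verbose", no_argument,       &verbose_flag, 1   },
+ *		{ "output",  required_argument, 0,             'o' },
+ *		{ 0,         0,                 0,              0  }
+ *	};
+ *
+ * With a non-null flag, getopt_long stores val (here 1) in *flag and
+ * returns 0; with a null flag it returns val itself (here 'o').
+ */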
+
+/*
+ * This variable is set by getopt to point at the value of the option
+ * argument, for those options that accept arguments.
+ */
+extern char *optarg;
+
+/*
+ * When this variable is non-zero, getopt emits an error message to stderr
+ * if it encounters an unknown option or a missing option argument;
+ * otherwise no message is reported.
+ */
+extern const int opterr;	/* const: not used in this implementation. */
+
+/*
+ * This variable is set by getopt to the index of the next element of the
+ * argv array to be processed. Once getopt has found all of the option
+ * arguments, you can use this variable to determine where the remaining
+ * non-option arguments begin. The initial value of this variable is 1.
+ */
+extern int optind;
+
+/*
+ * When getopt encounters an unknown option character or an option with a
+ * missing required argument, it stores that option character in this
+ * variable.
+ */
+extern int optopt;
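+
+/*
+ * Illustrative sketch (hypothetical; process_operand() is a placeholder):
+ * once the option-parsing loop has finished, the remaining non-option
+ * arguments are argv[optind] .. argv[argc - 1]:
+ *
+ *	int i;
+ *
+ *	for (i = optind; i < argc; i++)
+ *		process_operand(argv[i]);
+ */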
+
+/*
+ * Platform specific names.
+ *
+ * Visual Studio deprecates a number of POSIX functions and only provides
+ * ISO C++ compliant alternatives (distinguished by their '_' prefix).
+ * These inline functions provide a stopgap for that.
+ */
+
+inline int access(const char *path, int mode)
+{
+	return _access(path, mode);
+}
+
+inline int chdir(const char *s)
+{
+	return _chdir(s);
+}
+
+inline int fstat(int fd, struct _stat *buffer)
+{
+	return _fstat(fd, buffer);
+}
+
+inline char *strdup(const char *s)
+{
+	return _strdup(s);
+}
+
+/*
+ * getopt implementation for Windows: Functions.
+ *
+ * Windows does not have the getopt family of functions, as it normally
+ * uses '/' instead of '-' as the command line option delimiter.
+ * These functions provide a Windows version that uses '-', which precludes
+ * using '-' as the initial letter of a program argument.
+ * This is not seen as a problem in the specific instance of fiptool,
+ * and enables existing makefiles to work in a Windows build environment.
+ */
+
+/*
+ * The getopt function gets the next option argument from the argument list
+ * specified by the argv and argc arguments.
+ */
+int getopt(int argc,
+	   char *argv[],
+	   char *options);
+
+/*
+ * getopt_long gets the next option argument from the argument list
+ * specified by the argv and argc arguments.  Options may be either short
+ * (single-letter) ones, as for getopt, or longer names (preceded by --).
+ */
+int getopt_long(int argc,
+		char *argv[],
+		const char *shortopts,
+		const struct option *longopts,
+		int *indexptr);
+
+/*
+ * getopt_long_only gets the next option argument from the argument list
+ * specified by the argv and argc arguments.  Options may be either short
+ * or long as for getopt_long, but the long names may have a single '-'
+ * prefix, too.
+ */
+int getopt_long_only(int argc,
+		     char *argv[],
+		     const char *shortopts,
+		     const struct option *longopts,
+		     int *indexptr);
+
+#endif /* __WINPOSIX_H__ */