Merge "fix(bl1): invalidate SP in data cache during secure SMC" into integration
diff --git a/Makefile b/Makefile
index 16c85bc..05d97b0 100644
--- a/Makefile
+++ b/Makefile
@@ -833,6 +833,10 @@
     endif
 endif
 
+ifeq ($(DRTM_SUPPORT),1)
+    $(info DRTM_SUPPORT is an experimental feature)
+endif
+
 ################################################################################
 # Process platform overrideable behaviour
 ################################################################################
@@ -1008,9 +1012,11 @@
         HW_ASSISTED_COHERENCY \
         INVERTED_MEMMAP \
         MEASURED_BOOT \
+        DRTM_SUPPORT \
         NS_TIMER_SWITCH \
         OVERRIDE_LIBC \
         PL011_GENERIC_UART \
+        PLAT_RSS_NOT_SUPPORTED \
         PROGRAMMABLE_RESET_ADDRESS \
         PSCI_EXTENDED_STATE_ID \
         RESET_TO_BL31 \
@@ -1143,9 +1149,11 @@
         HW_ASSISTED_COHERENCY \
         LOG_LEVEL \
         MEASURED_BOOT \
+        DRTM_SUPPORT \
         NS_TIMER_SWITCH \
         PL011_GENERIC_UART \
         PLAT_${PLAT} \
+        PLAT_RSS_NOT_SUPPORTED \
         PROGRAMMABLE_RESET_ADDRESS \
         PSCI_EXTENDED_STATE_ID \
         RAS_EXTENSION \
diff --git a/changelog.yaml b/changelog.yaml
index add81ef..e2184e4 100644
--- a/changelog.yaml
+++ b/changelog.yaml
@@ -645,6 +645,9 @@
                   - title: GIC-600AE
                     scope: gic600ae
 
+          - title: SMMU
+            scope: smmu
+
           - title: TZC
             scope: tzc
 
@@ -984,6 +987,9 @@
       - title: Prerequisites
         scope: prerequisites
 
+      - title: Threat Model
+        scope: threat-model
+
   - title: Build System
     scope: build
 
diff --git a/common/uuid.c b/common/uuid.c
index ac6db50..3e47eb4 100644
--- a/common/uuid.c
+++ b/common/uuid.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -132,3 +132,27 @@
 	return 0;
 }
 
+/*
+ * Helper function to check if 2 UUIDs match.
+ */
+bool uuid_match(uint32_t *uuid1, uint32_t *uuid2)
+{
+	return !memcmp(uuid1, uuid2, sizeof(uint32_t) * 4);
+}
+
+/*
+ * Helper function to copy from one UUID struct to another.
+ */
+void copy_uuid(uint32_t *to_uuid, uint32_t *from_uuid)
+{
+	to_uuid[0] = from_uuid[0];
+	to_uuid[1] = from_uuid[1];
+	to_uuid[2] = from_uuid[2];
+	to_uuid[3] = from_uuid[3];
+}
+
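+/*
+ * Helper function to check if a UUID is null (all four words zero).
+ */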
+bool is_null_uuid(uint32_t *uuid)
+{
+	return (uuid[0] == 0 && uuid[1] == 0 &&
+		uuid[2] == 0 && uuid[3] == 0);
+}
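
For reference, here is a minimal standalone sketch of how these helpers might be used. The definitions are repeated from the hunk above only so the example compiles on its own; the UUID value is arbitrary.

```c
/* Standalone sketch of the UUID helpers added in common/uuid.c above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool uuid_match(uint32_t *uuid1, uint32_t *uuid2)
{
	return !memcmp(uuid1, uuid2, sizeof(uint32_t) * 4);
}

static void copy_uuid(uint32_t *to_uuid, uint32_t *from_uuid)
{
	to_uuid[0] = from_uuid[0];
	to_uuid[1] = from_uuid[1];
	to_uuid[2] = from_uuid[2];
	to_uuid[3] = from_uuid[3];
}

static bool is_null_uuid(uint32_t *uuid)
{
	return (uuid[0] == 0 && uuid[1] == 0 &&
		uuid[2] == 0 && uuid[3] == 0);
}

int main(void)
{
	/* A 128-bit UUID held as four 32-bit words (arbitrary example value). */
	uint32_t src[4] = { 0xb4b5671e, 0x4a904fe1, 0xb81ffb13, 0xdae1dacb };
	uint32_t dst[4] = { 0, 0, 0, 0 };

	printf("dst is null UUID: %d\n", is_null_uuid(dst)); /* prints 1 */
	copy_uuid(dst, src);                                 /* word-by-word copy */
	printf("dst matches src:  %d\n", uuid_match(dst, src)); /* prints 1 */
	return 0;
}
```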
diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst
index b9b5878..159a3db 100644
--- a/docs/about/maintainers.rst
+++ b/docs/about/maintainers.rst
@@ -75,8 +75,6 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Mark Dykes <mark.dykes@arm.com>
 :|G|: `mardyk01`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|F|: services/std_svc/sdei/
 
 Trusted Boot
@@ -89,8 +87,14 @@
 :|G|: `ManishVB-Arm`_
 :|F|: drivers/auth/
 
-Secure Partition Manager (SPM)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Secure Partition Manager Core (EL3 FF-A SPMC)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: Marc Bonnici <marc.bonnici@arm.com>
+:|G|: `marcbonnici`_
+:|F|: services/std_svc/spm/el3_spmc/\*
+
+Secure Partition Manager Dispatcher (SPMD)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Olivier Deprez <olivier.deprez@arm.com>
 :|G|: `odeprez`_
 :|M|: Manish Pandey <manish.pandey2@arm.com>
@@ -99,14 +103,12 @@
 :|G|: `max-shvetsov`_
 :|M|: Joao Alves <Joao.Alves@arm.com>
 :|G|: `J-Alves`_
-:|F|: services/std_svc/spm\*
+:|F|: services/std_svc/spmd/\*
 
 Exception Handling Framework (EHF)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Manish Badarkhe <manish.badarkhe@arm.com>
 :|G|: `ManishVB-Arm`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|F|: bl31/ehf.c
 
 Realm Management Extension (RME)
@@ -115,8 +117,6 @@
 :|G|: `bipinravi-arm`_
 :|M|: Mark Dykes <mark.dykes@arm.com>
 :|G|: `mardyk01`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|M|: Zelalem Aweke <Zelalem.Aweke@arm.com>
 :|G|: `zelalem-aweke`_
 
@@ -193,16 +193,12 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Joao Alves <Joao.Alves@arm.com>
 :|G|: `J-Alves`_
-:|M|: Jimmy Brisson <Jimmy.Brisson@arm.com>
-:|G|: `theotherjimmy`_
 :|F|: lib/pmf/
 
 Arm CPU libraries
 ^^^^^^^^^^^^^^^^^
 :|M|: Lauren Wehrmeister <Lauren.Wehrmeister@arm.com>
 :|G|: `laurenw-arm`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|F|: lib/cpus/
 
 Reliability Availability Serviceabilty (RAS) framework
@@ -225,8 +221,6 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Zelalem Aweke <Zelalem.Aweke@arm.com>
 :|G|: `zelalem-aweke`_
-:|M|: Jimmy Brisson <Jimmy.Brisson@arm.com>
-:|G|: `theotherjimmy`_
 :|F|: lib/extensions/mpam/
 
 Pointer Authentication (PAuth) and Branch Target Identification (BTI) extensions
@@ -241,22 +235,12 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Zelalem Aweke <Zelalem.Aweke@arm.com>
 :|G|: `zelalem-aweke`_
-:|M|: Jimmy Brisson <Jimmy.Brisson@arm.com>
-:|G|: `theotherjimmy`_
 :|F|: lib/extensions/spe/
 
-Scalable Vector Extension (SVE)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-:|M|: Jimmy Brisson <Jimmy.Brisson@arm.com>
-:|G|: `theotherjimmy`_
-:|F|: lib/extensions/sve/
-
 Standard C library
 ^^^^^^^^^^^^^^^^^^
 :|M|: Alexei Fedorov <Alexei.Fedorov@arm.com>
 :|G|: `AlexeiFedorov`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|F|: lib/libc/
 
 Library At ROM (ROMlib)
@@ -293,6 +277,20 @@
 :|G|: `odeprez`_
 :|F|: drivers/arm/gic/
 
+Message Handling Unit (MHU) driver
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: David Vincze <david.vincze@arm.com>
+:|G|: `davidvincze`_
+:|F|: include/drivers/arm/mhu.h
+:|F|: drivers/arm/mhu
+
+Runtime Security Subsystem (RSS) comms driver
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: David Vincze <david.vincze@arm.com>
+:|G|: `davidvincze`_
+:|F|: include/drivers/arm/rss_comms.h
+:|F|: drivers/arm/rss
+
 Libfdt wrappers
 ^^^^^^^^^^^^^^^
 :|M|: Madhukar Pappireddy <Madhukar.Pappireddy@arm.com>
@@ -331,6 +329,13 @@
 :|F|: drivers/fwu
 :|F|: include/drivers/fwu
 
+Platform Security Architecture (PSA) APIs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:|M|: Sandrine Bailleux <sandrine.bailleux@arm.com>
+:|G|: `sandrine-bailleux-arm`_
+:|F|: include/lib/psa
+:|F|: lib/psa
+
 System Control and Management Interface (SCMI) Server
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Etienne Carriere <etienne.carriere@st.com>
@@ -351,8 +356,6 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 :|M|: Mark Dykes <mark.dykes@arm.com>
 :|G|: `mardyk01`_
-:|M|: John Powell <john.powell@arm.com>
-:|G|: `john-powell-arm`_
 :|F|: lib/gpt_rme
 :|F|: include/lib/gpt_rme
 
@@ -821,6 +824,7 @@
 .. _b49020: https://github.com/b49020
 .. _carlocaione: https://github.com/carlocaione
 .. _danh-arm: https://github.com/danh-arm
+.. _davidvincze: https://github.com/davidvincze
 .. _etienne-lms: https://github.com/etienne-lms
 .. _glneo: https://github.com/glneo
 .. _grandpaul: https://github.com/grandpaul
@@ -868,10 +872,8 @@
 .. _javieralso-arm: https://github.com/javieralso-arm
 .. _laurenw-arm: https://github.com/laurenw-arm
 .. _zelalem-aweke: https://github.com/zelalem-aweke
-.. _theotherjimmy: https://github.com/theotherjimmy
 .. _J-Alves: https://github.com/J-Alves
 .. _madhukar-Arm: https://github.com/madhukar-Arm
-.. _john-powell-arm: https://github.com/john-powell-arm
 .. _raghuncstate: https://github.com/raghuncstate
 .. _CJKay: https://github.com/cjkay
 .. _nmenon: https://github.com/nmenon
@@ -886,5 +888,6 @@
 .. _JiafeiPan: https://github.com/JiafeiPan
 .. _arve-android: https://github.com/arve-android
 .. _marcone: https://github.com/marcone
+.. _marcbonnici: https://github.com/marcbonnici
 
 .. _Project Maintenance Process: https://developer.trustedfirmware.org/w/collaboration/project-maintenance-process/
diff --git a/docs/about/release-information.rst b/docs/about/release-information.rst
index b3553ae..e9eaa80 100644
--- a/docs/about/release-information.rst
+++ b/docs/about/release-information.rst
@@ -48,7 +48,9 @@
 +-----------------+---------------------------+------------------------------+
 | v2.6            | 4th week of Nov '21       | 2nd week of Nov '21          |
 +-----------------+---------------------------+------------------------------+
-| v2.7            | 2nd week of May '22       | 4th week of Apr '22          |
+| v2.7            | 5th week of May '22       | 3rd week of May '22          |
++-----------------+---------------------------+------------------------------+
+| v2.8            | 5th week of Nov '22       | 3rd week of Nov '22          |
 +-----------------+---------------------------+------------------------------+
 
 Removal of Deprecated Interfaces
diff --git a/docs/components/ffa-manifest-binding.rst b/docs/components/ffa-manifest-binding.rst
index df2985c..59996cc 100644
--- a/docs/components/ffa-manifest-binding.rst
+++ b/docs/components/ffa-manifest-binding.rst
@@ -151,6 +151,16 @@
    - List of <u32> tuples, identifying the IDs this partition is acting as
      proxy for.
 
+- power-management-messages
+   - value type: <u32>
+   - Specifies which power management messages a partition subscribes to.
+     A set bit means the partition should be informed of the corresponding
+     power event; a clear bit means it should not be informed:
+
+      - Bit[0]: CPU_OFF
+      - Bit[1]: CPU_SUSPEND
+      - Bit[2]: CPU_SUSPEND_RESUME
+
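
To illustrate the encoding described above, a small C sketch follows. The macro names and the example subscription are illustrative assumptions, not part of the FF-A manifest binding.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative names for the bits defined by power-management-messages. */
#define PM_MSG_CPU_OFF            (UINT32_C(1) << 0)
#define PM_MSG_CPU_SUSPEND        (UINT32_C(1) << 1)
#define PM_MSG_CPU_SUSPEND_RESUME (UINT32_C(1) << 2)

int main(void)
{
	/* A partition subscribing to CPU_OFF and CPU_SUSPEND_RESUME. */
	uint32_t pm_msgs = PM_MSG_CPU_OFF | PM_MSG_CPU_SUSPEND_RESUME;

	/* The corresponding manifest entry would read:
	 *   power-management-messages = <0x5>;
	 */
	printf("power-management-messages = <0x%" PRIx32 ">;\n", pm_msgs);
	return 0;
}
```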
 Memory Regions
 --------------
 
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 3029458..fbd2cbc 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -296,6 +296,14 @@
    CPU. This needs to be enabled for revisions r1p0, r1p1, and r1p2. The issue
    is present in r0p0 but there is no workaround. It is still open.
 
+-  ``ERRATA_A78_2376745``: This applies errata 2376745 workaround to Cortex-A78
+   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2, and
+   it is still open.
+
+-  ``ERRATA_A78_2395406``: This applies errata 2395406 workaround to Cortex-A78
+   CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2, and
+   it is still open.
+
 For Cortex-A78 AE, the following errata build flags are defined :
 
 - ``ERRATA_A78_AE_1941500`` : This applies errata 1941500 workaround to
@@ -314,6 +322,17 @@
   Cortex-A78 AE CPU. This needs to be enabled for revisions r0p0 and r0p1. This
   erratum is still open.
 
+For Cortex-X1 CPU, the following errata build flags are defined:
+
+-  ``ERRATA_X1_1821534``: This applies errata 1821534 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
+-  ``ERRATA_X1_1688305``: This applies errata 1688305 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
+-  ``ERRATA_X1_1827429``: This applies errata 1827429 workaround to Cortex-X1
+   CPU. This needs to be enabled only for revision <= r1p0 of the CPU.
+
 For Neoverse N1, the following errata build flags are defined :
 
 -  ``ERRATA_N1_1073348``: This applies errata 1073348 workaround to Neoverse-N1
@@ -433,6 +452,10 @@
    Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
    of the CPU and is fixed in r2p1.
 
+-  ``ERRATA_A710_2008768``: This applies errata 2008768 workaround to
+   Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
+   of the CPU and is fixed in r2p1.
+
 For Neoverse N2, the following errata build flags are defined :
 
 -  ``ERRATA_N2_2002655``: This applies errata 2002655 workaround to Neoverse-N2
@@ -553,6 +576,12 @@
    r2p0 it is fixed). However, please note that this workaround results in
    increased DSU power consumption on idle.
 
+-  ``ERRATA_DSU_2313941``: This applies errata 2313941 workaround for the
+   affected DSU configurations. This errata applies to DSUs with revisions
+   r0p0, r1p0, r2p0, r2p1, r3p0 and r3p1, and is still open. However, please
+   note that this workaround results in increased DSU power consumption on
+   idle.
+
 CPU Specific optimizations
 --------------------------
 
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index d2cda4d..be50e5e 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -649,6 +649,15 @@
 
    This option defaults to 0.
 
+-  ``DRTM_SUPPORT``: Boolean flag to enable support for Dynamic Root of Trust
+   for Measurement (DRTM). This feature places a trust dependency on BL31 to
+   take the measurements and record them as per the `PSA DRTM specification`_.
+   Platforms which use BL2 to load/authenticate BL31 can rely on
+   ``TRUSTED_BOARD_BOOT``, while owners of ``RESET_TO_BL31`` platforms should
+   provide a mechanism to authenticate BL31.
+
+   This option defaults to 0.
+
 -  ``NON_TRUSTED_WORLD_KEY``: This option is used when ``GENERATE_COT=1``. It
    specifies the file that contains the Non-Trusted World private key in PEM
    format. If ``SAVE_KEYS=1``, this file name will be used to save the key.
@@ -994,6 +1003,11 @@
   if FEAT_TRF is implemented. This flag can take the values 0 to 2, to align
   with the ``FEATURE_DETECTION`` mechanism. This flag is disabled by default.
 
+- ``PLAT_RSS_NOT_SUPPORTED``: Boolean option to enable the usage of the PSA
+  APIs on platforms that don't support RSS (which provides Arm CCA HES
+  functionality). When enabled (``1``), a mocked version of the APIs is used.
+  The default value is 0.
+
 GICv3 driver options
 --------------------
 
@@ -1111,3 +1125,4 @@
 
 .. _DEN0115: https://developer.arm.com/docs/den0115/latest
 .. _PSA FW update specification: https://developer.arm.com/documentation/den0118/a/
+.. _PSA DRTM specification: https://developer.arm.com/documentation/den0113/a
diff --git a/docs/getting_started/prerequisites.rst b/docs/getting_started/prerequisites.rst
index 3a54e69..c625090 100644
--- a/docs/getting_started/prerequisites.rst
+++ b/docs/getting_started/prerequisites.rst
@@ -54,7 +54,7 @@
 The following libraries must be available to build one or more components or
 supporting tools:
 
-- OpenSSL >= 1.0.1
+- OpenSSL >= 3.0
 
    Required to build the cert_create tool.
 
diff --git a/docs/plat/arm/fvp/index.rst b/docs/plat/arm/fvp/index.rst
index 9280f7b..3d10e45 100644
--- a/docs/plat/arm/fvp/index.rst
+++ b/docs/plat/arm/fvp/index.rst
@@ -12,7 +12,7 @@
 (64-bit host machine only).
 
 .. note::
-   The FVP models used are Version 11.16 Build 16, unless otherwise stated.
+   The FVP models used are Version 11.17 Build 21, unless otherwise stated.
 
 -  ``Foundation_Platform``
 -  ``FVP_Base_AEMv8A-AEMv8A-AEMv8A-AEMv8A-CCN502``
@@ -48,12 +48,12 @@
 -  ``FVP_Base_Neoverse-N2x4`` (Version 11.12 build 38)
 -  ``FVP_Base_Neoverse-V1x4``
 -  ``FVP_Base_RevC-2xAEMvA``  (For certain configurations also uses 0.0/6557)
--  ``FVP_CSS_SGI-575``        (Version 11.15/26)
--  ``FVP_Morello``            (Version 0.11/19)
--  ``FVP_RD_E1_edge``         (Version 11.15/26)
--  ``FVP_RD_N1_edge_dual``    (Version 11.15/26)
--  ``FVP_RD_N1_edge``         (Version 11.15/26)
--  ``FVP_RD_V1``              (Version 11.15/26)
+-  ``FVP_CSS_SGI-575``        (Version 11.17/33)
+-  ``FVP_Morello``            (Version 0.11/33)
+-  ``FVP_RD_E1_edge``         (Version 11.17/33)
+-  ``FVP_RD_N1_edge_dual``    (Version 11.17/33)
+-  ``FVP_RD_N1_edge``         (Version 11.17/33)
+-  ``FVP_RD_V1``              (Version 11.17/33)
 -  ``FVP_TC0``
 -  ``FVP_TC1``
 
diff --git a/docs/plat/xilinx-versal.rst b/docs/plat/xilinx-versal.rst
index 91ad6f1..09a6ee2 100644
--- a/docs/plat/xilinx-versal.rst
+++ b/docs/plat/xilinx-versal.rst
@@ -44,7 +44,7 @@
 *   `VERSAL_PLATFORM`: Select the platform. Options:
     -   `versal_virt`	: Versal Virtual platform
     -   `spp_itr6`	: SPP ITR6
-    -   `emu_it6`	: EMU ITR6
+    -   `emu_itr6`	: EMU ITR6
 
 # PLM->TF-A Parameter Passing
 ------------------------------
diff --git a/docs/threat_model/threat_model.rst b/docs/threat_model/threat_model.rst
index 072babc..611e8a1 100644
--- a/docs/threat_model/threat_model.rst
+++ b/docs/threat_model/threat_model.rst
@@ -1,9 +1,10 @@
 Generic Threat Model
 ********************
 
-************************
+************
 Introduction
-************************
+************
+
 This document provides a generic threat model for TF-A firmware.
 
 .. note::
@@ -11,9 +12,10 @@
  This threat model doesn't consider Root and Realm worlds introduced by
  :ref:`Realm Management Extension (RME)`.
 
-************************
+********************
 Target of Evaluation
-************************
+********************
+
 In this threat model, the target of evaluation is the Trusted
 Firmware for A-class Processors (TF-A). This includes the boot ROM (BL1),
 the trusted boot firmware (BL2) and the runtime EL3 firmware (BL31) as
@@ -34,8 +36,12 @@
 - There is no Secure-EL2. We don't consider threats that may come with
   Secure-EL2 software.
 
+- No experimental features are enabled. We do not consider threats that may come
+  from them.
+
 Data Flow Diagram
-======================
+=================
+
 Figure 1 shows a high-level data flow diagram for TF-A. The diagram
 shows a model of the different components of a TF-A-based system and
 their interactions with TF-A. A description of each diagram element
@@ -51,26 +57,26 @@
   +-----------------+--------------------------------------------------------+
   | Diagram Element | Description                                            |
   +=================+========================================================+
-  |       ``DF1``   | | At boot time, images are loaded from non-volatile    |
+  |       DF1       | | At boot time, images are loaded from non-volatile    |
   |                 |   memory and verified by TF-A boot firmware. These     |
   |                 |   images include TF-A BL2 and BL31 images, as well as  |
   |                 |   other secure and non-secure images.                  |
   +-----------------+--------------------------------------------------------+
-  |       ``DF2``   | | TF-A log system framework outputs debug messages     |
+  |       DF2       | | TF-A log system framework outputs debug messages     |
   |                 |   over a UART interface.                               |
   +-----------------+--------------------------------------------------------+
-  |       ``DF3``   | | Debug and trace IP on a platform can allow access    |
+  |       DF3       | | Debug and trace IP on a platform can allow access    |
   |                 |   to registers and memory of TF-A.                     |
   +-----------------+--------------------------------------------------------+
-  |       ``DF4``   | | Secure world software (e.g. trusted OS) interact     |
+  |       DF4       | | Secure world software (e.g. trusted OS) interact     |
   |                 |   with TF-A through SMC call interface and/or shared   |
   |                 |   memory.                                              |
   +-----------------+--------------------------------------------------------+
-  |       ``DF5``   | | Non-secure world software (e.g. rich OS) interact    |
+  |       DF5       | | Non-secure world software (e.g. rich OS) interact    |
   |                 |   with TF-A through SMC call interface and/or shared   |
   |                 |   memory.                                              |
   +-----------------+--------------------------------------------------------+
-  |       ``DF6``   | | This path represents the interaction between TF-A and|
+  |       DF6       | | This path represents the interaction between TF-A and|
   |                 |   various hardware IPs such as TrustZone controller    |
   |                 |   and GIC. At boot time TF-A configures/initializes the|
   |                 |   IPs and interacts with them at runtime through       |
@@ -78,9 +84,10 @@
   +-----------------+--------------------------------------------------------+
 
 
-*********************
+***************
 Threat Analysis
-*********************
+***************
+
 In this section we identify and provide assessment of potential threats to TF-A
 firmware. The threats are identified for each diagram element on the
 data flow diagram above.
@@ -91,7 +98,8 @@
 potential mitigations.
 
 Assets
-==================
+======
+
 We have identified the following assets for TF-A:
 
 .. table:: Table 2: TF-A Assets
@@ -99,21 +107,22 @@
   +--------------------+---------------------------------------------------+
   | Asset              | Description                                       |
   +====================+===================================================+
-  | ``Sensitive Data`` | | These include sensitive data that an attacker   |
+  | Sensitive Data     | | These include sensitive data that an attacker   |
   |                    |   must not be able to tamper with (e.g. the Root  |
   |                    |   of Trust Public Key) or see (e.g. secure logs,  |
   |                    |   debugging information such as crash reports).   |
   +--------------------+---------------------------------------------------+
-  | ``Code Execution`` | | This represents the requirement that the        |
+  | Code Execution     | | This represents the requirement that the        |
   |                    |   platform should run only TF-A code approved by  |
   |                    |   the platform provider.                          |
   +--------------------+---------------------------------------------------+
-  | ``Availability``   | | This represents the requirement that TF-A       |
+  | Availability       | | This represents the requirement that TF-A       |
   |                    |   services should always be available for use.    |
   +--------------------+---------------------------------------------------+
 
 Threat Agents
-=====================
+=============
+
 To understand the attack surface, it is important to identify potential
 attackers, i.e. attack entry points. The following threat agents are
 in scope of this threat model.
@@ -123,16 +132,16 @@
   +-------------------+-------------------------------------------------------+
   | Threat Agent      | Description                                           |
   +===================+=======================================================+
-  |   ``NSCode``      | | Malicious or faulty code running in the Non-secure  |
+  |   NSCode          | | Malicious or faulty code running in the Non-secure  |
   |                   |   world, including NS-EL0 NS-EL1 and NS-EL2 levels    |
   +-------------------+-------------------------------------------------------+
-  |   ``SecCode``     | | Malicious or faulty code running in the secure      |
+  |   SecCode         | | Malicious or faulty code running in the secure      |
   |                   |   world, including S-EL0 and S-EL1 levels             |
   +-------------------+-------------------------------------------------------+
-  |   ``AppDebug``    | | Physical attacker using  debug signals to access    |
+  |   AppDebug        | | Physical attacker using  debug signals to access    |
   |                   |   TF-A resources                                      |
   +-------------------+-------------------------------------------------------+
-  | ``PhysicalAccess``| | Physical attacker having access to external device  |
+  |  PhysicalAccess   | | Physical attacker having access to external device  |
   |                   |   communication bus and to external flash             |
   |                   |   communication bus using common hardware             |
   +-------------------+-------------------------------------------------------+
@@ -145,7 +154,8 @@
   considered out-of-scope.
 
 Threat Types
-========================
+============
+
 In this threat model we categorize threats using the `STRIDE threat
 analysis technique`_. In this technique a threat is categorized as one
 or more of these types: ``Spoofing``, ``Tampering``, ``Repudiation``,
@@ -153,7 +163,8 @@
 ``Elevation of privilege``.
 
 Threat Risk Ratings
-========================
+===================
+
 For each threat identified, a risk rating that ranges
 from *informational* to *critical* is given based on the likelihood of the
 threat occuring if a mitigation is not in place, and the impact of the
@@ -165,7 +176,7 @@
   +-----------------------+-------------------------+---------------------------+
   | **Rating (Score)**    | **Impact**              | **Likelihood**            |
   +=======================+=========================+===========================+
-  | ``Critical (5)``      | | Extreme impact to     | | Threat is almost        |
+  | Critical (5)          | | Extreme impact to     | | Threat is almost        |
   |                       |   entire organization   |   certain to be exploited.|
   |                       |   if exploited.         |                           |
   |                       |                         | | Knowledge of the threat |
@@ -173,17 +184,17 @@
   |                       |                         |   are in the public       |
   |                       |                         |   domain.                 |
   +-----------------------+-------------------------+---------------------------+
-  | ``High (4)``          | | Major impact to entire| | Threat is relatively    |
+  | High (4)              | | Major impact to entire| | Threat is relatively    |
   |                       |   organization or single|   easy to detect and      |
   |                       |   line of business if   |   exploit by an attacker  |
   |                       |   exploited             |   with little skill.      |
   +-----------------------+-------------------------+---------------------------+
-  | ``Medium (3)``        | | Noticeable impact to  | | A knowledgeable insider |
+  | Medium (3)            | | Noticeable impact to  | | A knowledgeable insider |
   |                       |   line of business if   |   or expert attacker could|
   |                       |   exploited.            |   exploit the threat      |
   |                       |                         |   without much difficulty.|
   +-----------------------+-------------------------+---------------------------+
-  | ``Low (2)``           | | Minor damage if       | | Exploiting the threat   |
+  | Low (2)               | | Minor damage if       | | Exploiting the threat   |
   |                       |   exploited or could    |   would require           |
   |                       |   be used in conjunction|   considerable expertise  |
   |                       |   with other            |   and resources           |
@@ -191,7 +202,7 @@
   |                       |   perform a more serious|                           |
   |                       |   attack                |                           |
   +-----------------------+-------------------------+---------------------------+
-  | ``Informational (1)`` | | Poor programming      | | Threat is not likely    |
+  | Informational (1)     | | Poor programming      | | Threat is not likely    |
   |                       |   practice or poor      |   to be exploited on its  |
   |                       |   design decision that  |   own, but may be used to |
   |                       |   may not represent an  |   gain information for    |
@@ -235,14 +246,15 @@
 ``Internet of Things(IoT)``, ``Mobile`` and ``Server``.
 
 Threat Assessment
-============================
+=================
+
 The following threats were identified by applying STRIDE analysis on
 each diagram element of the data flow diagram.
 
 +------------------------+----------------------------------------------------+
 | ID                     | 01                                                 |
 +========================+====================================================+
-| ``Threat``             | | **An attacker can mangle firmware images to      |
+| Threat                 | | **An attacker can mangle firmware images to      |
 |                        |   execute arbitrary code**                         |
 |                        |                                                    |
 |                        | | Some TF-A images are loaded from external        |
@@ -252,26 +264,26 @@
 |                        |   updating mechanism to modify the non-volatile    |
 |                        |   images to execute arbitrary code.                |
 +------------------------+----------------------------------------------------+
-| ``Diagram Elements``   | DF1, DF4, DF5                                      |
+| Diagram Elements       | DF1, DF4, DF5                                      |
 +------------------------+----------------------------------------------------+
-| ``Affected TF-A        | BL2, BL31                                          |
-| Components``           |                                                    |
+| Affected TF-A          | BL2, BL31                                          |
+| Components             |                                                    |
 +------------------------+----------------------------------------------------+
-| ``Assets``             | Code Execution                                     |
+| Assets                 | Code Execution                                     |
 +------------------------+----------------------------------------------------+
-| ``Threat Agent``       | PhysicalAccess, NSCode, SecCode                    |
+| Threat Agent           | PhysicalAccess, NSCode, SecCode                    |
 +------------------------+----------------------------------------------------+
-| ``Threat Type``        | Tampering, Elevation of Privilege                  |
+| Threat Type            | Tampering, Elevation of Privilege                  |
 +------------------------+------------------+-----------------+---------------+
-| ``Application``        | ``Server``       | ``IoT``         | ``Mobile``    |
+| Application            | Server           | IoT             | Mobile        |
 +------------------------+------------------+-----------------+---------------+
-| ``Impact``             | Critical (5)     | Critical (5)    | Critical (5)  |
+| Impact                 | Critical (5)     | Critical (5)    | Critical (5)  |
 +------------------------+------------------+-----------------+---------------+
-| ``Likelihood``         | Critical (5)     | Critical (5)    | Critical (5)  |
+| Likelihood             | Critical (5)     | Critical (5)    | Critical (5)  |
 +------------------------+------------------+-----------------+---------------+
-| ``Total Risk Rating``  | Critical (25)    | Critical (25)   | Critical (25) |
+| Total Risk Rating      | Critical (25)    | Critical (25)   | Critical (25) |
 +------------------------+------------------+-----------------+---------------+
-| ``Mitigations``        | | TF-A implements the `Trusted Board Boot (TBB)`_  |
+| Mitigations            | | TF-A implements the `Trusted Board Boot (TBB)`_  |
 |                        |   feature which prevents malicious firmware from   |
 |                        |   running on the platform by authenticating all    |
 |                        |   firmware images. In addition to this, the TF-A   |
@@ -283,33 +295,33 @@
 +------------------------+----------------------------------------------------+
 | ID                     | 02                                                 |
 +========================+====================================================+
-| ``Threat``             | | **An attacker may attempt to boot outdated,      |
+| Threat                 | | **An attacker may attempt to boot outdated,      |
 |                        |   potentially vulnerable firmware image**          |
 |                        |                                                    |
 |                        | | When updating firmware, an attacker may attempt  |
 |                        |   to rollback to an older version that has unfixed |
 |                        |   vulnerabilities.                                 |
 +------------------------+----------------------------------------------------+
-| ``Diagram Elements``   | DF1, DF4, DF5                                      |
+| Diagram Elements       | DF1, DF4, DF5                                      |
 +------------------------+----------------------------------------------------+
-| ``Affected TF-A        | BL2, BL31                                          |
-| Components``           |                                                    |
+| Affected TF-A          | BL2, BL31                                          |
+| Components             |                                                    |
 +------------------------+----------------------------------------------------+
-| ``Assets``             | Code Execution                                     |
+| Assets                 | Code Execution                                     |
 +------------------------+----------------------------------------------------+
-| ``Threat Agent``       | PhysicalAccess, NSCode, SecCode                    |
+| Threat Agent           | PhysicalAccess, NSCode, SecCode                    |
 +------------------------+----------------------------------------------------+
-| ``Threat Type``        | Tampering                                          |
+| Threat Type            | Tampering                                          |
 +------------------------+------------------+-----------------+---------------+
-| ``Application``        | ``Server``       | ``IoT``         | ``Mobile``    |
+| Application            | Server           | IoT             | Mobile        |
 +------------------------+------------------+-----------------+---------------+
-| ``Impact``             | Critical (5)     | Critical (5)    | Critical (5)  |
+| Impact                 | Critical (5)     | Critical (5)    | Critical (5)  |
 +------------------------+------------------+-----------------+---------------+
-| ``Likelihood``         | Critical (5)     | Critical (5)    | Critical (5)  |
+| Likelihood             | Critical (5)     | Critical (5)    | Critical (5)  |
 +------------------------+------------------+-----------------+---------------+
-| ``Total Risk Rating``  | Critical (25)    | Critical (25)   | Critical (25) |
+| Total Risk Rating      | Critical (25)    | Critical (25)   | Critical (25) |
 +------------------------+------------------+-----------------+---------------+
-| ``Mitigations``        | | TF-A supports anti-rollback protection using     |
+| Mitigations            | | TF-A supports anti-rollback protection using     |
 |                        |   non-volatile counters (NV counters) as required  |
 |                        |   by `TBBR-Client specification`_. After a firmware|
 |                        |   image is validated, the image revision number    |
@@ -324,7 +336,7 @@
 +------------------------+-------------------------------------------------------+
 | ID                     | 03                                                    |
 +========================+=======================================================+
-| ``Threat``             | |  **An attacker can use Time-of-Check-Time-of-Use    |
+| Threat                 | |  **An attacker can use Time-of-Check-Time-of-Use    |
 |                        |   (TOCTOU) attack to bypass image authentication      |
 |                        |   during the boot process**                           |
 |                        |                                                       |
@@ -336,33 +348,33 @@
 |                        |   after the integrity and authentication check has    |
 |                        |   been performed.                                     |
 +------------------------+-------------------------------------------------------+
-| ``Diagram Elements``   | DF1                                                   |
+| Diagram Elements       | DF1                                                   |
 +------------------------+-------------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2                                              |
-| Components``           |                                                       |
+| Affected TF-A          | BL1, BL2                                              |
+| Components             |                                                       |
 +------------------------+-------------------------------------------------------+
-| ``Assets``             | Code Execution, Sensitive Data                        |
+| Assets                 | Code Execution, Sensitive Data                        |
 +------------------------+-------------------------------------------------------+
-| ``Threat Agent``       | PhysicalAccess                                        |
+| Threat Agent           | PhysicalAccess                                        |
 +------------------------+-------------------------------------------------------+
-| ``Threat Type``        | Elevation of Privilege                                |
+| Threat Type            | Elevation of Privilege                                |
 +------------------------+---------------------+-----------------+---------------+
-| ``Application``        | ``Server``          | ``IoT``         | ``Mobile``    |
+| Application            | Server              | IoT             | Mobile        |
 +------------------------+---------------------+-----------------+---------------+
-| ``Impact``             | N/A                 | Critical (5)    | Critical (5)  |
+| Impact                 | N/A                 | Critical (5)    | Critical (5)  |
 +------------------------+---------------------+-----------------+---------------+
-| ``Likelihood``         | N/A                 | Medium (3)      | Medium (3)    |
+| Likelihood             | N/A                 | Medium (3)      | Medium (3)    |
 +------------------------+---------------------+-----------------+---------------+
-| ``Total Risk Rating``  | N/A                 | High (15)       | High (15)     |
+| Total Risk Rating      | N/A                 | High (15)       | High (15)     |
 +------------------------+---------------------+-----------------+---------------+
-| ``Mitigations``        | | TF-A boot firmware copies image to on-chip          |
+| Mitigations            | | TF-A boot firmware copies image to on-chip          |
 |                        |   memory before authenticating an image.              |
 +------------------------+-------------------------------------------------------+
 
 +------------------------+-------------------------------------------------------+
 | ID                     | 04                                                    |
 +========================+=======================================================+
-| ``Threat``             | | **An attacker with physical access can execute      |
+| Threat                 | | **An attacker with physical access can execute      |
 |                        |   arbitrary image by bypassing the signature          |
 |                        |   verification stage using glitching techniques**     |
 |                        |                                                       |
@@ -381,26 +393,26 @@
 |                        |   points where the image is validated against the     |
 |                        |   signature.                                          |
 +------------------------+-------------------------------------------------------+
-| ``Diagram Elements``   | DF1                                                   |
+| Diagram Elements       | DF1                                                   |
 +------------------------+-------------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2                                              |
-| Components``           |                                                       |
+| Affected TF-A          | BL1, BL2                                              |
+| Components             |                                                       |
 +------------------------+-------------------------------------------------------+
-| ``Assets``             | Code Execution                                        |
+| Assets                 | Code Execution                                        |
 +------------------------+-------------------------------------------------------+
-| ``Threat Agent``       | PhysicalAccess                                        |
+| Threat Agent           | PhysicalAccess                                        |
 +------------------------+-------------------------------------------------------+
-| ``Threat Type``        | Tampering, Elevation of Privilege                     |
+| Threat Type            | Tampering, Elevation of Privilege                     |
 +------------------------+---------------------+-----------------+---------------+
-| ``Application``        | ``Server``          | ``IoT``         | ``Mobile``    |
+| Application            | Server              | IoT             | Mobile        |
 +------------------------+---------------------+-----------------+---------------+
-| ``Impact``             | N/A                 | Critical (5)    | Critical (5)  |
+| Impact                 | N/A                 | Critical (5)    | Critical (5)  |
 +------------------------+---------------------+-----------------+---------------+
-| ``Likelihood``         | N/A                 | Medium (3)      | Medium (3)    |
+| Likelihood             | N/A                 | Medium (3)      | Medium (3)    |
 +------------------------+---------------------+-----------------+---------------+
-| ``Total Risk Rating``  | N/A                 | High (15)       | High (15)     |
+| Total Risk Rating      | N/A                 | High (15)       | High (15)     |
 +------------------------+---------------------+-----------------+---------------+
-| ``Mitigations``        | | The most effective mitigation is adding glitching   |
+| Mitigations            | | The most effective mitigation is adding glitching   |
 |                        |   detection and mitigation circuit at the hardware    |
 |                        |   level. However, software techniques,                |
 |                        |   such as adding redundant checks when performing     |
@@ -413,7 +425,7 @@
 +------------------------+---------------------------------------------------+
 | ID                     | 05                                                |
 +========================+===================================================+
-| ``Threat``             | | **Information leak via UART logs such as        |
+| Threat                 | | **Information leak via UART logs such as        |
 |                        |   crashes**                                       |
 |                        |                                                   |
 |                        | | During the development stages of software it is |
@@ -426,26 +438,26 @@
 |                        |   attacker to develop a working exploit if left   |
 |                        |   in the production version.                      |
 +------------------------+---------------------------------------------------+
-| ``Diagram Elements``   | DF2                                               |
+| Diagram Elements       | DF2                                               |
 +------------------------+---------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2, BL31                                    |
-| Components``           |                                                   |
+| Affected TF-A          | BL1, BL2, BL31                                    |
+| Components             |                                                   |
 +------------------------+---------------------------------------------------+
-| ``Assets``             | Sensitive Data                                    |
+| Assets                 | Sensitive Data                                    |
 +------------------------+---------------------------------------------------+
-| ``Threat Agent``       | AppDebug                                          |
+| Threat Agent           | AppDebug                                          |
 +------------------------+---------------------------------------------------+
-| ``Threat Type``        | Information Disclosure                            |
+| Threat Type            | Information Disclosure                            |
 +------------------------+------------------+----------------+---------------+
-| ``Application``        | ``Server``       | ``IoT``        | ``Mobile``    |
+| Application            | Server           | IoT            | Mobile        |
 +------------------------+------------------+----------------+---------------+
-| ``Impact``             | N/A              | Low (2)        | Low (2)       |
+| Impact                 | N/A              | Low (2)        | Low (2)       |
 +------------------------+------------------+----------------+---------------+
-| ``Likelihood``         | N/A              | High (4)       | High (4)      |
+| Likelihood             | N/A              | High (4)       | High (4)      |
 +------------------------+------------------+----------------+---------------+
-| ``Total Risk Rating``  | N/A              | Medium (8)     | Medium (8)    |
+| Total Risk Rating      | N/A              | Medium (8)     | Medium (8)    |
 +------------------------+------------------+----------------+---------------+
-| ``Mitigations``        | | In TF-A, crash reporting is only enabled for    |
+| Mitigations            | | In TF-A, crash reporting is only enabled for    |
 |                        |   debug builds by default. Alternatively, the log |
 |                        |   level can be tuned at build time (from verbose  |
 |                        |   to no output at all), independently of the      |
@@ -455,7 +467,7 @@
 +------------------------+----------------------------------------------------+
 | ID                     | 06                                                 |
 +========================+====================================================+
-| ``Threat``             | | **An attacker can read sensitive data and        |
+| Threat                 | | **An attacker can read sensitive data and        |
 |                        |   execute arbitrary code through the external      |
 |                        |   debug and trace interface**                      |
 |                        |                                                    |
@@ -468,27 +480,27 @@
 |                        |   attacker to read sensitive data and execute      |
 |                        |   arbitrary code.                                  |
 +------------------------+----------------------------------------------------+
-| ``Diagram Elements``   | DF3                                                |
+| Diagram Elements       | DF3                                                |
 +------------------------+----------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2, BL31                                     |
-| Components``           |                                                    |
+| Affected TF-A          | BL1, BL2, BL31                                     |
+| Components             |                                                    |
 +------------------------+----------------------------------------------------+
-| ``Assets``             | Code Execution, Sensitive Data                     |
+| Assets                 | Code Execution, Sensitive Data                     |
 +------------------------+----------------------------------------------------+
-| ``Threat Agent``       | AppDebug                                           |
+| Threat Agent           | AppDebug                                           |
 +------------------------+----------------------------------------------------+
-| ``Threat Type``        | Tampering, Information Disclosure,                 |
+| Threat Type            | Tampering, Information Disclosure,                 |
 |                        | Elevation of privilege                             |
 +------------------------+------------------+---------------+-----------------+
-| ``Application``        | ``Server``       | ``IoT``       | ``Mobile``      |
+| Application            | Server           | IoT           | Mobile          |
 +------------------------+------------------+---------------+-----------------+
-| ``Impact``             | N/A              | High (4)      | High (4)        |
+| Impact                 | N/A              | High (4)      | High (4)        |
 +------------------------+------------------+---------------+-----------------+
-| ``Likelihood``         | N/A              | Critical (5)  | Critical (5)    |
+| Likelihood             | N/A              | Critical (5)  | Critical (5)    |
 +------------------------+------------------+---------------+-----------------+
-| ``Total Risk Rating``  | N/A              | Critical (20) | Critical (20)   |
+| Total Risk Rating      | N/A              | Critical (20) | Critical (20)   |
 +------------------------+------------------+---------------+-----------------+
-| ``Mitigations``        | | Configuration of debug and trace capabilities is |
+| Mitigations            | | Configuration of debug and trace capabilities is |
 |                        |   platform specific. Therefore, platforms must     |
 |                        |   disable the debug and trace capability for       |
 |                        |   production releases or enable proper debug       |
@@ -498,7 +510,7 @@
 +------------------------+------------------------------------------------------+
 | ID                     | 07                                                   |
 +========================+======================================================+
-| ``Threat``             | | **An attacker can perform a denial-of-service      |
+| Threat                 | | **An attacker can perform a denial-of-service      |
 |                        |   attack by using a broken SMC call that causes the  |
 |                        |   system to reboot or enter into unknown state.**    |
 |                        |                                                      |
@@ -508,26 +520,26 @@
 |                        |   by calling unimplemented SMC call or by passing    |
 |                        |   invalid arguments.                                 |
 +------------------------+------------------------------------------------------+
-| ``Diagram Elements``   | DF4, DF5                                             |
+| Diagram Elements       | DF4, DF5                                             |
 +------------------------+------------------------------------------------------+
-| ``Affected TF-A        | BL31                                                 |
-| Components``           |                                                      |
+| Affected TF-A          | BL31                                                 |
+| Components             |                                                      |
 +------------------------+------------------------------------------------------+
-| ``Assets``             | Availability                                         |
+| Assets                 | Availability                                         |
 +------------------------+------------------------------------------------------+
-| ``Threat Agent``       | NSCode, SecCode                                      |
+| Threat Agent           | NSCode, SecCode                                      |
 +------------------------+------------------------------------------------------+
-| ``Threat Type``        | Denial of Service                                    |
+| Threat Type            | Denial of Service                                    |
 +------------------------+-------------------+----------------+-----------------+
-| ``Application``        | ``Server``        | ``IoT``        | ``Mobile``      |
+| Application            | Server            | IoT            | Mobile          |
 +------------------------+-------------------+----------------+-----------------+
-| ``Impact``             | Medium (3)        | Medium (3)     | Medium (3)      |
+| Impact                 | Medium (3)        | Medium (3)     | Medium (3)      |
 +------------------------+-------------------+----------------+-----------------+
-| ``Likelihood``         | High (4)          | High (4)       | High (4)        |
+| Likelihood             | High (4)          | High (4)       | High (4)        |
 +------------------------+-------------------+----------------+-----------------+
-| ``Total Risk Rating``  | High (12)         | High (12)      | High (12)       |
+| Total Risk Rating      | High (12)         | High (12)      | High (12)       |
 +------------------------+-------------------+----------------+-----------------+
-| ``Mitigations``        | | The generic TF-A code validates SMC function ids   |
+| Mitigations            | | The generic TF-A code validates SMC function ids   |
 |                        |   and arguments before using them.                   |
 |                        |   Platforms that implement SiP services must also    |
 |                        |   validate SMC call arguments.                       |
@@ -536,20 +548,15 @@
 +------------------------+------------------------------------------------------+
 | ID                     | 08                                                   |
 +========================+======================================================+
-| ``Threat``             | | **Memory corruption due to memory overflows and    |
+| Threat                 | | **Memory corruption due to memory overflows and    |
 |                        |   lack of boundary checking when accessing resources |
 |                        |   could allow an attacker to execute arbitrary code, |
 |                        |   modify some state variable to change the normal    |
 |                        |   flow of the program, or leak sensitive             |
 |                        |   information**                                      |
 |                        |                                                      |
-|                        | | Like in other software, the Trusted Firmware has   |
-|                        |   multiple points where memory corruption security   |
-|                        |   errors can arise. Memory corruption is a dangerous |
-|                        |   security issue since it could allow an attacker    |
-|                        |   to execute arbitrary code, modify some state       |
-|                        |   variable to change the normal flow of the program, |
-|                        |   or leak sensitive information.                     |
+|                        | | Like in other software, TF-A has multiple points   |
+|                        |   where memory corruption security errors can arise. |
 |                        |                                                      |
 |                        | | Some of the errors include integer overflow,       |
 |                        |   buffer overflow, incorrect array boundary checks,  |
@@ -558,27 +565,27 @@
 |                        |   validations might also result in these kinds of    |
 |                        |   errors in release builds.                          |
 +------------------------+------------------------------------------------------+
-| ``Diagram Elements``   | DF4, DF5                                             |
+| Diagram Elements       | DF4, DF5                                             |
 +------------------------+------------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2, BL31                                       |
-| Components``           |                                                      |
+| Affected TF-A          | BL1, BL2, BL31                                       |
+| Components             |                                                      |
 +------------------------+------------------------------------------------------+
-| ``Assets``             | Code Execution, Sensitive Data                       |
+| Assets                 | Code Execution, Sensitive Data                       |
 +------------------------+------------------------------------------------------+
-| ``Threat Agent``       | NSCode, SecCode                                      |
+| Threat Agent           | NSCode, SecCode                                      |
 +------------------------+------------------------------------------------------+
-| ``Threat Type``        | Tampering, Information Disclosure,                   |
+| Threat Type            | Tampering, Information Disclosure,                   |
 |                        | Elevation of Privilege                               |
 +------------------------+-------------------+-----------------+----------------+
-| ``Application``        | ``Server``        | ``IoT``         | ``Mobile``     |
+| Application            | Server            | IoT             | Mobile         |
 +------------------------+-------------------+-----------------+----------------+
-| ``Impact``             | Critical (5)      | Critical (5)    | Critical (5)   |
+| Impact                 | Critical (5)      | Critical (5)    | Critical (5)   |
 +------------------------+-------------------+-----------------+----------------+
-| ``Likelihood``         | Medium (3         | Medium (3)      | Medium (3)     |
+| Likelihood             | Medium (3)        | Medium (3)      | Medium (3)     |
 +------------------------+-------------------+-----------------+----------------+
-| ``Total Risk Rating``  | High (15)         | High (15)       | High (15)      |
+| Total Risk Rating      | High (15)         | High (15)       | High (15)      |
 +------------------------+-------------------+-----------------+----------------+
-| ``Mitigations``        | | TF-A uses a combination of manual code reviews and |
+| Mitigations            | | TF-A uses a combination of manual code reviews and |
 |                        |   automated program analysis and testing to detect   |
 |                        |   and fix memory corruption bugs. All TF-A code      |
 |                        |   including platform code go through manual code     |
@@ -607,7 +614,7 @@
 +------------------------+------------------------------------------------------+
 | ID                     | 09                                                   |
 +========================+======================================================+
-| ``Threat``             | | **Improperly handled SMC calls can leak register   |
+| Threat                 | | **Improperly handled SMC calls can leak register   |
 |                        |   contents**                                         |
 |                        |                                                      |
 |                        | | When switching between secure and non-secure       |
@@ -615,26 +622,26 @@
 |                        |   register contents of other normal world clients    |
 |                        |   can be leaked.                                     |
 +------------------------+------------------------------------------------------+
-| ``Diagram Elements``   | DF5                                                  |
+| Diagram Elements       | DF5                                                  |
 +------------------------+------------------------------------------------------+
-| ``Affected TF-A        | BL31                                                 |
-| Components``           |                                                      |
+| Affected TF-A          | BL31                                                 |
+| Components             |                                                      |
 +------------------------+------------------------------------------------------+
-| ``Assets``             | Sensitive Data                                       |
+| Assets                 | Sensitive Data                                       |
 +------------------------+------------------------------------------------------+
-| ``Threat Agent``       | NSCode                                               |
+| Threat Agent           | NSCode                                               |
 +------------------------+------------------------------------------------------+
-| ``Threat Type``        | Information Disclosure                               |
+| Threat Type            | Information Disclosure                               |
 +------------------------+-------------------+----------------+-----------------+
-| ``Application``        | ``Server``        | ``IoT``        | ``Mobile``      |
+| Application            | Server            | IoT            | Mobile          |
 +------------------------+-------------------+----------------+-----------------+
-| ``Impact``             | Medium (3)        | Medium (3)     | Medium (3)      |
+| Impact                 | Medium (3)        | Medium (3)     | Medium (3)      |
 +------------------------+-------------------+----------------+-----------------+
-| ``Likelihood``         | High (4)          | High (4)       | High (4)        |
+| Likelihood             | High (4)          | High (4)       | High (4)        |
 +------------------------+-------------------+----------------+-----------------+
-| ``Total Risk Rating``  | High (12)         | High (12)      | High (12)       |
+| Total Risk Rating      | High (12)         | High (12)      | High (12)       |
 +------------------------+-------------------+----------------+-----------------+
-| ``Mitigations``        | | TF-A saves and restores registers                  |
+| Mitigations            | | TF-A saves and restores registers                  |
 |                        |   by default when switching contexts. Build options  |
 |                        |   are also provided to save/restore additional       |
 |                        |   registers such as floating-point registers.        |
@@ -643,7 +650,7 @@
 +------------------------+-----------------------------------------------------+
 | ID                     | 10                                                  |
 +========================+=====================================================+
-| ``Threat``             | | **SMC calls can leak sensitive information from   |
+| Threat                 | | **SMC calls can leak sensitive information from   |
 |                        |   TF-A memory via microarchitectural side channels**|
 |                        |                                                     |
 |                        | | Microarchitectural side-channel attacks such as   |
@@ -652,26 +659,26 @@
 |                        |   use this kind of attack to leak sensitive         |
 |                        |   data from TF-A memory.                            |
 +------------------------+-----------------------------------------------------+
-| ``Diagram Elements``   | DF4, DF5                                            |
+| Diagram Elements       | DF4, DF5                                            |
 +------------------------+-----------------------------------------------------+
-| ``Affected TF-A        | BL31                                                |
-| Components``           |                                                     |
+| Affected TF-A          | BL31                                                |
+| Components             |                                                     |
 +------------------------+-----------------------------------------------------+
-| ``Assets``             | Sensitive Data                                      |
+| Assets                 | Sensitive Data                                      |
 +------------------------+-----------------------------------------------------+
-| ``Threat Agent``       | SecCode, NSCode                                     |
+| Threat Agent           | SecCode, NSCode                                     |
 +------------------------+-----------------------------------------------------+
-| ``Threat Type``        | Information Disclosure                              |
+| Threat Type            | Information Disclosure                              |
 +------------------------+-------------------+----------------+----------------+
-| ``Application``        | ``Server``        | ``IoT``        | ``Mobile``     |
+| Application            | Server            | IoT            | Mobile         |
 +------------------------+-------------------+----------------+----------------+
-| ``Impact``             | Medium (3)        | Medium (3)     | Medium (3)     |
+| Impact                 | Medium (3)        | Medium (3)     | Medium (3)     |
 +------------------------+-------------------+----------------+----------------+
-| ``Likelihood``         | Medium (3)        | Medium (3)     | Medium (3)     |
+| Likelihood             | Medium (3)        | Medium (3)     | Medium (3)     |
 +------------------------+-------------------+----------------+----------------+
-| ``Total Risk Rating``  | Medium (9)        | Medium (9)     | Medium (9)     |
+| Total Risk Rating      | Medium (9)        | Medium (9)     | Medium (9)     |
 +------------------------+-------------------+----------------+----------------+
-| ``Mitigations``        | | TF-A implements software mitigations for Spectre  |
+| Mitigations            | | TF-A implements software mitigations for Spectre  |
 |                        |   type attacks as recommended by `Cache Speculation |
 |                        |   Side-channels`_ for the generic code. SiPs should |
 |                        |   implement similar mitigations for code that is    |
@@ -681,7 +688,7 @@
 +------------------------+----------------------------------------------------+
 | ID                     | 11                                                 |
 +========================+====================================================+
-| ``Threat``             | | **Misconfiguration of the Memory Management Unit |
+| Threat                 | | **Misconfiguration of the Memory Management Unit |
 |                        |   (MMU) may allow a normal world software to       |
 |                        |   access sensitive data or execute arbitrary       |
 |                        |   code**                                           |
@@ -692,26 +699,26 @@
 |                        |   execute code if the proper security mechanisms   |
 |                        |   are not in place.                                |
 +------------------------+----------------------------------------------------+
-| ``Diagram Elements``   | DF5, DF6                                           |
+| Diagram Elements       | DF5, DF6                                           |
 +------------------------+----------------------------------------------------+
-| ``Affected TF-A        | BL1, BL2, BL31                                     |
-| Components``           |                                                    |
+| Affected TF-A          | BL1, BL2, BL31                                     |
+| Components             |                                                    |
 +------------------------+----------------------------------------------------+
-| ``Assets``             | Sensitive Data, Code execution                     |
+| Assets                 | Sensitive Data, Code execution                     |
 +------------------------+----------------------------------------------------+
-| ``Threat Agent``       | NSCode                                             |
+| Threat Agent           | NSCode                                             |
 +------------------------+----------------------------------------------------+
-| ``Threat Type``        | Information Disclosure, Elevation of Privilege     |
+| Threat Type            | Information Disclosure, Elevation of Privilege     |
 +------------------------+-----------------+-----------------+----------------+
-| ``Application``        | ``Server``      | ``IoT``         | ``Mobile``     |
+| Application            | Server          | IoT             | Mobile         |
 +------------------------+-----------------+-----------------+----------------+
-| ``Impact``             | Critical (5)    | Critical (5)    | Critical (5)   |
+| Impact                 | Critical (5)    | Critical (5)    | Critical (5)   |
 +------------------------+-----------------+-----------------+----------------+
-| ``Likelihood``         | High (4)        | High (4)        | High (4)       |
+| Likelihood             | High (4)        | High (4)        | High (4)       |
 +------------------------+-----------------+-----------------+----------------+
-| ``Total Risk Rating``  | Critical (20)   | Critical (20)   | Critical (20)  |
+| Total Risk Rating      | Critical (20)   | Critical (20)   | Critical (20)  |
 +------------------------+-----------------+-----------------+----------------+
-| ``Mitigations``        | | In TF-A, configuration of the MMU is done        |
+| Mitigations            | | In TF-A, configuration of the MMU is done        |
 |                        |   through a translation tables library. The        |
 |                        |   library provides APIs to define memory regions   |
 |                        |   and assign attributes including memory types and |
@@ -729,7 +736,7 @@
 +------------------------+-----------------------------------------------------+
 | ID                     | 12                                                  |
 +========================+=====================================================+
-| ``Threat``             | | **Incorrect configuration of Performance Monitor  |
+| Threat                 | | **Incorrect configuration of Performance Monitor  |
 |                        |   Unit (PMU) counters can allow an attacker to      |
 |                        |   mount side-channel attacks using information      |
 |                        |   exposed by the counters**                         |
@@ -741,24 +748,24 @@
 |                        |   software) to potentially  carry out               |
 |                        |   side-channel timing attacks against TF-A.         |
 +------------------------+-----------------------------------------------------+
-| ``Diagram Elements``   | DF5, DF6                                            |
+| Diagram Elements       | DF5, DF6                                            |
 +------------------------+-----------------------------------------------------+
-| ``Affected TF-A        | BL31                                                |
-| Components``           |                                                     |
+| Affected TF-A          | BL31                                                |
+| Components             |                                                     |
 +------------------------+-----------------------------------------------------+
-| ``Assets``             | Sensitive Data                                      |
+| Assets                 | Sensitive Data                                      |
 +------------------------+-----------------------------------------------------+
-| ``Threat Agent``       | NSCode                                              |
+| Threat Agent           | NSCode                                              |
 +------------------------+-----------------------------------------------------+
-| ``Threat Type``        | Information Disclosure                              |
+| Threat Type            | Information Disclosure                              |
 +------------------------+-------------------+----------------+----------------+
-| ``Impact``             | Medium (3)        | Medium (3)     | Medium (3)     |
+| Impact                 | Medium (3)        | Medium (3)     | Medium (3)     |
 +------------------------+-------------------+----------------+----------------+
-| ``Likelihood``         | Low (2)           | Low (2)        | Low (2)        |
+| Likelihood             | Low (2)           | Low (2)        | Low (2)        |
 +------------------------+-------------------+----------------+----------------+
-| ``Total Risk Rating``  | Medium (6)        | Medium (6)     | Medium (6)     |
+| Total Risk Rating      | Medium (6)        | Medium (6)     | Medium (6)     |
 +------------------------+-------------------+----------------+----------------+
-| ``Mitigations``        | | TF-A follows mitigation strategies as described   |
+| Mitigations            | | TF-A follows mitigation strategies as described   |
 |                        |   in `Secure Development Guidelines`_. General      |
 |                        |   events and cycle counting in the Secure world is  |
 |                        |   prohibited by default when applicable. However,   |
@@ -774,7 +781,7 @@
 
 --------------
 
-*Copyright (c) 2021, Arm Limited. All rights reserved.*
+*Copyright (c) 2021-2022, Arm Limited. All rights reserved.*
 
 
 .. _STRIDE threat analysis technique: https://docs.microsoft.com/en-us/azure/security/develop/threat-modeling-tool-threats#stride-model
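
As an illustration of the translation-tables mitigation described for threat 11, the following is a minimal sketch of how a platform might map a Secure data region with restrictive attributes. It assumes TF-A's xlat_tables_v2 API; the region base and size macros are placeholders, not values from this patch.

#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

/* Placeholder region; a real platform defines its own memory layout. */
#define PLAT_SEC_DATA_BASE	UL(0x04000000)
#define PLAT_SEC_DATA_SIZE	UL(0x00100000)

static void plat_map_secure_data_example(void)
{
	/* Secure read/write data that must never be executable. */
	mmap_add_region(PLAT_SEC_DATA_BASE, PLAT_SEC_DATA_BASE,
			PLAT_SEC_DATA_SIZE,
			MT_MEMORY | MT_RW | MT_SECURE | MT_EXECUTE_NEVER);

	init_xlat_tables();
	enable_mmu_el3(0U);
}

The point of the mitigation is that anything not explicitly mapped is unreachable, and attributes such as MT_EXECUTE_NEVER bound what the mapped regions can be used for.
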
diff --git a/drivers/arm/mhu/mhu_v2_x.c b/drivers/arm/mhu/mhu_v2_x.c
new file mode 100644
index 0000000..3103b92
--- /dev/null
+++ b/drivers/arm/mhu/mhu_v2_x.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "mhu_v2_x.h"
+
+#define MHU_V2_X_MAX_CHANNELS		124
+#define MHU_V2_1_MAX_CHCOMB_INT		4
+#define ENABLE				0x1
+#define DISABLE				0x0
+#define CLEAR_INTR			0x1
+#define CH_PER_CH_COMB			0x20
+#define SEND_FRAME(p_mhu)		((struct mhu_v2_x_send_frame_t *)p_mhu)
+#define RECV_FRAME(p_mhu)		((struct mhu_v2_x_recv_frame_t *)p_mhu)
+
+#define MHU_MAJOR_REV_V2		0x1u
+#define MHU_MINOR_REV_2_0		0x0u
+#define MHU_MINOR_REV_2_1		0x1u
+
+struct mhu_v2_x_send_ch_window_t {
+	/* Offset: 0x00 (R/ ) Channel Status */
+	volatile uint32_t ch_st;
+	/* Offset: 0x04 (R/ ) Reserved */
+	volatile uint32_t reserved_0;
+	/* Offset: 0x08 (R/ ) Reserved */
+	volatile uint32_t reserved_1;
+	/* Offset: 0x0C ( /W) Channel Set */
+	volatile uint32_t ch_set;
+	/* Offset: 0x10 (R/ ) Channel Interrupt Status (Reserved in 2.0) */
+	volatile uint32_t ch_int_st;
+	/* Offset: 0x14 ( /W) Channel Interrupt Clear  (Reserved in 2.0) */
+	volatile uint32_t ch_int_clr;
+	/* Offset: 0x18 (R/W) Channel Interrupt Enable (Reserved in 2.0) */
+	volatile uint32_t ch_int_en;
+	/* Offset: 0x1C (R/ ) Reserved */
+	volatile uint32_t reserved_2;
+};
+
+struct mhu_v2_x_send_frame_t {
+	/* Offset: 0x000 ( / ) Sender Channel Window 0-123 */
+	struct mhu_v2_x_send_ch_window_t send_ch_window[MHU_V2_X_MAX_CHANNELS];
+	/* Offset: 0xF80 (R/ ) Message Handling Unit Configuration */
+	volatile uint32_t mhu_cfg;
+	/* Offset: 0xF84 (R/W) Response Configuration */
+	volatile uint32_t resp_cfg;
+	/* Offset: 0xF88 (R/W) Access Request */
+	volatile uint32_t access_request;
+	/* Offset: 0xF8C (R/ ) Access Ready */
+	volatile uint32_t access_ready;
+	/* Offset: 0xF90 (R/ ) Interrupt Status */
+	volatile uint32_t int_st;
+	/* Offset: 0xF94 ( /W) Interrupt Clear */
+	volatile uint32_t int_clr;
+	/* Offset: 0xF98 (R/W) Interrupt Enable */
+	volatile uint32_t int_en;
+	/* Offset: 0xF9C (R/ ) Reserved */
+	volatile uint32_t reserved_0;
+	/* Offset: 0xFA0 (R/W) Channel Combined IRQ Stat (Reserved in 2.0) */
+	volatile uint32_t ch_comb_int_st[MHU_V2_1_MAX_CHCOMB_INT];
+	/* Offset: 0xFC4 (R/ ) Reserved */
+	volatile uint32_t reserved_1[6];
+	/* Offset: 0xFC8 (R/ ) Implementer Identification Register */
+	volatile uint32_t iidr;
+	/* Offset: 0xFCC (R/ ) Architecture Identification Register */
+	volatile uint32_t aidr;
+	/* Offset: 0xFD0 (R/ )  */
+	volatile uint32_t pid_1[4];
+	/* Offset: 0xFE0 (R/ )  */
+	volatile uint32_t pid_0[4];
+	/* Offset: 0xFF0 (R/ )  */
+	volatile uint32_t cid[4];
+};
+
+struct mhu_v2_x_rec_ch_window_t {
+	/* Offset: 0x00 (R/ ) Channel Status */
+	volatile uint32_t ch_st;
+	/* Offset: 0x04 (R/ ) Channel Status Masked */
+	volatile uint32_t ch_st_msk;
+	/* Offset: 0x08 ( /W) Channel Clear */
+	volatile uint32_t ch_clr;
+	/* Offset: 0x0C (R/ ) Reserved */
+	volatile uint32_t reserved_0;
+	/* Offset: 0x10 (R/ ) Channel Mask Status */
+	volatile uint32_t ch_msk_st;
+	/* Offset: 0x14 ( /W) Channel Mask Set */
+	volatile uint32_t ch_msk_set;
+	/* Offset: 0x18 ( /W) Channel Mask Clear */
+	volatile uint32_t ch_msk_clr;
+	/* Offset: 0x1C (R/ ) Reserved */
+	volatile uint32_t reserved_1;
+};
+
+struct mhu_v2_x_recv_frame_t {
+	/* Offset: 0x000 ( / ) Receiver Channel Window 0-123 */
+	struct mhu_v2_x_rec_ch_window_t rec_ch_window[MHU_V2_X_MAX_CHANNELS];
+	/* Offset: 0xF80 (R/ ) Message Handling Unit Configuration */
+	volatile uint32_t mhu_cfg;
+	/* Offset: 0xF84 (R/ ) Reserved */
+	volatile uint32_t reserved_0[3];
+	/* Offset: 0xF90 (R/ ) Interrupt Status (Reserved in 2.0) */
+	volatile uint32_t int_st;
+	/* Offset: 0xF94 (R/ ) Interrupt Clear  (Reserved in 2.0) */
+	volatile uint32_t int_clr;
+	/* Offset: 0xF98 (R/W) Interrupt Enable (Reserved in 2.0) */
+	volatile uint32_t int_en;
+	/* Offset: 0xF9C (R/ ) Reserved  */
+	volatile uint32_t reserved_1;
+	/* Offset: 0xFA0 (R/ ) Channel Combined IRQ Stat (Reserved in 2.0) */
+	volatile uint32_t ch_comb_int_st[MHU_V2_1_MAX_CHCOMB_INT];
+	/* Offset: 0xFB0 (R/ ) Reserved */
+	volatile uint32_t reserved_2[6];
+	/* Offset: 0xFC8 (R/ ) Implementer Identification Register */
+	volatile uint32_t iidr;
+	/* Offset: 0xFCC (R/ ) Architecture Identification Register */
+	volatile uint32_t aidr;
+	/* Offset: 0xFD0 (R/ )  */
+	volatile uint32_t pid_1[4];
+	/* Offset: 0xFE0 (R/ )  */
+	volatile uint32_t pid_0[4];
+	/* Offset: 0xFF0 (R/ )  */
+	volatile uint32_t cid[4];
+};
+
+union mhu_v2_x_frame {
+	struct mhu_v2_x_send_frame_t send_frame;
+	struct mhu_v2_x_recv_frame_t recv_frame;
+};
+
+enum mhu_v2_x_error_t mhu_v2_x_driver_init(struct mhu_v2_x_dev_t *dev,
+	 enum mhu_v2_x_supported_revisions rev)
+{
+	uint32_t AIDR = 0;
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (dev->is_initialized) {
+		return MHU_V_2_X_ERR_ALREADY_INIT;
+	}
+
+	if (rev == MHU_REV_READ_FROM_HW) {
+		/* Read revision from HW */
+		if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
+			AIDR = p_mhu->recv_frame.aidr;
+		} else {
+			AIDR = p_mhu->send_frame.aidr;
+		}
+
+		/* Get bits 7:4 to read major revision */
+		if (((AIDR >> 4) & 0b1111) != MHU_MAJOR_REV_V2) {
+			/* Unsupported MHU version */
+			return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
+		} /* No need to save major version, driver only supports MHUv2 */
+
+		/* Get bits 3:0 to read minor revision */
+		dev->subversion = AIDR & 0b1111;
+
+		if (dev->subversion != MHU_MINOR_REV_2_0 &&
+			dev->subversion != MHU_MINOR_REV_2_1) {
+			/* Unsupported subversion */
+			return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
+		}
+	} else {
+		/* Revisions were provided by caller */
+		if (rev == MHU_REV_2_0) {
+			dev->subversion = MHU_MINOR_REV_2_0;
+		} else if (rev == MHU_REV_2_1) {
+			dev->subversion = MHU_MINOR_REV_2_1;
+		} else {
+			/* Unsupported subversion */
+			return MHU_V_2_X_ERR_UNSUPPORTED_VERSION;
+		} /* No need to save major version, driver only supports MHUv2 */
+	}
+
+	dev->is_initialized = true;
+
+	return MHU_V_2_X_ERR_NONE;
+}
+
+uint32_t mhu_v2_x_get_num_channel_implemented(const struct mhu_v2_x_dev_t *dev)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_SENDER_FRAME) {
+		return (SEND_FRAME(p_mhu))->mhu_cfg;
+	} else {
+		assert(dev->frame == MHU_V2_X_RECEIVER_FRAME);
+		return (RECV_FRAME(p_mhu))->mhu_cfg;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_send(const struct mhu_v2_x_dev_t *dev,
+	 uint32_t channel, uint32_t val)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_SENDER_FRAME) {
+		(SEND_FRAME(p_mhu))->send_ch_window[channel].ch_set = val;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_poll(const struct mhu_v2_x_dev_t *dev,
+	 uint32_t channel, uint32_t *value)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_SENDER_FRAME) {
+		*value = (SEND_FRAME(p_mhu))->send_ch_window[channel].ch_st;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_clear(const struct mhu_v2_x_dev_t *dev,
+	 uint32_t channel)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
+		(RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_clr = UINT32_MAX;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_receive(
+	 const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t *value)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
+		*value = (RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_st;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_mask_set(
+	 const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
+		(RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_msk_set = mask;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_channel_mask_clear(
+	 const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame == MHU_V2_X_RECEIVER_FRAME) {
+		(RECV_FRAME(p_mhu))->rec_ch_window[channel].ch_msk_clr = mask;
+		return MHU_V_2_X_ERR_NONE;
+	} else {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_initiate_transfer(
+	 const struct mhu_v2_x_dev_t *dev)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame != MHU_V2_X_SENDER_FRAME) {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+
+	(SEND_FRAME(p_mhu))->access_request = ENABLE;
+
+	while (!((SEND_FRAME(p_mhu))->access_ready)) {
+		/* Wait in a loop for access ready signal to be high */
+		;
+	}
+
+	return MHU_V_2_X_ERR_NONE;
+}
+
+enum mhu_v2_x_error_t mhu_v2_x_close_transfer(const struct mhu_v2_x_dev_t *dev)
+{
+	union mhu_v2_x_frame *p_mhu;
+
+	assert(dev != NULL);
+
+	p_mhu = (union mhu_v2_x_frame *)dev->base;
+
+	if (!(dev->is_initialized)) {
+		return MHU_V_2_X_ERR_NOT_INIT;
+	}
+
+	if (dev->frame != MHU_V2_X_SENDER_FRAME) {
+		return MHU_V_2_X_ERR_INVALID_ARG;
+	}
+
+	(SEND_FRAME(p_mhu))->access_request = DISABLE;
+
+	return MHU_V_2_X_ERR_NONE;
+}
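
For reference, a minimal sender-side sketch written against the functions implemented in this file; the frame base address is supplied by the caller rather than taken from any real platform.

#include <stdint.h>

#include "mhu_v2_x.h"

/* Send one word on channel 0 and wait for the receiver to clear it. */
int mhu_v2_x_send_word_example(uintptr_t sender_frame_base, uint32_t word)
{
	struct mhu_v2_x_dev_t dev = {
		.base = sender_frame_base,
		.frame = MHU_V2_X_SENDER_FRAME,
	};
	uint32_t status;

	if (mhu_v2_x_driver_init(&dev, MHU_REV_READ_FROM_HW) != MHU_V_2_X_ERR_NONE) {
		return -1;
	}

	/* Raise the access request and block until access ready is high. */
	if (mhu_v2_x_initiate_transfer(&dev) != MHU_V_2_X_ERR_NONE) {
		return -1;
	}

	/* Write the value to the channel SET register. */
	if (mhu_v2_x_channel_send(&dev, 0U, word) != MHU_V_2_X_ERR_NONE) {
		return -1;
	}

	/* The receiver acknowledges by clearing the channel status. */
	do {
		if (mhu_v2_x_channel_poll(&dev, 0U, &status) != MHU_V_2_X_ERR_NONE) {
			return -1;
		}
	} while (status != 0U);

	return (mhu_v2_x_close_transfer(&dev) == MHU_V_2_X_ERR_NONE) ? 0 : -1;
}
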
diff --git a/drivers/arm/mhu/mhu_v2_x.h b/drivers/arm/mhu/mhu_v2_x.h
new file mode 100644
index 0000000..10247d2
--- /dev/null
+++ b/drivers/arm/mhu/mhu_v2_x.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MHU_V2_X_H
+#define MHU_V2_X_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#define MHU_2_X_INTR_NR2R_OFF		(0x0u)
+#define MHU_2_X_INTR_R2NR_OFF		(0x1u)
+#define MHU_2_1_INTR_CHCOMB_OFF		(0x2u)
+
+#define MHU_2_X_INTR_NR2R_MASK		(0x1u << MHU_2_X_INTR_NR2R_OFF)
+#define MHU_2_X_INTR_R2NR_MASK		(0x1u << MHU_2_X_INTR_R2NR_OFF)
+#define MHU_2_1_INTR_CHCOMB_MASK	(0x1u << MHU_2_1_INTR_CHCOMB_OFF)
+
+enum mhu_v2_x_frame_t {
+	MHU_V2_X_SENDER_FRAME   = 0x0u,
+	MHU_V2_X_RECEIVER_FRAME = 0x1u,
+};
+
+enum mhu_v2_x_supported_revisions {
+	MHU_REV_READ_FROM_HW = 0,
+	MHU_REV_2_0,
+	MHU_REV_2_1,
+};
+
+struct mhu_v2_x_dev_t {
+	uintptr_t base;
+	enum mhu_v2_x_frame_t frame;
+	uint32_t subversion;	/*!< Hardware subversion: v2.X */
+	bool is_initialized;	/*!< Indicates if the MHU driver
+				 *   is initialized and enabled
+				 */
+};
+
+/**
+ * MHU v2 error enumeration types.
+ */
+enum mhu_v2_x_error_t {
+	MHU_V_2_X_ERR_NONE			=  0,
+	MHU_V_2_X_ERR_NOT_INIT			= -1,
+	MHU_V_2_X_ERR_ALREADY_INIT		= -2,
+	MHU_V_2_X_ERR_UNSUPPORTED_VERSION	= -3,
+	MHU_V_2_X_ERR_INVALID_ARG		= -4,
+	MHU_V_2_X_ERR_GENERAL			= -5
+};
+
+/**
+ * Initializes the driver.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * rev		MHU revision (if can't be identified from HW).
+ *
+ * Reads the MHU hardware version.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * MHU revision only has to be specified when versions can't be read
+ * from HW (ARCH_MAJOR_REV reg reads as 0x0).
+ *
+ * This function doesn't check if dev is NULL.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_driver_init(struct mhu_v2_x_dev_t *dev,
+	enum mhu_v2_x_supported_revisions rev);
+
+/**
+ * Returns the number of channels implemented.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ *
+ * This function doesn't check if dev is NULL.
+ */
+uint32_t mhu_v2_x_get_num_channel_implemented(
+		const struct mhu_v2_x_dev_t *dev);
+
+/**
+ * Sends the value over a channel.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Channel to send the value over.
+ * val		Value to send.
+ *
+ * Sends the value over a channel.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_send(const struct mhu_v2_x_dev_t *dev,
+	uint32_t channel, uint32_t val);
+
+/**
+ * Polls sender channel status.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Channel to poll the status of.
+ * value	Pointer to variable that will store the value.
+ *
+ * Polls sender channel status.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_poll(const struct mhu_v2_x_dev_t *dev,
+	uint32_t channel, uint32_t *value);
+
+/**
+ * Clears the channel after the value is sent over it.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Channel to clear.
+ *
+ * Clears the channel after the value is sent over it.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_clear(const struct mhu_v2_x_dev_t *dev,
+	uint32_t channel);
+
+/**
+ * Receives the value over a channel.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Channel to receive the value from.
+ * value	Pointer to variable that will store the value.
+ *
+ * Receives the value over a channel.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_receive(
+	const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t *value);
+
+/**
+ * Sets bits in the Channel Mask.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Which channel's mask to set.
+ * mask		Mask to be set over a receiver frame.
+ *
+ * Sets bits in the Channel Mask.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_mask_set(
+	const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask);
+
+/**
+ * Clears bits in the Channel Mask.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ * channel	Which channel's mask to clear.
+ * mask		Mask to be cleared over a receiver frame.
+ *
+ * Clears bits in the Channel Mask.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ * This function doesn't check if channel is implemented.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_channel_mask_clear(
+	const struct mhu_v2_x_dev_t *dev, uint32_t channel, uint32_t mask);
+
+/**
+ * Initiates an MHU transfer with the handshake signals.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ *
+ * Initiates an MHU transfer with the handshake signals in a blocking mode.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_initiate_transfer(
+	const struct mhu_v2_x_dev_t *dev);
+
+/**
+ * Closes an MHU transfer with the handshake signals.
+ *
+ * dev		MHU device struct mhu_v2_x_dev_t.
+ *
+ * Closes an MHU transfer with the handshake signals in a blocking mode.
+ *
+ * Returns mhu_v2_x_error_t error code.
+ *
+ * This function doesn't check if dev is NULL.
+ */
+enum mhu_v2_x_error_t mhu_v2_x_close_transfer(
+	const struct mhu_v2_x_dev_t *dev);
+
+#endif /* MHU_V2_X_H */
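
To complement the sender path, a minimal receiver-side sketch using the same API declared above; again the frame base address is a caller-supplied placeholder.

#include <stdint.h>

#include "mhu_v2_x.h"

/* Block until the sender writes a non-zero word on channel 0. */
int mhu_v2_x_receive_word_example(uintptr_t receiver_frame_base, uint32_t *word)
{
	struct mhu_v2_x_dev_t dev = {
		.base = receiver_frame_base,
		.frame = MHU_V2_X_RECEIVER_FRAME,
	};
	uint32_t val = 0U;

	if (mhu_v2_x_driver_init(&dev, MHU_REV_READ_FROM_HW) != MHU_V_2_X_ERR_NONE) {
		return -1;
	}

	/* Ensure channel 0 is not masked (the wrapper below does the same
	 * for its notification channel in mhu_init_receiver()).
	 */
	if (mhu_v2_x_channel_mask_clear(&dev, 0U, UINT32_MAX) != MHU_V_2_X_ERR_NONE) {
		return -1;
	}

	/* Busy-wait for the sender. */
	do {
		if (mhu_v2_x_channel_receive(&dev, 0U, &val) != MHU_V_2_X_ERR_NONE) {
			return -1;
		}
	} while (val == 0U);

	*word = val;

	/* Clearing the channel acts as the acknowledge to the sender. */
	return (mhu_v2_x_channel_clear(&dev, 0U) == MHU_V_2_X_ERR_NONE) ? 0 : -1;
}
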
diff --git a/drivers/arm/mhu/mhu_wrapper_v2_x.c b/drivers/arm/mhu/mhu_wrapper_v2_x.c
new file mode 100644
index 0000000..d8b7cfd
--- /dev/null
+++ b/drivers/arm/mhu/mhu_wrapper_v2_x.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <drivers/arm/mhu.h>
+
+#include "mhu_v2_x.h"
+
+#define MHU_NOTIFY_VALUE	(1234u)
+
+/*
+ * MHU devices for host:
+ * HSE: Host to Secure Enclave (sender device)
+ * SEH: Secure Enclave to Host (receiver device)
+ */
+struct mhu_v2_x_dev_t MHU1_HSE_DEV = {0, MHU_V2_X_SENDER_FRAME};
+struct mhu_v2_x_dev_t MHU1_SEH_DEV = {0, MHU_V2_X_RECEIVER_FRAME};
+
+static enum mhu_error_t error_mapping_to_mhu_error_t(enum mhu_v2_x_error_t err)
+{
+	switch (err) {
+	case MHU_V_2_X_ERR_NONE:
+		return MHU_ERR_NONE;
+	case MHU_V_2_X_ERR_NOT_INIT:
+		return MHU_ERR_NOT_INIT;
+	case MHU_V_2_X_ERR_ALREADY_INIT:
+		return MHU_ERR_ALREADY_INIT;
+	case MHU_V_2_X_ERR_UNSUPPORTED_VERSION:
+		return MHU_ERR_UNSUPPORTED_VERSION;
+	case MHU_V_2_X_ERR_INVALID_ARG:
+		return MHU_ERR_INVALID_ARG;
+	case MHU_V_2_X_ERR_GENERAL:
+		return MHU_ERR_GENERAL;
+	default:
+		return MHU_ERR_GENERAL;
+	}
+}
+
+static enum mhu_v2_x_error_t signal_and_wait_for_clear(void)
+{
+	enum mhu_v2_x_error_t err;
+	struct mhu_v2_x_dev_t *dev = &MHU1_HSE_DEV;
+	uint32_t val = MHU_NOTIFY_VALUE;
+	/* Using the last channel for notifications */
+	uint32_t channel_notify = mhu_v2_x_get_num_channel_implemented(dev) - 1;
+
+	err = mhu_v2_x_channel_send(dev, channel_notify, val);
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return err;
+	}
+
+	do {
+		err = mhu_v2_x_channel_poll(dev, channel_notify, &val);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			break;
+		}
+	} while (val != 0);
+
+	return err;
+}
+
+static enum mhu_v2_x_error_t wait_for_signal(void)
+{
+	enum mhu_v2_x_error_t err;
+	struct mhu_v2_x_dev_t *dev = &MHU1_SEH_DEV;
+	uint32_t val = 0;
+	/* Using the last channel for notifications */
+	uint32_t channel_notify = mhu_v2_x_get_num_channel_implemented(dev) - 1;
+
+	do {
+		err = mhu_v2_x_channel_receive(dev, channel_notify, &val);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			break;
+		}
+	} while (val != MHU_NOTIFY_VALUE);
+
+	return err;
+}
+
+static enum mhu_v2_x_error_t clear_and_wait_for_next_signal(void)
+{
+	enum mhu_v2_x_error_t err;
+	struct mhu_v2_x_dev_t *dev = &MHU1_SEH_DEV;
+	uint32_t num_channels = mhu_v2_x_get_num_channel_implemented(dev);
+	uint32_t i;
+
+	/* Clear all channels */
+	for (i = 0; i < num_channels; ++i) {
+		err = mhu_v2_x_channel_clear(dev, i);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return err;
+		}
+	}
+
+	return wait_for_signal();
+}
+
+enum mhu_error_t mhu_init_sender(uintptr_t mhu_sender_base)
+{
+	enum mhu_v2_x_error_t err;
+
+	assert(mhu_sender_base != (uintptr_t)NULL);
+
+	MHU1_HSE_DEV.base = mhu_sender_base;
+
+	err = mhu_v2_x_driver_init(&MHU1_HSE_DEV, MHU_REV_READ_FROM_HW);
+	return error_mapping_to_mhu_error_t(err);
+}
+
+enum mhu_error_t mhu_init_receiver(uintptr_t mhu_receiver_base)
+{
+	enum mhu_v2_x_error_t err;
+	uint32_t num_channels, i;
+
+	assert(mhu_receiver_base != (uintptr_t)NULL);
+
+	MHU1_SEH_DEV.base = mhu_receiver_base;
+
+	err = mhu_v2_x_driver_init(&MHU1_SEH_DEV, MHU_REV_READ_FROM_HW);
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return error_mapping_to_mhu_error_t(err);
+	}
+
+	num_channels = mhu_v2_x_get_num_channel_implemented(&MHU1_SEH_DEV);
+
+	/* Mask all channels except the notifying channel */
+	for (i = 0; i < (num_channels - 1); ++i) {
+		err = mhu_v2_x_channel_mask_set(&MHU1_SEH_DEV, i, UINT32_MAX);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return error_mapping_to_mhu_error_t(err);
+		}
+	}
+
+	/* The last channel is used for notifications */
+	err = mhu_v2_x_channel_mask_clear(
+		&MHU1_SEH_DEV, (num_channels - 1), UINT32_MAX);
+	return error_mapping_to_mhu_error_t(err);
+}
+
+/*
+ * Public function. See mhu.h
+ *
+ * The basic steps of transferring a message:
+ * 1.	Initiate MHU transfer.
+ * 2.	Send over the size of the payload on the first channel (channel 0);
+ *	this is the first 4 Bytes of the transfer. Continue with channel 1.
+ * 3.	Send over the payload, writing the channels one after the other
+ *	(4 Bytes each). The last available channel is reserved for controlling
+ *	the transfer.
+ *	When the last channel is reached or no more data is left, STOP.
+ * 4.	Notify the receiver using the last channel and wait for acknowledge.
+ *	If there is still data to transfer, jump to step 3. Otherwise, proceed.
+ * 5.	Close MHU transfer.
+ *
+ */
+enum mhu_error_t mhu_send_data(const uint8_t *send_buffer, size_t size)
+{
+	enum mhu_v2_x_error_t err;
+	struct mhu_v2_x_dev_t *dev = &MHU1_HSE_DEV;
+	uint32_t num_channels = mhu_v2_x_get_num_channel_implemented(dev);
+	uint32_t chan = 0;
+	uint32_t i;
+	uint32_t *p;
+
+	/* For simplicity, require the send_buffer to be 4-byte aligned */
+	if ((uintptr_t)send_buffer & 0x3U) {
+		return MHU_ERR_INVALID_ARG;
+	}
+
+	err = mhu_v2_x_initiate_transfer(dev);
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return error_mapping_to_mhu_error_t(err);
+	}
+
+	/* First send over the size of the actual message */
+	err = mhu_v2_x_channel_send(dev, chan, (uint32_t)size);
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return error_mapping_to_mhu_error_t(err);
+	}
+	chan++;
+
+	p = (uint32_t *)send_buffer;
+	for (i = 0; i < size; i += 4) {
+		err = mhu_v2_x_channel_send(dev, chan, *p++);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return error_mapping_to_mhu_error_t(err);
+		}
+		if (++chan == (num_channels - 1)) {
+			err = signal_and_wait_for_clear();
+			if (err != MHU_V_2_X_ERR_NONE) {
+				return error_mapping_to_mhu_error_t(err);
+			}
+			chan = 0;
+		}
+	}
+
+	/* Signal the end of transfer.
+	 *   No signal is sent here if the message was perfectly aligned
+	 *   (num_channels - 1 channels were used in the last round); this
+	 *   prevents signalling twice at the end of the transfer.
+	 */
+	if (chan != 0) {
+		err = signal_and_wait_for_clear();
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return error_mapping_to_mhu_error_t(err);
+		}
+	}
+
+	err = mhu_v2_x_close_transfer(dev);
+	return error_mapping_to_mhu_error_t(err);
+}
+
+/*
+ * Public function. See mhu.h
+ *
+ * The basic steps of receiving a message:
+ * 1.	Read the size of the payload from the first channel (channel 0);
+ *	this is the first 4 Bytes of the transfer. Continue with channel 1.
+ * 2.	Receive the payload, read the channels one after the other
+ *	(4 Bytes each). The last available channel is reserved for controlling
+ *	the transfer.
+ *	When the last channel is reached, clear all the channels
+ *	(also sending an acknowledge on the last channel).
+ * 3.	If there is still data to receive, wait for a notification on the last
+ *	channel and jump to step 2 as soon as it arrives. Otherwise, proceed.
+ * 4.	End of transfer.
+ *
+ */
+enum mhu_error_t mhu_receive_data(uint8_t *receive_buffer, size_t *size)
+{
+	enum mhu_v2_x_error_t err;
+	struct mhu_v2_x_dev_t *dev = &MHU1_SEH_DEV;
+	uint32_t num_channels = mhu_v2_x_get_num_channel_implemented(dev);
+	uint32_t chan = 0;
+	uint32_t message_len;
+	uint32_t i;
+	uint32_t *p;
+
+	/* For simplicity, require:
+	 * - the receive_buffer to be 4-byte aligned,
+	 * - the buffer size to be a multiple of 4.
+	 */
+	if (((uintptr_t)receive_buffer & 0x3U) || (*size & 0x3U)) {
+		return MHU_ERR_INVALID_ARG;
+	}
+
+	/* Busy wait for incoming reply */
+	err = wait_for_signal();
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return error_mapping_to_mhu_error_t(err);
+	}
+
+	/* The first word is the length of the actual message */
+	err = mhu_v2_x_channel_receive(dev, chan, &message_len);
+	if (err != MHU_V_2_X_ERR_NONE) {
+		return error_mapping_to_mhu_error_t(err);
+	}
+	chan++;
+
+	if (message_len > *size) {
+		/* Message buffer too small */
+		*size = message_len;
+		return MHU_ERR_BUFFER_TOO_SMALL;
+	}
+
+	p = (uint32_t *)receive_buffer;
+	for (i = 0; i < message_len; i += 4) {
+		err = mhu_v2_x_channel_receive(dev, chan, p++);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return error_mapping_to_mhu_error_t(err);
+		}
+
+		/* Only wait for next transfer if there is still missing data */
+		if (++chan == (num_channels - 1) && (message_len - i) > 4) {
+			/* Busy wait for next transfer */
+			err = clear_and_wait_for_next_signal();
+			if (err != MHU_V_2_X_ERR_NONE) {
+				return error_mapping_to_mhu_error_t(err);
+			}
+			chan = 0;
+		}
+	}
+
+	/* Clear all channels */
+	for (i = 0; i < num_channels; ++i) {
+		err = mhu_v2_x_channel_clear(dev, i);
+		if (err != MHU_V_2_X_ERR_NONE) {
+			return error_mapping_to_mhu_error_t(err);
+		}
+	}
+
+	*size = message_len;
+
+	return MHU_ERR_NONE;
+}
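
A short usage sketch of the wrapper interface above, showing the intended call order; the MHU frame base addresses are passed in by the caller and the payload is arbitrary test data.

#include <stddef.h>
#include <stdint.h>

#include <drivers/arm/mhu.h>

int mhu_wrapper_roundtrip_example(uintptr_t hse_base, uintptr_t seh_base)
{
	/* uint32_t arrays guarantee the 4-byte alignment the wrapper expects. */
	uint32_t tx_words[2] = { 0x12345678U, 0x9abcdef0U };
	uint32_t rx_words[16] = { 0U };
	size_t rx_size = sizeof(rx_words);

	if (mhu_init_sender(hse_base) != MHU_ERR_NONE ||
	    mhu_init_receiver(seh_base) != MHU_ERR_NONE) {
		return -1;
	}

	/* Sends the size word first, then the payload in 4-byte chunks. */
	if (mhu_send_data((const uint8_t *)tx_words, sizeof(tx_words)) !=
	    MHU_ERR_NONE) {
		return -1;
	}

	/* Blocks until the other end signals its notification channel. */
	if (mhu_receive_data((uint8_t *)rx_words, &rx_size) != MHU_ERR_NONE) {
		return -1;
	}

	return 0;
}
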
diff --git a/drivers/arm/rss/rss_comms.c b/drivers/arm/rss/rss_comms.c
new file mode 100644
index 0000000..28a4925
--- /dev/null
+++ b/drivers/arm/rss/rss_comms.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/arm/mhu.h>
+#include <drivers/arm/rss_comms.h>
+#include <initial_attestation.h>
+#include <psa/client.h>
+
+#include <platform_def.h>
+
+#define TYPE_OFFSET	U(16)
+#define TYPE_MASK	(0xFFFFUL << TYPE_OFFSET)
+#define IN_LEN_OFFSET	U(8)
+#define IN_LEN_MASK	(0xFFUL << IN_LEN_OFFSET)
+#define OUT_LEN_OFFSET	U(0)
+#define OUT_LEN_MASK	(0xFFUL << OUT_LEN_OFFSET)
+
+#define PARAM_PACK(type, in_len, out_len)			  \
+	(((((uint32_t)type) << TYPE_OFFSET) & TYPE_MASK)	| \
+	 ((((uint32_t)in_len) << IN_LEN_OFFSET) & IN_LEN_MASK)	| \
+	 ((((uint32_t)out_len) << OUT_LEN_OFFSET) & OUT_LEN_MASK))
+
+#define PARAM_UNPACK_IN_LEN(ctrl_param) \
+	((size_t)(((ctrl_param) & IN_LEN_MASK) >> IN_LEN_OFFSET))
+
+/* Message types */
+struct __packed packed_psa_call_t {
+	uint8_t protocol_ver;
+	uint8_t seq_num;
+	uint16_t client_id;
+	psa_handle_t handle;
+	uint32_t ctrl_param; /* type, in_len, out_len */
+	uint16_t io_size[4];
+};
+
+struct __packed packed_psa_reply_t {
+	uint8_t protocol_ver;
+	uint8_t seq_num;
+	uint16_t client_id;
+	int32_t return_val;
+	uint16_t out_size[4];
+};
+
+/*
+ * In the current implementation the RoT Service request that requires the
+ * biggest message buffer is the RSS_ATTEST_GET_TOKEN. The maximum required
+ * buffer size is calculated based on the platform-specific needs of
+ * this request.
+ */
+#define MAX_REQUEST_PAYLOAD_SIZE	(PSA_INITIAL_ATTEST_CHALLENGE_SIZE_64 \
+					 + PLAT_ATTEST_TOKEN_MAX_SIZE)
+
+/* Buffer to store the messages to be sent/received. */
+static uint8_t message_buf[MAX_REQUEST_PAYLOAD_SIZE] __aligned(4);
+
+static int32_t pack_params(const psa_invec *invecs,
+			   size_t in_len,
+			   uint8_t *buf,
+			   size_t *buf_len)
+{
+	uint32_t i;
+	size_t payload_size = 0U;
+
+	for (i = 0U; i < in_len; ++i) {
+		if (invecs[i].len > *buf_len - payload_size) {
+			return -1;
+		}
+		memcpy(buf + payload_size, invecs[i].base, invecs[i].len);
+		payload_size += invecs[i].len;
+	}
+
+	*buf_len = payload_size;
+	return 0;
+}
+
+static int serialise_message(const struct packed_psa_call_t *msg,
+			     const psa_invec *invecs,
+			     uint8_t *payload_buf,
+			     size_t *payload_len)
+{
+	size_t message_len = 0U;
+	size_t len;
+
+	/* Copy the message header into the payload buffer. */
+	len = sizeof(*msg);
+	if (len > *payload_len) {
+		ERROR("[RSS-COMMS] Message buffer too small.\n");
+		return -1;
+	}
+	memcpy(payload_buf, (const void *)msg, len);
+	message_len += len;
+
+	/* The input data will follow the message header in the payload buffer. */
+	len = *payload_len - message_len;
+	if (pack_params(invecs, PARAM_UNPACK_IN_LEN(msg->ctrl_param),
+			payload_buf + message_len, &len) != 0) {
+		ERROR("[RSS-COMMS] Message buffer too small.\n");
+		return -1;
+	}
+	message_len += len;
+
+	*payload_len = message_len;
+	return 0;
+}
+
+static void unpack_params(const uint8_t *buf,
+			  psa_outvec *outvecs,
+			  size_t out_len)
+{
+	size_t i;
+
+	for (i = 0U; i < out_len; ++i) {
+		memcpy(outvecs[i].base, buf, outvecs[i].len);
+		buf += outvecs[i].len;
+	}
+}
+
+static void deserialise_reply(struct packed_psa_reply_t *reply,
+			      psa_outvec *outvecs,
+			      size_t outlen,
+			      const uint8_t *message,
+			      size_t message_len)
+{
+	uint32_t i;
+
+	memcpy(reply, message, sizeof(*reply));
+
+	/* Outvecs */
+	for (i = 0U; i < outlen; ++i) {
+		outvecs[i].len = reply->out_size[i];
+	}
+
+	unpack_params(message + sizeof(*reply), outvecs, outlen);
+}
+
+psa_status_t psa_call(psa_handle_t handle, int32_t type,
+		      const psa_invec *in_vec, size_t in_len,
+		      psa_outvec *out_vec, size_t out_len)
+{
+	enum mhu_error_t err;
+	static uint32_t seq_num = 1U;
+	struct packed_psa_call_t msg = {
+		.protocol_ver = 0U,
+		.seq_num = seq_num,
+		/* No need to distinguish callers (currently concurrent calls are not supported). */
+		.client_id = 1U,
+		.handle = handle,
+		.ctrl_param = PARAM_PACK(type, in_len, out_len),
+	};
+
+	struct packed_psa_reply_t reply = {0};
+	size_t message_size;
+	uint32_t i;
+
+	/* Fill msg iovec lengths */
+	for (i = 0U; i < in_len; ++i) {
+		msg.io_size[i] = in_vec[i].len;
+	}
+	for (i = 0U; i < out_len; ++i) {
+		msg.io_size[in_len + i] = out_vec[i].len;
+	}
+
+	message_size = sizeof(message_buf);
+	if (serialise_message(&msg, in_vec, message_buf, &message_size)) {
+		/* Local buffer is probably too small. */
+		return PSA_ERROR_INSUFFICIENT_MEMORY;
+	}
+
+	err = mhu_send_data(message_buf, message_size);
+	if (err != MHU_ERR_NONE) {
+		return PSA_ERROR_COMMUNICATION_FAILURE;
+	}
+
+	message_size = sizeof(message_buf);
+#if DEBUG
+	/*
+	 * Poisoning the message buffer (with a known pattern).
+	 * Helps in detecting hypothetical RSS communication bugs.
+	 */
+	memset(message_buf, 0xA5, message_size);
+#endif
+	err = mhu_receive_data(message_buf, &message_size);
+	if (err != MHU_ERR_NONE) {
+		return PSA_ERROR_COMMUNICATION_FAILURE;
+	}
+
+	deserialise_reply(&reply, out_vec, out_len, message_buf, message_size);
+
+	seq_num++;
+
+	VERBOSE("[RSS-COMMS] Received reply\n");
+	VERBOSE("protocol_ver=%d\n", reply.protocol_ver);
+	VERBOSE("seq_num=%d\n", reply.seq_num);
+	VERBOSE("client_id=%d\n", reply.client_id);
+	VERBOSE("return_val=%d\n", reply.return_val);
+	VERBOSE("out_size[0]=%d\n", reply.out_size[0]);
+
+	return reply.return_val;
+}
+
+int rss_comms_init(uintptr_t mhu_sender_base, uintptr_t mhu_receiver_base)
+{
+	enum mhu_error_t err;
+
+	err = mhu_init_sender(mhu_sender_base);
+	if (err != MHU_ERR_NONE) {
+		ERROR("[RSS-COMMS] Host to RSS MHU driver initialization failed: %d\n", err);
+		return -1;
+	}
+
+	err = mhu_init_receiver(mhu_receiver_base);
+	if (err != MHU_ERR_NONE) {
+		ERROR("[RSS-COMMS] RSS to Host MHU driver initialization failed: %d\n", err);
+		return -1;
+	}
+
+	return 0;
+}
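
For clarity, a small sketch of the ctrl_param layout produced by PARAM_PACK() above: type in bits [31:16], in_len in bits [15:8], out_len in bits [7:0].

#include <stdint.h>

/* Packs type=3, in_len=2, out_len=1 the same way psa_call() does. */
static inline uint32_t rss_comms_ctrl_param_example(void)
{
	uint32_t type = 3U, in_len = 2U, out_len = 1U;

	/* (3 << 16) | (2 << 8) | (1 << 0) == 0x00030201 */
	return (type << 16) | (in_len << 8) | (out_len << 0);
}
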
diff --git a/drivers/arm/smmu/smmu_v3.c b/drivers/arm/smmu/smmu_v3.c
index 45f6df9..6c6f978 100644
--- a/drivers/arm/smmu/smmu_v3.c
+++ b/drivers/arm/smmu/smmu_v3.c
@@ -14,7 +14,7 @@
 /* SMMU poll number of retries */
 #define SMMU_POLL_TIMEOUT_US	U(1000)
 
-static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
+static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
 				uint32_t value)
 {
 	uint32_t reg_val;
@@ -155,3 +155,28 @@
 	return smmuv3_poll(smmu_base + SMMU_S_INIT,
 				SMMU_S_INIT_INV_ALL, 0U);
 }
+
+int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
+{
+	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
+	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
+		return -1;
+	}
+
+	/*
+	 * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored then,
+	 * so simply preserve their value.
+	 */
+	mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
+	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
+		return -1;
+	}
+
+	/* Disable the SMMU to engage the GBPA fields previously configured. */
+	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
+	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
+		return -1;
+	}
+
+	return 0;
+}
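
A hedged sketch of how platform code might use the new helper, for instance to block non-secure DMA before handing over sensitive memory; PLAT_SMMUV3_BASE is a hypothetical constant, not defined by this patch.

#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>

#define PLAT_SMMUV3_BASE	0x2b400000UL	/* placeholder address */

static void plat_ns_smmu_abort_all_example(void)
{
	/* Make the SMMU abort all incoming non-secure transactions. */
	if (smmuv3_ns_set_abort_all(PLAT_SMMUV3_BASE) != 0) {
		ERROR("Failed to set SMMUv3 global bypass attribute to abort\n");
		panic();
	}
}
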
diff --git a/drivers/measured_boot/rss/rss_measured_boot.c b/drivers/measured_boot/rss/rss_measured_boot.c
new file mode 100644
index 0000000..fe2baf0
--- /dev/null
+++ b/drivers/measured_boot/rss/rss_measured_boot.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+#include <drivers/measured_boot/rss/rss_measured_boot.h>
+#include <lib/psa/measured_boot.h>
+#include <psa/crypto_types.h>
+#include <psa/crypto_values.h>
+#include <psa/error.h>
+
+#define MBOOT_ALG_SHA512 0
+#define MBOOT_ALG_SHA384 1
+#define MBOOT_ALG_SHA256 2
+
+#if MBOOT_ALG_ID == MBOOT_ALG_SHA512
+#define	CRYPTO_MD_ID		CRYPTO_MD_SHA512
+#define PSA_CRYPTO_MD_ID	PSA_ALG_SHA_512
+#elif MBOOT_ALG_ID == MBOOT_ALG_SHA384
+#define	CRYPTO_MD_ID		CRYPTO_MD_SHA384
+#define PSA_CRYPTO_MD_ID	PSA_ALG_SHA_384
+#elif MBOOT_ALG_ID == MBOOT_ALG_SHA256
+#define	CRYPTO_MD_ID		CRYPTO_MD_SHA256
+#define PSA_CRYPTO_MD_ID	PSA_ALG_SHA_256
+#else
+#  error Invalid Measured Boot algorithm.
+#endif /* MBOOT_ALG_ID */
+
+/* Pointer to struct rss_mboot_metadata */
+static struct rss_mboot_metadata *plat_metadata_ptr;
+
+/* Function definitions */
+void rss_measured_boot_init(void)
+{
+	/* At this point it is expected that communication channel over MHU
+	 * is already initialised by platform init.
+	 */
+
+	/* Get pointer to platform's struct rss_mboot_metadata structure */
+	plat_metadata_ptr = plat_rss_mboot_get_metadata();
+	assert(plat_metadata_ptr != NULL);
+}
+
+int rss_mboot_measure_and_record(uintptr_t data_base, uint32_t data_size,
+				 uint32_t data_id)
+{
+	unsigned char hash_data[CRYPTO_MD_MAX_SIZE];
+	int rc;
+	psa_status_t ret;
+	const struct rss_mboot_metadata *metadata_ptr = plat_metadata_ptr;
+
+	/* Get the metadata associated with this image. */
+	while ((metadata_ptr->id != RSS_MBOOT_INVALID_ID) &&
+		(metadata_ptr->id != data_id)) {
+		metadata_ptr++;
+	}
+
+	/* If image is not present in metadata array then skip */
+	if (metadata_ptr->id == RSS_MBOOT_INVALID_ID) {
+		return 0;
+	}
+
+	/* Calculate hash */
+	rc = crypto_mod_calc_hash(CRYPTO_MD_ID,
+				  (void *)data_base, data_size, hash_data);
+	if (rc != 0) {
+		return rc;
+	}
+
+	ret = rss_measured_boot_extend_measurement(
+						metadata_ptr->slot,
+						metadata_ptr->signer_id,
+						metadata_ptr->signer_id_size,
+						metadata_ptr->version,
+						metadata_ptr->version_size,
+						PSA_CRYPTO_MD_ID,
+						metadata_ptr->sw_type,
+						metadata_ptr->sw_type_size,
+						hash_data,
+						MBOOT_DIGEST_SIZE,
+						metadata_ptr->lock_measurement);
+	if (ret != PSA_SUCCESS) {
+		return ret;
+	}
+
+	return 0;
+}
+
+int rss_mboot_set_signer_id(unsigned int img_id,
+			    const void *pk_ptr,
+			    size_t pk_len)
+{
+	unsigned char hash_data[CRYPTO_MD_MAX_SIZE];
+	struct rss_mboot_metadata *metadata_ptr = plat_metadata_ptr;
+	int rc;
+
+	/* Get the metadata associated with this image. */
+	while ((metadata_ptr->id != RSS_MBOOT_INVALID_ID) &&
+		(metadata_ptr->id != img_id)) {
+		metadata_ptr++;
+	}
+
+	/* If image is not present in metadata array then skip */
+	if (metadata_ptr->id == RSS_MBOOT_INVALID_ID) {
+		return 0;
+	}
+
+	/* Calculate public key hash */
+	rc = crypto_mod_calc_hash(CRYPTO_MD_ID, (void *)pk_ptr,
+				  pk_len, hash_data);
+	if (rc != 0) {
+		return rc;
+	}
+
+	/* Update metadata struct with the received signer_id */
+	(void)memcpy(metadata_ptr->signer_id, hash_data, MBOOT_DIGEST_SIZE);
+	metadata_ptr->signer_id_size = MBOOT_DIGEST_SIZE;
+
+	return 0;
+}
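
For context, a heavily hedged sketch of the platform-side metadata table this driver walks. The field names follow the accesses made above, but the exact struct layout lives in rss_measured_boot.h, and the slot number and sw_type string are made-up examples rather than values from this patch.

#include <stdbool.h>

#include <common/tbbr/tbbr_img_def.h>
#include <drivers/measured_boot/rss/rss_measured_boot.h>

/* One entry per measured image, terminated by RSS_MBOOT_INVALID_ID. */
static struct rss_mboot_metadata plat_rss_mboot_metadata[] = {
	{
		.id = BL31_IMAGE_ID,
		.slot = 9U,			/* assumed measurement slot */
		.sw_type = "BL_31",		/* assumed type string */
		.sw_type_size = sizeof("BL_31"),
		.lock_measurement = true,
	},
	{
		.id = RSS_MBOOT_INVALID_ID	/* terminator */
	}
};

struct rss_mboot_metadata *plat_rss_mboot_get_metadata(void)
{
	return plat_rss_mboot_metadata;
}

The signer_id field is intentionally left empty here; rss_mboot_set_signer_id() fills it in with the hash of the image's public key at authentication time.
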
diff --git a/drivers/measured_boot/rss/rss_measured_boot.mk b/drivers/measured_boot/rss/rss_measured_boot.mk
new file mode 100644
index 0000000..01545af
--- /dev/null
+++ b/drivers/measured_boot/rss/rss_measured_boot.mk
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Hash algorithm for measured boot
+# SHA-256 (or stronger) is required.
+# TODO: The measurement algorithm incorrectly suggests that the TPM backend
+#       is used which may not be the case. It is currently being worked on and
+#       soon TPM_HASH_ALG will be replaced by a more generic name.
+TPM_HASH_ALG			:=	sha256
+
+ifeq (${TPM_HASH_ALG}, sha512)
+    MBOOT_ALG_ID		:=	MBOOT_ALG_SHA512
+    MBOOT_DIGEST_SIZE		:=	64U
+else ifeq (${TPM_HASH_ALG}, sha384)
+    MBOOT_ALG_ID		:=	MBOOT_ALG_SHA384
+    MBOOT_DIGEST_SIZE		:=	48U
+else
+    MBOOT_ALG_ID		:=	MBOOT_ALG_SHA256
+    MBOOT_DIGEST_SIZE		:=	32U
+endif #TPM_HASH_ALG
+
+# Set definitions for Measured Boot driver.
+$(eval $(call add_defines,\
+    $(sort \
+        MBOOT_ALG_ID \
+        MBOOT_DIGEST_SIZE \
+        MBOOT_RSS_BACKEND \
+)))
+
+MEASURED_BOOT_SRC_DIR	:= drivers/measured_boot/rss/
+
+MEASURED_BOOT_SOURCES	+= ${MEASURED_BOOT_SRC_DIR}rss_measured_boot.c
diff --git a/drivers/partition/partition.c b/drivers/partition/partition.c
index 7706f88..c84816f 100644
--- a/drivers/partition/partition.c
+++ b/drivers/partition/partition.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <string.h>
 
 #include <common/debug.h>
+#include <common/tf_crc32.h>
 #include <drivers/io/io_storage.h>
 #include <drivers/partition/efi.h>
 #include <drivers/partition/partition.h>
@@ -76,7 +77,7 @@
 }
 
 /*
- * Load GPT header and check the GPT signature.
+ * Load GPT header and check the GPT signature and header CRC.
  * If partition numbers could be found, check & update it.
  */
 static int load_gpt_header(uintptr_t image_handle)
@@ -84,6 +85,7 @@
 	gpt_header_t header;
 	size_t bytes_read;
 	int result;
+	uint32_t header_crc, calc_crc;
 
 	result = io_seek(image_handle, IO_SEEK_SET, GPT_HEADER_OFFSET);
 	if (result != 0) {
@@ -99,6 +101,23 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * UEFI Spec 2.8 March 2019 Page 119: HeaderCRC32 value is
+	 * computed by setting this field to 0, and computing the
+	 * 32-bit CRC for HeaderSize bytes.
+	 */
+	header_crc = header.header_crc;
+	header.header_crc = 0U;
+
+	calc_crc = tf_crc32(0U, (uint8_t *)&header, DEFAULT_GPT_HEADER_SIZE);
+	if (header_crc != calc_crc) {
+		ERROR("Invalid GPT Header CRC: Expected 0x%x but got 0x%x.\n",
+		      header_crc, calc_crc);
+		return -EINVAL;
+	}
+
+	header.header_crc = header_crc;
+
 	/* partition numbers can't exceed PLAT_PARTITION_MAX_ENTRIES */
 	list.entry_count = header.list_num;
 	if (list.entry_count > PLAT_PARTITION_MAX_ENTRIES) {
diff --git a/drivers/st/spi/stm32_qspi.c b/drivers/st/spi/stm32_qspi.c
index d3c26d9..73aa9ac 100644
--- a/drivers/st/spi/stm32_qspi.c
+++ b/drivers/st/spi/stm32_qspi.c
@@ -1,13 +1,10 @@
 /*
- * Copyright (c) 2019-2021, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
  *
  * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
  */
 
 #include <inttypes.h>
-#include <libfdt.h>
-
-#include <platform_def.h>
 
 #include <common/debug.h>
 #include <common/fdt_wrappers.h>
@@ -19,6 +16,9 @@
 #include <drivers/st/stm32mp_reset.h>
 #include <lib/mmio.h>
 #include <lib/utils_def.h>
+#include <libfdt.h>
+
+#include <platform_def.h>
 
 /* Timeout for device interface reset */
 #define TIMEOUT_US_1_MS			1000U
@@ -139,10 +139,6 @@
 	int ret = 0;
 	uint64_t timeout;
 
-	if (op->data.nbytes == 0U) {
-		return stm32_qspi_wait_for_not_busy();
-	}
-
 	timeout = timeout_init_us(QSPI_CMD_TIMEOUT_US);
 	while ((mmio_read_32(qspi_base() + QSPI_SR) & QSPI_SR_TCF) == 0U) {
 		if (timeout_elapsed(timeout)) {
@@ -163,6 +159,10 @@
 	/* Clear flags */
 	mmio_write_32(qspi_base() + QSPI_FCR, QSPI_FCR_CTCF | QSPI_FCR_CTEF);
 
+	if (ret == 0) {
+		ret = stm32_qspi_wait_for_not_busy();
+	}
+
 	return ret;
 }
 
@@ -251,11 +251,6 @@
 		op->dummy.buswidth, op->data.buswidth,
 		op->addr.val, op->data.nbytes);
 
-	ret = stm32_qspi_wait_for_not_busy();
-	if (ret != 0) {
-		return ret;
-	}
-
 	addr_max = op->addr.val + op->data.nbytes + 1U;
 
 	if ((op->data.dir == SPI_MEM_DATA_IN) && (op->data.nbytes != 0U)) {
diff --git a/fdts/stm32mp13-fw-config.dtsi b/fdts/stm32mp13-fw-config.dtsi
index dc8ca1b..28f7086 100644
--- a/fdts/stm32mp13-fw-config.dtsi
+++ b/fdts/stm32mp13-fw-config.dtsi
@@ -13,11 +13,9 @@
 #endif
 
 #define DDR_NS_BASE	STM32MP_DDR_BASE
-#define DDR_SEC_SIZE	0x01e00000
+#define DDR_SEC_SIZE	0x02000000
 #define DDR_SEC_BASE	(STM32MP_DDR_BASE + (DDR_SIZE - DDR_SEC_SIZE))
-#define DDR_SHARE_SIZE	0x00200000
-#define DDR_SHARE_BASE	(DDR_SEC_BASE - DDR_SHARE_SIZE)
-#define DDR_NS_SIZE	(DDR_SHARE_BASE - DDR_NS_BASE)
+#define DDR_NS_SIZE	(DDR_SEC_BASE - DDR_NS_BASE)
 
 /dts-v1/;
 
@@ -48,8 +46,6 @@
 		compatible = "st,mem-firewall";
 		memory-ranges = <
 			DDR_NS_BASE DDR_NS_SIZE TZC_REGION_S_NONE TZC_REGION_NSEC_ALL_ACCESS_RDWR
-			DDR_SHARE_BASE DDR_SHARE_SIZE TZC_REGION_S_NONE
-			TZC_REGION_ACCESS_RDWR(STM32MP1_TZC_A7_ID)
 			DDR_SEC_BASE DDR_SEC_SIZE TZC_REGION_S_RDWR 0>;
 	};
 };
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index bbbc77a..dfb9fe4 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -488,7 +488,8 @@
 #define SCR_HXEn_BIT		(UL(1) << 38)
 #define SCR_ENTP2_SHIFT		U(41)
 #define SCR_ENTP2_BIT		(UL(1) << SCR_ENTP2_SHIFT)
-#define SCR_AMVOFFEN_BIT	(UL(1) << 35)
+#define SCR_AMVOFFEN_SHIFT	U(35)
+#define SCR_AMVOFFEN_BIT	(UL(1) << SCR_AMVOFFEN_SHIFT)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)
@@ -1222,7 +1223,8 @@
 #define ERXMISC0_EL1		S3_0_C5_C5_0
 #define ERXMISC1_EL1		S3_0_C5_C5_1
 
-#define ERXCTLR_ED_BIT		(U(1) << 0)
+#define ERXCTLR_ED_SHIFT	U(0)
+#define ERXCTLR_ED_BIT		(U(1) << ERXCTLR_ED_SHIFT)
 #define ERXCTLR_UE_BIT		(U(1) << 4)
 
 #define ERXPFGCTL_UC_BIT	(U(1) << 1)
diff --git a/include/common/uuid.h b/include/common/uuid.h
index 5651d0d..c8dd681 100644
--- a/include/common/uuid.h
+++ b/include/common/uuid.h
@@ -1,15 +1,18 @@
 /*
- * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef UUID_H
-#define UUID_H
+#ifndef UUID_COMMON_H
+#define UUID_COMMON_H
 
 #define UUID_BYTES_LENGTH	16
 #define UUID_STRING_LENGTH	36
 
 int read_uuid(uint8_t *dest, char *uuid);
+bool uuid_match(uint32_t *uuid1, uint32_t *uuid2);
+void copy_uuid(uint32_t *to_uuid, uint32_t *from_uuid);
+bool is_null_uuid(uint32_t *uuid);
 
-#endif /* UUID_H */
+#endif /* UUID_COMMON_H */
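
A minimal usage sketch of the helpers declared above, assuming the caller keeps UUIDs as arrays of four 32-bit words (as the prototypes imply); endpoint_matches() is a hypothetical helper, not part of this patch.

    #include <stdbool.h>
    #include <stdint.h>

    #include <common/uuid.h>

    /* Hypothetical matcher: an all-zero request UUID means "match any". */
    static bool endpoint_matches(uint32_t *ep_uuid, uint32_t *req_uuid)
    {
        uint32_t selected[4];

        if (is_null_uuid(req_uuid) || uuid_match(req_uuid, ep_uuid)) {
            copy_uuid(selected, ep_uuid);   /* remember the chosen UUID */
            return true;
        }
        return false;
    }
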
diff --git a/include/drivers/arm/mhu.h b/include/drivers/arm/mhu.h
new file mode 100644
index 0000000..7745bd9
--- /dev/null
+++ b/include/drivers/arm/mhu.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MHU_H
+#define MHU_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * Generic MHU error enumeration types.
+ */
+enum mhu_error_t {
+	MHU_ERR_NONE			=  0,
+	MHU_ERR_NOT_INIT		= -1,
+	MHU_ERR_ALREADY_INIT		= -2,
+	MHU_ERR_UNSUPPORTED_VERSION	= -3,
+	MHU_ERR_UNSUPPORTED		= -4,
+	MHU_ERR_INVALID_ARG		= -5,
+	MHU_ERR_BUFFER_TOO_SMALL	= -6,
+	MHU_ERR_GENERAL			= -7,
+};
+
+/**
+ * Initializes sender MHU.
+ *
+ * mhu_sender_base	Base address of sender MHU.
+ *
+ * Returns mhu_error_t error code.
+ *
+ * This function must be called before mhu_send_data().
+ */
+enum mhu_error_t mhu_init_sender(uintptr_t mhu_sender_base);
+
+
+/**
+ * Initializes receiver MHU.
+ *
+ * mhu_receiver_base	Base address of receiver MHU.
+ *
+ * Returns mhu_error_t error code.
+ *
+ * This function must be called before mhu_receive_data().
+ */
+enum mhu_error_t mhu_init_receiver(uintptr_t mhu_receiver_base);
+
+/**
+ * Sends data over MHU.
+ *
+ * send_buffer		Pointer to buffer containing the data to be transmitted.
+ * size			Size of the data to be transmitted in bytes.
+ *
+ * Returns mhu_error_t error code.
+ *
+ * The send_buffer must be 4-byte aligned and its length must be at least
+ * (4 - (size % 4)) bytes bigger than the data size to prevent buffer
+ * over-reading.
+ */
+enum mhu_error_t mhu_send_data(const uint8_t *send_buffer, size_t size);
+
+/**
+ * Receives data from MHU.
+ *
+ * receive_buffer	Pointer to the buffer where the received data is stored.
+ * size			As input, the size of the receive_buffer; as output,
+ *			the number of bytes received. As a limitation, the
+ *			size of the buffer must be a multiple of 4.
+ *
+ * Returns mhu_error_t error code.
+ *
+ * The receive_buffer must be 4-byte aligned and its length must be a
+ * multiple of 4.
+ */
+enum mhu_error_t mhu_receive_data(uint8_t *receive_buffer, size_t *size);
+
+#endif /* MHU_H */
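
A sender-side usage sketch illustrating the alignment and headroom requirement documented above. PLAT_MHU_SENDER_BASE is a hypothetical address used for illustration; real values come from the platform.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #include <drivers/arm/mhu.h>

    #define PLAT_MHU_SENDER_BASE    0x2b900000UL    /* hypothetical */

    static enum mhu_error_t send_msg(const uint8_t *msg, size_t len)
    {
        /* 4-byte aligned staging buffer with at least 4 bytes of headroom,
         * so the driver may read up to the next 4-byte boundary past 'len'. */
        uint32_t buf[32];
        enum mhu_error_t err;

        if (len > sizeof(buf) - sizeof(uint32_t)) {
            return MHU_ERR_BUFFER_TOO_SMALL;
        }
        (void)memcpy(buf, msg, len);

        err = mhu_init_sender(PLAT_MHU_SENDER_BASE);
        if ((err != MHU_ERR_NONE) && (err != MHU_ERR_ALREADY_INIT)) {
            return err;
        }

        return mhu_send_data((const uint8_t *)buf, len);
    }
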
diff --git a/include/drivers/arm/rss_comms.h b/include/drivers/arm/rss_comms.h
new file mode 100644
index 0000000..b96c79f
--- /dev/null
+++ b/include/drivers/arm/rss_comms.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef RSS_COMMS_H
+#define RSS_COMMS_H
+
+#include <stdint.h>
+
+int rss_comms_init(uintptr_t mhu_sender_base, uintptr_t mhu_receiver_base);
+
+#endif /* RSS_COMMS_H */
diff --git a/include/drivers/arm/smmu_v3.h b/include/drivers/arm/smmu_v3.h
index e60c754..37da56f 100644
--- a/include/drivers/arm/smmu_v3.h
+++ b/include/drivers/arm/smmu_v3.h
@@ -12,6 +12,8 @@
 #include <platform_def.h>
 
 /* SMMUv3 register offsets from device base */
+#define SMMU_CR0	U(0x0020)
+#define SMMU_CR0ACK	U(0x0024)
 #define SMMU_GBPA	U(0x0044)
 #define SMMU_S_IDR1	U(0x8004)
 #define SMMU_S_INIT	U(0x803c)
@@ -37,6 +39,9 @@
 
 #endif /* ENABLE_RME */
 
+/* SMMU_CR0 and SMMU_CR0ACK register fields */
+#define SMMU_CR0_SMMUEN			(1UL << 0)
+
 /* SMMU_GBPA register fields */
 #define SMMU_GBPA_UPDATE		(1UL << 31)
 #define SMMU_GBPA_ABORT			(1UL << 20)
@@ -61,4 +66,6 @@
 int smmuv3_init(uintptr_t smmu_base);
 int smmuv3_security_init(uintptr_t smmu_base);
 
+int smmuv3_ns_set_abort_all(uintptr_t smmu_base);
+
 #endif /* SMMU_V3_H */
diff --git a/include/drivers/measured_boot/rss/rss_measured_boot.h b/include/drivers/measured_boot/rss/rss_measured_boot.h
new file mode 100644
index 0000000..fe88576
--- /dev/null
+++ b/include/drivers/measured_boot/rss/rss_measured_boot.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RSS_MEASURED_BOOT_H
+#define RSS_MEASURED_BOOT_H
+
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <measured_boot.h>
+
+#define RSS_MBOOT_INVALID_ID	UINT32_MAX
+
+/*
+ * Each boot measurement has some metadata (i.e. a string) that identifies
+ * what was measured and how. The sw_type field of the rss_mboot_metadata
+ * structure represents the role of the software component that was measured.
+ * The below macros define strings suitable for the sw_type.
+ * The key point is to choose meaningful strings so that, when the attestation
+ * token is verified, the different components can be identified.
+ */
+#define RSS_MBOOT_BL2_STRING		"BL_2"
+#define RSS_MBOOT_BL31_STRING		"SECURE_RT_EL3"
+#define RSS_MBOOT_HW_CONFIG_STRING	"HW_CONFIG"
+#define RSS_MBOOT_FW_CONFIG_STRING	"FW_CONFIG"
+#define RSS_MBOOT_TB_FW_CONFIG_STRING	"TB_FW_CONFIG"
+#define RSS_MBOOT_SOC_FW_CONFIG_STRING	"SOC_FW_CONFIG"
+#define RSS_MBOOT_RMM_STRING		"RMM"
+
+
+struct rss_mboot_metadata {
+	unsigned int id;
+	uint8_t slot;
+	uint8_t signer_id[SIGNER_ID_MAX_SIZE];
+	size_t  signer_id_size;
+	uint8_t version[VERSION_MAX_SIZE];
+	size_t  version_size;
+	uint8_t sw_type[SW_TYPE_MAX_SIZE];
+	size_t  sw_type_size;
+	bool    lock_measurement;
+};
+
+/* Functions' declarations */
+void rss_measured_boot_init(void);
+struct rss_mboot_metadata *plat_rss_mboot_get_metadata(void);
+int rss_mboot_measure_and_record(uintptr_t data_base, uint32_t data_size,
+				 uint32_t data_id);
+
+/* TODO: These metadata are currently not available during TF-A boot */
+int rss_mboot_set_signer_id(unsigned int img_id, const void *pk_ptr, size_t pk_len);
+
+#endif /* RSS_MEASURED_BOOT_H */
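
To illustrate how a platform is expected to provide the metadata, a hypothetical table and accessor are sketched below. The slot number, the use of BL31_IMAGE_ID and the RSS_MBOOT_INVALID_ID terminator are illustrative assumptions, not requirements stated by this header.

    #include <common/bl_common.h>   /* standard image IDs such as BL31_IMAGE_ID */
    #include <drivers/measured_boot/rss/rss_measured_boot.h>

    static struct rss_mboot_metadata plat_rss_mboot_metadata[] = {
        {
            .id = BL31_IMAGE_ID,
            .slot = 9U,                             /* example slot */
            .sw_type = RSS_MBOOT_BL31_STRING,
            .sw_type_size = sizeof(RSS_MBOOT_BL31_STRING),
            .lock_measurement = true,
        },
        { .id = RSS_MBOOT_INVALID_ID }              /* terminator */
    };

    struct rss_mboot_metadata *plat_rss_mboot_get_metadata(void)
    {
        return plat_rss_mboot_metadata;
    }
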
diff --git a/include/drivers/partition/partition.h b/include/drivers/partition/partition.h
index b292ec7..11e5acf 100644
--- a/include/drivers/partition/partition.h
+++ b/include/drivers/partition/partition.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -29,6 +29,8 @@
 
 #define LEGACY_PARTITION_BLOCK_SIZE	512
 
+#define DEFAULT_GPT_HEADER_SIZE 	92
+
 typedef struct partition_entry {
 	uint64_t		start;
 	uint64_t		length;
diff --git a/include/lib/cpus/aarch64/cortex_hunter.h b/include/lib/cpus/aarch64/cortex_hunter.h
index 8b59fd9..24bd217 100644
--- a/include/lib/cpus/aarch64/cortex_hunter.h
+++ b/include/lib/cpus/aarch64/cortex_hunter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_HUNTER_MIDR					U(0x410FD810)
 
+/* Cortex Hunter loop count for CVE-2022-23960 mitigation */
+#define CORTEX_HUNTER_BHB_LOOP_COUNT				U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_makalu.h b/include/lib/cpus/aarch64/cortex_makalu.h
index 4e0dc86..ee59657 100644
--- a/include/lib/cpus/aarch64/cortex_makalu.h
+++ b/include/lib/cpus/aarch64/cortex_makalu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_MAKALU_MIDR					U(0x410FD4D0)
 
+/* Cortex Makalu loop count for CVE-2022-23960 mitigation */
+#define CORTEX_MAKALU_BHB_LOOP_COUNT				U(38)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h b/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
index a0d788e..9ed5ee3 100644
--- a/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
+++ b/include/lib/cpus/aarch64/cortex_makalu_elp_arm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define CORTEX_MAKALU_ELP_ARM_MIDR				U(0x410FD4E0)
 
+/* Cortex Makalu ELP loop count for CVE-2022-23960 mitigation */
+#define CORTEX_MAKALU_ELP_ARM_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/cortex_x1.h b/include/lib/cpus/aarch64/cortex_x1.h
new file mode 100644
index 0000000..e3661a8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x1.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X1_H
+#define CORTEX_X1_H
+
+/* Cortex-X1 MIDR for r1p0 */
+#define CORTEX_X1_MIDR			U(0x411fd440)
+
+/* Cortex-X1 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X1_BHB_LOOP_COUNT	U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_CPUECTLR_EL1		S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_ACTLR2_EL1		S3_0_C15_C1_1
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X1_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define CORTEX_X1_CORE_PWRDN_EN_MASK	U(0x1)
+
+#endif /* CORTEX_X1_H */
diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h
index 0969acf..577de61 100644
--- a/include/lib/cpus/aarch64/dsu_def.h
+++ b/include/lib/cpus/aarch64/dsu_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -32,6 +32,7 @@
 #define CLUSTERACTLR_EL1	S3_0_C15_C3_3
 
 #define CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING	(ULL(1) << 15)
+#define CLUSTERACTLR_EL1_DISABLE_SCLK_GATING	(ULL(3) << 15)
 
 /********************************************************************
  * Masks applied for DSU errata workarounds
diff --git a/include/lib/cpus/aarch64/neoverse_demeter.h b/include/lib/cpus/aarch64/neoverse_demeter.h
index 230ed66..f1afae7 100644
--- a/include/lib/cpus/aarch64/neoverse_demeter.h
+++ b/include/lib/cpus/aarch64/neoverse_demeter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,6 +9,9 @@
 
 #define NEOVERSE_DEMETER_MIDR				U(0x410FD4F0)
 
+/* Neoverse Demeter loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_DEMETER_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions
  ******************************************************************************/
diff --git a/include/lib/cpus/aarch64/neoverse_poseidon.h b/include/lib/cpus/aarch64/neoverse_poseidon.h
index 0a8b1d1..798ecd1 100644
--- a/include/lib/cpus/aarch64/neoverse_poseidon.h
+++ b/include/lib/cpus/aarch64/neoverse_poseidon.h
@@ -10,6 +10,9 @@
 
 #define NEOVERSE_POSEIDON_MIDR                      		U(0x410FD830)
 
+/* Neoverse Poseidon loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_POSEIDON_BHB_LOOP_COUNT			U(132)
+
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
  ******************************************************************************/
diff --git a/include/lib/psa/initial_attestation.h b/include/lib/psa/initial_attestation.h
new file mode 100644
index 0000000..93169f0
--- /dev/null
+++ b/include/lib/psa/initial_attestation.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_INITIAL_ATTESTATION_H
+#define PSA_INITIAL_ATTESTATION_H
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "psa/error.h"
+
+/*
+ * Initial attestation API version is: 1.0.0
+ */
+#define PSA_INITIAL_ATTEST_API_VERSION_MAJOR	(1)
+#define PSA_INITIAL_ATTEST_API_VERSION_MINOR	(0)
+
+/* The allowed size of input challenge in bytes. */
+#define PSA_INITIAL_ATTEST_CHALLENGE_SIZE_32	32U
+#define PSA_INITIAL_ATTEST_CHALLENGE_SIZE_48	48U
+#define PSA_INITIAL_ATTEST_CHALLENGE_SIZE_64	64U
+
+/* Initial Attestation message types that distinguish Attest services. */
+#define RSS_ATTEST_GET_TOKEN		1001U
+#define RSS_ATTEST_GET_TOKEN_SIZE	1002U
+#define RSS_ATTEST_GET_DELEGATED_KEY	1003U
+
+/**
+ * Get the platform attestation token.
+ *
+ * auth_challenge	Pointer to buffer where challenge input is stored. This
+ *			must be the hash of the public part of the delegated
+ *			attestation key.
+ * challenge_size	Size of challenge object in bytes.
+ * token_buf		Pointer to the buffer where attestation token will be
+ *			stored.
+ * token_buf_size	Size of allocated buffer for token, in bytes.
+ * token_size		Size of the token that has been returned, in bytes.
+ *
+ * Returns error code as specified in psa_status_t.
+ */
+psa_status_t
+psa_initial_attest_get_token(const uint8_t *auth_challenge,
+			     size_t         challenge_size,
+			     uint8_t       *token_buf,
+			     size_t         token_buf_size,
+			     size_t        *token_size);
+
+#endif /* PSA_INITIAL_ATTESTATION_H */
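
A caller-side sketch, assuming include/lib/psa is on the include path. The all-zero challenge is a placeholder for illustration; per the comment above, the real challenge is the hash of the public delegated attestation key.

    #include <stddef.h>
    #include <stdint.h>

    #include <initial_attestation.h>

    static psa_status_t get_token_example(uint8_t *token_buf,
                                          size_t token_buf_size,
                                          size_t *token_size)
    {
        uint8_t challenge[PSA_INITIAL_ATTEST_CHALLENGE_SIZE_32] = {0};

        return psa_initial_attest_get_token(challenge, sizeof(challenge),
                                            token_buf, token_buf_size,
                                            token_size);
    }
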
diff --git a/include/lib/psa/measured_boot.h b/include/lib/psa/measured_boot.h
new file mode 100644
index 0000000..bdb79d5
--- /dev/null
+++ b/include/lib/psa/measured_boot.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_MEASURED_BOOT_H
+#define PSA_MEASURED_BOOT_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "psa/error.h"
+
+/* Minimum measurement value size that can be requested to store */
+#define MEASUREMENT_VALUE_MIN_SIZE	32U
+/* Maximum measurement value size that can be requested to store */
+#define MEASUREMENT_VALUE_MAX_SIZE	64U
+/* Minimum signer id size that can be requested to store */
+#define SIGNER_ID_MIN_SIZE		MEASUREMENT_VALUE_MIN_SIZE
+/* Maximum signer id size that can be requested to store */
+#define SIGNER_ID_MAX_SIZE		MEASUREMENT_VALUE_MAX_SIZE
+/* The theoretical maximum image version is: "255.255.65535\0" */
+#define VERSION_MAX_SIZE		14U
+/* Example sw_type: "BL_2, BL_33, etc." */
+#define SW_TYPE_MAX_SIZE		20U
+#define NUM_OF_MEASUREMENT_SLOTS	32U
+
+
+/**
+ * Extends and stores a measurement to the requested slot.
+ *
+ * index			Slot number in which measurement is to be stored
+ * signer_id			Pointer to signer_id buffer.
+ * signer_id_size		Size of the signer_id buffer in bytes.
+ * version			Pointer to version buffer.
+ * version_size			Size of the version buffer in bytes.
+ * measurement_algo		Algorithm identifier used for measurement.
+ * sw_type			Pointer to sw_type buffer.
+ * sw_type_size			Size of the sw_type buffer in bytes.
+ * measurement_value		Pointer to measurement_value buffer.
+ * measurement_value_size	Size of the measurement_value buffer in bytes.
+ * lock_measurement		Boolean flag requesting whether the measurement
+ *				is to be locked.
+ *
+ * PSA_SUCCESS:
+ *	- Success.
+ * PSA_ERROR_INVALID_ARGUMENT:
+ *	- The size of any argument is invalid OR
+ *	- Input Measurement value is NULL OR
+ *	- Input Signer ID is NULL OR
+ *	- Requested slot index is invalid.
+ * PSA_ERROR_BAD_STATE:
+ *	- Request to lock, when slot is already locked.
+ * PSA_ERROR_NOT_PERMITTED:
+ *	- When the requested slot is not accessible to the caller.
+ */
+
+/* This is not a standard PSA API, just an extension; therefore the 'rss_'
+ * prefix is used rather than the usual 'psa_'.
+ */
+psa_status_t
+rss_measured_boot_extend_measurement(uint8_t index,
+				     const uint8_t *signer_id,
+				     size_t signer_id_size,
+				     const uint8_t *version,
+				     size_t version_size,
+				     uint32_t measurement_algo,
+				     const uint8_t *sw_type,
+				     size_t sw_type_size,
+				     const uint8_t *measurement_value,
+				     size_t measurement_value_size,
+				     bool lock_measurement);
+
+#endif /* PSA_MEASURED_BOOT_H */
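
A caller-side sketch, again assuming include/lib/psa is on the include path. The slot index, version string and sw_type are illustrative values only, and the algorithm argument is left as a bare placeholder because the concrete identifiers are not defined in this header.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #include <measured_boot.h>

    static psa_status_t extend_example(const uint8_t *hash, size_t hash_len,
                                       const uint8_t *signer_id,
                                       size_t signer_id_len)
    {
        const uint8_t version[] = "0.1.0";
        const uint8_t sw_type[] = "BL_2";

        return rss_measured_boot_extend_measurement(
                6U,                       /* example slot index */
                signer_id, signer_id_len,
                version, sizeof(version),
                0U,                       /* algorithm id (placeholder) */
                sw_type, sizeof(sw_type),
                hash, hash_len,
                false);                   /* do not lock the slot */
    }
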
diff --git a/include/lib/psa/psa/client.h b/include/lib/psa/psa/client.h
new file mode 100644
index 0000000..56fe028
--- /dev/null
+++ b/include/lib/psa/psa/client.h
@@ -0,0 +1,102 @@
+
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_CLIENT_H
+#define PSA_CLIENT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <psa/error.h>
+
+#ifndef IOVEC_LEN
+#define IOVEC_LEN(arr) ((uint32_t)(sizeof(arr)/sizeof(arr[0])))
+#endif
+/*********************** PSA Client Macros and Types *************************/
+/**
+ * The version of the PSA Framework API that is being used to build the calling
+ * firmware. Only a subset of the FF-M v1.1 features has been implemented.
+ * FF-M v1.1 is compatible with v1.0.
+ */
+#define PSA_FRAMEWORK_VERSION	(0x0101u)
+/**
+ * Return value from psa_version() if the requested RoT Service is not present
+ * in the system.
+ */
+#define PSA_VERSION_NONE	(0u)
+/**
+ * The zero-value null handle can be assigned to variables used in clients and
+ * RoT Services, indicating that there is no current connection or message.
+ */
+#define PSA_NULL_HANDLE		((psa_handle_t)0)
+/**
+ * Tests whether a handle value returned by psa_connect() is valid.
+ */
+#define PSA_HANDLE_IS_VALID(handle)	((psa_handle_t)(handle) > 0)
+/**
+ * Converts the handle value returned from a failed call to psa_connect()
+ * into an error code.
+ */
+#define PSA_HANDLE_TO_ERROR(handle)	((psa_status_t)(handle))
+/**
+ * Maximum number of input and output vectors for a request to psa_call().
+ */
+#define PSA_MAX_IOVEC		(4u)
+/**
+ * An IPC message type that indicates a generic client request.
+ */
+#define PSA_IPC_CALL		(0)
+typedef int32_t psa_handle_t;
+/**
+ * A read-only input memory region provided to an RoT Service.
+ */
+typedef struct psa_invec {
+	const void *base;	/*!< the start address of the memory buffer */
+	size_t len;		/*!< the size in bytes                      */
+} psa_invec;
+/**
+ * A writable output memory region provided to an RoT Service.
+ */
+typedef struct psa_outvec {
+	void *base;		/*!< the start address of the memory buffer */
+	size_t len;		/*!< the size in bytes                      */
+} psa_outvec;
+
+/**
+ * Call an RoT Service on an established connection.
+ *
+ * handle	A handle to an established connection.
+ * type		The request type. Must be zero (PSA_IPC_CALL) or positive.
+ * in_vec	Array of input psa_invec structures.
+ * in_len	Number of input psa_invec structures.
+ * out_vec	Array of output psa_outvec structures.
+ * out_len	Number of output psa_outvec structures.
+ *
+ * Return value >=0	RoT Service-specific status value.
+ * Return value <0	RoT Service-specific error code.
+ *
+ * PSA_ERROR_PROGRAMMER_ERROR:
+ *	- The connection has been terminated by the RoT Service.
+ *
+ * The call is a PROGRAMMER ERROR if one or more of the following are true:
+ *	- An invalid handle was passed.
+ *	- The connection is already handling a request.
+ *	- type < 0.
+ *	- An invalid memory reference was provided.
+ *	- in_len + out_len > PSA_MAX_IOVEC.
+ *	- The message is unrecognized by the RoT Service or is incorrectly
+ *	  formatted.
+ */
+psa_status_t psa_call(psa_handle_t handle,
+		      int32_t type,
+		      const psa_invec *in_vec,
+		      size_t in_len,
+		      psa_outvec *out_vec,
+		      size_t out_len);
+
+#endif /* PSA_CLIENT_H */
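
A sketch of the usual call pattern: one input and one output vector, with the vector counts derived via IOVEC_LEN(). The handle is assumed to be one of the service handles defined later in this patch (e.g. RSS_MEASURED_BOOT_HANDLE); per the PSA Firmware Framework convention, out_vec[].len is updated to the number of bytes actually written.

    #include <stddef.h>
    #include <stdint.h>

    #include <psa/client.h>

    static psa_status_t call_example(psa_handle_t handle, int32_t msg_type,
                                     const void *req, size_t req_len,
                                     void *rsp, size_t *rsp_len)
    {
        psa_status_t status;
        psa_invec in_vec[] = {
            { .base = req, .len = req_len },
        };
        psa_outvec out_vec[] = {
            { .base = rsp, .len = *rsp_len },
        };

        status = psa_call(handle, msg_type,
                          in_vec, IOVEC_LEN(in_vec),
                          out_vec, IOVEC_LEN(out_vec));

        *rsp_len = out_vec[0].len;      /* bytes actually written */
        return status;
    }
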
diff --git a/include/lib/psa/psa/error.h b/include/lib/psa/psa/error.h
new file mode 100644
index 0000000..8a6eb7b
--- /dev/null
+++ b/include/lib/psa/psa/error.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_ERROR_H
+#define PSA_ERROR_H
+
+#include <stdint.h>
+
+typedef int32_t psa_status_t;
+
+#define PSA_SUCCESS                     ((psa_status_t)0)
+#define PSA_SUCCESS_REBOOT              ((psa_status_t)1)
+#define PSA_SUCCESS_RESTART             ((psa_status_t)2)
+#define PSA_ERROR_PROGRAMMER_ERROR      ((psa_status_t)-129)
+#define PSA_ERROR_CONNECTION_REFUSED    ((psa_status_t)-130)
+#define PSA_ERROR_CONNECTION_BUSY       ((psa_status_t)-131)
+#define PSA_ERROR_GENERIC_ERROR         ((psa_status_t)-132)
+#define PSA_ERROR_NOT_PERMITTED         ((psa_status_t)-133)
+#define PSA_ERROR_NOT_SUPPORTED         ((psa_status_t)-134)
+#define PSA_ERROR_INVALID_ARGUMENT      ((psa_status_t)-135)
+#define PSA_ERROR_INVALID_HANDLE        ((psa_status_t)-136)
+#define PSA_ERROR_BAD_STATE             ((psa_status_t)-137)
+#define PSA_ERROR_BUFFER_TOO_SMALL      ((psa_status_t)-138)
+#define PSA_ERROR_ALREADY_EXISTS        ((psa_status_t)-139)
+#define PSA_ERROR_DOES_NOT_EXIST        ((psa_status_t)-140)
+#define PSA_ERROR_INSUFFICIENT_MEMORY   ((psa_status_t)-141)
+#define PSA_ERROR_INSUFFICIENT_STORAGE  ((psa_status_t)-142)
+#define PSA_ERROR_INSUFFICIENT_DATA     ((psa_status_t)-143)
+#define PSA_ERROR_SERVICE_FAILURE       ((psa_status_t)-144)
+#define PSA_ERROR_COMMUNICATION_FAILURE ((psa_status_t)-145)
+#define PSA_ERROR_STORAGE_FAILURE       ((psa_status_t)-146)
+#define PSA_ERROR_HARDWARE_FAILURE      ((psa_status_t)-147)
+#define PSA_ERROR_INVALID_SIGNATURE     ((psa_status_t)-149)
+#define PSA_ERROR_DEPENDENCY_NEEDED     ((psa_status_t)-156)
+#define PSA_ERROR_CURRENTLY_INSTALLING  ((psa_status_t)-157)
+
+#endif /* PSA_ERROR_H */
diff --git a/include/lib/psa/psa_manifest/sid.h b/include/lib/psa/psa_manifest/sid.h
new file mode 100644
index 0000000..947e58f
--- /dev/null
+++ b/include/lib/psa/psa_manifest/sid.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_MANIFEST_SID_H
+#define PSA_MANIFEST_SID_H
+
+/******** PSA_SP_INITIAL_ATTESTATION ********/
+#define RSS_ATTESTATION_SERVICE_SID			(0x00000020U)
+#define RSS_ATTESTATION_SERVICE_VERSION			(1U)
+#define RSS_ATTESTATION_SERVICE_HANDLE			(0x40000103U)
+
+/******** PSA_SP_MEASURED_BOOT ********/
+#define RSS_MEASURED_BOOT_SID				(0x000000E0U)
+#define RSS_MEASURED_BOOT_VERSION			(1U)
+#define RSS_MEASURED_BOOT_HANDLE			(0x40000104U)
+
+#endif /* PSA_MANIFEST_SID_H */
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 7a7012d..198b890 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
@@ -104,6 +104,13 @@
 #define round_down(value, boundary)		\
 	((value) & ~round_boundary(value, boundary))
 
+/**
+ * Helper macro to check whether a value lies on a given boundary.
+ */
+#define is_aligned(value, boundary)			\
+	(round_up((uintptr_t) value, boundary) ==	\
+	 round_down((uintptr_t) value, boundary))
+
 /*
  * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
  * Both arguments must be unsigned pointer values (i.e. uintptr_t).
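
A small usage sketch for is_aligned(): as with round_up()/round_down(), the boundary is expected to be a power of two, and the (uintptr_t) cast in the macro means pointers can be passed directly.

    #include <errno.h>
    #include <stdint.h>

    #include <lib/utils_def.h>

    static int check_8_byte_aligned(const void *buf)
    {
        /* e.g. is_aligned(0x1000, 8) is true, is_aligned(0x1004, 8) is false */
        if (!is_aligned(buf, 8U)) {
            return -EINVAL;
        }
        return 0;
    }
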
diff --git a/include/plat/arm/common/arm_reclaim_init.ld.S b/include/plat/arm/common/arm_reclaim_init.ld.S
index 717f65e..788e9ff 100644
--- a/include/plat/arm/common/arm_reclaim_init.ld.S
+++ b/include/plat/arm/common/arm_reclaim_init.ld.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,7 +12,7 @@
             . = . + PLATFORM_STACK_SIZE;
             . = ALIGN(PAGE_SIZE);
             __INIT_CODE_START__ = .;
-	    *(*text.init*);
+	    *(*text.init.*);
             __INIT_CODE_END__ = .;
             INIT_CODE_END_ALIGNED = ALIGN(PAGE_SIZE);
         } >RAM
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 7664509..b62a631 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -347,6 +347,10 @@
 int plat_spm_core_manifest_load(spmc_manifest_attribute_t *manifest,
 				const void *pm_addr);
 #endif
+#if defined(SPMC_AT_EL3)
+int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size);
+#endif
+
 /*******************************************************************************
  * Mandatory BL image load functions(may be overridden).
  ******************************************************************************/
diff --git a/include/services/el3_spmc_ffa_memory.h b/include/services/el3_spmc_ffa_memory.h
new file mode 100644
index 0000000..2037eca
--- /dev/null
+++ b/include/services/el3_spmc_ffa_memory.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_SPMC_FFA_MEM_H
+#define EL3_SPMC_FFA_MEM_H
+
+#include <assert.h>
+
+/*
+ * Subset of Arm Firmware Framework for Armv8-A
+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
+ */
+
+/**
+ * typedef ffa_endpoint_id16_t - Endpoint ID
+ *
+ * The current implementation only supports VM IDs. The FF-A spec also
+ * supports stream endpoint IDs.
+ */
+typedef uint16_t ffa_endpoint_id16_t;
+
+/**
+ * struct ffa_cons_mrd - Constituent memory region descriptor
+ * @address:
+ *         Start address of contiguous memory region. Must be 4K page aligned.
+ * @page_count:
+ *         Number of 4K pages in region.
+ * @reserved_12_15:
+ *         Reserve bytes 12-15 to pad struct size to 16 bytes.
+ */
+struct ffa_cons_mrd {
+	uint64_t address;
+	uint32_t page_count;
+	uint32_t reserved_12_15;
+};
+CASSERT(sizeof(struct ffa_cons_mrd) == 16, assert_ffa_cons_mrd_size_mismatch);
+
+/**
+ * struct ffa_comp_mrd - Composite memory region descriptor
+ * @total_page_count:
+ *         Number of 4k pages in memory region. Must match sum of
+ *         @address_range_array[].page_count.
+ * @address_range_count:
+ *         Number of entries in @address_range_array.
+ * @reserved_8_15:
+ *         Reserve bytes 8-15 to pad struct size to 16 byte alignment and
+ *         make @address_range_array 16 byte aligned.
+ * @address_range_array:
+ *         Array of &struct ffa_cons_mrd entries.
+ */
+struct ffa_comp_mrd {
+	uint32_t total_page_count;
+	uint32_t address_range_count;
+	uint64_t reserved_8_15;
+	struct ffa_cons_mrd address_range_array[];
+};
+CASSERT(sizeof(struct ffa_comp_mrd) == 16, assert_ffa_comp_mrd_size_mismatch);
+
+/**
+ * typedef ffa_mem_attr8_t - Memory region attributes v1.0.
+ * typedef ffa_mem_attr16_t - Memory region attributes v1.1.
+ *
+ * * @FFA_MEM_ATTR_NS_BIT:
+ *     Memory security state.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
+ *     Device-nGnRnE.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
+ *     Device-nGnRE.
+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
+ *     Device-nGRE.
+ * * @FFA_MEM_ATTR_DEVICE_GRE:
+ *     Device-GRE.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED
+ *     Normal memory. Non-cacheable.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB
+ *     Normal memory. Write-back cached.
+ * * @FFA_MEM_ATTR_NON_SHAREABLE
+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE
+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_INNER_SHAREABLE
+ *     Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ */
+typedef uint8_t ffa_mem_attr8_t;
+typedef uint16_t ffa_mem_attr16_t;
+#define FFA_MEM_ATTR_NS_BIT			(0x1U << 6)
+#define FFA_MEM_ATTR_DEVICE_NGNRNE		((1U << 4) | (0x0U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGNRE		((1U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGRE		((1U << 4) | (0x2U << 2))
+#define FFA_MEM_ATTR_DEVICE_GRE			((1U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED	((2U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB	((2U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NON_SHAREABLE		(0x0U << 0)
+#define FFA_MEM_ATTR_OUTER_SHAREABLE		(0x2U << 0)
+#define FFA_MEM_ATTR_INNER_SHAREABLE		(0x3U << 0)
+
+/**
+ * typedef ffa_mem_perm8_t - Memory access permissions
+ *
+ * * @FFA_MEM_ATTR_RO
+ *     Request or specify read-only mapping.
+ * * @FFA_MEM_ATTR_RW
+ *     Request or allow read-write mapping.
+ * * @FFA_MEM_PERM_NX
+ *     Deny executable mapping.
+ * * @FFA_MEM_PERM_X
+ *     Request executable mapping.
+ */
+typedef uint8_t ffa_mem_perm8_t;
+#define FFA_MEM_PERM_RO		(1U << 0)
+#define FFA_MEM_PERM_RW		(1U << 1)
+#define FFA_MEM_PERM_NX		(1U << 2)
+#define FFA_MEM_PERM_X		(1U << 3)
+
+/**
+ * typedef ffa_mem_flag8_t - Endpoint memory flags
+ *
+ * * @FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER
+ *     Non-retrieval Borrower. Memory region must not be or was not retrieved on
+ *     behalf of this endpoint.
+ */
+typedef uint8_t ffa_mem_flag8_t;
+#define FFA_MEM_FLAG_NON_RETRIEVAL_BORROWER	(1U << 0)
+
+/**
+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
+ *
+ * * @FFA_MTD_FLAG_ZERO_MEMORY
+ *     Zero memory after unmapping from sender (must be 0 for share).
+ * * @FFA_MTD_FLAG_TIME_SLICING
+ *     Not supported by this implementation.
+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH
+ *     Zero memory after unmapping from borrowers (must be 0 for share).
+ * * @FFA_MTD_FLAG_TYPE_MASK
+ *     Bit-mask to extract memory management transaction type from flags.
+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY
+ *     Share memory transaction flag.
+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
+ *     @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that
+ *     it must have been shared.
+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK
+ *     Not supported by this implementation.
+ */
+typedef uint32_t ffa_mtd_flag32_t;
+#define FFA_MTD_FLAG_ZERO_MEMORY			(1U << 0)
+#define FFA_MTD_FLAG_TIME_SLICING			(1U << 1)
+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH	(1U << 2)
+#define FFA_MTD_FLAG_TYPE_MASK				(3U << 3)
+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY			(1U << 3)
+#define FFA_MTD_FLAG_TYPE_LEND_MEMORY			(1U << 4)
+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK	(0x1FU << 5)
+
+/**
+ * struct ffa_mapd - Memory access permissions descriptor
+ * @endpoint_id:
+ *         Endpoint id that @memory_access_permissions and @flags apply to.
+ *         (&typedef ffa_endpoint_id16_t).
+ * @memory_access_permissions:
+ *         FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
+ * @flags:
+ *         FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
+ */
+struct ffa_mapd {
+	ffa_endpoint_id16_t endpoint_id;
+	ffa_mem_perm8_t memory_access_permissions;
+	ffa_mem_flag8_t flags;
+};
+CASSERT(sizeof(struct ffa_mapd) == 4, assert_ffa_mapd_size_mismatch);
+
+/**
+ * struct ffa_emad_v1_0 - Endpoint memory access descriptor.
+ * @mapd:  &struct ffa_mapd.
+ * @comp_mrd_offset:
+ *         Offset of &struct ffa_comp_mrd from start of &struct ffa_mtd_v1_0.
+ * @reserved_8_15:
+ *         Reserved bytes 8-15. Must be 0.
+ */
+struct ffa_emad_v1_0 {
+	struct ffa_mapd mapd;
+	uint32_t comp_mrd_offset;
+	uint64_t reserved_8_15;
+};
+CASSERT(sizeof(struct ffa_emad_v1_0) == 16, assert_ffa_emad_v1_0_size_mismatch);
+
+/**
+ * struct ffa_mtd_v1_0 - Memory transaction descriptor.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
+ * @reserved_3:
+ *         Reserved bytes 3. Must be 0.
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE or MEM_LEND.
+ * @tag:   Client allocated tag. Must match original value.
+ * @reserved_24_27:
+ *         Reserved bytes 24-27. Must be 0.
+ * @emad_count:
+ *         Number of entries in @emad.
+ * @emad:
+ *         Endpoint memory access descriptor array (see @struct ffa_emad_v1_0).
+ */
+struct ffa_mtd_v1_0 {
+	ffa_endpoint_id16_t sender_id;
+	ffa_mem_attr8_t memory_region_attributes;
+	uint8_t reserved_3;
+	ffa_mtd_flag32_t flags;
+	uint64_t handle;
+	uint64_t tag;
+	uint32_t reserved_24_27;
+	uint32_t emad_count;
+	struct ffa_emad_v1_0 emad[];
+};
+CASSERT(sizeof(struct ffa_mtd_v1_0) == 32, assert_ffa_mtd_size_v1_0_mismatch);
+
+/**
+ * struct ffa_mtd - Memory transaction descriptor for FF-A v1.1.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr16_t).
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE or MEM_LEND.
+ * @tag:   Client allocated tag. Must match original value.
+ * @emad_size:
+ *         Size of the emad descriptor.
+ * @emad_count:
+ *         Number of entries in the emad array.
+ * @emad_offset:
+ *         Offset from the beginning of the descriptor to the location of the
+ *         memory access descriptor array (see @struct ffa_emad_v1_0).
+ * @reserved_36_39:
+ *         Reserved bytes 36-39. Must be 0.
+ * @reserved_40_47:
+ *         Reserved bytes 40-47. Must be 0.
+ */
+struct ffa_mtd {
+	ffa_endpoint_id16_t sender_id;
+	ffa_mem_attr16_t memory_region_attributes;
+	ffa_mtd_flag32_t flags;
+	uint64_t handle;
+	uint64_t tag;
+	uint32_t emad_size;
+	uint32_t emad_count;
+	uint32_t emad_offset;
+	uint32_t reserved_36_39;
+	uint64_t reserved_40_47;
+};
+CASSERT(sizeof(struct ffa_mtd) == 48, assert_ffa_mtd_size_mismatch);
+
+#endif /* EL3_SPMC_FFA_MEM_H */
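
A consumer-side sketch of the v1.0 descriptors above: locate the composite memory region descriptor of the first endpoint and re-derive the total page count from its constituents (per the FF-A spec this must equal total_page_count). The include paths are assumptions; CASSERT is expected to come from lib/cassert.h.

    #include <stdint.h>

    #include <lib/cassert.h>
    #include <services/el3_spmc_ffa_memory.h>

    static uint32_t total_pages_v1_0(struct ffa_mtd_v1_0 *mtd)
    {
        struct ffa_comp_mrd *comp;
        uint32_t pages = 0U;
        uint32_t i;

        /* comp_mrd_offset is relative to the start of the transaction
         * descriptor. */
        comp = (struct ffa_comp_mrd *)((uintptr_t)mtd +
                                       mtd->emad[0].comp_mrd_offset);

        for (i = 0U; i < comp->address_range_count; i++) {
            pages += comp->address_range_array[i].page_count;
        }

        return pages;   /* expected to equal comp->total_page_count */
    }
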
diff --git a/include/services/ffa_svc.h b/include/services/ffa_svc.h
index 2b4a377..da016fd 100644
--- a/include/services/ffa_svc.h
+++ b/include/services/ffa_svc.h
@@ -38,6 +38,7 @@
 #define FFA_VERSION_MINOR_SHIFT		0
 #define FFA_VERSION_MINOR_MASK		U(0xFFFF)
 #define FFA_VERSION_BIT31_MASK 		U(0x1u << 31)
+#define FFA_VERSION_MASK		U(0xFFFFFFFF)
 
 
 #define MAKE_FFA_VERSION(major, minor) 	\
@@ -55,6 +56,19 @@
 	(((blk) & FFA_MSG_SEND_ATTRS_BLK_MASK) \
 	<< FFA_MSG_SEND_ATTRS_BLK_SHIFT)
 
+/* Defines for FF-A framework messages exchanged using direct messages. */
+#define FFA_FWK_MSG_BIT		BIT(31)
+#define FFA_FWK_MSG_MASK	0xFF
+#define FFA_FWK_MSG_PSCI	U(0x0)
+
+/* Defines for FF-A power management messages framework messages. */
+#define FFA_PM_MSG_WB_REQ	U(0x1) /* Warm boot request. */
+#define FFA_PM_MSG_PM_RESP	U(0x2) /* Response to PSCI or warmboot req. */
+
+/* FF-A warm boot types. */
+#define FFA_WB_TYPE_S2RAM	0x0
+#define FFA_WB_TYPE_NOTS2RAM	0x1
+
 /* Get FFA fastcall std FID from function number */
 #define FFA_FID(smc_cc, func_num)			\
 		((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |	\
@@ -87,6 +101,8 @@
 #define FFA_FNUM_MEM_RETRIEVE_RESP		U(0x75)
 #define FFA_FNUM_MEM_RELINQUISH			U(0x76)
 #define FFA_FNUM_MEM_RECLAIM			U(0x77)
+#define FFA_FNUM_MEM_FRAG_RX			U(0x7A)
+#define FFA_FNUM_MEM_FRAG_TX			U(0x7B)
 #define FFA_FNUM_NORMAL_WORLD_RESUME		U(0x7C)
 
 /* FF-A v1.1 */
@@ -142,6 +158,8 @@
 #define FFA_NOTIFICATION_GET 	FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_GET)
 #define FFA_NOTIFICATION_INFO_GET \
 	FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_INFO_GET)
+#define FFA_MEM_FRAG_RX	FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_RX)
+#define FFA_MEM_FRAG_TX	FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_TX)
 #define FFA_SPM_ID_GET		FFA_FID(SMC_32, FFA_FNUM_SPM_ID_GET)
 #define FFA_NORMAL_WORLD_RESUME	FFA_FID(SMC_32, FFA_FNUM_NORMAL_WORLD_RESUME)
 
@@ -195,6 +213,11 @@
 #define SPMC_SECURE_ID_SHIFT			U(15)
 
 /*
+ * Partition Count Flag in FFA_PARTITION_INFO_GET.
+ */
+#define FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK U(1 << 0)
+
+/*
  * Mask for source and destination endpoint id in
  * a direct message request/response.
  */
@@ -248,4 +271,71 @@
 	return !ffa_is_secure_world_id(id);
 }
 
+
+/******************************************************************************
+ * Boot information protocol as per the FF-A v1.1 spec.
+ *****************************************************************************/
+#define FFA_INIT_DESC_SIGNATURE			0x00000FFA
+
+/* Boot information type. */
+#define FFA_BOOT_INFO_TYPE_STD			U(0x0)
+#define FFA_BOOT_INFO_TYPE_IMPL			U(0x1)
+
+#define FFA_BOOT_INFO_TYPE_MASK			U(0x1)
+#define FFA_BOOT_INFO_TYPE_SHIFT		U(0x7)
+#define FFA_BOOT_INFO_TYPE(type)		\
+	(((type) & FFA_BOOT_INFO_TYPE_MASK)	\
+	<< FFA_BOOT_INFO_TYPE_SHIFT)
+
+/* Boot information identifier. */
+#define FFA_BOOT_INFO_TYPE_ID_FDT		U(0x0)
+#define FFA_BOOT_INFO_TYPE_ID_HOB		U(0x1)
+
+#define FFA_BOOT_INFO_TYPE_ID_MASK		U(0x3F)
+#define FFA_BOOT_INFO_TYPE_ID_SHIFT		U(0x0)
+#define FFA_BOOT_INFO_TYPE_ID(type)		\
+	(((type) & FFA_BOOT_INFO_TYPE_ID_MASK)	\
+	<< FFA_BOOT_INFO_TYPE_ID_SHIFT)
+
+/* Format of Flags Name field. */
+#define FFA_BOOT_INFO_FLAG_NAME_STRING		U(0x0)
+#define FFA_BOOT_INFO_FLAG_NAME_UUID		U(0x1)
+
+#define FFA_BOOT_INFO_FLAG_NAME_MASK		U(0x3)
+#define FFA_BOOT_INFO_FLAG_NAME_SHIFT		U(0x0)
+#define FFA_BOOT_INFO_FLAG_NAME(type)		\
+	(((type) & FFA_BOOT_INFO_FLAG_NAME_MASK)\
+	<< FFA_BOOT_INFO_FLAG_NAME_SHIFT)
+
+/* Format of Flags Contents field. */
+#define FFA_BOOT_INFO_FLAG_CONTENT_ADR		U(0x0)
+#define FFA_BOOT_INFO_FLAG_CONTENT_VAL		U(0x1)
+
+#define FFA_BOOT_INFO_FLAG_CONTENT_MASK		U(0x1)
+#define FFA_BOOT_INFO_FLAG_CONTENT_SHIFT	U(0x2)
+#define FFA_BOOT_INFO_FLAG_CONTENT(content)		\
+	(((content) & FFA_BOOT_INFO_FLAG_CONTENT_MASK)	\
+	<< FFA_BOOT_INFO_FLAG_CONTENT_SHIFT)
+
+/* Boot information descriptor. */
+struct ffa_boot_info_desc {
+	uint8_t name[16];
+	uint8_t type;
+	uint8_t reserved;
+	uint16_t flags;
+	uint32_t size_boot_info;
+	uint64_t content;
+};
+
+/* Boot information header. */
+struct ffa_boot_info_header {
+	uint32_t signature; /* 0xFFA */
+	uint32_t version;
+	uint32_t size_boot_info_blob;
+	uint32_t size_boot_info_desc;
+	uint32_t count_boot_info_desc;
+	uint32_t offset_boot_info_desc;
+	uint64_t reserved;
+};
+
 #endif /* FFA_SVC_H */
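
A sketch of how a consumer might walk the boot information blob described above and pick out a standard FDT entry. Whether 'content' holds an address or an inline value is indicated by the flags field; that check is omitted here for brevity.

    #include <stdint.h>

    #include <services/ffa_svc.h>

    static uint64_t find_fdt_boot_info(struct ffa_boot_info_header *hdr)
    {
        struct ffa_boot_info_desc *desc;
        uint32_t i;

        if (hdr->signature != FFA_INIT_DESC_SIGNATURE) {
            return 0U;
        }

        desc = (struct ffa_boot_info_desc *)((uintptr_t)hdr +
                                             hdr->offset_boot_info_desc);

        for (i = 0U; i < hdr->count_boot_info_desc; i++) {
            uint32_t type = (desc[i].type >> FFA_BOOT_INFO_TYPE_SHIFT) &
                            FFA_BOOT_INFO_TYPE_MASK;
            uint32_t id = (desc[i].type >> FFA_BOOT_INFO_TYPE_ID_SHIFT) &
                          FFA_BOOT_INFO_TYPE_ID_MASK;

            if ((type == FFA_BOOT_INFO_TYPE_STD) &&
                (id == FFA_BOOT_INFO_TYPE_ID_FDT)) {
                return desc[i].content;
            }
        }

        return 0U;
    }
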
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index 2e97abb..18ee1f9 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -396,6 +396,11 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+	mov	r0, #ERRATA_MISSING
+	bx	lr
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: r0-r6
@@ -600,6 +605,7 @@
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a57, cve_2022_23960
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index ff2b0e6..03914b2 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -101,6 +101,11 @@
 	bx	lr
 endfunc check_errata_cve_2018_3639
 
+func check_errata_cve_2022_23960
+	mov	r0, #ERRATA_MISSING
+	bx	lr
+endfunc check_errata_cve_2022_23960
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -260,6 +265,7 @@
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
 	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
+	report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index 34e1082..f444077 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -301,6 +301,7 @@
 	report_errata ERRATA_A510_2250311, cortex_a510, 2250311
 	report_errata ERRATA_A510_2218950, cortex_a510, 2218950
 	report_errata ERRATA_A510_2172148, cortex_a510, 2172148
+	report_errata ERRATA_DSU_2313941, cortex_a510, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -312,12 +313,15 @@
 
 	/* Disable speculative loads */
 	msr	SSBS, xzr
-	isb
 
 	/* Get the CPU revision and stash it in x18. */
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_A510_1922240
 	mov	x0, x18
 	bl	errata_cortex_a510_1922240_wa
@@ -353,6 +357,7 @@
 	bl	errata_cortex_a510_2172148_wa
 #endif
 
+	isb
 	ret	x19
 endfunc cortex_a510_reset_func
 
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index aea62ae..5d8e9a6 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -310,6 +310,49 @@
 	b       cpu_rev_var_ls
 endfunc check_errata_2282622
 
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2008768.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x2, x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2008768_wa
+	mov     x17, x30
+	bl      check_errata_2008768
+	cbz     x0, 1f
+
+	/* Stash ERRSELR_EL1 in x2 */
+	mrs	x2, ERRSELR_EL1
+
+	/* Select error record 0 and clear ED bit */
+	msr	ERRSELR_EL1, xzr
+	mrs	x1, ERXCTLR_EL1
+	bfi	x1, xzr, #ERXCTLR_ED_SHIFT, #1
+	msr	ERXCTLR_EL1, x1
+
+	/* Select error record 1 and clear ED bit */
+	mov	x0, #1
+	msr	ERRSELR_EL1, x0
+	mrs	x1, ERXCTLR_EL1
+	bfi	x1, xzr, #ERXCTLR_ED_SHIFT, #1
+	msr	ERXCTLR_EL1, x1
+
+	/* Restore ERRSELR_EL1 from x2 */
+	msr	ERRSELR_EL1, x2
+
+1:
+	ret     x17
+endfunc errata_a710_2008768_wa
+
+func check_errata_2008768
+	/* Applies to r0p0, r1p0 and r2p0 */
+	mov     x1, #0x20
+	b       cpu_rev_var_ls
+endfunc check_errata_2008768
+
 func check_errata_cve_2022_23960
 #if WORKAROUND_CVE_2022_23960
 	mov	x0, #ERRATA_APPLIES
@@ -324,6 +367,14 @@
 	 * ----------------------------------------------------
 	 */
 func cortex_a710_core_pwr_dwn
+
+#if ERRATA_A710_2008768
+	mov	x4, x30
+	bl	cpu_get_rev_var
+	bl	errata_a710_2008768_wa
+	mov	x30, x4
+#endif
+
 	/* ---------------------------------------------------
 	 * Enable CPU power down bit in power control register
 	 * ---------------------------------------------------
@@ -358,7 +409,9 @@
 	report_errata ERRATA_A710_2267065, cortex_a710, 2267065
 	report_errata ERRATA_A710_2136059, cortex_a710, 2136059
 	report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+	report_errata ERRATA_A710_2008768, cortex_a710, 2008768
 	report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, cortex_a710, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -374,6 +427,10 @@
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_A710_1987031
 	mov	x0, x18
 	bl	errata_a710_1987031_wa
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index 1a6f848..be94e91 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -267,6 +267,62 @@
 	b	cpu_rev_var_range
 endfunc check_errata_2242635
 
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2376745.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2376745_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2376745
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, CORTEX_A78_ACTLR2_EL1
+	orr	x1, x1, #BIT(0)
+	msr	CORTEX_A78_ACTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_a78_2376745_wa
+
+func check_errata_2376745
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+	mov	x1, #CPU_REV(1, 2)
+	b	cpu_rev_var_ls
+endfunc check_errata_2376745
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2395406.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2395406_wa
+	/* Check revision. */
+	mov	x17, x30
+	bl	check_errata_2395406
+	cbz	x0, 1f
+
+	/* Apply the workaround. */
+	mrs	x1, CORTEX_A78_ACTLR2_EL1
+	orr	x1, x1, #BIT(40)
+	msr	CORTEX_A78_ACTLR2_EL1, x1
+1:
+	ret	x17
+endfunc errata_a78_2395406_wa
+
+func check_errata_2395406
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+	mov	x1, #CPU_REV(1, 2)
+	b	cpu_rev_var_ls
+endfunc check_errata_2395406
+
 func check_errata_cve_2022_23960
 #if WORKAROUND_CVE_2022_23960
 	mov	x0, #ERRATA_APPLIES
@@ -320,6 +376,16 @@
 	bl	errata_a78_2242635_wa
 #endif
 
+#if ERRATA_A78_2376745
+	mov	x0, x18
+	bl	errata_a78_2376745_wa
+#endif
+
+#if ERRATA_A78_2395406
+	mov	x0, x18
+	bl	errata_a78_2395406_wa
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -390,6 +456,8 @@
 	report_errata ERRATA_A78_1952683, cortex_a78, 1952683
 	report_errata ERRATA_A78_2132060, cortex_a78, 2132060
 	report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+	report_errata ERRATA_A78_2376745, cortex_a78, 2376745
+	report_errata ERRATA_A78_2395406, cortex_a78, 2395406
 	report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
 
 	ldp	x8, x30, [sp], #16
diff --git a/lib/cpus/aarch64/cortex_hunter.S b/lib/cpus/aarch64/cortex_hunter.S
index 2ab4296..973637e 100644
--- a/lib/cpus/aarch64/cortex_hunter.S
+++ b/lib/cpus/aarch64/cortex_hunter.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_hunter.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,9 +22,32 @@
 #error "Cortex Hunter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+        wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_BHB_LOOP_COUNT, cortex_hunter
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func cortex_hunter_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Hunter generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_hunter
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc cortex_hunter_reset_func
@@ -49,6 +73,18 @@
  * Errata printing function for Cortex Hunter. Must follow AAPCS.
  */
 func cortex_hunter_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_hunter, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc cortex_hunter_errata_report
 #endif
diff --git a/lib/cpus/aarch64/cortex_makalu.S b/lib/cpus/aarch64/cortex_makalu.S
index 98c7d6d..7603210 100644
--- a/lib/cpus/aarch64/cortex_makalu.S
+++ b/lib/cpus/aarch64/cortex_makalu.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_makalu.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,9 +22,32 @@
 #error "Cortex Makalu supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_MAKALU_BHB_LOOP_COUNT, cortex_makalu
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov     x0, #ERRATA_APPLIES
+#else
+	mov     x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
 func cortex_makalu_reset_func
 	/* Disable speculative loads */
 	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Makalu generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+        adr	x0, wa_cve_vbar_cortex_makalu
+        msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
 	isb
 	ret
 endfunc cortex_makalu_reset_func
@@ -49,6 +73,18 @@
  * Errata printing function for Cortex Makalu. Must follow AAPCS.
  */
 func cortex_makalu_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_makalu, cve_2022_23960
+
+	ldp     x8, x30, [sp], #16
 	ret
 endfunc cortex_makalu_errata_report
 #endif
diff --git a/lib/cpus/aarch64/cortex_makalu_elp_arm.S b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
index fbbf205..f4d2df0 100644
--- a/lib/cpus/aarch64/cortex_makalu_elp_arm.S
+++ b/lib/cpus/aarch64/cortex_makalu_elp_arm.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <cortex_makalu_elp_arm.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Cortex Makalu ELP supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_MAKALU_ELP_ARM_BHB_LOOP_COUNT, cortex_makalu_elp_arm
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -37,22 +42,53 @@
 	ret
 endfunc cortex_makalu_elp_arm_core_pwr_dwn
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_makalu_elp_arm_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex Makalu ELP generic vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+         */
+	adr	x0, wa_cve_vbar_cortex_makalu_elp_arm
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret
+endfunc cortex_makalu_elp_arm_reset_func
+
 #if REPORT_ERRATA
 /*
  * Errata printing function for Cortex Makalu ELP. Must follow AAPCS.
  */
 func cortex_makalu_elp_arm_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, cortex_makalu_elp_arm, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc cortex_makalu_elp_arm_errata_report
 #endif
 
-func cortex_makalu_elp_arm_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-	isb
-	ret
-endfunc cortex_makalu_elp_arm_reset_func
-
 	/* ---------------------------------------------
 	 * This function provides Cortex Makalu ELP-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
new file mode 100644
index 0000000..9a7f666
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_x1.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1821534.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1821534_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1821534
+	cbz	x0, 1f
+	mrs	x1, CORTEX_X1_ACTLR2_EL1
+	orr	x1, x1, BIT(2)
+	msr	CORTEX_X1_ACTLR2_EL1, x1
+	isb
+1:
+	ret	x17
+endfunc errata_x1_1821534_wa
+
+func check_errata_1821534
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1821534
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1688305.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1688305_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1688305
+	cbz	x0, 1f
+	mrs	x0, CORTEX_X1_ACTLR2_EL1
+	orr	x0, x0, BIT(1)
+	msr	CORTEX_X1_ACTLR2_EL1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_x1_1688305_wa
+
+func check_errata_1688305
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1688305
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1827429.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1827429_wa
+	/* Compare x0 against revision r1p0 */
+	mov	x17, x30
+	bl	check_errata_1827429
+	cbz	x0, 1f
+	mrs	x0, CORTEX_X1_CPUECTLR_EL1
+	orr	x0, x0, BIT(53)
+	msr	CORTEX_X1_CPUECTLR_EL1, x0
+	isb
+
+1:
+	ret	x17
+endfunc errata_x1_1827429_wa
+
+func check_errata_1827429
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1827429
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+	/* -------------------------------------------------
+	 * The CPU Ops reset function for Cortex-X1.
+	 * Shall clobber: x0-x19
+	 * -------------------------------------------------
+	 */
+func cortex_x1_reset_func
+	mov	x19, x30
+	bl	cpu_get_rev_var
+	mov	x18, x0
+
+#if ERRATA_X1_1821534
+	mov	x0, x18
+	bl	errata_x1_1821534_wa
+#endif
+
+#if ERRATA_X1_1688305
+	mov	x0, x18
+	bl	errata_x1_1688305_wa
+#endif
+
+#if ERRATA_X1_1827429
+	mov	x0, x18
+	bl	errata_x1_1827429_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Cortex-X1 generic vectors are overridden to apply errata
+	 * mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_cortex_x1
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret	x19
+endfunc cortex_x1_reset_func
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_x1_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_X1_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_X1_CORE_PWRDN_EN_MASK
+	msr	CORTEX_X1_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_x1_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex X1. Must follow AAPCS.
+ */
+func cortex_x1_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_X1_1821534, cortex_x1, 1821534
+	report_errata ERRATA_X1_1688305, cortex_x1, 1688305
+	report_errata ERRATA_X1_1827429, cortex_x1, 1827429
+	report_errata WORKAROUND_CVE_2022_23960, cortex_x1, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_x1_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides Cortex X1 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_x1_regs, "aS"
+cortex_x1_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_x1_cpu_reg_dump
+	adr	x6, cortex_x1_regs
+	mrs	x8, CORTEX_X1_CPUECTLR_EL1
+	ret
+endfunc cortex_x1_cpu_reg_dump
+
+declare_cpu_ops cortex_x1, CORTEX_X1_MIDR, \
+	cortex_x1_reset_func, \
+	cortex_x1_core_pwr_dwn
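
For readers less familiar with the revision-variant encoding used by the check_errata_* routines above, here is a small C model of the comparison that cpu_rev_var_ls performs; the standalone helper and its name are illustrative only, not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* On entry to the check functions, x0 packs variant[7:4] and revision[3:0]. */
    static bool rev_var_less_or_equal(uint8_t rev_var, uint8_t max_rev_var)
    {
            /* cpu_rev_var_ls reports ERRATA_APPLIES when rev_var <= the limit in x1. */
            return rev_var <= max_rev_var;
    }

    /* Example: with a limit of 0x10 (r1p0), r0p0 (0x00) and r1p0 (0x10) apply,
     * while r1p1 (0x11) does not. */
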
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index 90a906b..3e0810b 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -305,6 +305,7 @@
 	report_errata ERRATA_X2_2147715, cortex_x2, 2147715
 	report_errata ERRATA_X2_2216384, cortex_x2, 2216384
 	report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, cortex_x2, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -316,12 +317,15 @@
 
 	/* Disable speculative loads */
 	msr	SSBS, xzr
-	isb
 
 	/* Get the CPU revision and stash it in x18. */
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_X2_2002765
 	mov	x0, x18
 	bl	errata_cortex_x2_2002765_wa
@@ -367,7 +371,7 @@
 #endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
 
 	isb
-	ret x19
+	ret	x19
 endfunc cortex_x2_reset_func
 
 	/* ---------------------------------------------
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
index da052d5..419b6ea 100644
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -139,3 +139,57 @@
 1:
 	ret	x17
 endfunc errata_dsu_936184_wa
+
+	/* -----------------------------------------------------------------------
+	 * DSU erratum 2313941 check function
+	 * Checks the DSU variant, revision and configuration to determine if
+	 * the erratum applies. The erratum applies to all configurations of the
+	 * DSU and to revision-variants r0p0, r1p0, r2p0, r2p1, r3p0 and r3p1.
+	 *
+	 * The erratum is still open.
+	 *
+	 * This function is called from both assembly and C environments, so it
+	 * follows AAPCS.
+	 *
+	 * Clobbers: x0-x3
+	 * -----------------------------------------------------------------------
+	 */
+	.globl	check_errata_dsu_2313941
+	.globl	errata_dsu_2313941_wa
+
+func check_errata_dsu_2313941
+	mov	x2, #ERRATA_APPLIES
+	mov	x3, #ERRATA_NOT_APPLIES
+
+	/* Check if DSU version is less than or equal to r3p1 */
+	mrs	x1, CLUSTERIDR_EL1
+
+	/* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+	ubfx	x0, x1, #CLUSTERIDR_REV_SHIFT,\
+			#(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+	mov	x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+	cmp	x0, x1
+	csel	x0, x2, x3, LS
+	ret
+endfunc check_errata_dsu_2313941
+
+	/* --------------------------------------------------
+	 * Errata Workaround for DSU erratum #2313941.
+	 *
+	 * Can clobber only: x0-x17
+	 * --------------------------------------------------
+	 */
+func errata_dsu_2313941_wa
+	mov	x17, x30
+	bl	check_errata_dsu_2313941
+	cbz	x0, 1f
+
+	/* If erratum applies, disable high-level clock gating */
+	mrs	x0, CLUSTERACTLR_EL1
+	orr	x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+	msr	CLUSTERACTLR_EL1, x0
+	isb
+1:
+	ret	x17
+endfunc errata_dsu_2313941_wa
+
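
A rough C equivalent of the CLUSTERIDR_EL1 check above may help; it assumes the usual layout with Revision in bits [3:0] and Variant in bits [7:4] (i.e. a CLUSTERIDR_REV_SHIFT of 0), and the function is a sketch for illustration only.

    #include <stdbool.h>
    #include <stdint.h>

    static bool dsu_2313941_applies(uint64_t clusteridr_el1)
    {
            /* Extract Revision and Variant together: (Variant << 4) | Revision. */
            uint64_t rev_var = clusteridr_el1 & 0xffU;

            /* The erratum applies up to and including r3p1, encoded as 0x31. */
            return rev_var <= 0x31U;
    }
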
diff --git a/lib/cpus/aarch64/neoverse_demeter.S b/lib/cpus/aarch64/neoverse_demeter.S
index f43c18b..41cb4ee 100644
--- a/lib/cpus/aarch64/neoverse_demeter.S
+++ b/lib/cpus/aarch64/neoverse_demeter.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,7 @@
 #include <neoverse_demeter.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse Demeter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_DEMETER_BHB_LOOP_COUNT, neoverse_demeter
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ----------------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ----------------------------------------------------
@@ -37,22 +42,52 @@
 	ret
 endfunc neoverse_demeter_core_pwr_dwn
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func neoverse_demeter_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse Demeter vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_demeter
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+	isb
+	ret
+endfunc neoverse_demeter_reset_func
+
 #if REPORT_ERRATA
 /*
  * Errata printing function for Neoverse Demeter. Must follow AAPCS.
  */
 func neoverse_demeter_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_demeter, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc neoverse_demeter_errata_report
 #endif
 
-func neoverse_demeter_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-	isb
-	ret
-endfunc neoverse_demeter_reset_func
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse Demeter-
 	 * specific register information for crash
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index b93f2a6..5b796dc 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -367,6 +367,10 @@
 	orr	x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
 	msr	NEOVERSE_N2_CPUACTLR2_EL1, x0
 
+#if ERRATA_DSU_2313941
+	bl	errata_dsu_2313941_wa
+#endif
+
 #if ERRATA_N2_2067956
 	mov	x0, x18
 	bl	errata_n2_2067956_wa
@@ -493,6 +497,7 @@
 	report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
 	report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
 	report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
+	report_errata ERRATA_DSU_2313941, neoverse_n2, dsu_2313941
 
 	ldp	x8, x30, [sp], #16
 	ret
diff --git a/lib/cpus/aarch64/neoverse_poseidon.S b/lib/cpus/aarch64/neoverse_poseidon.S
index 43a93aa..030293d 100644
--- a/lib/cpus/aarch64/neoverse_poseidon.S
+++ b/lib/cpus/aarch64/neoverse_poseidon.S
@@ -10,6 +10,7 @@
 #include <neoverse_poseidon.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -21,6 +22,10 @@
 #error "Neoverse Poseidon supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if WORKAROUND_CVE_2022_23960
+	wa_cve_2022_23960_bhb_vector_table NEOVERSE_POSEIDON_BHB_LOOP_COUNT, neoverse_poseidon
+#endif /* WORKAROUND_CVE_2022_23960 */
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -37,22 +42,53 @@
 	ret
 endfunc neoverse_poseidon_core_pwr_dwn
 
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2022_23960
+
+func neoverse_poseidon_reset_func
+	/* Disable speculative loads */
+	msr	SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+	/*
+	 * The Neoverse Poseidon generic vectors are overridden to apply
+	 * errata mitigation on exception entry from lower ELs.
+	 */
+	adr	x0, wa_cve_vbar_neoverse_poseidon
+	msr	vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+	isb
+	ret
+endfunc neoverse_poseidon_reset_func
+
 #if REPORT_ERRATA
 	/*
 	 * Errata printing function for Neoverse Poseidon. Must follow AAPCS.
 	 */
 func neoverse_poseidon_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2022_23960, neoverse_poseidon, cve_2022_23960
+
+	ldp	x8, x30, [sp], #16
 	ret
 endfunc neoverse_poseidon_errata_report
 #endif
 
-func neoverse_poseidon_reset_func
-	/* Disable speculative loads */
-	msr	SSBS, xzr
-	isb
-	ret
-endfunc neoverse_poseidon_reset_func
-
 	/* ---------------------------------------------
 	 * This function provides Neoverse-Poseidon specific
 	 * register information for crash reporting.
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 462ca9d..e14bb24 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2022, Arm Limited and Contributors. All rights reserved.
 # Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
@@ -333,6 +333,14 @@
 # present in r0p0 as well but there is no workaround for that revision.
 ERRATA_A78_2242635	?=0
 
+# Flag to apply erratum 2376745 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2376745	?=0
+
+# Flag to apply erratum 2395406 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2395406	?=0
+
 # Flag to apply erratum 1941500 workaround during reset. This erratum applies
 # to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
 ERRATA_A78_AE_1941500	?=0
@@ -349,6 +357,18 @@
 # to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
 ERRATA_A78_AE_2395408	?=0
 
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1821534	?=0
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1688305	?=0
+
+# Flag to apply erratum 1827429 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+ERRATA_X1_1827429	?=0
+
 # Flag to apply T32 CLREX workaround during reset. This erratum applies
 # only to r0p0 and r1p0 of the Neoverse N1 cpu.
 ERRATA_N1_1043202	?=0
@@ -484,6 +504,10 @@
 # to revision r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
 ERRATA_A710_2282622	?=0
 
+# Flag to apply erratum 2008768 workaround during reset. This erratum applies
+# to revision r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2008768	?=0
+
 # Flag to apply erratum 2067956 workaround during reset. This erratum applies
 # to revision r0p0 of the Neoverse N2 cpu and is still open.
 ERRATA_N2_2067956	?=0
@@ -590,6 +614,11 @@
 # higher DSU power consumption on idle.
 ERRATA_DSU_936184	?=0
 
+# Flag to apply DSU erratum 2313941. This erratum applies to DSUs revisions
+# r0p0, r1p0, r2p0, r2p1, r3p0, r3p1 and is still open. Applying the workaround
+# results in higher DSU power consumption on idle.
+ERRATA_DSU_2313941	?=0
+
 # Process ERRATA_A9_794073 flag
 $(eval $(call assert_boolean,ERRATA_A9_794073))
 $(eval $(call add_define,ERRATA_A9_794073))
@@ -842,6 +871,14 @@
 $(eval $(call assert_boolean,ERRATA_A78_2242635))
 $(eval $(call add_define,ERRATA_A78_2242635))
 
+# Process ERRATA_A78_2376745 flag
+$(eval $(call assert_boolean,ERRATA_A78_2376745))
+$(eval $(call add_define,ERRATA_A78_2376745))
+
+# Process ERRATA_A78_2395406 flag
+$(eval $(call assert_boolean,ERRATA_A78_2395406))
+$(eval $(call add_define,ERRATA_A78_2395406))
+
 # Process ERRATA_A78_AE_1941500 flag
 $(eval $(call assert_boolean,ERRATA_A78_AE_1941500))
 $(eval $(call add_define,ERRATA_A78_AE_1941500))
@@ -858,6 +895,18 @@
 $(eval $(call assert_boolean,ERRATA_A78_AE_2395408))
 $(eval $(call add_define,ERRATA_A78_AE_2395408))
 
+# Process ERRATA_X1_1821534 flag
+$(eval $(call assert_boolean,ERRATA_X1_1821534))
+$(eval $(call add_define,ERRATA_X1_1821534))
+
+# Process ERRATA_X1_1688305 flag
+$(eval $(call assert_boolean,ERRATA_X1_1688305))
+$(eval $(call add_define,ERRATA_X1_1688305))
+
+# Process ERRATA_X1_1827429 flag
+$(eval $(call assert_boolean,ERRATA_X1_1827429))
+$(eval $(call add_define,ERRATA_X1_1827429))
+
 # Process ERRATA_N1_1043202 flag
 $(eval $(call assert_boolean,ERRATA_N1_1043202))
 $(eval $(call add_define,ERRATA_N1_1043202))
@@ -990,6 +1039,10 @@
 $(eval $(call assert_boolean,ERRATA_A710_2282622))
 $(eval $(call add_define,ERRATA_A710_2282622))
 
+# Process ERRATA_A710_2008768 flag
+$(eval $(call assert_boolean,ERRATA_A710_2008768))
+$(eval $(call add_define,ERRATA_A710_2008768))
+
 # Process ERRATA_N2_2067956 flag
 $(eval $(call assert_boolean,ERRATA_N2_2067956))
 $(eval $(call add_define,ERRATA_N2_2067956))
@@ -1090,6 +1143,10 @@
 $(eval $(call assert_boolean,ERRATA_DSU_936184))
 $(eval $(call add_define,ERRATA_DSU_936184))
 
+# Process ERRATA_DSU_2313941 flag
+$(eval $(call assert_boolean,ERRATA_DSU_2313941))
+$(eval $(call add_define,ERRATA_DSU_2313941))
+
 # Errata build flags
 ifneq (${ERRATA_A53_843419},0)
 TF_LDFLAGS_aarch64	+= --fix-cortex-a53-843419
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 449f120..e393493 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -36,6 +36,64 @@
 #endif /* ENABLE_FEAT_TWED */
 
 static void manage_extensions_secure(cpu_context_t *ctx);
+
+static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
+{
+	u_register_t sctlr_elx, actlr_elx;
+
+	/*
+	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
+	 * execution state, setting all fields rather than relying on the hw.
+	 * Some fields have architecturally UNKNOWN reset values and these are
+	 * set to zero.
+	 *
+	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
+	 *
+	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
+	 * required by PSCI specification)
+	 */
+	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
+	if (GET_RW(ep->spsr) == MODE_RW_64) {
+		sctlr_elx |= SCTLR_EL1_RES1;
+	} else {
+		/*
+		 * If the target execution state is AArch32 then the following
+		 * fields need to be set.
+		 *
+		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
+		 *  instructions are not trapped to EL1.
+		 *
+		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
+		 *  instructions are not trapped to EL1.
+		 *
+		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
+		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
+		 */
+		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
+					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
+	}
+
+#if ERRATA_A75_764081
+	/*
+	 * If workaround of errata 764081 for Cortex-A75 is used then set
+	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
+	 */
+	sctlr_elx |= SCTLR_IESB_BIT;
+#endif
+	/* Store the initialised SCTLR_EL1 value in the cpu_context */
+	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+	/*
+	 * Base the context ACTLR_EL1 on the current value, as it is
+	 * implementation defined. The context restore process will write
+	 * the value from the context to the actual register and can cause
+	 * problems for processor cores that don't expect certain bits to
+	 * be zero.
+	 */
+	actlr_elx = read_actlr_el1();
+	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
+}
+
 /******************************************************************************
  * This function performs initializations that are specific to SECURE state
  * and updates the cpu context specified by 'ctx'.
@@ -85,6 +143,14 @@
 
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 
+	/*
+	 * Initialize EL1 context registers unless SPMC is running
+	 * at S-EL2.
+	 */
+#if !SPMD_SPM_AT_SEL2
+	setup_el1_context(ctx, ep);
+#endif
+
 	manage_extensions_secure(ctx);
 }
 
@@ -147,6 +213,9 @@
 #endif
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 
+	/* Initialize EL1 context registers */
+	setup_el1_context(ctx, ep);
+
 	/* Initialize EL2 context registers */
 #if CTX_INCLUDE_EL2_REGS
 
@@ -186,7 +255,6 @@
 	u_register_t scr_el3;
 	el3_state_t *state;
 	gp_regs_t *gp_regs;
-	u_register_t sctlr_elx, actlr_elx;
 
 	/* Clear any residual register values from the context */
 	zeromem(ctx, sizeof(*ctx));
@@ -214,8 +282,10 @@
 
 	/*
 	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
-	 *  Secure timer registers to EL3, from AArch64 state only, if specified
-	 *  by the entrypoint attributes.
+	 * Secure timer registers to EL3, from AArch64 state only, if specified
+	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
+	 * bit always behaves as 1 (i.e. secure physical timer register access
+	 * is not trapped)
 	 */
 	if (EP_GET_ST(ep->h.attr) != 0U) {
 		scr_el3 |= SCR_ST_BIT;
@@ -283,56 +353,6 @@
 		}
 	}
 
-	/*
-	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
-	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
-	 * to 1 when EL2 is present.
-	 */
-	if (is_armv8_6_feat_amuv1p1_present() &&
-		(el_implemented(2) != EL_IMPL_NONE)) {
-		scr_el3 |= SCR_AMVOFFEN_BIT;
-	}
-
-	/*
-	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
-	 * execution state setting all fields rather than relying of the hw.
-	 * Some fields have architecturally UNKNOWN reset values and these are
-	 * set to zero.
-	 *
-	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
-	 *
-	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
-	 *  required by PSCI specification)
-	 */
-	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
-	if (GET_RW(ep->spsr) == MODE_RW_64) {
-		sctlr_elx |= SCTLR_EL1_RES1;
-	} else {
-		/*
-		 * If the target execution state is AArch32 then the following
-		 * fields need to be set.
-		 *
-		 * SCTRL_EL1.nTWE: Set to one so that EL0 execution of WFE
-		 *  instructions are not trapped to EL1.
-		 *
-		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
-		 *  instructions are not trapped to EL1.
-		 *
-		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
-		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
-		 */
-		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
-					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
-	}
-
-#if ERRATA_A75_764081
-	/*
-	 * If workaround of errata 764081 for Cortex-A75 is used then set
-	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
-	 */
-	sctlr_elx |= SCTLR_IESB_BIT;
-#endif
-
 #if ENABLE_FEAT_TWED
 	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
 	/* Set delay in SCR_EL3 */
@@ -345,23 +365,6 @@
 #endif /* ENABLE_FEAT_TWED */
 
 	/*
-	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
-	 * and other EL2 registers are set up by cm_prepare_el3_exit() as they
-	 * are not part of the stored cpu_context.
-	 */
-	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
-
-	/*
-	 * Base the context ACTLR_EL1 on the current value, as it is
-	 * implementation defined. The context restore process will write
-	 * the value from the context to the actual register and can cause
-	 * problems for processor cores that don't expect certain bits to
-	 * be zero.
-	 */
-	actlr_elx = read_actlr_el1();
-	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
-
-	/*
 	 * Populate EL3 state so that we've the right context
 	 * before doing ERET
 	 */
@@ -830,6 +833,14 @@
 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
 	assert(ctx != NULL);
 
+	/* Assert that EL2 is used. */
+#if ENABLE_ASSERTIONS
+	el3_state_t *state = get_el3state_ctx(ctx);
+	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+#endif
+	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
+			(el_implemented(2U) != EL_IMPL_NONE));
+
 	/*
 	 * Currently some extensions are configured using
 	 * direct register updates. Therefore, do this here
@@ -843,6 +854,12 @@
 	 */
 	write_scr_el3(read_scr_el3() | SCR_NS_BIT);
 
+	/*
+	 * Ensure the NS bit change is committed before the EL2/EL1
+	 * state restoration.
+	 */
+	isb();
+
 	/* Restore EL2 and EL1 sysreg contexts */
 	cm_el2_sysregs_context_restore(NON_SECURE);
 	cm_el1_sysregs_context_restore(NON_SECURE);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index d329c3d..72566fd 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -75,7 +75,7 @@
 		((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
 }
 
-static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
+static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
 {
 	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
 
@@ -85,6 +85,16 @@
 	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
 }
 
+static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
+{
+	uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+
+	value &= ~SCR_AMVOFFEN_BIT;
+	value |= (amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT;
+
+	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, value);
+}
+
 static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
 {
 	write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
@@ -226,7 +236,7 @@
 	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
 	 * the Activity Monitor registers do not trap to EL3.
 	 */
-	write_cptr_el3_tam(ctx, 0U);
+	ctx_write_cptr_el3_tam(ctx, 0U);
 
 	/*
 	 * Retrieve the number of architected counters. All of these counters
@@ -285,6 +295,13 @@
 			 * used.
 			 */
 			write_hcr_el2_amvoffen(0U);
+		} else {
+			/*
+			 * Virtual offset registers are only accessible from EL3
+			 * and EL2. When this bit is clear, it traps accesses
+			 * from EL2, so set it to 1 when EL2 is present.
+			 */
+			ctx_write_scr_el3_amvoffen(ctx, 1U);
 		}
 
 #if AMU_RESTRICT_COUNTERS
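
Both helpers added above follow the same read-modify-write shape on a saved register image; a generic sketch of that pattern (the helper name and parameters are illustrative, not from the tree) is:

    #include <stdint.h>

    /* Clear the target field of a saved register value, insert the new value,
     * and return the updated image to be written back to the context. */
    static inline uint64_t ctx_set_field(uint64_t reg, uint64_t mask,
                                         unsigned int shift, uint64_t val)
    {
            reg &= ~mask;
            reg |= (val << shift) & mask;
            return reg;
    }
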
diff --git a/lib/libc/snprintf.c b/lib/libc/snprintf.c
index 675d243..12f51c0 100644
--- a/lib/libc/snprintf.c
+++ b/lib/libc/snprintf.c
@@ -11,6 +11,16 @@
 #include <common/debug.h>
 #include <plat/common/platform.h>
 
+#define get_num_va_args(_args, _lcount)				\
+	(((_lcount) > 1)  ? va_arg(_args, long long int) :	\
+	(((_lcount) == 1) ? va_arg(_args, long int) :		\
+			    va_arg(_args, int)))
+
+#define get_unum_va_args(_args, _lcount)				\
+	(((_lcount) > 1)  ? va_arg(_args, unsigned long long int) :	\
+	(((_lcount) == 1) ? va_arg(_args, unsigned long int) :		\
+			    va_arg(_args, unsigned int)))
+
 #define CHECK_AND_PUT_CHAR(buf, size, chars_printed, ch)	\
 	do {						\
 		if ((chars_printed) < (size)) {		\
@@ -80,6 +90,11 @@
  * %u - unsigned decimal format
  * %p - pointer format
  *
+ * The following length specifiers are supported by this print
+ * %l - long int
+ * %ll - long long int
+ * %z - size_t sized integer formats
+ *
  * The following padding specifiers are supported by this print
  * %0NN - Left-pad the number with 0s (NN is a decimal number)
  * %NN - Left-pad the number or string with spaces (NN is a decimal number)
@@ -101,6 +116,7 @@
 	bool left;
 	bool capitalise;
 	size_t chars_printed = 0U;
+	unsigned int l_count;
 
 	if (n == 0U) {
 		/* There isn't space for anything. */
@@ -118,6 +134,7 @@
 		padc ='\0';
 		padn = 0;
 		capitalise = false;
+		l_count = 0;
 
 		if (*fmt == '%') {
 			fmt++;
@@ -152,7 +169,7 @@
 
 			case 'i':
 			case 'd':
-				num = va_arg(args, int);
+				num = get_num_va_args(args, l_count);
 
 				if (num < 0) {
 					CHECK_AND_PUT_CHAR(s, n, chars_printed,
@@ -170,10 +187,18 @@
 				string_print(&s, n, &chars_printed, str);
 				break;
 			case 'u':
-				unum = va_arg(args, unsigned int);
+				unum = get_unum_va_args(args, l_count);
 				unsigned_num_print(&s, n, &chars_printed,
 						   unum, 10, padc, padn, false);
 				break;
+			case 'z':
+				l_count = 1;
+				fmt++;
+				goto loop;
+			case 'l':
+				l_count++;
+				fmt++;
+				goto loop;
 			case 'p':
 				unum = (uintptr_t)va_arg(args, void *);
 				if (unum > 0U) {
@@ -186,7 +211,7 @@
 			case 'X':
 				capitalise = true;
 			case 'x':
-				unum = va_arg(args, unsigned int);
+				unum = get_unum_va_args(args, l_count);
 				unsigned_num_print(&s, n, &chars_printed,
 						   unum, 16, padc, padn,
 						   capitalise);
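
A few illustrative calls against the extended snprintf above, with the expected output noted in comments (assuming an LP64 AArch64 build, where long is 64-bit and size_t maps to unsigned long):

    #include <stddef.h>
    #include <stdio.h>

    void snprintf_length_demo(void)
    {
            char buf[32];
            size_t len = 42;
            unsigned long long big = 0x1234567890ULL;

            (void)snprintf(buf, sizeof(buf), "%zu bytes", len); /* "42 bytes"   */
            (void)snprintf(buf, sizeof(buf), "%llx", big);      /* "1234567890" */
            (void)snprintf(buf, sizeof(buf), "%ld", -7L);       /* "-7"         */
    }
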
diff --git a/lib/psa/initial_attestation.c b/lib/psa/initial_attestation.c
new file mode 100644
index 0000000..44498a8
--- /dev/null
+++ b/lib/psa/initial_attestation.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <initial_attestation.h>
+#include <psa/client.h>
+#include <psa_manifest/sid.h>
+
+#if !PLAT_RSS_NOT_SUPPORTED
+psa_status_t
+psa_initial_attest_get_token(const uint8_t *auth_challenge,
+			     size_t         challenge_size,
+			     uint8_t       *token_buf,
+			     size_t         token_buf_size,
+			     size_t        *token_size)
+{
+	psa_status_t status;
+	psa_invec in_vec[] = {
+		{auth_challenge, challenge_size}
+	};
+	psa_outvec out_vec[] = {
+		{token_buf, token_buf_size},
+	};
+
+	status = psa_call(RSS_ATTESTATION_SERVICE_HANDLE, RSS_ATTEST_GET_TOKEN,
+			  in_vec, IOVEC_LEN(in_vec),
+			  out_vec, IOVEC_LEN(out_vec));
+
+	if (status == PSA_SUCCESS) {
+		*token_size = out_vec[0].len;
+	}
+
+	return status;
+}
+
+#else /* !PLAT_RSS_NOT_SUPPORTED */
+
+#include <string.h>
+
+static const uint8_t platform_token[] = {
+	0xD2, 0x84, 0x43, 0xA1, 0x01, 0x26, 0xA0, 0x59,
+	0x02, 0xBE, 0xAA, 0x3A, 0x00, 0x01, 0x24, 0xFF,
+	0x58, 0x20, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0x3A, 0x00, 0x01, 0x24, 0xFB, 0x58,
+	0x20, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+	0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE,
+	0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+	0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE,
+	0xBF, 0x3A, 0x00, 0x01, 0x25, 0x00, 0x58, 0x21,
+	0x01, 0xFA, 0x58, 0x75, 0x5F, 0x65, 0x86, 0x27,
+	0xCE, 0x54, 0x60, 0xF2, 0x9B, 0x75, 0x29, 0x67,
+	0x13, 0x24, 0x8C, 0xAE, 0x7A, 0xD9, 0xE2, 0x98,
+	0x4B, 0x90, 0x28, 0x0E, 0xFC, 0xBC, 0xB5, 0x02,
+	0x48, 0x3A, 0x00, 0x01, 0x24, 0xFA, 0x58, 0x20,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
+	0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+	0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
+	0x3A, 0x00, 0x01, 0x24, 0xF8, 0x20, 0x3A, 0x00,
+	0x01, 0x24, 0xF9, 0x00, 0x3A, 0x00, 0x01, 0x24,
+	0xFD, 0x85, 0xA5, 0x05, 0x58, 0x20, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x60,
+	0x01, 0x65, 0x42, 0x4C, 0x31, 0x5F, 0x32, 0x06,
+	0x66, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x02,
+	0x58, 0x20, 0xF8, 0xB7, 0xCE, 0xAD, 0x9B, 0xE4,
+	0x5A, 0x8F, 0x5C, 0x52, 0x6F, 0x0C, 0x05, 0x25,
+	0x8F, 0xF3, 0xE9, 0x81, 0xDC, 0xBC, 0xF2, 0x05,
+	0x7F, 0x33, 0xF6, 0xBB, 0xDC, 0xD9, 0x4D, 0xA2,
+	0x34, 0x3A, 0xA5, 0x05, 0x58, 0x20, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x67,
+	0x31, 0x2E, 0x37, 0x2E, 0x32, 0x2B, 0x30, 0x01,
+	0x63, 0x42, 0x4C, 0x32, 0x06, 0x66, 0x53, 0x48,
+	0x41, 0x32, 0x35, 0x36, 0x02, 0x58, 0x20, 0x3A,
+	0xE5, 0x9E, 0x40, 0xA9, 0x6B, 0xD5, 0x29, 0x1C,
+	0xAB, 0x7A, 0x5F, 0xBD, 0x1F, 0x9A, 0xA6, 0x52,
+	0xFB, 0x77, 0x7D, 0xA3, 0xEC, 0x9C, 0x29, 0xBC,
+	0xE6, 0x5B, 0x3B, 0x43, 0xFC, 0x9D, 0x26, 0xA5,
+	0x05, 0x58, 0x20, 0xBF, 0xE6, 0xD8, 0x6F, 0x88,
+	0x26, 0xF4, 0xFF, 0x97, 0xFB, 0x96, 0xC4, 0xE6,
+	0xFB, 0xC4, 0x99, 0x3E, 0x46, 0x19, 0xFC, 0x56,
+	0x5D, 0xA2, 0x6A, 0xDF, 0x34, 0xC3, 0x29, 0x48,
+	0x9A, 0xDC, 0x38, 0x04, 0x67, 0x31, 0x2E, 0x35,
+	0x2E, 0x30, 0x2B, 0x30, 0x01, 0x64, 0x52, 0x54,
+	0x5F, 0x30, 0x06, 0x66, 0x53, 0x48, 0x41, 0x32,
+	0x35, 0x36, 0x02, 0x58, 0x20, 0x47, 0x94, 0x9D,
+	0x27, 0x33, 0x82, 0x45, 0x1A, 0xDD, 0x25, 0xF4,
+	0x9A, 0x89, 0x6F, 0x5F, 0xD9, 0xB0, 0xE8, 0x14,
+	0xD3, 0xA4, 0x9B, 0x53, 0xB0, 0x44, 0x0B, 0xCF,
+	0x32, 0x1A, 0xC4, 0xD2, 0x65, 0xA5, 0x05, 0x58,
+	0x20, 0xB3, 0x60, 0xCA, 0xF5, 0xC9, 0x8C, 0x6B,
+	0x94, 0x2A, 0x48, 0x82, 0xFA, 0x9D, 0x48, 0x23,
+	0xEF, 0xB1, 0x66, 0xA9, 0xEF, 0x6A, 0x6E, 0x4A,
+	0xA3, 0x7C, 0x19, 0x19, 0xED, 0x1F, 0xCC, 0xC0,
+	0x49, 0x04, 0x67, 0x30, 0x2E, 0x30, 0x2E, 0x37,
+	0x2B, 0x30, 0x01, 0x64, 0x52, 0x54, 0x5F, 0x31,
+	0x06, 0x66, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
+	0x02, 0x58, 0x20, 0xCD, 0x38, 0xBE, 0xC8, 0xB7,
+	0xC0, 0x9E, 0xD5, 0x24, 0x30, 0xFE, 0xC8, 0xD0,
+	0x19, 0x12, 0x56, 0xB2, 0x7A, 0xA5, 0x53, 0x6F,
+	0xBC, 0x7D, 0x09, 0xCA, 0x11, 0xDD, 0x90, 0xD7,
+	0xD6, 0x70, 0xFD, 0xA5, 0x05, 0x58, 0x20, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0x04,
+	0x60, 0x01, 0x60, 0x06, 0x66, 0x53, 0x48, 0x41,
+	0x32, 0x35, 0x36, 0x02, 0x58, 0x20, 0x28, 0x3D,
+	0x0C, 0x25, 0x22, 0x0C, 0x87, 0x46, 0xA0, 0x58,
+	0x64, 0x6C, 0x0B, 0x14, 0x37, 0x39, 0x40, 0x9D,
+	0x2D, 0x11, 0xD1, 0xCC, 0x54, 0x51, 0xB4, 0x29,
+	0x22, 0xCD, 0x70, 0x92, 0x71, 0xC3, 0x3A, 0x00,
+	0x01, 0x25, 0x01, 0x77, 0x77, 0x77, 0x77, 0x2E,
+	0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x66,
+	0x69, 0x72, 0x6D, 0x77, 0x61, 0x72, 0x65, 0x2E,
+	0x6F, 0x72, 0x67, 0x3A, 0x00, 0x01, 0x24, 0xF7,
+	0x71, 0x50, 0x53, 0x41, 0x5F, 0x49, 0x4F, 0x54,
+	0x5F, 0x50, 0x52, 0x4F, 0x46, 0x49, 0x4C, 0x45,
+	0x5F, 0x31, 0x3A, 0x00, 0x01, 0x24, 0xFC, 0x70,
+	0x30, 0x36, 0x30, 0x34, 0x35, 0x36, 0x35, 0x32,
+	0x37, 0x32, 0x38, 0x32, 0x39, 0x31, 0x30, 0x30,
+	0x58, 0x40, 0x1E, 0x0D, 0x2B, 0xD8, 0x7A, 0xC9,
+	0x2D, 0xCB, 0x73, 0xD1, 0x42, 0x2F, 0xBF, 0xDA,
+	0x24, 0x71, 0xE2, 0xAF, 0xEA, 0x48, 0x60, 0x17,
+	0x23, 0x75, 0x64, 0xAC, 0xCC, 0x23, 0xA2, 0x67,
+	0xC4, 0xE7, 0x8F, 0x1C, 0x7C, 0x68, 0x49, 0x42,
+	0x4D, 0xDA, 0xC6, 0xD6, 0x21, 0x1C, 0xAA, 0x00,
+	0xDA, 0x1E, 0x68, 0x56, 0xA3, 0x48, 0xEE, 0xA7,
+	0x92, 0xA9, 0x09, 0x83, 0x42, 0x04, 0x06, 0x9E,
+	0x62, 0xBB
+};
+
+psa_status_t
+psa_initial_attest_get_token(const uint8_t *auth_challenge,
+			     size_t         challenge_size,
+			     uint8_t       *token_buf,
+			     size_t         token_buf_size,
+			     size_t        *token_size)
+{
+	(void)auth_challenge;
+	(void)challenge_size;
+
+	if (token_buf_size < sizeof(platform_token)) {
+		return PSA_ERROR_BUFFER_TOO_SMALL;
+	}
+
+	(void)memcpy(token_buf, platform_token, sizeof(platform_token));
+	*token_size = sizeof(platform_token);
+
+	return PSA_SUCCESS;
+}
+#endif /* !PLAT_RSS_NOT_SUPPORTED */
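
A minimal caller sketch for the API above; the 32-byte challenge and the 1 KB token buffer are arbitrary illustrative sizes rather than values required by the service.

    #include <stddef.h>
    #include <stdint.h>

    #include <initial_attestation.h>

    int get_attestation_token_example(void)
    {
            uint8_t challenge[32] = {0}; /* would normally hold a caller nonce */
            uint8_t token[1024];
            size_t token_len = 0U;

            psa_status_t status = psa_initial_attest_get_token(challenge,
                                                               sizeof(challenge),
                                                               token, sizeof(token),
                                                               &token_len);

            /* With PLAT_RSS_NOT_SUPPORTED=1 this returns the hard-coded token. */
            return (status == PSA_SUCCESS) ? 0 : -1;
    }
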
diff --git a/lib/psa/measured_boot.c b/lib/psa/measured_boot.c
new file mode 100644
index 0000000..5d3ca8e
--- /dev/null
+++ b/lib/psa/measured_boot.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <string.h>
+
+#include <common/debug.h>
+#include <measured_boot.h>
+#include <psa/client.h>
+#include <psa_manifest/sid.h>
+
+#include "measured_boot_private.h"
+
+static void print_byte_array(const uint8_t *array, size_t len)
+{
+	unsigned int i;
+
+	if (array == NULL || len == 0U) {
+		(void)printf("\n");
+		return;
+	}
+
+	for (i = 0U; i < len; ++i) {
+		(void)printf(" %02x", array[i]);
+		if ((i & U(0xF)) == U(0xF)) {
+			(void)printf("\n");
+			if (i < (len - 1U)) {
+				INFO("\t\t:");
+			}
+		}
+	}
+}
+
+static void log_measurement(uint8_t index,
+			    const uint8_t *signer_id,
+			    size_t signer_id_size,
+			    const uint8_t *version,     /* string */
+			    uint32_t measurement_algo,
+			    const uint8_t *sw_type,     /* string */
+			    const uint8_t *measurement_value,
+			    size_t measurement_value_size,
+			    bool lock_measurement)
+{
+	INFO("Measured boot extend measurement:\n");
+	INFO(" - slot        : %u\n", index);
+	INFO(" - signer_id   :");
+	print_byte_array(signer_id, signer_id_size);
+	INFO(" - version     : %s\n", version);
+	INFO(" - algorithm   : %x\n", measurement_algo);
+	INFO(" - sw_type     : %s\n", sw_type);
+	INFO(" - measurement :");
+	print_byte_array(measurement_value, measurement_value_size);
+	INFO(" - locking     : %s\n", lock_measurement ? "true" : "false");
+}
+
+#if !PLAT_RSS_NOT_SUPPORTED
+psa_status_t
+rss_measured_boot_extend_measurement(uint8_t index,
+				     const uint8_t *signer_id,
+				     size_t signer_id_size,
+				     const uint8_t *version,
+				     size_t version_size,
+				     uint32_t measurement_algo,
+				     const uint8_t *sw_type,
+				     size_t sw_type_size,
+				     const uint8_t *measurement_value,
+				     size_t measurement_value_size,
+				     bool lock_measurement)
+{
+	struct measured_boot_extend_iovec_t extend_iov = {
+		.index = index,
+		.lock_measurement = lock_measurement,
+		.measurement_algo = measurement_algo,
+		.sw_type = {0},
+		.sw_type_size = sw_type_size,
+	};
+
+	psa_invec in_vec[] = {
+		{.base = &extend_iov,
+			.len = sizeof(struct measured_boot_extend_iovec_t)},
+		{.base = signer_id, .len = signer_id_size},
+		{.base = version, .len = version_size},
+		{.base = measurement_value, .len = measurement_value_size}
+	};
+
+	uint32_t sw_type_size_limited;
+
+	if (sw_type != NULL) {
+		sw_type_size_limited = (sw_type_size < SW_TYPE_MAX_SIZE) ?
+					sw_type_size : SW_TYPE_MAX_SIZE;
+		memcpy(extend_iov.sw_type, sw_type, sw_type_size_limited);
+	}
+
+	log_measurement(index, signer_id, signer_id_size,
+			version, measurement_algo, sw_type,
+			measurement_value, measurement_value_size,
+			lock_measurement);
+
+	return psa_call(RSS_MEASURED_BOOT_HANDLE,
+			RSS_MEASURED_BOOT_EXTEND,
+			in_vec, IOVEC_LEN(in_vec),
+			NULL, 0);
+}
+
+#else /* !PLAT_RSS_NOT_SUPPORTED */
+
+psa_status_t
+rss_measured_boot_extend_measurement(uint8_t index,
+				     const uint8_t *signer_id,
+				     size_t signer_id_size,
+				     const uint8_t *version,
+				     size_t version_size,
+				     uint32_t measurement_algo,
+				     const uint8_t *sw_type,
+				     size_t sw_type_size,
+				     const uint8_t *measurement_value,
+				     size_t measurement_value_size,
+				     bool lock_measurement)
+{
+	log_measurement(index, signer_id, signer_id_size,
+			version, measurement_algo, sw_type,
+			measurement_value, measurement_value_size,
+			lock_measurement);
+
+	return PSA_SUCCESS;
+}
+#endif /* !PLAT_RSS_NOT_SUPPORTED */
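
An illustrative call into the extend API above; the slot index, algorithm identifier, signer ID and version strings are placeholders chosen for the sketch, not values mandated by RSS.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #include <measured_boot.h>

    int extend_measurement_example(const uint8_t *hash, size_t hash_len)
    {
            static const uint8_t signer_id[32] = {0};
            static const uint8_t version[] = "0.1.0";
            static const uint8_t sw_type[] = "BL2";

            psa_status_t status = rss_measured_boot_extend_measurement(
                    8U,                     /* slot index */
                    signer_id, sizeof(signer_id),
                    version, sizeof(version),
                    0U,                     /* measurement algorithm identifier */
                    sw_type, sizeof(sw_type),
                    hash, hash_len,
                    false);                 /* do not lock the slot */

            return (status == PSA_SUCCESS) ? 0 : -1;
    }
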
diff --git a/lib/psa/measured_boot_private.h b/lib/psa/measured_boot_private.h
new file mode 100644
index 0000000..649c3f6
--- /dev/null
+++ b/lib/psa/measured_boot_private.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_MEASURED_BOOT_PRIVATE_H
+#define PSA_MEASURED_BOOT_PRIVATE_H
+
+#include <stdint.h>
+
+/* Measured boot message types that distinguish its services */
+#define RSS_MEASURED_BOOT_EXTEND	1002U
+
+struct measured_boot_extend_iovec_t {
+	uint8_t  index;
+	uint8_t  lock_measurement;
+	uint32_t measurement_algo;
+	uint8_t  sw_type[SW_TYPE_MAX_SIZE];
+	uint8_t  sw_type_size;
+};
+
+#endif /* PSA_MEASURED_BOOT_PRIVATE_H */
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 6e57237..fab6bf6 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -460,3 +460,9 @@
 # SCR_EL3.TWEDEL(4bit) field, when FEAT_TWED is implemented.
 # By default it takes 0, and need to be updated by the platforms.
 TWED_DELAY			:= 0
+
+# By default, disable the mocking of RSS-provided services
+PLAT_RSS_NOT_SUPPORTED		:= 0
+
+# Dynamic Root of Trust for Measurement support
+DRTM_SUPPORT			:= 0
diff --git a/package-lock.json b/package-lock.json
index 469c5f5..4284d71 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -843,9 +843,9 @@
       }
     },
     "node_modules/commitizen/node_modules/ansi-regex": {
-      "version": "4.1.0",
-      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
-      "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz",
+      "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==",
       "dev": true,
       "engines": {
         "node": ">=6"
@@ -1073,9 +1073,9 @@
       }
     },
     "node_modules/commitizen/node_modules/string-width/node_modules/ansi-regex": {
-      "version": "3.0.0",
-      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
-      "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz",
+      "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==",
       "dev": true,
       "engines": {
         "node": ">=4"
@@ -4792,9 +4792,9 @@
           "dev": true
         },
         "ansi-regex": {
-          "version": "4.1.0",
-          "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
-          "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+          "version": "4.1.1",
+          "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz",
+          "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==",
           "dev": true
         },
         "ansi-styles": {
@@ -4975,9 +4975,9 @@
           },
           "dependencies": {
             "ansi-regex": {
-              "version": "3.0.0",
-              "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
-              "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+              "version": "3.0.1",
+              "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz",
+              "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==",
               "dev": true
             },
             "strip-ansi": {
diff --git a/plat/arm/board/fvp/fvp_bl1_measured_boot.c b/plat/arm/board/fvp/fvp_bl1_measured_boot.c
index 5468555..76cd918 100644
--- a/plat/arm/board/fvp/fvp_bl1_measured_boot.c
+++ b/plat/arm/board/fvp/fvp_bl1_measured_boot.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,7 @@
 #include <stdint.h>
 
 #include <drivers/measured_boot/event_log/event_log.h>
+#include <drivers/measured_boot/rss/rss_measured_boot.h>
 #include <plat/arm/common/plat_arm.h>
 
 /* Event Log data */
@@ -21,10 +22,39 @@
 	{ EVLOG_INVALID_ID, NULL, (unsigned int)(-1) }	/* Terminator */
 };
 
+/* FVP table with platform-specific image IDs and metadata. Intentionally not a
+ * const struct, as some members might be set by bootloaders during trusted boot.
+ */
+struct rss_mboot_metadata fvp_rss_mboot_metadata[] = {
+	{
+		.id = FW_CONFIG_ID,
+		.slot = U(6),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_FW_CONFIG_STRING,
+		.lock_measurement = true },
+	{
+		.id = TB_FW_CONFIG_ID,
+		.slot = U(7),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_TB_FW_CONFIG_STRING,
+		.lock_measurement = true },
+	{
+		.id = BL2_IMAGE_ID,
+		.slot = U(8),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_BL2_STRING,
+		.lock_measurement = true },
+
+	{
+		.id = RSS_MBOOT_INVALID_ID }
+};
+
 void bl1_plat_mboot_init(void)
 {
 	event_log_init(event_log, event_log + sizeof(event_log));
 	event_log_write_header();
+
+	rss_measured_boot_init();
 }
 
 void bl1_plat_mboot_finish(void)
diff --git a/plat/arm/board/fvp/fvp_bl2_measured_boot.c b/plat/arm/board/fvp/fvp_bl2_measured_boot.c
index 1f38278..fd15b70 100644
--- a/plat/arm/board/fvp/fvp_bl2_measured_boot.c
+++ b/plat/arm/board/fvp/fvp_bl2_measured_boot.c
@@ -7,6 +7,7 @@
 #include <stdint.h>
 
 #include <drivers/measured_boot/event_log/event_log.h>
+#include <drivers/measured_boot/rss/rss_measured_boot.h>
 #include <tools_share/tbbr_oid.h>
 #include <fvp_critical_data.h>
 
@@ -35,6 +36,38 @@
 	{ EVLOG_INVALID_ID, NULL, (unsigned int)(-1) }	/* Terminator */
 };
 
+/* FVP table with platform-specific image IDs and metadata. Intentionally not a
+ * const struct, as some members might be set by bootloaders during trusted boot.
+ */
+struct rss_mboot_metadata fvp_rss_mboot_metadata[] = {
+	{
+		.id = BL31_IMAGE_ID,
+		.slot = U(9),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_BL31_STRING,
+		.lock_measurement = true },
+	{
+		.id = HW_CONFIG_ID,
+		.slot = U(10),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_HW_CONFIG_STRING,
+		.lock_measurement = true },
+	{
+		.id = SOC_FW_CONFIG_ID,
+		.slot = U(11),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_SOC_FW_CONFIG_STRING,
+		.lock_measurement = true },
+	{
+		.id = RMM_IMAGE_ID,
+		.slot = U(12),
+		.signer_id_size = SIGNER_ID_MIN_SIZE,
+		.sw_type = RSS_MBOOT_RMM_STRING,
+		.lock_measurement = true },
+	{
+		.id = RSS_MBOOT_INVALID_ID }
+};
+
 void bl2_plat_mboot_init(void)
 {
 	uint8_t *event_log_start;
@@ -64,6 +97,8 @@
 				       PLAT_ARM_EVENT_LOG_MAX_SIZE);
 
 	event_log_init((uint8_t *)event_log_start, event_log_finish);
+
+	rss_measured_boot_init();
 }
 
 int plat_mboot_measure_critical_data(unsigned int critical_data_id,
diff --git a/plat/arm/board/fvp/fvp_common_measured_boot.c b/plat/arm/board/fvp/fvp_common_measured_boot.c
index 6a403d9..93aa055 100644
--- a/plat/arm/board/fvp/fvp_common_measured_boot.c
+++ b/plat/arm/board/fvp/fvp_common_measured_boot.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,27 +9,47 @@
 
 #include <common/desc_image_load.h>
 #include <drivers/measured_boot/event_log/event_log.h>
+#include <drivers/measured_boot/rss/rss_measured_boot.h>
 #include <plat/arm/common/plat_arm.h>
 #include <plat/common/platform.h>
 
 extern event_log_metadata_t fvp_event_log_metadata[];
+extern struct rss_mboot_metadata fvp_rss_mboot_metadata[];
 
 const event_log_metadata_t *plat_event_log_get_metadata(void)
 {
 	return fvp_event_log_metadata;
 }
 
+struct rss_mboot_metadata *plat_rss_mboot_get_metadata(void)
+{
+	return fvp_rss_mboot_metadata;
+}
+
 int plat_mboot_measure_image(unsigned int image_id, image_info_t *image_data)
 {
+	int err;
+	int rc = 0;
+
 	/* Calculate image hash and record data in Event Log */
-	int err = event_log_measure_and_record(image_data->image_base,
-					       image_data->image_size,
-					       image_id);
+	err = event_log_measure_and_record(image_data->image_base,
+					   image_data->image_size,
+					   image_id);
 	if (err != 0) {
 		ERROR("%s%s image id %u (%i)\n",
-		      "Failed to ", "record", image_id, err);
-		return err;
+		      "Failed to ", "record in event log", image_id, err);
+		rc = err;
 	}
 
-	return 0;
+	/* Calculate image hash and record data in RSS */
+	err = rss_mboot_measure_and_record(image_data->image_base,
+					   image_data->image_size,
+					   image_id);
+	if (err != 0) {
+		ERROR("%s%s image id %u (%i)\n",
+		      "Failed to ", "record in RSS", image_id, err);
+		rc = (rc == 0) ? err : -1;
+	}
+
+	return rc;
 }
diff --git a/plat/arm/board/fvp/fvp_el3_spmc.c b/plat/arm/board/fvp/fvp_el3_spmc.c
new file mode 100644
index 0000000..2b347ed
--- /dev/null
+++ b/plat/arm/board/fvp/fvp_el3_spmc.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <services/el3_spmc_ffa_memory.h>
+
+#include <platform_def.h>
+
+/*
+ * On the FVP platform when using the EL3 SPMC implementation allocate the
+ * datastore for tracking shared memory descriptors in the TZC DRAM section
+ * to ensure sufficient storage can be allocated.
+ * Provide an implementation of the accessor method to allow the datastore
+ * details to be retrieved by the SPMC.
+ * The SPMC will take care of initializing the memory region.
+ */
+
+#define PLAT_SPMC_SHMEM_DATASTORE_SIZE (512 * 1024)
+
+__section("arm_el3_tzc_dram") static uint8_t
+plat_spmc_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
+
+int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
+{
+	*datastore = plat_spmc_shmem_datastore;
+	*size = PLAT_SPMC_SHMEM_DATASTORE_SIZE;
+	return 0;
+}
+
+/*
+ * Add dummy implementations of memory management related platform hooks.
+ * These can be used to implement platform specific functionality to support
+ * a memory sharing/lending operation.
+ *
+ * Note: The hooks must be invoked as part of the initial share request and
+ * final reclaim to prevent ordering dependencies with operations that may
+ * take place in the normal world without visibility of the SPMC.
+ */
+int plat_spmc_shmem_begin(struct ffa_mtd *desc)
+{
+	return 0;
+}
+int plat_spmc_shmem_reclaim(struct ffa_mtd *desc)
+{
+	return 0;
+}
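
A sketch of how an SPMC-side consumer might retrieve and prepare the datastore through the accessor above; the caller name and the zero-fill are illustrative, the SPMC's real initialisation is not shown here.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size);

    static int shmem_datastore_setup_example(void)
    {
            uint8_t *store;
            size_t store_size;

            if (plat_spmc_shmem_datastore_get(&store, &store_size) != 0) {
                    return -1;
            }

            /* The SPMC owns initialisation of the region before carving out
             * shared-memory descriptor objects from it. */
            (void)memset(store, 0, store_size);

            return 0;
    }
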
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index d89e91f..54c5e75 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -367,14 +367,36 @@
     override BL1_SOURCES =
 endif
 
+# Include Measured Boot makefile before any Crypto library makefile.
+# The Crypto library makefile may need the default definitions of the Measured
+# Boot build flags present in the Measured Boot makefile.
+ifeq (${MEASURED_BOOT},1)
+    RSS_MEASURED_BOOT_MK := drivers/measured_boot/rss/rss_measured_boot.mk
+    $(info Including ${RSS_MEASURED_BOOT_MK})
+    include ${RSS_MEASURED_BOOT_MK}
+
+    BL1_SOURCES		+=	${MEASURED_BOOT_SOURCES}
+    BL2_SOURCES		+=	${MEASURED_BOOT_SOURCES}
+endif
+
 include plat/arm/board/common/board_common.mk
 include plat/arm/common/arm_common.mk
 
 ifeq (${MEASURED_BOOT},1)
 BL1_SOURCES		+=	plat/arm/board/fvp/fvp_common_measured_boot.c	\
-				plat/arm/board/fvp/fvp_bl1_measured_boot.c
+				plat/arm/board/fvp/fvp_bl1_measured_boot.c	\
+				lib/psa/measured_boot.c
+
 BL2_SOURCES		+=	plat/arm/board/fvp/fvp_common_measured_boot.c	\
-				plat/arm/board/fvp/fvp_bl2_measured_boot.c
+				plat/arm/board/fvp/fvp_bl2_measured_boot.c	\
+				lib/psa/measured_boot.c
+
+PLAT_INCLUDES		+=	-Iinclude/lib/psa
+
+# RSS is not supported on FVP right now, so the mocked version of the PSA
+# Measured Boot APIs is used instead. They return success and hard-coded data.
+PLAT_RSS_NOT_SUPPORTED	:= 1
+
 endif
 
 ifeq (${TRUSTED_BOARD_BOOT}, 1)
@@ -403,3 +425,7 @@
 
 # enable trace filter control registers access to NS by default
 ENABLE_TRF_FOR_NS		:= 1
+
+ifeq (${SPMC_AT_EL3}, 1)
+PLAT_BL_COMMON_SOURCES	+=	plat/arm/board/fvp/fvp_el3_spmc.c
+endif
diff --git a/plat/arm/board/n1sdp/fdts/n1sdp_fw_config.dts b/plat/arm/board/n1sdp/fdts/n1sdp_fw_config.dts
new file mode 100644
index 0000000..f61e30b
--- /dev/null
+++ b/plat/arm/board/n1sdp/fdts/n1sdp_fw_config.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/tbbr/tbbr_img_def.h>
+
+/dts-v1/;
+/ {
+	dtb-registry {
+		compatible = "fconf,dyn_cfg-dtb_registry";
+		tb_fw-config {
+			load-address = <0x0 0x4001300>;
+			max-size = <0x200>;
+			id = <TB_FW_CONFIG_ID>;
+		};
+
+		nt_fw-config {
+			load-address = <0x0 0xFEF00000>;
+			max-size = <0x0100000>;
+			id = <NT_FW_CONFIG_ID>;
+		};
+	};
+};
diff --git a/plat/arm/board/n1sdp/fdts/n1sdp_nt_fw_config.dts b/plat/arm/board/n1sdp/fdts/n1sdp_nt_fw_config.dts
new file mode 100644
index 0000000..da5e04d
--- /dev/null
+++ b/plat/arm/board/n1sdp/fdts/n1sdp_nt_fw_config.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+/ {
+	/* compatible string */
+	compatible = "arm,n1sdp";
+
+	/*
+	 * Placeholder for the platform-info node with default values.
+	 * These will be overwritten with the correct values during
+	 * the BL2 stage of boot.
+	 */
+	platform-info {
+		multichip-mode = <0x0>;
+		secondary-chip-count = <0x0>;
+		local-ddr-size = <0x0>;
+		remote-ddr-size = <0x0>;
+	};
+};
\ No newline at end of file
diff --git a/plat/arm/board/n1sdp/fdts/n1sdp_tb_fw_config.dts b/plat/arm/board/n1sdp/fdts/n1sdp_tb_fw_config.dts
new file mode 100644
index 0000000..e5ffba3
--- /dev/null
+++ b/plat/arm/board/n1sdp/fdts/n1sdp_tb_fw_config.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+/ {
+	tb_fw-config {
+		compatible = "arm,tb_fw";
+
+		/* Disable authentication for development */
+		disable_auth = <0x0>;
+
+		/*
+		 * The following two entries are placeholders for Mbed TLS
+		 * heap information. The default values don't matter since
+		 * they will be overwritten by BL1.
+		 * In case of having shared Mbed TLS heap between BL1 and BL2,
+		 * BL1 will populate these two properties with the respective
+		 * info about the shared heap. This info will be available for
+		 * BL2 in order to locate and re-use the heap.
+		 */
+		mbedtls_heap_addr = <0x0 0x0>;
+		mbedtls_heap_size = <0x0>;
+	};
+};
diff --git a/plat/arm/board/n1sdp/include/platform_def.h b/plat/arm/board/n1sdp/include/platform_def.h
index cc07852..c9b81ba 100644
--- a/plat/arm/board/n1sdp/include/platform_def.h
+++ b/plat/arm/board/n1sdp/include/platform_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -27,6 +27,27 @@
 #define PLAT_ARM_DRAM2_BASE			ULL(0x8080000000)
 #define PLAT_ARM_DRAM2_SIZE			ULL(0xF80000000)
 
+#define MAX_IO_DEVICES			U(3)
+#define MAX_IO_HANDLES			U(4)
+
+#define PLAT_ARM_FLASH_IMAGE_BASE			0x18200000
+#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE			0x00800000
+
+#define PLAT_ARM_NVM_BASE			0x18200000
+#define PLAT_ARM_NVM_SIZE			0x00800000
+
+#if defined NS_BL1U_BASE
+# undef NS_BL1U_BASE
+# define NS_BL1U_BASE			(PLAT_ARM_NVM_BASE + UL(0x00800000))
+#endif
+
+/* Non-volatile counters */
+#define SOC_TRUSTED_NVCTR_BASE		0x7fe70000
+#define TFW_NVCTR_BASE			(SOC_TRUSTED_NVCTR_BASE)
+#define TFW_NVCTR_SIZE			U(4)
+#define NTFW_CTR_BASE			(SOC_TRUSTED_NVCTR_BASE + 0x0004)
+#define NTFW_CTR_SIZE			U(4)
+
 /* N1SDP remote chip at 4 TB offset */
 #define PLAT_ARM_REMOTE_CHIP_OFFSET		(ULL(1) << 42)
 
@@ -59,8 +80,42 @@
 #define PLAT_CSS_SCP_COM_SHARED_MEM_BASE	0x45400000
 #endif
 
-#define PLAT_ARM_TRUSTED_SRAM_SIZE		0x00080000	/* 512 KB */
-#define PLAT_ARM_MAX_BL31_SIZE			0X20000
+/*
+ * Trusted SRAM in N1SDP is 512 KB, but only the bottom 384 KB is used
+ * for the trusted board boot flow. The top 128 KB is used to load the
+ * AP-BL1 image.
+ */
+#define PLAT_ARM_TRUSTED_SRAM_SIZE		0x00060000	/* 384 KB */
+
+/*
+ * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
+ * plus a little space for growth.
+ */
+#define PLAT_ARM_MAX_BL1_RW_SIZE	0xE000
+
+/*
+ * PLAT_ARM_MAX_ROMLIB_RW_SIZE is defined to use a full page
+ */
+
+#if USE_ROMLIB
+# define PLAT_ARM_MAX_ROMLIB_RW_SIZE	0x1000
+# define PLAT_ARM_MAX_ROMLIB_RO_SIZE	0xe000
+#else
+# define PLAT_ARM_MAX_ROMLIB_RW_SIZE	U(0)
+# define PLAT_ARM_MAX_ROMLIB_RO_SIZE	U(0)
+#endif
+
+/*
+ * PLAT_ARM_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
+ * little space for growth.
+ */
+#if TRUSTED_BOARD_BOOT
+# define PLAT_ARM_MAX_BL2_SIZE		0x20000
+#else
+# define PLAT_ARM_MAX_BL2_SIZE		0x14000
+#endif
+
+#define PLAT_ARM_MAX_BL31_SIZE		UL(0x3B000)
 
 /*******************************************************************************
  * N1SDP topology related constants
@@ -83,10 +138,48 @@
  * PLAT_ARM_MMAP_ENTRIES depends on the number of entries in the
  * plat_arm_mmap array defined for each BL stage.
  */
-#define PLAT_ARM_MMAP_ENTRIES			9
-#define MAX_XLAT_TABLES				10
 
-#define PLATFORM_STACK_SIZE			0x400
+#ifdef IMAGE_BL1
+# define PLAT_ARM_MMAP_ENTRIES		U(6)
+# define MAX_XLAT_TABLES		U(5)
+#endif
+
+#ifdef IMAGE_BL2
+# define PLAT_ARM_MMAP_ENTRIES		U(11)
+# define MAX_XLAT_TABLES		U(10)
+#endif
+
+#ifdef IMAGE_BL31
+# define PLAT_ARM_MMAP_ENTRIES		U(12)
+# define MAX_XLAT_TABLES		U(12)
+#endif
+
+/*
+ * Size of cacheable stacks
+ */
+#if defined(IMAGE_BL1)
+# if TRUSTED_BOARD_BOOT
+#  define PLATFORM_STACK_SIZE	0x1000
+# else
+#  define PLATFORM_STACK_SIZE	0x440
+# endif
+#elif defined(IMAGE_BL2)
+# if TRUSTED_BOARD_BOOT
+#  define PLATFORM_STACK_SIZE	0x1000
+# else
+#  define PLATFORM_STACK_SIZE	0x400
+# endif
+#elif defined(IMAGE_BL2U)
+# define PLATFORM_STACK_SIZE	0x400
+#elif defined(IMAGE_BL31)
+# if SPM_MM
+#  define PLATFORM_STACK_SIZE	0x500
+# else
+#  define PLATFORM_STACK_SIZE	0x400
+# endif
+#elif defined(IMAGE_BL32)
+# define PLATFORM_STACK_SIZE	0x440
+#endif
 
 #define PLAT_ARM_NSTIMER_FRAME_ID		0
 #define PLAT_CSS_MHU_BASE			0x45000000
@@ -106,6 +199,10 @@
 						PLAT_ARM_REMOTE_CHIP_OFFSET
 #define N1SDP_REMOTE_DEVICE_SIZE		N1SDP_DEVICE_SIZE
 
+/* The real ROM base is 0x0; it is changed here so that BL1 is loaded at this address */
+# define PLAT_ARM_TRUSTED_ROM_BASE	0x04060000
+# define PLAT_ARM_TRUSTED_ROM_SIZE	0x00020000	/* 128KB */
+
 #define N1SDP_MAP_DEVICE		MAP_REGION_FLAT(	\
 					N1SDP_DEVICE_BASE,	\
 					N1SDP_DEVICE_SIZE,	\
diff --git a/plat/arm/board/n1sdp/n1sdp_bl1_setup.c b/plat/arm/board/n1sdp/n1sdp_bl1_setup.c
new file mode 100644
index 0000000..ed93222
--- /dev/null
+++ b/plat/arm/board/n1sdp/n1sdp_bl1_setup.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat/arm/common/plat_arm.h>
+
+/*******************************************************************************
+ * Perform any BL1-specific platform actions. The SoC CSS hooks below are
+ * intentionally left as empty stubs on N1SDP.
+ ******************************************************************************/
+
+void soc_css_init_nic400(void)
+{
+}
+
+void soc_css_init_pcie(void)
+{
+}
diff --git a/plat/arm/board/n1sdp/n1sdp_bl2_setup.c b/plat/arm/board/n1sdp/n1sdp_bl2_setup.c
new file mode 100644
index 0000000..5f8af9f
--- /dev/null
+++ b/plat/arm/board/n1sdp/n1sdp_bl2_setup.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <drivers/arm/css/sds.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+
+#include "n1sdp_def.h"
+#include <plat/arm/common/plat_arm.h>
+
+struct n1sdp_plat_info {
+	bool multichip_mode;
+	uint8_t secondary_count;
+	uint8_t local_ddr_size;
+	uint8_t remote_ddr_size;
+} __packed;
+
+/*
+ * The N1SDP platform supports RDIMMs with ECC capability. To use the ECC
+ * capability, the entire DDR memory space has to be zeroed out before
+ * enabling the ECC bits in DMC620. Zeroing out several gigabytes of
+ * memory from the SCP is quite time-consuming, so the following function
+ * zeroes out the DDR memory from the application processor instead, which
+ * is much faster.
+ */
+
+void dmc_ecc_setup(uint8_t ddr_size_gb)
+{
+	uint64_t dram2_size;
+
+	dram2_size = (ddr_size_gb * 1024UL * 1024UL * 1024UL) -
+			ARM_DRAM1_SIZE;
+
+	INFO("Zeroing DDR memories\n");
+	zero_normalmem((void *)ARM_DRAM1_BASE, ARM_DRAM1_SIZE);
+	flush_dcache_range(ARM_DRAM1_BASE, ARM_DRAM1_SIZE);
+	zero_normalmem((void *)ARM_DRAM2_BASE, dram2_size);
+	flush_dcache_range(ARM_DRAM2_BASE, dram2_size);
+
+	INFO("Enabling ECC on DMCs\n");
+	/* Set DMCs to CONFIG state before writing ERR0CTLR0 register */
+	mmio_write_32(N1SDP_DMC0_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_CONFIG);
+	mmio_write_32(N1SDP_DMC1_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_CONFIG);
+
+	/* Enable ECC in DMCs */
+	mmio_setbits_32(N1SDP_DMC0_ERR0CTLR0_REG, N1SDP_DMC_ERR0CTLR0_ECC_EN);
+	mmio_setbits_32(N1SDP_DMC1_ERR0CTLR0_REG, N1SDP_DMC_ERR0CTLR0_ECC_EN);
+
+	/* Set DMCs to READY state */
+	mmio_write_32(N1SDP_DMC0_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_READY);
+	mmio_write_32(N1SDP_DMC1_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_READY);
+}
+
+void bl2_platform_setup(void)
+{
+	int ret;
+	struct n1sdp_plat_info plat_info;
+
+	ret = sds_init();
+	if (ret != SDS_OK) {
+		ERROR("SDS initialization failed\n");
+		panic();
+	}
+
+	ret = sds_struct_read(N1SDP_SDS_PLATFORM_INFO_STRUCT_ID,
+				N1SDP_SDS_PLATFORM_INFO_OFFSET,
+				&plat_info,
+				N1SDP_SDS_PLATFORM_INFO_SIZE,
+				SDS_ACCESS_MODE_NON_CACHED);
+	if (ret != SDS_OK) {
+		ERROR("Error getting platform info from SDS\n");
+		panic();
+	}
+	/* Validate plat_info SDS */
+	if ((plat_info.local_ddr_size == 0)
+		|| (plat_info.local_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
+		|| (plat_info.remote_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
+		|| (plat_info.secondary_count > N1SDP_MAX_SECONDARY_COUNT)) {
+		ERROR("platform info SDS is corrupted\n");
+		panic();
+	}
+
+	dmc_ecc_setup(plat_info.local_ddr_size);
+	arm_bl2_platform_setup();
+}
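A small, optional hardening to go with the packed structure above (a suggestion, not in the patch): the SDS record being read is N1SDP_SDS_PLATFORM_INFO_SIZE (4) bytes, and TF-A's CASSERT helper can pin the struct layout to that size at compile time.

#include <lib/cassert.h>

/* bool + 3 x uint8_t, packed, must match the 4-byte SDS record. */
CASSERT(sizeof(struct n1sdp_plat_info) == N1SDP_SDS_PLATFORM_INFO_SIZE,
	assert_n1sdp_plat_info_size_mismatch);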
diff --git a/plat/arm/board/n1sdp/n1sdp_bl31_setup.c b/plat/arm/board/n1sdp/n1sdp_bl31_setup.c
index d7003e9..5e897fe 100644
--- a/plat/arm/board/n1sdp/n1sdp_bl31_setup.c
+++ b/plat/arm/board/n1sdp/n1sdp_bl31_setup.c
@@ -1,11 +1,9 @@
 /*
- * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <platform_def.h>
-
 #include <common/debug.h>
 #include <drivers/arm/css/css_mhu_doorbell.h>
 #include <drivers/arm/css/scmi.h>
@@ -16,6 +14,7 @@
 #include <plat/arm/common/plat_arm.h>
 
 #include "n1sdp_def.h"
+#include <platform_def.h>
 
 /*
  * Platform information structure stored in SDS.
@@ -24,28 +23,17 @@
  * enabling the ECC capability as well as information
  * about multichip setup
  * 	- multichip mode
- * 	- slave_count
+ * 	- secondary_count
  * 	- Local DDR size in GB, DDR memory in master board
- * 	- Remote DDR size in GB, DDR memory in slave board
+ * 	- Remote DDR size in GB, DDR memory in secondary board
  */
 struct n1sdp_plat_info {
 	bool multichip_mode;
-	uint8_t slave_count;
+	uint8_t secondary_count;
 	uint8_t local_ddr_size;
 	uint8_t remote_ddr_size;
 } __packed;
 
-/*
- * BL33 image information structure stored in SDS.
- * This structure holds the source & destination addresses and
- * the size of the BL33 image which will be loaded by BL31.
- */
-struct n1sdp_bl33_info {
-	uint32_t bl33_src_addr;
-	uint32_t bl33_dst_addr;
-	uint32_t bl33_size;
-};
-
 static scmi_channel_plat_info_t n1sdp_scmi_plat_info = {
 	.scmi_mbx_mem = N1SDP_SCMI_PAYLOAD_BASE,
 	.db_reg_addr = PLAT_CSS_MHU_BASE + CSS_SCMI_MHU_DB_REG_OFF,
@@ -90,38 +78,10 @@
  * enabling the ECC bits in DMC620. Zeroing out several gigabytes of
  * memory from SCP is quite time consuming so the following function
  * is added to zero out the DDR memory from application processor which is
- * much faster compared to SCP. BL33 binary cannot be copied to DDR memory
- * before enabling ECC so copy_bl33 function is added to copy BL33 binary
- * from IOFPGA-DDR3 memory to main DDR4 memory.
+ * much faster compared to SCP. Local DDR memory is zeroed out during the
+ * BL2 stage. If a remote chip is connected, its DDR memory is zeroed out here.
  */
 
-void dmc_ecc_setup(uint8_t ddr_size_gb)
-{
-	uint64_t dram2_size;
-
-	dram2_size = (ddr_size_gb * 1024UL * 1024UL * 1024UL) -
-			ARM_DRAM1_SIZE;
-
-	INFO("Zeroing DDR memories\n");
-	zero_normalmem((void *)ARM_DRAM1_BASE, ARM_DRAM1_SIZE);
-	flush_dcache_range(ARM_DRAM1_BASE, ARM_DRAM1_SIZE);
-	zero_normalmem((void *)ARM_DRAM2_BASE, dram2_size);
-	flush_dcache_range(ARM_DRAM2_BASE, dram2_size);
-
-	INFO("Enabling ECC on DMCs\n");
-	/* Set DMCs to CONFIG state before writing ERR0CTLR0 register */
-	mmio_write_32(N1SDP_DMC0_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_CONFIG);
-	mmio_write_32(N1SDP_DMC1_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_CONFIG);
-
-	/* Enable ECC in DMCs */
-	mmio_setbits_32(N1SDP_DMC0_ERR0CTLR0_REG, N1SDP_DMC_ERR0CTLR0_ECC_EN);
-	mmio_setbits_32(N1SDP_DMC1_ERR0CTLR0_REG, N1SDP_DMC_ERR0CTLR0_ECC_EN);
-
-	/* Set DMCs to READY state */
-	mmio_write_32(N1SDP_DMC0_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_READY);
-	mmio_write_32(N1SDP_DMC1_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_READY);
-}
-
 void remote_dmc_ecc_setup(uint8_t remote_ddr_size)
 {
 	uint64_t remote_dram2_size;
@@ -154,22 +114,6 @@
 	mmio_write_32(N1SDP_REMOTE_DMC1_MEMC_CMD_REG, N1SDP_DMC_MEMC_CMD_READY);
 }
 
-void copy_bl33(uint32_t src, uint32_t dst, uint32_t size)
-{
-	uint32_t i;
-
-	INFO("Copying BL33 to DDR memory\n");
-	for (i = 0; i < size; i = i + 8)
-		mmio_write_64((dst + i), mmio_read_64(src + i));
-
-	for (i = 0; i < size; i = i + 8) {
-		if (mmio_read_64(src + i) != mmio_read_64(dst + i)) {
-			ERROR("Copy failed!\n");
-			panic();
-		}
-	}
-}
-
 void n1sdp_bl31_multichip_setup(void)
 {
 	plat_arm_override_gicr_frames(n1sdp_multichip_gicr_frames);
@@ -180,7 +124,6 @@
 {
 	int ret;
 	struct n1sdp_plat_info plat_info;
-	struct n1sdp_bl33_info bl33_info;
 
 	ret = sds_init();
 	if (ret != SDS_OK) {
@@ -201,41 +144,18 @@
 	if ((plat_info.local_ddr_size == 0)
 		|| (plat_info.local_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
 		|| (plat_info.remote_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
-		|| (plat_info.slave_count > N1SDP_MAX_SLAVE_COUNT)) {
+		|| (plat_info.secondary_count > N1SDP_MAX_SECONDARY_COUNT)) {
 		ERROR("platform info SDS is corrupted\n");
 		panic();
 	}
 
 	if (plat_info.multichip_mode) {
-		n1sdp_multichip_data.chip_count = plat_info.slave_count + 1;
+		n1sdp_multichip_data.chip_count = plat_info.secondary_count + 1;
 		n1sdp_bl31_multichip_setup();
 	}
 	arm_bl31_platform_setup();
 
-	dmc_ecc_setup(plat_info.local_ddr_size);
-
 	/* Check if remote memory is present */
 	if ((plat_info.multichip_mode) && (plat_info.remote_ddr_size != 0))
 		remote_dmc_ecc_setup(plat_info.remote_ddr_size);
-
-	ret = sds_struct_read(N1SDP_SDS_BL33_INFO_STRUCT_ID,
-				N1SDP_SDS_BL33_INFO_OFFSET,
-				&bl33_info,
-				N1SDP_SDS_BL33_INFO_SIZE,
-				SDS_ACCESS_MODE_NON_CACHED);
-	if (ret != SDS_OK) {
-		ERROR("Error getting BL33 info from SDS\n");
-		panic();
-	}
-	copy_bl33(bl33_info.bl33_src_addr,
-			bl33_info.bl33_dst_addr,
-			bl33_info.bl33_size);
-	/*
-	 * Pass platform information to BL33. This method is followed as
-	 * currently there is no BL1/BL2 involved in boot flow of N1SDP.
-	 * When TBBR is implemented for N1SDP, this method should be removed
-	 * and platform information should be passed to BL33 using NT_FW_CONFIG
-	 * passing mechanism.
-	 */
-	mmio_write_32(N1SDP_PLATFORM_INFO_BASE, *(uint32_t *)&plat_info);
 }
diff --git a/plat/arm/board/n1sdp/n1sdp_def.h b/plat/arm/board/n1sdp/n1sdp_def.h
index 30e29a7..ffa6a03 100644
--- a/plat/arm/board/n1sdp/n1sdp_def.h
+++ b/plat/arm/board/n1sdp/n1sdp_def.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -20,12 +20,7 @@
 #define N1SDP_SDS_PLATFORM_INFO_OFFSET		0
 #define N1SDP_SDS_PLATFORM_INFO_SIZE		4
 #define N1SDP_MAX_DDR_CAPACITY_GB		64
-#define N1SDP_MAX_SLAVE_COUNT			16
-
-/* SDS BL33 image information defines */
-#define N1SDP_SDS_BL33_INFO_STRUCT_ID		9
-#define N1SDP_SDS_BL33_INFO_OFFSET		0
-#define N1SDP_SDS_BL33_INFO_SIZE		12
+#define N1SDP_MAX_SECONDARY_COUNT		16
 
 /* DMC memory command registers */
 #define N1SDP_DMC0_MEMC_CMD_REG			0x4E000008
@@ -54,7 +49,4 @@
 /* DMC ECC enable bit in ERR0CTLR0 register */
 #define N1SDP_DMC_ERR0CTLR0_ECC_EN		0x1
 
-/* Base address of non-secure SRAM where Platform information will be filled */
-#define N1SDP_PLATFORM_INFO_BASE		0x06008000
-
 #endif /* N1SDP_DEF_H */
diff --git a/plat/arm/board/n1sdp/n1sdp_err.c b/plat/arm/board/n1sdp/n1sdp_err.c
new file mode 100644
index 0000000..629e76a
--- /dev/null
+++ b/plat/arm/board/n1sdp/n1sdp_err.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat/arm/common/plat_arm.h>
+
+/*
+ * n1sdp error handler
+ */
+void __dead2 plat_arm_error_handler(int err)
+{
+	while (true) {
+		wfi();
+	}
+}
diff --git a/plat/arm/board/n1sdp/n1sdp_image_load.c b/plat/arm/board/n1sdp/n1sdp_image_load.c
new file mode 100644
index 0000000..6c3528c
--- /dev/null
+++ b/plat/arm/board/n1sdp/n1sdp_image_load.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/arm/css/sds.h>
+#include <libfdt.h>
+#include <plat/common/platform.h>
+
+#include "n1sdp_def.h"
+#include <plat/arm/common/plat_arm.h>
+
+/*
+ * Platform information structure stored in SDS.
+ * This structure holds information about the platform's DDR
+ * size, which is used to zero out the memory before
+ * enabling the ECC capability, as well as information
+ * about the multichip setup:
+ * 	- multichip mode
+ * 	- secondary_count
+ * 	- Local DDR size in GB, DDR memory in master board
+ * 	- Remote DDR size in GB, DDR memory in secondary board
+ */
+struct n1sdp_plat_info {
+	bool multichip_mode;
+	uint8_t secondary_count;
+	uint8_t local_ddr_size;
+	uint8_t remote_ddr_size;
+} __packed;
+
+/*******************************************************************************
+ * This function inserts platform information into NT_FW_CONFIG as:
+ *	platform-info {
+ *		multichip-mode = <0x0>;
+ *		secondary-chip-count = <0x0>;
+ *		local-ddr-size = <0x0>;
+ *		remote-ddr-size = <0x0>;
+ *	};
+ ******************************************************************************/
+static int plat_n1sdp_append_config_node(struct n1sdp_plat_info *plat_info)
+{
+	bl_mem_params_node_t *mem_params;
+	void *fdt;
+	int nodeoffset, err;
+
+	mem_params = get_bl_mem_params_node(NT_FW_CONFIG_ID);
+	if (mem_params == NULL) {
+		ERROR("NT_FW CONFIG base address is NULL\n");
+		return -1;
+	}
+
+	fdt = (void *)(mem_params->image_info.image_base);
+
+	/* Check the validity of the fdt */
+	if (fdt_check_header(fdt) != 0) {
+		ERROR("Invalid NT_FW_CONFIG DTB passed\n");
+		return -1;
+	}
+
+	nodeoffset = fdt_subnode_offset(fdt, 0, "platform-info");
+	if (nodeoffset < 0) {
+		ERROR("NT_FW_CONFIG: Failed to get platform-info node offset\n");
+		return -1;
+	}
+
+	err = fdt_setprop_u32(fdt, nodeoffset, "multichip-mode",
+			plat_info->multichip_mode);
+	if (err < 0) {
+		ERROR("NT_FW_CONFIG: Failed to set multichip-mode\n");
+		return -1;
+	}
+
+	err = fdt_setprop_u32(fdt, nodeoffset, "secondary-chip-count",
+			plat_info->secondary_count);
+	if (err < 0) {
+		ERROR("NT_FW_CONFIG: Failed to set secondary-chip-count\n");
+		return -1;
+	}
+
+	err = fdt_setprop_u32(fdt, nodeoffset, "local-ddr-size",
+			plat_info->local_ddr_size);
+	if (err < 0) {
+		ERROR("NT_FW_CONFIG: Failed to set local-ddr-size\n");
+		return -1;
+	}
+
+	err = fdt_setprop_u32(fdt, nodeoffset, "remote-ddr-size",
+			plat_info->remote_ddr_size);
+	if (err < 0) {
+		ERROR("NT_FW_CONFIG: Failed to set remote-ddr-size\n");
+		return -1;
+	}
+
+	flush_dcache_range((uintptr_t)fdt, mem_params->image_info.image_size);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+	int ret;
+	struct n1sdp_plat_info plat_info;
+
+	ret = sds_init();
+	if (ret != SDS_OK) {
+		ERROR("SDS initialization failed. ret:%d\n", ret);
+		panic();
+	}
+
+	ret = sds_struct_read(N1SDP_SDS_PLATFORM_INFO_STRUCT_ID,
+				N1SDP_SDS_PLATFORM_INFO_OFFSET,
+				&plat_info,
+				N1SDP_SDS_PLATFORM_INFO_SIZE,
+				SDS_ACCESS_MODE_NON_CACHED);
+	if (ret != SDS_OK) {
+		ERROR("Error getting platform info from SDS. ret:%d\n", ret);
+		panic();
+	}
+
+	/* Validate plat_info SDS */
+	if ((plat_info.local_ddr_size == 0U)
+		|| (plat_info.local_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
+		|| (plat_info.remote_ddr_size > N1SDP_MAX_DDR_CAPACITY_GB)
+		|| (plat_info.secondary_count > N1SDP_MAX_SECONDARY_COUNT)) {
+		ERROR("platform info SDS is corrupted\n");
+		panic();
+	}
+
+	ret = plat_n1sdp_append_config_node(&plat_info);
+	if (ret != 0) {
+		panic();
+	}
+
+	return arm_get_next_bl_params();
+}
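For completeness, the non-secure consumer of NT_FW_CONFIG reads these values back with the mirror-image libfdt calls; a minimal sketch, with a hypothetical helper name and only one property shown (the others follow the same pattern):

static int read_local_ddr_size(const void *fdt, uint32_t *local_ddr_gb)
{
	const fdt32_t *prop;
	int node;

	node = fdt_subnode_offset(fdt, 0, "platform-info");
	if (node < 0) {
		return node;
	}

	prop = fdt_getprop(fdt, node, "local-ddr-size", NULL);
	if (prop == NULL) {
		return -1;
	}

	*local_ddr_gb = fdt32_to_cpu(*prop);
	return 0;
}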
diff --git a/plat/arm/board/n1sdp/n1sdp_plat.c b/plat/arm/board/n1sdp/n1sdp_plat.c
index 951a562..502268c 100644
--- a/plat/arm/board/n1sdp/n1sdp_plat.c
+++ b/plat/arm/board/n1sdp/n1sdp_plat.c
@@ -1,16 +1,13 @@
 /*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <platform_def.h>
+#include <assert.h>
 
-#include <common/bl_common.h>
-#include <common/debug.h>
-#include <plat/arm/common/plat_arm.h>
-#include <plat/common/platform.h>
 #include <drivers/arm/sbsa.h>
+#include <plat/arm/common/plat_arm.h>
 
 #include "n1sdp_def.h"
 
@@ -19,17 +16,51 @@
  * Replace or extend the below regions as required
  */
 
+#if IMAGE_BL1
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	N1SDP_MAP_DEVICE,
+	N1SDP_MAP_NS_SRAM,
+	ARM_MAP_DRAM1,
+	{0}
+};
+#endif
+
+#if IMAGE_BL2
 const mmap_region_t plat_arm_mmap[] = {
 	ARM_MAP_SHARED_RAM,
 	N1SDP_MAP_DEVICE,
 	N1SDP_MAP_NS_SRAM,
 	ARM_MAP_DRAM1,
 	ARM_MAP_DRAM2,
+#if TRUSTED_BOARD_BOOT && !BL2_AT_EL3
+	ARM_MAP_BL1_RW,
+#endif
+	{0}
+};
+#endif
+
+#if IMAGE_BL31
+const mmap_region_t plat_arm_mmap[] = {
+	ARM_MAP_SHARED_RAM,
+	N1SDP_MAP_DEVICE,
+	N1SDP_MAP_NS_SRAM,
 	N1SDP_MAP_REMOTE_DEVICE,
 	N1SDP_MAP_REMOTE_DRAM1,
 	N1SDP_MAP_REMOTE_DRAM2,
 	{0}
 };
+#endif
+
+#if TRUSTED_BOARD_BOOT
+int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
+{
+	assert(heap_addr != NULL);
+	assert(heap_size != NULL);
+
+	return arm_get_mbedtls_heap(heap_addr, heap_size);
+}
+#endif
 
 void plat_arm_secure_wdt_start(void)
 {
diff --git a/plat/arm/board/n1sdp/n1sdp_trusted_boot.c b/plat/arm/board/n1sdp/n1sdp_trusted_boot.c
new file mode 100644
index 0000000..c7dc47f
--- /dev/null
+++ b/plat/arm/board/n1sdp/n1sdp_trusted_boot.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <plat/arm/common/plat_arm.h>
+
+/*
+ * Return the non-volatile counter value stored in the platform. The cookie
+ * will contain the OID of the counter in the certificate.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+	*nv_ctr = N1SDP_FW_NVCTR_VAL;
+	return 0;
+}
+
+/*
+ * Store a new non-volatile counter value. By default on ARM development
+ * platforms, the non-volatile counters are RO and cannot be modified. We expect
+ * the values in the certificates to always match the RO values so that this
+ * function is never called.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+	return 1;
+}
+
+/*
+ * Return the ROTPK hash in the following ASN.1 structure in DER format:
+ *
+ * AlgorithmIdentifier  ::=  SEQUENCE  {
+ *     algorithm         OBJECT IDENTIFIER,
+ *     parameters        ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * DigestInfo ::= SEQUENCE {
+ *     digestAlgorithm   AlgorithmIdentifier,
+ *     digest            OCTET STRING
+ * }
+ */
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+			 unsigned int *flags)
+{
+	return arm_get_rotpk_info(cookie, key_ptr, key_len, flags);
+}
+
diff --git a/plat/arm/board/n1sdp/platform.mk b/plat/arm/board/n1sdp/platform.mk
index f20397a..740fb29 100644
--- a/plat/arm/board/n1sdp/platform.mk
+++ b/plat/arm/board/n1sdp/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -28,30 +28,59 @@
 PLAT_BL_COMMON_SOURCES	:=	${N1SDP_BASE}/n1sdp_plat.c	        \
 				${N1SDP_BASE}/aarch64/n1sdp_helper.S
 
-BL1_SOURCES		+=	drivers/arm/sbsa/sbsa.c
+BL1_SOURCES		:=	${N1SDP_CPU_SOURCES}                \
+				${INTERCONNECT_SOURCES}             \
+				${N1SDP_BASE}/n1sdp_err.c           \
+				${N1SDP_BASE}/n1sdp_trusted_boot.c  \
+				${N1SDP_BASE}/n1sdp_bl1_setup.c     \
+				drivers/arm/sbsa/sbsa.c
+
+BL2_SOURCES		:=	${N1SDP_BASE}/n1sdp_security.c      \
+				${N1SDP_BASE}/n1sdp_err.c           \
+				${N1SDP_BASE}/n1sdp_trusted_boot.c  \
+				lib/utils/mem_region.c              \
+				${N1SDP_BASE}/n1sdp_bl2_setup.c     \
+				${N1SDP_BASE}/n1sdp_image_load.c     \
+				drivers/arm/css/sds/sds.c
 
 BL31_SOURCES		:=	${N1SDP_CPU_SOURCES}			\
 				${INTERCONNECT_SOURCES}			\
 				${N1SDP_GIC_SOURCES}			\
-				${N1SDP_BASE}/n1sdp_bl31_setup.c	        \
+				${N1SDP_BASE}/n1sdp_bl31_setup.c	\
 				${N1SDP_BASE}/n1sdp_topology.c	        \
 				${N1SDP_BASE}/n1sdp_security.c		\
 				drivers/arm/css/sds/sds.c
 
 FDT_SOURCES		+=	fdts/${PLAT}-single-chip.dts	\
-				fdts/${PLAT}-multi-chip.dts
+				fdts/${PLAT}-multi-chip.dts	\
+				${N1SDP_BASE}/fdts/n1sdp_fw_config.dts	\
+				${N1SDP_BASE}/fdts/n1sdp_tb_fw_config.dts	\
+				${N1SDP_BASE}/fdts/n1sdp_nt_fw_config.dts
+
+FW_CONFIG		:=	${BUILD_PLAT}/fdts/n1sdp_fw_config.dtb
+TB_FW_CONFIG		:=	${BUILD_PLAT}/fdts/n1sdp_tb_fw_config.dtb
+NT_FW_CONFIG		:=	${BUILD_PLAT}/fdts/n1sdp_nt_fw_config.dtb
+
+# Add the FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${FW_CONFIG},--fw-config,${FW_CONFIG}))
+# Add the TB_FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${TB_FW_CONFIG},--tb-fw-config,${TB_FW_CONFIG}))
+# Add the NT_FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${NT_FW_CONFIG},--nt-fw-config,${NT_FW_CONFIG}))
+
+# Set to 0 as there is no NVCTR on N1SDP
+N1SDP_FW_NVCTR_VAL	:=	0
+TFW_NVCTR_VAL		:=	${N1SDP_FW_NVCTR_VAL}
+NTFW_NVCTR_VAL		:=	${N1SDP_FW_NVCTR_VAL}
+
+# Add N1SDP_FW_NVCTR_VAL
+$(eval $(call add_define,N1SDP_FW_NVCTR_VAL))
 
 # TF-A not required to load the SCP Images
 override CSS_LOAD_SCP_IMAGES	  	:=	0
 
-# BL1/BL2 Image not a part of the capsule Image for n1sdp
-override NEED_BL1		  	:=	no
-override NEED_BL2		  	:=	no
 override NEED_BL2U		  	:=	no
 
-#TFA for n1sdp starts from BL31
-override RESET_TO_BL31            	:=	1
-
 # 32 bit mode not supported
 override CTX_INCLUDE_AARCH32_REGS 	:=	0
 
@@ -73,4 +102,3 @@
 include plat/arm/common/arm_common.mk
 include plat/arm/css/common/css_common.mk
 include plat/arm/board/common/board_common.mk
-
diff --git a/plat/arm/css/sgi/sgi-common.mk b/plat/arm/css/sgi/sgi-common.mk
index f56fe35..76c8025 100644
--- a/plat/arm/css/sgi/sgi-common.mk
+++ b/plat/arm/css/sgi/sgi-common.mk
@@ -23,6 +23,8 @@
 # Do not enable SVE
 ENABLE_SVE_FOR_NS		:=	0
 
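+# Save/restore the FP/SIMD registers as part of the CPU context managed by EL3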
+CTX_INCLUDE_FPREGS		:=	1
+
 INTERCONNECT_SOURCES	:=	${CSS_ENT_BASE}/sgi_interconnect.c
 
 PLAT_INCLUDES		+=	-I${CSS_ENT_BASE}/include
diff --git a/plat/hisilicon/hikey/platform.mk b/plat/hisilicon/hikey/platform.mk
index 18197cf..3e1771c 100644
--- a/plat/hisilicon/hikey/platform.mk
+++ b/plat/hisilicon/hikey/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -95,6 +95,10 @@
 BL2_SOURCES		+=	lib/optee/optee_utils.c
 endif
 
+include lib/zlib/zlib.mk
+PLAT_INCLUDES		+=	-Ilib/zlib
+BL2_SOURCES		+=	$(ZLIB_SOURCES)
+
 HIKEY_GIC_SOURCES	:=	drivers/arm/gic/common/gic_common.c	\
 				drivers/arm/gic/v2/gicv2_main.c		\
 				drivers/arm/gic/v2/gicv2_helpers.c	\
diff --git a/plat/hisilicon/hikey960/platform.mk b/plat/hisilicon/hikey960/platform.mk
index fc2c209..608fe09 100644
--- a/plat/hisilicon/hikey960/platform.mk
+++ b/plat/hisilicon/hikey960/platform.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -94,6 +94,10 @@
 BL2_SOURCES		+=	lib/optee/optee_utils.c
 endif
 
+include lib/zlib/zlib.mk
+PLAT_INCLUDES		+=	-Ilib/zlib
+BL2_SOURCES		+=	$(ZLIB_SOURCES)
+
 BL31_SOURCES		+=	drivers/arm/cci/cci.c			\
 				drivers/arm/pl061/pl061_gpio.c		\
 				drivers/gpio/gpio.c			\
diff --git a/plat/intel/soc/agilex/bl2_plat_setup.c b/plat/intel/soc/agilex/bl2_plat_setup.c
index 03adcf3..211a7b7 100644
--- a/plat/intel/soc/agilex/bl2_plat_setup.c
+++ b/plat/intel/soc/agilex/bl2_plat_setup.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2021, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -23,6 +23,7 @@
 #include "ccu/ncore_ccu.h"
 #include "qspi/cadence_qspi.h"
 #include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_handoff.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_private.h"
@@ -81,8 +82,10 @@
 	mailbox_init();
 	agx_mmc_init();
 
-	if (!intel_mailbox_is_fpga_not_ready())
-		socfpga_bridges_enable();
+	if (!intel_mailbox_is_fpga_not_ready()) {
+		socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+					FPGA2SOC_MASK);
+	}
 }
 
 
diff --git a/plat/intel/soc/agilex/include/agilex_noc.h b/plat/intel/soc/agilex/include/agilex_noc.h
deleted file mode 100644
index 9aba3c3..0000000
--- a/plat/intel/soc/agilex/include/agilex_noc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef AGX_NOC_H
-#define AGX_NOC_H
-
-
-#define AXI_AP					(1<<0)
-#define FPGA2SOC				(1<<16)
-#define MPU					(1<<24)
-#define AGX_NOC_PER_SCR_NAND			0xffd21000
-#define AGX_NOC_PER_SCR_NAND_DATA		0xffd21004
-#define AGX_NOC_PER_SCR_USB0			0xffd2100c
-#define AGX_NOC_PER_SCR_USB1			0xffd21010
-#define AGX_NOC_PER_SCR_SPI_M0			0xffd2101c
-#define AGX_NOC_PER_SCR_SPI_M1			0xffd21020
-#define AGX_NOC_PER_SCR_SPI_S0			0xffd21024
-#define AGX_NOC_PER_SCR_SPI_S1			0xffd21028
-#define AGX_NOC_PER_SCR_EMAC0			0xffd2102c
-#define AGX_NOC_PER_SCR_EMAC1			0xffd21030
-#define AGX_NOC_PER_SCR_EMAC2			0xffd21034
-#define AGX_NOC_PER_SCR_SDMMC			0xffd21040
-#define AGX_NOC_PER_SCR_GPIO0			0xffd21044
-#define AGX_NOC_PER_SCR_GPIO1			0xffd21048
-#define AGX_NOC_PER_SCR_I2C0			0xffd21050
-#define AGX_NOC_PER_SCR_I2C1			0xffd21058
-#define AGX_NOC_PER_SCR_I2C2			0xffd2105c
-#define AGX_NOC_PER_SCR_I2C3			0xffd21060
-#define AGX_NOC_PER_SCR_SP_TIMER0		0xffd21064
-#define AGX_NOC_PER_SCR_SP_TIMER1		0xffd21068
-#define AGX_NOC_PER_SCR_UART0			0xffd2106c
-#define AGX_NOC_PER_SCR_UART1			0xffd21070
-
-
-#define AGX_NOC_SYS_SCR_DMA_ECC			0xffd21108
-#define AGX_NOC_SYS_SCR_EMAC0RX_ECC		0xffd2110c
-#define AGX_NOC_SYS_SCR_EMAC0TX_ECC		0xffd21110
-#define AGX_NOC_SYS_SCR_EMAC1RX_ECC		0xffd21114
-#define AGX_NOC_SYS_SCR_EMAC1TX_ECC		0xffd21118
-#define AGX_NOC_SYS_SCR_EMAC2RX_ECC		0xffd2111c
-#define AGX_NOC_SYS_SCR_EMAC2TX_ECC		0xffd21120
-#define AGX_NOC_SYS_SCR_NAND_ECC		0xffd2112c
-#define AGX_NOC_SYS_SCR_NAND_READ_ECC		0xffd21130
-#define AGX_NOC_SYS_SCR_NAND_WRITE_ECC		0xffd21134
-#define AGX_NOC_SYS_SCR_OCRAM_ECC		0xffd21138
-#define AGX_NOC_SYS_SCR_SDMMC_ECC		0xffd21140
-#define AGX_NOC_SYS_SCR_USB0_ECC		0xffd21144
-#define AGX_NOC_SYS_SCR_USB1_ECC		0xffd21148
-#define AGX_NOC_SYS_SCR_CLK_MGR			0xffd2114c
-#define AGX_NOC_SYS_SCR_IO_MGR			0xffd21154
-#define AGX_NOC_SYS_SCR_RST_MGR			0xffd21158
-#define AGX_NOC_SYS_SCR_SYS_MGR			0xffd2115c
-#define AGX_NOC_SYS_SCR_OSC0_TIMER		0xffd21160
-#define AGX_NOC_SYS_SCR_OSC1_TIMER		0xffd21164
-#define AGX_NOC_SYS_SCR_WATCHDOG0		0xffd21168
-#define AGX_NOC_SYS_SCR_WATCHDOG1		0xffd2116c
-#define AGX_NOC_SYS_SCR_WATCHDOG2		0xffd21170
-#define AGX_NOC_SYS_SCR_WATCHDOG3		0xffd21174
-#define AGX_NOC_SYS_SCR_DAP			0xffd21178
-#define AGX_NOC_SYS_SCR_L4_NOC_PROBES		0xffd21190
-#define AGX_NOC_SYS_SCR_L4_NOC_QOS		0xffd21194
-
-#define AGX_CCU_NOC_BRIDGE_CPU0_RAM		0xf7004688
-#define AGX_CCU_NOC_BRIDGE_IOM_RAM		0xf7004688
-
-#endif
diff --git a/plat/intel/soc/agilex/include/socfpga_plat_def.h b/plat/intel/soc/agilex/include/socfpga_plat_def.h
index 499684d..b216ab1 100644
--- a/plat/intel/soc/agilex/include/socfpga_plat_def.h
+++ b/plat/intel/soc/agilex/include/socfpga_plat_def.h
@@ -20,6 +20,7 @@
 
 /* Register Mapping */
 #define SOCFPGA_CCU_NOC_REG_BASE		0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
 
 #define SOCFPGA_MMC_REG_BASE			0xff808000
 
diff --git a/plat/intel/soc/agilex/platform.mk b/plat/intel/soc/agilex/platform.mk
index 0e5f911..ccb4e07 100644
--- a/plat/intel/soc/agilex/platform.mk
+++ b/plat/intel/soc/agilex/platform.mk
@@ -56,6 +56,10 @@
 		plat/intel/soc/common/drivers/qspi/cadence_qspi.c	\
 		plat/intel/soc/common/drivers/wdt/watchdog.c
 
+include lib/zlib/zlib.mk
+PLAT_INCLUDES	+=	-Ilib/zlib
+BL2_SOURCES	+=	$(ZLIB_SOURCES)
+
 BL31_SOURCES	+=	\
 		drivers/arm/cci/cci.c					\
 		lib/cpus/aarch64/aem_generic.S				\
@@ -65,6 +69,7 @@
 		plat/intel/soc/agilex/soc/agilex_clock_manager.c	\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/intel/soc/common/include/socfpga_f2sdram_manager.h b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
new file mode 100644
index 0000000..82bb6cb
--- /dev/null
+++ b/plat/intel/soc/common/include/socfpga_f2sdram_manager.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOCFPGA_F2SDRAMMANAGER_H
+#define SOCFPGA_F2SDRAMMANAGER_H
+
+#include "socfpga_plat_def.h"
+
+/* FPGA2SDRAM Register Map */
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGINSTATUS0	0x14
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTCLR0	0x54
+#define SOCFPGA_F2SDRAMMGR_SIDEBANDMGR_FLAGOUTSET0	0x50
+
+#define FLAGOUTSETCLR_F2SDRAM0_ENABLE		(BIT(1))
+#define FLAGOUTSETCLR_F2SDRAM1_ENABLE		(BIT(4))
+#define FLAGOUTSETCLR_F2SDRAM2_ENABLE		(BIT(7))
+
+#define FLAGOUTSETCLR_F2SDRAM0_IDLEREQ		(BIT(0))
+#define FLAGOUTSETCLR_F2SDRAM1_IDLEREQ		(BIT(3))
+#define FLAGOUTSETCLR_F2SDRAM2_IDLEREQ		(BIT(6))
+#define FLAGINTSTATUS_F2SDRAM0_IDLEACK		(BIT(1))
+#define FLAGINTSTATUS_F2SDRAM1_IDLEACK		(BIT(5))
+#define FLAGINTSTATUS_F2SDRAM2_IDLEACK		(BIT(9))
+#define FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN	(BIT(2))
+#define FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN	(BIT(5))
+#define FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN	(BIT(8))
+
+#define FLAGINTSTATUS_F2SOC_RESPEMPTY		(BIT(3))
+#define FLAGINTSTATUS_F2SDRAM0_RESPEMPTY	(BIT(3))
+#define FLAGINTSTATUS_F2SDRAM1_RESPEMPTY	(BIT(7))
+#define FLAGINTSTATUS_F2SDRAM2_RESPEMPTY	(BIT(11))
+
+#define SOCFPGA_F2SDRAMMGR(_reg)	(SOCFPGA_F2SDRAMMGR_REG_BASE \
+						+ (SOCFPGA_F2SDRAMMGR_##_reg))
+
+#endif /* SOCFPGA_F2SDRAMMANAGER_H */
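As a usage illustration (not part of the patch), the accessor macro above composes the base and register offset; for example, requesting idle on the first FPGA-to-SDRAM port could look like:

	mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
			FLAGOUTSETCLR_F2SDRAM0_IDLEREQ);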
diff --git a/plat/intel/soc/common/include/socfpga_fcs.h b/plat/intel/soc/common/include/socfpga_fcs.h
index d3b7141..893551d 100644
--- a/plat/intel/soc/common/include/socfpga_fcs.h
+++ b/plat/intel/soc/common/include/socfpga_fcs.h
@@ -9,38 +9,300 @@
 
 /* FCS Definitions */
 
-#define FCS_RANDOM_WORD_SIZE		8U
-#define FCS_PROV_DATA_WORD_SIZE		44U
-#define FCS_SHA384_WORD_SIZE		12U
+#define FCS_RANDOM_WORD_SIZE					8U
+#define FCS_PROV_DATA_WORD_SIZE					44U
+#define FCS_SHA384_WORD_SIZE					12U
 
-#define FCS_RANDOM_BYTE_SIZE		(FCS_RANDOM_WORD_SIZE * 4U)
-#define FCS_PROV_DATA_BYTE_SIZE		(FCS_PROV_DATA_WORD_SIZE * 4U)
-#define FCS_SHA384_BYTE_SIZE		(FCS_SHA384_WORD_SIZE * 4U)
+#define FCS_RANDOM_BYTE_SIZE					(FCS_RANDOM_WORD_SIZE * 4U)
+#define FCS_RANDOM_EXT_MAX_WORD_SIZE				1020U
+#define FCS_PROV_DATA_BYTE_SIZE					(FCS_PROV_DATA_WORD_SIZE * 4U)
+#define FCS_SHA384_BYTE_SIZE					(FCS_SHA384_WORD_SIZE * 4U)
 
-#define FCS_CRYPTION_DATA_0		0x10100
+#define FCS_RANDOM_EXT_OFFSET					3
 
+#define FCS_MODE_DECRYPT					0x0
+#define FCS_MODE_ENCRYPT					0x1
+#define FCS_ENCRYPTION_DATA_0					0x10100
+#define FCS_DECRYPTION_DATA_0					0x10102
+#define FCS_OWNER_ID_OFFSET					0xC
+#define FCS_CRYPTION_CRYPTO_HEADER				0x07000000
+#define FCS_CRYPTION_RESP_WORD_SIZE				4U
+#define FCS_CRYPTION_RESP_SIZE_OFFSET				3U
+
+#define PSGSIGMA_TEARDOWN_MAGIC					0xB852E2A4
+#define	PSGSIGMA_SESSION_ID_ONE					0x1
+#define PSGSIGMA_UNKNOWN_SESSION				0xFFFFFFFF
+
+#define	RESERVED_AS_ZERO					0x0
+/* FCS Single cert */
+
+#define FCS_BIG_CNTR_SEL					0x1
+
+#define FCS_SVN_CNTR_0_SEL					0x2
+#define FCS_SVN_CNTR_1_SEL					0x3
+#define FCS_SVN_CNTR_2_SEL					0x4
+#define FCS_SVN_CNTR_3_SEL					0x5
+
+#define FCS_BIG_CNTR_VAL_MAX					495U
+#define FCS_SVN_CNTR_VAL_MAX					64U
+
+/* FCS Attestation Cert Request Parameter */
+
+#define FCS_ATTEST_FIRMWARE_CERT				0x01
+#define FCS_ATTEST_DEV_ID_SELF_SIGN_CERT			0x02
+#define FCS_ATTEST_DEV_ID_ENROLL_CERT				0x04
+#define FCS_ATTEST_ENROLL_SELF_SIGN_CERT			0x08
+#define FCS_ATTEST_ALIAS_CERT					0x10
+#define FCS_ATTEST_CERT_MAX_REQ_PARAM				0xFF
+
+/* FCS Crypto Service */
+
+#define FCS_CS_KEY_OBJ_MAX_WORD_SIZE				88U
+#define FCS_CS_KEY_INFO_MAX_WORD_SIZE				36U
+#define FCS_CS_KEY_RESP_STATUS_MASK				0xFF
+#define FCS_CS_KEY_RESP_STATUS_OFFSET				16U
+
+#define FCS_CS_FIELD_SIZE_MASK					0xFFFF
+#define FCS_CS_FIELD_FLAG_OFFSET				24
+#define FCS_CS_FIELD_FLAG_INIT					BIT(0)
+#define FCS_CS_FIELD_FLAG_UPDATE				BIT(1)
+#define FCS_CS_FIELD_FLAG_FINALIZE				BIT(2)
+
+#define FCS_AES_MAX_DATA_SIZE					0x10000000	/* 256 MB */
+#define FCS_AES_MIN_DATA_SIZE					0x20		/* 32 Byte */
+#define FCS_AES_CMD_MAX_WORD_SIZE				15U
+
+#define FCS_GET_DIGEST_CMD_MAX_WORD_SIZE			7U
+#define FCS_GET_DIGEST_RESP_MAX_WORD_SIZE			19U
+#define FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE			23U
+#define FCS_MAC_VERIFY_RESP_MAX_WORD_SIZE			4U
+#define FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET			8U
+
+#define FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE			5U
+#define FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE		7U
+#define FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE	43U
+#define FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE			17U
+#define FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE		52U
+#define FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE			29U
 /* FCS Payload Structure */
+typedef struct fcs_rng_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t size;
+} fcs_rng_payload;
 
-typedef struct fcs_crypt_payload_t {
+typedef struct fcs_encrypt_payload_t {
 	uint32_t first_word;
 	uint32_t src_addr;
 	uint32_t src_size;
 	uint32_t dst_addr;
 	uint32_t dst_size;
-} fcs_crypt_payload;
+} fcs_encrypt_payload;
+
+typedef struct fcs_decrypt_payload_t {
+	uint32_t first_word;
+	uint32_t owner_id[2];
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_decrypt_payload;
+
+typedef struct fcs_encrypt_ext_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_encrypt_ext_payload;
+
+typedef struct fcs_decrypt_ext_payload_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t crypto_header;
+	uint32_t owner_id[2];
+	uint32_t src_addr;
+	uint32_t src_size;
+	uint32_t dst_addr;
+	uint32_t dst_size;
+} fcs_decrypt_ext_payload;
+
+typedef struct psgsigma_teardown_msg_t {
+	uint32_t reserved_word;
+	uint32_t magic_word;
+	uint32_t session_id;
+} psgsigma_teardown_msg;
+
+typedef struct fcs_cntr_set_preauth_payload_t {
+	uint32_t first_word;
+	uint32_t counter_value;
+} fcs_cntr_set_preauth_payload;
+
+typedef struct fcs_cs_key_payload_t {
+	uint32_t session_id;
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t key_id;
+} fcs_cs_key_payload;
+
+typedef struct fcs_crypto_service_data_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t key_id;
+	uint32_t crypto_param_size;
+	uint64_t crypto_param;
+	uint8_t is_updated;
+} fcs_crypto_service_data;
+
+typedef struct fcs_crypto_service_aes_data_t {
+	uint32_t session_id;
+	uint32_t context_id;
+	uint32_t param_size;
+	uint32_t key_id;
+	uint32_t crypto_param[7];
+	uint8_t is_updated;
+} fcs_crypto_service_aes_data;
 
 /* Functions Definitions */
 
 uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
 				uint32_t *mbox_error);
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t size, uint32_t *send_id);
 uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
 				uint32_t *send_id);
 uint32_t intel_fcs_get_provision_data(uint32_t *send_id);
-uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
-			uint32_t src_size, uint32_t dst_addr,
-			uint32_t dst_size, uint32_t *send_id);
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type,
+				int32_t counter_value,
+				uint32_t test_bit,
+				uint32_t *mbox_error);
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t dst_size,
+				uint32_t *send_id);
 
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t dst_size,
+				uint32_t *send_id);
+
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint32_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error);
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error);
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
 uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size,
 				uint32_t *mbox_error);
 
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+				uint32_t *mbox_error);
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t *mbox_error);
+
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+				uint32_t *mbox_error);
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+				uint32_t *mbox_error);
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+				uint32_t *mbox_error);
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+				uint32_t *mbox_error);
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_get_digest_update_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t data_size, uint8_t is_finalised,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error);
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error);
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error);
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error);
+
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint64_t param_addr,
+				uint32_t param_size, uint32_t *mbox_error);
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint64_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t dst_size, uint8_t is_finalised,
+				uint32_t *send_id);
+
 #endif /* SOCFPGA_FCS_H */
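The new crypto-service API above follows an init then update/finalize split, signalled by the FCS_CS_FIELD_FLAG_* bits. A rough flow sketch (illustrative only; session, context, key and buffer variables are assumed to be set up elsewhere, dst_size is assumed to carry the destination buffer size on entry, and 0 is assumed to indicate success):

	uint32_t mbox_err;

	if (intel_fcs_get_digest_init(session_id, context_id, key_id,
				      param_size, param_data, &mbox_err) == 0) {
		/* Single data block, so update and finalize in one call. */
		(void)intel_fcs_get_digest_update_finalize(session_id, context_id,
							   src_addr, src_size,
							   dst_addr, &dst_size,
							   1U, &mbox_err);
	}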
diff --git a/plat/intel/soc/common/include/socfpga_mailbox.h b/plat/intel/soc/common/include/socfpga_mailbox.h
index b260a62..1f4b2a4 100644
--- a/plat/intel/soc/common/include/socfpga_mailbox.h
+++ b/plat/intel/soc/common/include/socfpga_mailbox.h
@@ -10,95 +10,124 @@
 #include <lib/utils_def.h>
 
 
-#define MBOX_OFFSET			0xffa30000
+#define MBOX_OFFSET					0xffa30000
 
-#define MBOX_ATF_CLIENT_ID		0x1U
-#define MBOX_MAX_JOB_ID			0xFU
-#define MBOX_MAX_IND_JOB_ID		(MBOX_MAX_JOB_ID - 1U)
-#define MBOX_JOB_ID			MBOX_MAX_JOB_ID
-
+#define MBOX_ATF_CLIENT_ID				0x1U
+#define MBOX_MAX_JOB_ID					0xFU
+#define MBOX_MAX_IND_JOB_ID				(MBOX_MAX_JOB_ID - 1U)
+#define MBOX_JOB_ID					MBOX_MAX_JOB_ID
+#define MBOX_TEST_BIT					BIT(31)
 
 /* Mailbox Shared Memory Register Map */
-#define MBOX_CIN			0x00
-#define MBOX_ROUT			0x04
-#define MBOX_URG			0x08
-#define MBOX_INT			0x0C
-#define MBOX_COUT			0x20
-#define MBOX_RIN			0x24
-#define MBOX_STATUS			0x2C
-#define MBOX_CMD_BUFFER			0x40
-#define MBOX_RESP_BUFFER		0xC0
+#define MBOX_CIN					0x00
+#define MBOX_ROUT					0x04
+#define MBOX_URG					0x08
+#define MBOX_INT					0x0C
+#define MBOX_COUT					0x20
+#define MBOX_RIN					0x24
+#define MBOX_STATUS					0x2C
+#define MBOX_CMD_BUFFER					0x40
+#define MBOX_RESP_BUFFER				0xC0
 
 /* Mailbox SDM doorbell */
-#define MBOX_DOORBELL_TO_SDM		0x400
-#define MBOX_DOORBELL_FROM_SDM		0x480
+#define MBOX_DOORBELL_TO_SDM				0x400
+#define MBOX_DOORBELL_FROM_SDM				0x480
 
 
 /* Mailbox commands */
 
-#define MBOX_CMD_NOOP			0x00
-#define MBOX_CMD_SYNC			0x01
-#define MBOX_CMD_RESTART		0x02
-#define MBOX_CMD_CANCEL			0x03
-#define MBOX_CMD_VAB_SRC_CERT		0x0B
-#define MBOX_CMD_GET_IDCODE		0x10
-#define MBOX_CMD_GET_USERCODE		0x13
-#define MBOX_CMD_REBOOT_HPS		0x47
+#define MBOX_CMD_NOOP					0x00
+#define MBOX_CMD_SYNC					0x01
+#define MBOX_CMD_RESTART				0x02
+#define MBOX_CMD_CANCEL					0x03
+#define MBOX_CMD_VAB_SRC_CERT				0x0B
+#define MBOX_CMD_GET_IDCODE				0x10
+#define MBOX_CMD_GET_USERCODE				0x13
+#define MBOX_CMD_GET_CHIPID				0x12
+#define MBOX_CMD_REBOOT_HPS				0x47
 
 /* Reconfiguration Commands */
-#define MBOX_CONFIG_STATUS		0x04
-#define MBOX_RECONFIG			0x06
-#define MBOX_RECONFIG_DATA		0x08
-#define MBOX_RECONFIG_STATUS		0x09
+#define MBOX_CONFIG_STATUS				0x04
+#define MBOX_RECONFIG					0x06
+#define MBOX_RECONFIG_DATA				0x08
+#define MBOX_RECONFIG_STATUS				0x09
 
 /* HWMON Commands */
-#define MBOX_HWMON_READVOLT		0x18
-#define MBOX_HWMON_READTEMP		0x19
+#define MBOX_HWMON_READVOLT				0x18
+#define MBOX_HWMON_READTEMP				0x19
 
 
 /* QSPI Commands */
-#define MBOX_CMD_QSPI_OPEN		0x32
-#define MBOX_CMD_QSPI_CLOSE		0x33
-#define MBOX_CMD_QSPI_SET_CS		0x34
-#define MBOX_CMD_QSPI_DIRECT		0x3B
+#define MBOX_CMD_QSPI_OPEN				0x32
+#define MBOX_CMD_QSPI_CLOSE				0x33
+#define MBOX_CMD_QSPI_SET_CS				0x34
+#define MBOX_CMD_QSPI_DIRECT				0x3B
 
 /* RSU Commands */
-#define MBOX_GET_SUBPARTITION_TABLE	0x5A
-#define MBOX_RSU_STATUS			0x5B
-#define MBOX_RSU_UPDATE			0x5C
-#define MBOX_HPS_STAGE_NOTIFY		0x5D
+#define MBOX_GET_SUBPARTITION_TABLE			0x5A
+#define MBOX_RSU_STATUS					0x5B
+#define MBOX_RSU_UPDATE					0x5C
+#define MBOX_HPS_STAGE_NOTIFY				0x5D
 
 /* FCS Command */
-#define MBOX_FCS_GET_PROVISION			0x7B
-#define MBOX_FCS_ENCRYPT_REQ			0x7E
-#define MBOX_FCS_DECRYPT_REQ			0x7F
-#define MBOX_FCS_RANDOM_GEN			0x80
+#define MBOX_FCS_GET_PROVISION				0x7B
+#define MBOX_FCS_CNTR_SET_PREAUTH			0x7C
+#define MBOX_FCS_ENCRYPT_REQ				0x7E
+#define MBOX_FCS_DECRYPT_REQ				0x7F
+#define MBOX_FCS_RANDOM_GEN				0x80
+#define MBOX_FCS_AES_CRYPT_REQ				0x81
+#define MBOX_FCS_GET_DIGEST_REQ				0x82
+#define MBOX_FCS_MAC_VERIFY_REQ				0x83
+#define MBOX_FCS_ECDSA_HASH_SIGN_REQ			0x84
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ		0x85
+#define MBOX_FCS_ECDSA_HASH_SIG_VERIFY			0x86
+#define MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY		0x87
+#define MBOX_FCS_ECDSA_GET_PUBKEY			0x88
+#define MBOX_FCS_ECDH_REQUEST				0x89
+#define MBOX_FCS_OPEN_CS_SESSION			0xA0
+#define MBOX_FCS_CLOSE_CS_SESSION			0xA1
+#define MBOX_FCS_IMPORT_CS_KEY				0xA5
+#define MBOX_FCS_EXPORT_CS_KEY				0xA6
+#define MBOX_FCS_REMOVE_CS_KEY				0xA7
+#define MBOX_FCS_GET_CS_KEY_INFO			0xA8
+
+/* PSG SIGMA Commands */
+#define MBOX_PSG_SIGMA_TEARDOWN				0xD5
+
+/* Attestation Commands */
+#define MBOX_CREATE_CERT_ON_RELOAD			0x180
+#define MBOX_GET_ATTESTATION_CERT			0x181
+#define MBOX_ATTESTATION_SUBKEY				0x182
+#define MBOX_GET_MEASUREMENT				0x183
+
 /* Miscellaneous commands */
 #define MBOX_GET_ROM_PATCH_SHA384	0x1B0
 
 /* Mailbox Definitions */
 
-#define CMD_DIRECT			0
-#define CMD_INDIRECT			1
-#define CMD_CASUAL			0
-#define CMD_URGENT			1
+#define CMD_DIRECT					0
+#define CMD_INDIRECT					1
+#define CMD_CASUAL					0
+#define CMD_URGENT					1
 
-#define MBOX_WORD_BYTE			4U
-#define MBOX_RESP_BUFFER_SIZE		16
-#define MBOX_CMD_BUFFER_SIZE		32
+#define MBOX_WORD_BYTE					4U
+#define MBOX_RESP_BUFFER_SIZE				16
+#define MBOX_CMD_BUFFER_SIZE				32
+#define MBOX_INC_HEADER_MAX_WORD_SIZE			1024U
 
 /* Execution states for HPS_STAGE_NOTIFY */
-#define HPS_EXECUTION_STATE_FSBL	0
-#define HPS_EXECUTION_STATE_SSBL	1
-#define HPS_EXECUTION_STATE_OS		2
+#define HPS_EXECUTION_STATE_FSBL			0
+#define HPS_EXECUTION_STATE_SSBL			1
+#define HPS_EXECUTION_STATE_OS				2
 
 /* Status Response */
-#define MBOX_RET_OK			0
-#define MBOX_RET_ERROR			-1
-#define MBOX_NO_RESPONSE		-2
-#define MBOX_WRONG_ID			-3
-#define MBOX_BUFFER_FULL		-4
-#define MBOX_TIMEOUT			-2047
+#define MBOX_RET_OK					0
+#define MBOX_RET_ERROR					-1
+#define MBOX_NO_RESPONSE				-2
+#define MBOX_WRONG_ID					-3
+#define MBOX_BUFFER_FULL				-4
+#define MBOX_BUSY					-5
+#define MBOX_TIMEOUT					-2047
 
 /* Reconfig Status Response */
 #define RECONFIG_STATUS_STATE				0
@@ -123,39 +152,56 @@
 
 /* Mailbox Macros */
 
-#define MBOX_ENTRY_TO_ADDR(_buf, ptr)	(MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \
-						+ MBOX_WORD_BYTE * (ptr))
+#define MBOX_ENTRY_TO_ADDR(_buf, ptr)			(MBOX_OFFSET + (MBOX_##_buf##_BUFFER) \
+								+ MBOX_WORD_BYTE * (ptr))
 
 /* Mailbox interrupt flags and masks */
-#define MBOX_INT_FLAG_COE		0x1
-#define MBOX_INT_FLAG_RIE		0x2
-#define MBOX_INT_FLAG_UAE		0x100
-#define MBOX_COE_BIT(INTERRUPT)		((INTERRUPT) & 0x3)
-#define MBOX_UAE_BIT(INTERRUPT)		(((INTERRUPT) & (1<<8)))
+#define MBOX_INT_FLAG_COE				0x1
+#define MBOX_INT_FLAG_RIE				0x2
+#define MBOX_INT_FLAG_UAE				0x100
+#define MBOX_COE_BIT(INTERRUPT)				((INTERRUPT) & 0x3)
+#define MBOX_UAE_BIT(INTERRUPT)				(((INTERRUPT) & (1<<8)))
 
 /* Mailbox response and status */
-#define MBOX_RESP_ERR(BUFFER)		((BUFFER) & 0x00000fff)
-#define MBOX_RESP_LEN(BUFFER)		(((BUFFER) & 0x007ff000) >> 12)
-#define MBOX_RESP_CLIENT_ID(BUFFER)	(((BUFFER) & 0xf0000000) >> 28)
-#define MBOX_RESP_JOB_ID(BUFFER)	(((BUFFER) & 0x0f000000) >> 24)
-#define MBOX_STATUS_UA_MASK		(1<<8)
+#define MBOX_RESP_ERR(BUFFER)				((BUFFER) & 0x000007ff)
+#define MBOX_RESP_LEN(BUFFER)				(((BUFFER) & 0x007ff000) >> 12)
+#define MBOX_RESP_CLIENT_ID(BUFFER)			(((BUFFER) & 0xf0000000) >> 28)
+#define MBOX_RESP_JOB_ID(BUFFER)			(((BUFFER) & 0x0f000000) >> 24)
+#define MBOX_STATUS_UA_MASK				(1<<8)
 
 /* Mailbox command and response */
-#define MBOX_CLIENT_ID_CMD(CLIENT_ID)	((CLIENT_ID) << 28)
-#define MBOX_JOB_ID_CMD(JOB_ID)		(JOB_ID<<24)
-#define MBOX_CMD_LEN_CMD(CMD_LEN)	((CMD_LEN) << 12)
-#define MBOX_INDIRECT(val)		((val) << 11)
-#define MBOX_CMD_MASK(header)		((header) & 0x7ff)
+#define MBOX_CLIENT_ID_CMD(CLIENT_ID)			((CLIENT_ID) << 28)
+#define MBOX_JOB_ID_CMD(JOB_ID)				(JOB_ID<<24)
+#define MBOX_CMD_LEN_CMD(CMD_LEN)			((CMD_LEN) << 12)
+#define MBOX_INDIRECT(val)				((val) << 11)
+#define MBOX_CMD_MASK(header)				((header) & 0x7ff)
+
+/* Mailbox payload */
+#define MBOX_DATA_MAX_LEN				0x3ff
+#define MBOX_PAYLOAD_FLAG_BUSY				BIT(0)
 
 /* RSU Macros */
-#define RSU_VERSION_ACMF		BIT(8)
-#define RSU_VERSION_ACMF_MASK		0xff00
+#define RSU_VERSION_ACMF				BIT(8)
+#define RSU_VERSION_ACMF_MASK				0xff00
 
 /* Config Status Macros */
 #define CONFIG_STATUS_WORD_SIZE		16U
 #define CONFIG_STATUS_FW_VER_OFFSET	1
 #define CONFIG_STATUS_FW_VER_MASK	0x00FFFFFF
 
+/* Data structure */
+
+typedef struct mailbox_payload {
+	uint32_t header;
+	uint32_t data[MBOX_DATA_MAX_LEN];
+} mailbox_payload_t;
+
+typedef struct mailbox_container {
+	uint32_t flag;
+	uint32_t index;
+	mailbox_payload_t *payload;
+} mailbox_container_t;
+
 /* Mailbox Function Definitions */
 
 void mailbox_set_int(uint32_t interrupt_input);
@@ -168,8 +214,13 @@
 			unsigned int *resp_len);
 int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
 			unsigned int len, unsigned int indirect);
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+			unsigned int len);
 int mailbox_read_response(uint32_t *job_id, uint32_t *response,
 			unsigned int *resp_len);
+int mailbox_read_response_async(uint32_t *job_id, uint32_t *header,
+			uint32_t *response, unsigned int *resp_len,
+			uint8_t ignore_client_id);
 int iterate_resp(uint32_t mbox_resp_len, uint32_t *resp_buf,
 			unsigned int *resp_len);
 
diff --git a/plat/intel/soc/common/include/socfpga_reset_manager.h b/plat/intel/soc/common/include/socfpga_reset_manager.h
index a976df7..cce16ab 100644
--- a/plat/intel/soc/common/include/socfpga_reset_manager.h
+++ b/plat/intel/soc/common/include/socfpga_reset_manager.h
@@ -9,11 +9,22 @@
 
 #include "socfpga_plat_def.h"
 
+#define SOCFPGA_BRIDGE_ENABLE			BIT(0)
+#define SOCFPGA_BRIDGE_HAS_MASK			BIT(1)
+
+#define SOC2FPGA_MASK				(1<<0)
+#define LWHPS2FPGA_MASK				(1<<1)
+#define FPGA2SOC_MASK				(1<<2)
+#define F2SDRAM0_MASK				(1<<3)
+#define F2SDRAM1_MASK				(1<<4)
+#define F2SDRAM2_MASK				(1<<5)
 
 /* Register Mapping */
 
 #define SOCFPGA_RSTMGR_STAT			0x000
 #define SOCFPGA_RSTMGR_HDSKEN			0x010
+#define SOCFPGA_RSTMGR_HDSKREQ			0x014
+#define SOCFPGA_RSTMGR_HDSKACK			0x018
 #define SOCFPGA_RSTMGR_MPUMODRST		0x020
 #define SOCFPGA_RSTMGR_PER0MODRST		0x024
 #define SOCFPGA_RSTMGR_PER1MODRST		0x028
@@ -78,14 +89,20 @@
 #define RSTMGR_HDSKEN_DEBUG_L3NOC		0x00020000
 #define RSTMGR_HDSKEN_SDRSELFREFEN		0x00000001
 
+#define RSTMGR_HDSKEQ_FPGAHSREQ			0x4
+
 #define RSTMGR_BRGMODRST_SOC2FPGA		0x1
 #define RSTMGR_BRGMODRST_LWHPS2FPGA		0x2
 #define RSTMGR_BRGMODRST_FPGA2SOC		0x4
+#define RSTMGR_BRGMODRST_F2SSDRAM0		0x8
 #define RSTMGR_BRGMODRST_F2SSDRAM1		0x10
 #define RSTMGR_BRGMODRST_F2SSDRAM2		0x20
 #define RSTMGR_BRGMODRST_MPFE			0x40
 #define RSTMGR_BRGMODRST_DDRSCH			0x40
 
+#define RSTMGR_HDSKREQ_FPGAHSREQ		(BIT(2))
+#define RSTMGR_HDSKACK_FPGAHSACK_MASK		(BIT(2))
+
 /* Definitions */
 
 #define RSTMGR_L2_MODRST			0x0100
@@ -94,7 +111,7 @@
 /* Macros */
 
 #define SOCFPGA_RSTMGR(_reg)		(SOCFPGA_RSTMGR_REG_BASE \
-						+ (SOCFPGA_RSTMGR_##_reg))
+					+ (SOCFPGA_RSTMGR_##_reg))
 #define RSTMGR_FIELD(_reg, _field)	(RSTMGR_##_reg##MODRST_##_field)
 
 /* Function Declarations */
@@ -102,7 +119,7 @@
 void deassert_peripheral_reset(void);
 void config_hps_hs_before_warm_reset(void);
 
-int socfpga_bridges_enable(void);
-int socfpga_bridges_disable(void);
+int socfpga_bridges_enable(uint32_t mask);
+int socfpga_bridges_disable(uint32_t mask);
 
 #endif /* SOCFPGA_RESETMANAGER_H */
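With the mask parameter added above, callers pick which bridges to act on, as the Agilex BL2 change earlier in this patch does; for instance:

	/* Enable only the lightweight HPS-to-FPGA bridge. */
	socfpga_bridges_enable(LWHPS2FPGA_MASK);

	/* Disable the SoC-to-FPGA and FPGA-to-SoC bridges together. */
	socfpga_bridges_disable(SOC2FPGA_MASK | FPGA2SOC_MASK);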
diff --git a/plat/intel/soc/common/include/socfpga_sip_svc.h b/plat/intel/soc/common/include/socfpga_sip_svc.h
index 43f3dc4..0803eb5 100644
--- a/plat/intel/soc/common/include/socfpga_sip_svc.h
+++ b/plat/intel/soc/common/include/socfpga_sip_svc.h
@@ -9,29 +9,43 @@
 
 
 /* SiP status response */
-#define INTEL_SIP_SMC_STATUS_OK				0
-#define INTEL_SIP_SMC_STATUS_BUSY			0x1
-#define INTEL_SIP_SMC_STATUS_REJECTED			0x2
-#define INTEL_SIP_SMC_STATUS_ERROR			0x4
-#define INTEL_SIP_SMC_RSU_ERROR				0x7
+#define INTEL_SIP_SMC_STATUS_OK					0
+#define INTEL_SIP_SMC_STATUS_BUSY				0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED				0x2
+#define INTEL_SIP_SMC_STATUS_NO_RESPONSE			0x3
+#define INTEL_SIP_SMC_STATUS_ERROR				0x4
+#define INTEL_SIP_SMC_RSU_ERROR					0x7
 
 /* SiP mailbox error code */
-#define GENERIC_RESPONSE_ERROR				0x3FF
+#define GENERIC_RESPONSE_ERROR					0x3FF
 
-/* SMC SiP service function identifier */
+/* SiP V2 command code range */
+#define INTEL_SIP_SMC_CMD_MASK					0xFFFF
+#define INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN			0x400
+#define INTEL_SIP_SMC_CMD_V2_RANGE_END				0x4FF
+
+/* SiP V2 protocol header */
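+/*
+ * The 64-bit header word packs the job ID in bits [3:0], the client ID in
+ * bits [7:4] and the protocol version in bits [63:60].
+ */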
+#define INTEL_SIP_SMC_HEADER_JOB_ID_MASK			0xF
+#define INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET			0U
+#define INTEL_SIP_SMC_HEADER_CID_MASK				0xF
+#define INTEL_SIP_SMC_HEADER_CID_OFFSET				4U
+#define INTEL_SIP_SMC_HEADER_VERSION_MASK			0xF
+#define INTEL_SIP_SMC_HEADER_VERSION_OFFSET			60U
+
+/* SMC SiP service function identifier for version 1 */
 
 /* FPGA Reconfig */
-#define INTEL_SIP_SMC_FPGA_CONFIG_START			0xC2000001
-#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE			0x42000002
-#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE	0xC2000003
-#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE		0xC2000004
-#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM		0xC2000005
+#define INTEL_SIP_SMC_FPGA_CONFIG_START				0xC2000001
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE				0x42000002
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE		0xC2000003
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE			0xC2000004
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM			0xC2000005
 
 /* FPGA Bitstream Flag */
-#define FLAG_PARTIAL_CONFIG				BIT(0)
-#define FLAG_AUTHENTICATION				BIT(1)
-#define CONFIG_TEST_FLAG(_flag, _type)			(((flag) & FLAG_##_type) \
-							== FLAG_##_type)
+#define FLAG_PARTIAL_CONFIG					BIT(0)
+#define FLAG_AUTHENTICATION					BIT(1)
+#define CONFIG_TEST_FLAG(_flag, _type)				(((_flag) & FLAG_##_type) \
+								== FLAG_##_type)
 
 /* Secure Register Access */
 #define INTEL_SIP_SMC_REG_READ				0xC2000007
@@ -39,56 +53,121 @@
 #define INTEL_SIP_SMC_REG_UPDATE			0xC2000009
 
 /* Remote System Update */
-#define INTEL_SIP_SMC_RSU_STATUS			0xC200000B
-#define INTEL_SIP_SMC_RSU_UPDATE			0xC200000C
-#define INTEL_SIP_SMC_RSU_NOTIFY			0xC200000E
-#define INTEL_SIP_SMC_RSU_RETRY_COUNTER			0xC200000F
-#define INTEL_SIP_SMC_RSU_DCMF_VERSION			0xC2000010
-#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION		0xC2000011
-#define INTEL_SIP_SMC_RSU_MAX_RETRY			0xC2000012
-#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY		0xC2000013
-#define INTEL_SIP_SMC_RSU_DCMF_STATUS			0xC2000014
-#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS		0xC2000015
+#define INTEL_SIP_SMC_RSU_STATUS				0xC200000B
+#define INTEL_SIP_SMC_RSU_UPDATE				0xC200000C
+#define INTEL_SIP_SMC_RSU_NOTIFY				0xC200000E
+#define INTEL_SIP_SMC_RSU_RETRY_COUNTER				0xC200000F
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION				0xC2000010
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_VERSION			0xC2000011
+#define INTEL_SIP_SMC_RSU_MAX_RETRY				0xC2000012
+#define INTEL_SIP_SMC_RSU_COPY_MAX_RETRY			0xC2000013
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS				0xC2000014
+#define INTEL_SIP_SMC_RSU_COPY_DCMF_STATUS			0xC2000015
 
 /* Hardware monitor */
-#define INTEL_SIP_SMC_HWMON_READTEMP			0xC2000020
-#define INTEL_SIP_SMC_HWMON_READVOLT			0xC2000021
-#define TEMP_CHANNEL_MAX				(1 << 15)
-#define VOLT_CHANNEL_MAX				(1 << 15)
+#define INTEL_SIP_SMC_HWMON_READTEMP				0xC2000020
+#define INTEL_SIP_SMC_HWMON_READVOLT				0xC2000021
+#define TEMP_CHANNEL_MAX					(1 << 15)
+#define VOLT_CHANNEL_MAX					(1 << 15)
 
 /* ECC */
-#define INTEL_SIP_SMC_ECC_DBE				0xC200000D
+#define INTEL_SIP_SMC_ECC_DBE					0xC200000D
 
 /* Generic Command */
-#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384		0xC2000040
+#define INTEL_SIP_SMC_SERVICE_COMPLETED				0xC200001E
+#define INTEL_SIP_SMC_FIRMWARE_VERSION				0xC200001F
+#define INTEL_SIP_SMC_HPS_SET_BRIDGES				0xC2000032
+#define INTEL_SIP_SMC_GET_ROM_PATCH_SHA384			0xC2000040
 
-/* Send Mailbox Command */
-#define INTEL_SIP_SMC_MBOX_SEND_CMD			0xC200001E
-#define INTEL_SIP_SMC_FIRMWARE_VERSION			0xC200001F
-#define INTEL_SIP_SMC_HPS_SET_BRIDGES			0xC2000032
+#define SERVICE_COMPLETED_MODE_ASYNC				0x00004F4E
 
 /* Mailbox Command */
-#define INTEL_SIP_SMC_GET_USERCODE			0xC200003D
+#define INTEL_SIP_SMC_MBOX_SEND_CMD				0xC200003C
+#define INTEL_SIP_SMC_GET_USERCODE				0xC200003D
 
-/* SiP Definitions */
+/* FPGA Crypto Services */
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER				0xC200005A
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT			0x4200008F
+#define INTEL_SIP_SMC_FCS_CRYPTION				0x4200005B
+#define INTEL_SIP_SMC_FCS_CRYPTION_EXT				0xC2000090
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST			0x4200005C
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE			0x4200005D
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA			0x4200005E
+#define INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH			0xC200005F
+#define INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN			0xC2000064
+#define INTEL_SIP_SMC_FCS_CHIP_ID				0xC2000065
+#define INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY			0xC2000066
+#define INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS		0xC2000067
+#define INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT			0xC2000068
+#define INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD			0xC2000069
+#define INTEL_SIP_SMC_FCS_OPEN_CS_SESSION			0xC200006E
+#define INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION			0xC200006F
+#define INTEL_SIP_SMC_FCS_IMPORT_CS_KEY				0x42000070
+#define INTEL_SIP_SMC_FCS_EXPORT_CS_KEY				0xC2000071
+#define INTEL_SIP_SMC_FCS_REMOVE_CS_KEY				0xC2000072
+#define INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO			0xC2000073
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_INIT			0xC2000074
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE			0x42000075
+#define INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE			0x42000076
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_INIT			0xC2000077
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE			0xC2000078
+#define INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE			0xC2000079
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT			0xC200007A
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE			0xC200007B
+#define INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE			0xC200007C
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT			0xC200007D
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE		0xC200007F
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT		0xC2000080
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE		0xC2000081
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE		0xC2000082
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT		0xC2000083
+#define INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE	0xC2000085
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT	0xC2000086
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE	0xC2000087
+#define INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE	0xC2000088
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT			0xC2000089
+#define INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE		0xC200008B
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT			0xC200008C
+#define INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE			0xC200008E
+
+#define INTEL_SIP_SMC_FCS_SHA_MODE_MASK				0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK			0xF
+#define INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET			4U
+#define INTEL_SIP_SMC_FCS_ECC_ALGO_MASK				0xF
 
 /* ECC DBE */
-#define WARM_RESET_WFI_FLAG				BIT(31)
-#define SYSMGR_ECC_DBE_COLD_RST_MASK			(SYSMGR_ECC_OCRAM_MASK |\
-							SYSMGR_ECC_DDR0_MASK |\
-							SYSMGR_ECC_DDR1_MASK)
+#define WARM_RESET_WFI_FLAG					BIT(31)
+#define SYSMGR_ECC_DBE_COLD_RST_MASK				(SYSMGR_ECC_OCRAM_MASK |\
+								SYSMGR_ECC_DDR0_MASK |\
+								SYSMGR_ECC_DDR1_MASK)
 
 /* Non-mailbox SMC Call */
-#define INTEL_SIP_SMC_SVC_VERSION			0xC2000200
+#define INTEL_SIP_SMC_SVC_VERSION				0xC2000200
+
+/**
+ * SMC SiP service function identifiers for version 2
+ * Command codes range from 0x400 to 0x4FF
+ */
+
+/* V2: Non-mailbox function identifier */
+#define INTEL_SIP_SMC_V2_GET_SVC_VERSION			0xC2000400
+#define INTEL_SIP_SMC_V2_REG_READ				0xC2000401
+#define INTEL_SIP_SMC_V2_REG_WRITE				0xC2000402
+#define INTEL_SIP_SMC_V2_REG_UPDATE				0xC2000403
+#define INTEL_SIP_SMC_V2_HPS_SET_BRIDGES			0xC2000404
+
+/* V2: Mailbox function identifier */
+#define INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND			0xC2000420
+#define INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE			0xC2000421
 
 /* SMC function IDs for SiP Service queries */
-#define SIP_SVC_CALL_COUNT				0x8200ff00
-#define SIP_SVC_UID					0x8200ff01
-#define SIP_SVC_VERSION					0x8200ff03
+#define SIP_SVC_CALL_COUNT					0x8200ff00
+#define SIP_SVC_UID						0x8200ff01
+#define SIP_SVC_VERSION						0x8200ff03
 
 /* SiP Service Calls version numbers */
-#define SIP_SVC_VERSION_MAJOR				1
-#define SIP_SVC_VERSION_MINOR				0
+#define SIP_SVC_VERSION_MAJOR					1
+#define SIP_SVC_VERSION_MINOR					0
 
 
 /* Structure Definitions */
@@ -101,12 +180,38 @@
 	int block_number;
 };
 
-/* Function Definitions */
+typedef enum {
+	NO_REQUEST = 0,
+	RECONFIGURATION,
+	BITSTREAM_AUTH
+} config_type;
 
+/* Function Definitions */
+bool is_size_4_bytes_aligned(uint32_t size);
 bool is_address_in_ddr_range(uint64_t addr, uint64_t size);
 
 /* ECC DBE */
 bool cold_reset_for_ecc_dbe(void);
 uint32_t intel_ecc_dbe_notification(uint64_t dbe_value);
 
+/* Secure register access */
+uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval);
+uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
+				uint32_t *retval);
+uint32_t intel_secure_reg_update(uint64_t reg_addr, uint32_t mask,
+				 uint32_t val, uint32_t *retval);
+
+/* Miscellaneous HPS services */
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask);
+
+/* SiP Service handler for version 2 */
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+			 u_register_t x1,
+			 u_register_t x2,
+			 u_register_t x3,
+			 u_register_t x4,
+			 void *cookie,
+			 void *handle,
+			 u_register_t flags);
+
 #endif /* SOCFPGA_SIP_SVC_H */
diff --git a/plat/intel/soc/common/include/socfpga_system_manager.h b/plat/intel/soc/common/include/socfpga_system_manager.h
index a77734d..7f67313 100644
--- a/plat/intel/soc/common/include/socfpga_system_manager.h
+++ b/plat/intel/soc/common/include/socfpga_system_manager.h
@@ -38,8 +38,8 @@
 #define SYSMGR_SDMMC_DRVSEL(x)			(((x) & 0x7) << 0)
 #define SYSMGR_SDMMC_SMPLSEL(x)			(((x) & 0x7) << 4)
 
-#define IDLE_DATA_LWSOC2FPGA				BIT(0)
-#define IDLE_DATA_SOC2FPGA				BIT(4)
+#define IDLE_DATA_LWSOC2FPGA				BIT(4)
+#define IDLE_DATA_SOC2FPGA				BIT(0)
 #define IDLE_DATA_MASK		(IDLE_DATA_LWSOC2FPGA | IDLE_DATA_SOC2FPGA)
 
 #define SYSMGR_ECC_OCRAM_MASK				BIT(1)
diff --git a/plat/intel/soc/common/sip/socfpga_sip_fcs.c b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
index 85551a4..eacc4dd 100644
--- a/plat/intel/soc/common/sip/socfpga_sip_fcs.c
+++ b/plat/intel/soc/common/sip/socfpga_sip_fcs.c
@@ -11,7 +11,18 @@
 #include "socfpga_mailbox.h"
 #include "socfpga_sip_svc.h"
 
-static bool is_size_4_bytes_aligned(uint32_t size)
+/* FCS static variables */
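+/*
+ * Each *_param below caches the context set up by its *_init SMC until the
+ * matching update/finalize call consumes it, so only one operation per
+ * crypto service can be in flight at a time.
+ */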
+static fcs_crypto_service_aes_data fcs_aes_init_payload;
+static fcs_crypto_service_data fcs_sha_get_digest_param;
+static fcs_crypto_service_data fcs_sha_mac_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sign_param;
+static fcs_crypto_service_data fcs_ecdsa_hash_sig_verify_param;
+static fcs_crypto_service_data fcs_sha2_data_sign_param;
+static fcs_crypto_service_data fcs_sha2_data_sig_verify_param;
+static fcs_crypto_service_data fcs_ecdsa_get_pubkey_param;
+static fcs_crypto_service_data fcs_ecdh_request_param;
+
+bool is_size_4_bytes_aligned(uint32_t size)
 {
 	if ((size % MBOX_WORD_BYTE) != 0U) {
 		return false;
@@ -20,6 +31,53 @@
 	}
 }
 
+static bool is_8_bytes_aligned(uint32_t data)
+{
+	if ((data % (MBOX_WORD_BYTE * 2U)) != 0U) {
+		return false;
+	} else {
+		return true;
+	}
+}
+
+static bool is_32_bytes_aligned(uint32_t data)
+{
+	if ((data % (8U * MBOX_WORD_BYTE)) != 0U) {
+		return false;
+	} else {
+		return true;
+	}
+}
+
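+/*
+ * Common init helper for the session-based crypto services: it records the
+ * session, context, key and 4-byte crypto parameter for use by the
+ * subsequent update/finalize call.
+ */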
+static int intel_fcs_crypto_service_init(uint32_t session_id,
+			uint32_t context_id, uint32_t key_id,
+			uint32_t param_size, uint64_t param_data,
+			fcs_crypto_service_data *data_addr,
+			uint32_t *mbox_error)
+{
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (param_size != 4) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	memset(data_addr, 0, sizeof(fcs_crypto_service_data));
+
+	data_addr->session_id = session_id;
+	data_addr->context_id = context_id;
+	data_addr->key_id = key_id;
+	data_addr->crypto_param_size = param_size;
+	data_addr->crypto_param = param_data;
+
+	data_addr->is_updated = 0;
+
+	*mbox_error = 0;
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
 uint32_t intel_fcs_random_number_gen(uint64_t addr, uint64_t *ret_size,
 					uint32_t *mbox_error)
 {
@@ -57,6 +115,45 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
+int intel_fcs_random_number_gen_ext(uint32_t session_id, uint32_t context_id,
+				uint32_t size, uint32_t *send_id)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t crypto_header;
+
+	if (size > (FCS_RANDOM_EXT_MAX_WORD_SIZE *
+		MBOX_WORD_BYTE) || size == 0U) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
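+	/* Single-shot request: both the INIT and FINALIZE flags are set. */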
+	crypto_header = (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_FINALIZE) <<
+			FCS_CS_FIELD_FLAG_OFFSET;
+
+	fcs_rng_payload payload = {
+		session_id,
+		context_id,
+		crypto_header,
+		size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_RANDOM_GEN,
+					(uint32_t *) &payload, payload_size,
+					CMD_INDIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
 uint32_t intel_fcs_send_cert(uint64_t addr, uint64_t size,
 					uint32_t *send_id)
 {
@@ -74,6 +171,8 @@
 				(uint32_t *)addr, size / MBOX_WORD_BYTE,
 				CMD_DIRECT);
 
+	flush_dcache_range(addr, size);
+
 	if (status < 0) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
@@ -95,37 +194,78 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
-uint32_t intel_fcs_cryption(uint32_t mode, uint32_t src_addr,
-		uint32_t src_size, uint32_t dst_addr,
-		uint32_t dst_size, uint32_t *send_id)
+uint32_t intel_fcs_cntr_set_preauth(uint8_t counter_type, int32_t counter_value,
+					uint32_t test_bit, uint32_t *mbox_error)
 {
 	int status;
-	uint32_t cmd;
+	uint32_t first_word;
+	uint32_t payload_size;
 
-	fcs_crypt_payload payload = {
-		FCS_CRYPTION_DATA_0,
+	if ((test_bit != MBOX_TEST_BIT) &&
+		(test_bit != 0)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type < FCS_BIG_CNTR_SEL) ||
+		(counter_type > FCS_SVN_CNTR_3_SEL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type == FCS_BIG_CNTR_SEL) &&
+		(counter_value > FCS_BIG_CNTR_VAL_MAX)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((counter_type >= FCS_SVN_CNTR_0_SEL) &&
+		(counter_type <= FCS_SVN_CNTR_3_SEL) &&
+		(counter_value > FCS_SVN_CNTR_VAL_MAX)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	first_word = test_bit | counter_type;
+	fcs_cntr_set_preauth_payload payload = {
+		first_word,
+		counter_value
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+	status =  mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CNTR_SET_PREAUTH,
+				  (uint32_t *) &payload, payload_size,
+				  CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+uint32_t intel_fcs_encryption(uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
+{
+	int status;
+	uint32_t load_size;
+
+	fcs_encrypt_payload payload = {
+		FCS_ENCRYPTION_DATA_0,
 		src_addr,
 		src_size,
 		dst_addr,
 		dst_size };
+	load_size = sizeof(payload) / MBOX_WORD_BYTE;
 
 	if (!is_address_in_ddr_range(src_addr, src_size) ||
 		!is_address_in_ddr_range(dst_addr, dst_size)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
-	if (!is_size_4_bytes_aligned(sizeof(fcs_crypt_payload))) {
+	if (!is_size_4_bytes_aligned(src_size)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
 	}
 
-	if (mode != 0U) {
-		cmd = MBOX_FCS_ENCRYPT_REQ;
-	} else {
-		cmd = MBOX_FCS_DECRYPT_REQ;
-	}
-
-	status = mailbox_send_cmd_async(send_id, cmd, (uint32_t *) &payload,
-				sizeof(fcs_crypt_payload) / MBOX_WORD_BYTE,
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_ENCRYPT_REQ,
+				(uint32_t *) &payload, load_size,
 				CMD_INDIRECT);
 	inv_dcache_range(dst_addr, dst_size);
 
@@ -136,6 +276,261 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
+uint32_t intel_fcs_decryption(uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t dst_size, uint32_t *send_id)
+{
+	int status;
+	uint32_t load_size;
+	uintptr_t id_offset;
+
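+	/* The two-word owner ID is read from the source buffer. */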
+	id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+	fcs_decrypt_payload payload = {
+		FCS_DECRYPTION_DATA_0,
+		{mmio_read_32(id_offset),
+		mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+		src_addr,
+		src_size,
+		dst_addr,
+		dst_size };
+	load_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_DECRYPT_REQ,
+				(uint32_t *) &payload, load_size,
+				CMD_INDIRECT);
+	inv_dcache_range(dst_addr, dst_size);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_encryption_ext(uint32_t session_id, uint32_t context_id,
+		uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+	uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_encrypt_ext_payload payload = {
+		session_id,
+		context_id,
+		FCS_CRYPTION_CRYPTO_HEADER,
+		src_addr,
+		src_size,
+		dst_addr,
+		*dst_size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ENCRYPT_REQ,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, resp_data, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+	inv_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_decryption_ext(uint32_t session_id, uint32_t context_id,
+		uint32_t src_addr, uint32_t src_size,
+		uint32_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uintptr_t id_offset;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CRYPTION_RESP_WORD_SIZE;
+	uint32_t resp_data[FCS_CRYPTION_RESP_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	id_offset = src_addr + FCS_OWNER_ID_OFFSET;
+	fcs_decrypt_ext_payload payload = {
+		session_id,
+		context_id,
+		FCS_CRYPTION_CRYPTO_HEADER,
+		{mmio_read_32(id_offset),
+		mmio_read_32(id_offset + MBOX_WORD_BYTE)},
+		src_addr,
+		src_size,
+		dst_addr,
+		*dst_size
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_DECRYPT_REQ,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, resp_data, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len != FCS_CRYPTION_RESP_WORD_SIZE) {
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_data[FCS_CRYPTION_RESP_SIZE_OFFSET];
+	inv_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_sigma_teardown(uint32_t session_id, uint32_t *mbox_error)
+{
+	int status;
+
+	if ((session_id != PSGSIGMA_SESSION_ID_ONE) &&
+		(session_id != PSGSIGMA_UNKNOWN_SESSION)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	psgsigma_teardown_msg message = {
+		RESERVED_AS_ZERO,
+		PSGSIGMA_TEARDOWN_MAGIC,
+		session_id
+	};
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_PSG_SIGMA_TEARDOWN,
+			(uint32_t *) &message, sizeof(message) / MBOX_WORD_BYTE,
+			CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_chip_id(uint32_t *id_low, uint32_t *id_high, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t load_size;
+	uint32_t chip_id[2];
+
+	load_size = sizeof(chip_id) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_GET_CHIPID, NULL,
+			0U, CMD_CASUAL, (uint32_t *) chip_id, &load_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*id_low = chip_id[0];
+	*id_high = chip_id[1];
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_attestation_subkey(uint64_t src_addr, uint32_t src_size,
+		uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t send_size = src_size / MBOX_WORD_BYTE;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_ATTESTATION_SUBKEY,
+			(uint32_t *) src_addr, send_size, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_measurement(uint64_t src_addr, uint32_t src_size,
+		uint64_t dst_addr, uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t send_size = src_size / MBOX_WORD_BYTE;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_MEASUREMENT,
+			(uint32_t *) src_addr, send_size, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
 uint32_t intel_fcs_get_rom_patch_sha384(uint64_t addr, uint64_t *ret_size,
 					uint32_t *mbox_error)
 {
@@ -165,3 +560,1180 @@
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
+
+int intel_fcs_get_attestation_cert(uint32_t cert_request, uint64_t dst_addr,
+			uint32_t *dst_size, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+		cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_GET_ATTESTATION_CERT,
+			(uint32_t *) &cert_request, 1U, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_create_cert_on_reload(uint32_t cert_request,
+			uint32_t *mbox_error)
+{
+	int status;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (cert_request < FCS_ATTEST_FIRMWARE_CERT ||
+		cert_request > FCS_ATTEST_CERT_MAX_REQ_PARAM) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_CREATE_CERT_ON_RELOAD,
+			(uint32_t *) &cert_request, 1U, CMD_CASUAL,
+			NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_open_crypto_service_session(uint32_t *session_id,
+			uint32_t *mbox_error)
+{
+	int status;
+	uint32_t resp_len = 1U;
+
+	if ((session_id == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_OPEN_CS_SESSION,
+			NULL, 0U, CMD_CASUAL, session_id, &resp_len);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_close_crypto_service_session(uint32_t session_id,
+			uint32_t *mbox_error)
+{
+	int status;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_CLOSE_CS_SESSION,
+			&session_id, 1U, CMD_CASUAL, NULL, NULL);
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_import_crypto_service_key(uint64_t src_addr, uint32_t src_size,
+		uint32_t *send_id)
+{
+	int status;
+
+	if (src_size > (FCS_CS_KEY_OBJ_MAX_WORD_SIZE *
+		MBOX_WORD_BYTE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_IMPORT_CS_KEY,
+				(uint32_t *)src_addr, src_size / MBOX_WORD_BYTE,
+				CMD_INDIRECT);
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_export_crypto_service_key(uint32_t session_id, uint32_t key_id,
+		uint64_t dst_addr, uint32_t *dst_size,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CS_KEY_OBJ_MAX_WORD_SIZE;
+	uint32_t resp_data[FCS_CS_KEY_OBJ_MAX_WORD_SIZE] = {0U};
+	uint32_t op_status = 0U;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_EXPORT_CS_KEY,
+			(uint32_t *) &payload, payload_size,
+			CMD_CASUAL, resp_data, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = resp_data[0] & FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	if (resp_len > 1) {
+
+		/* The exported key object starts at the second response word */
+		*dst_size = (resp_len - 1) * MBOX_WORD_BYTE;
+
+		for (i = 1U; i < resp_len; i++) {
+			mmio_write_32(dst_addr, resp_data[i]);
+			dst_addr += MBOX_WORD_BYTE;
+		}
+
+		flush_dcache_range(dst_addr - *dst_size, *dst_size);
+
+	} else {
+
+		/* Unexpected response: the key object is missing */
+		*mbox_error = MBOX_RET_ERROR;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_remove_crypto_service_key(uint32_t session_id, uint32_t key_id,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = 1U;
+	uint32_t resp_data = 0U;
+	uint32_t op_status = 0U;
+
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_REMOVE_CS_KEY,
+			(uint32_t *) &payload, payload_size,
+			CMD_CASUAL, &resp_data, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = resp_data & FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_crypto_service_key_info(uint32_t session_id, uint32_t key_id,
+		uint64_t dst_addr, uint32_t *dst_size,
+		uint32_t *mbox_error)
+{
+	int status;
+	uint32_t payload_size;
+	uint32_t resp_len = FCS_CS_KEY_INFO_MAX_WORD_SIZE;
+	uint32_t op_status = 0U;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	fcs_cs_key_payload payload = {
+		session_id,
+		RESERVED_AS_ZERO,
+		RESERVED_AS_ZERO,
+		key_id
+	};
+
+	payload_size = sizeof(payload) / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_CS_KEY_INFO,
+				(uint32_t *) &payload, payload_size,
+				CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+	if (resp_len > 0) {
+		op_status = mmio_read_32(dst_addr) &
+			FCS_CS_KEY_RESP_STATUS_MASK;
+	}
+
+	if (status < 0) {
+		*mbox_error = (-status) | (op_status << FCS_CS_KEY_RESP_STATUS_OFFSET);
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_get_digest_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha_get_digest_param,
+				mbox_error);
+}
+
+int intel_fcs_get_digest_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t resp_len;
+	uint32_t payload[FCS_GET_DIGEST_CMD_MAX_WORD_SIZE] = {0U};
+
+	if (dst_size == NULL || mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha_get_digest_param.session_id != session_id ||
+	    fcs_sha_get_digest_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Source data must be 8 bytes aligned */
+	if (!is_8_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		 !is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+
+	if (fcs_sha_get_digest_param.is_updated) {
+		fcs_sha_get_digest_param.crypto_param_size = 0;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |=  FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha_get_digest_param.is_updated = 1;
+	}
+
+	crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			(fcs_sha_get_digest_param.crypto_param_size &
+			FCS_CS_FIELD_SIZE_MASK));
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha_get_digest_param.session_id;
+	i++;
+	payload[i] = fcs_sha_get_digest_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
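+	/* Key ID and crypto parameters are only sent on the INIT pass. */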
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha_get_digest_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = fcs_sha_get_digest_param.crypto_param
+				& INTEL_SIP_SMC_FCS_SHA_MODE_MASK;
+		payload[i] |= ((fcs_sha_get_digest_param.crypto_param
+				>> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+				& INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+				<< FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+		i++;
+	}
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = src_size;
+	i++;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_GET_DIGEST_REQ,
+				payload, i, CMD_CASUAL,
+				(uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_sha_get_digest_param, 0,
+		sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_mac_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha_mac_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_mac_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t resp_len;
+	uint32_t payload[FCS_MAC_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uintptr_t mac_offset;
+
+	if (dst_size == NULL || mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha_mac_verify_param.session_id != session_id ||
+		fcs_sha_mac_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (data_size >= src_size) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size) ||
+		!is_8_bytes_aligned(data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+
+	if (fcs_sha_mac_verify_param.is_updated) {
+		fcs_sha_mac_verify_param.crypto_param_size = 0;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised) {
+		flag |=  FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |=  FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha_mac_verify_param.is_updated = 1;
+	}
+
+	crypto_header = ((flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			(fcs_sha_mac_verify_param.crypto_param_size &
+			FCS_CS_FIELD_SIZE_MASK));
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha_mac_verify_param.session_id;
+	i++;
+	payload[i] = fcs_sha_mac_verify_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha_mac_verify_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = ((fcs_sha_mac_verify_param.crypto_param
+				>> INTEL_SIP_SMC_FCS_DIGEST_SIZE_OFFSET)
+				& INTEL_SIP_SMC_FCS_DIGEST_SIZE_MASK)
+				<< FCS_SHA_HMAC_CRYPTO_PARAM_SIZE_OFFSET;
+		i++;
+	}
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = data_size;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_FINALIZE) {
+		/* Copy mac data to command */
+		mac_offset = src_addr + data_size;
+		memcpy((uint8_t *) &payload[i], (uint8_t *) mac_offset,
+		src_size - data_size);
+
+		i += (src_size - data_size) / MBOX_WORD_BYTE;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_MAC_VERIFY_REQ,
+				payload, i, CMD_CASUAL,
+				(uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised) {
+		memset((void *)&fcs_sha_mac_verify_param, 0,
+		sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sign_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_hash_sign_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_hash_sign_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload[FCS_ECDSA_HASH_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t hash_data_addr;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_hash_sign_param.session_id != session_id ||
+		fcs_ecdsa_hash_sign_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	/* Crypto header */
+	i = 0;
+	payload[i] = fcs_ecdsa_hash_sign_param.session_id;
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.context_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.key_id;
+
+	/* Crypto parameters */
+	i++;
+	payload[i] = fcs_ecdsa_hash_sign_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+	/* Hash Data */
+	i++;
+	hash_data_addr = src_addr;
+	memcpy((uint8_t *) &payload[i], (uint8_t *) hash_data_addr,
+			src_size);
+
+	i += src_size / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIGN_REQ,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *) &fcs_ecdsa_hash_sign_param,
+			0, sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_hash_sig_verify_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_hash_sig_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_hash_sig_verify_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i = 0;
+	uint32_t payload[FCS_ECDSA_HASH_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t hash_sig_pubkey_addr;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_hash_sig_verify_param.session_id != session_id ||
+	fcs_ecdsa_hash_sig_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	/* Crypto header */
+	i = 0;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.session_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.context_id;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.key_id;
+
+	/* Crypto parameters */
+	i++;
+	payload[i] = fcs_ecdsa_hash_sig_verify_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+
+	/* Hash Data Word, Signature Data Word and Public Key Data Word */
+	i++;
+	hash_sig_pubkey_addr = src_addr;
+	memcpy((uint8_t *) &payload[i],
+			(uint8_t *) hash_sig_pubkey_addr, src_size);
+
+	i += (src_size / MBOX_WORD_BYTE);
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_HASH_SIG_VERIFY,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *)&fcs_ecdsa_hash_sig_verify_param,
+			0, sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha2_data_sign_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_sha2_data_sign_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint8_t is_finalised,
+				uint32_t *mbox_error)
+{
+	int status;
+	int i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t payload[FCS_ECDSA_SHA2_DATA_SIGN_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha2_data_sign_param.session_id != session_id ||
+		fcs_sha2_data_sign_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Source data must be 8 bytes aligned */
+	if (!is_8_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_sha2_data_sign_param.is_updated) {
+		fcs_sha2_data_sign_param.crypto_param_size = 0;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha2_data_sign_param.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_sha2_data_sign_param.crypto_param_size;
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha2_data_sign_param.session_id;
+	i++;
+	payload[i] = fcs_sha2_data_sign_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha2_data_sign_param.key_id;
+		/* Crypto parameters */
+		i++;
+		payload[i] = fcs_sha2_data_sign_param.crypto_param
+				& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+		i++;
+	}
+
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = src_size;
+	i++;
+	status = mailbox_send_cmd(MBOX_JOB_ID,
+			MBOX_FCS_ECDSA_SHA2_DATA_SIGN_REQ, payload,
+			i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_sha2_data_sign_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_init(uint32_t session_id,
+				uint32_t context_id, uint32_t key_id,
+				uint32_t param_size, uint64_t param_data,
+				uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_sha2_data_sig_verify_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint32_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t *dst_size, uint32_t data_size,
+				uint8_t is_finalised, uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t payload[FCS_ECDSA_SHA2_DATA_SIG_VERIFY_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t sig_pubkey_offset;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_sha2_data_sig_verify_param.session_id != session_id ||
+		fcs_sha2_data_sig_verify_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(src_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_8_bytes_aligned(data_size) ||
+		!is_8_bytes_aligned(src_addr)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_sha2_data_sig_verify_param.is_updated)
+		fcs_sha2_data_sig_verify_param.crypto_param_size = 0;
+	else
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+
+	if (is_finalised != 0U)
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_sha2_data_sig_verify_param.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_sha2_data_sig_verify_param.crypto_param_size;
+
+	/* Prepare command payload */
+	i = 0;
+	payload[i] = fcs_sha2_data_sig_verify_param.session_id;
+	i++;
+	payload[i] = fcs_sha2_data_sig_verify_param.context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		payload[i] = fcs_sha2_data_sig_verify_param.key_id;
+		i++;
+		/* Crypto parameters */
+		payload[i] = fcs_sha2_data_sig_verify_param.crypto_param
+				& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+		i++;
+	}
+
+	/* Data source address and size */
+	payload[i] = src_addr;
+	i++;
+	payload[i] = data_size;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_FINALIZE) {
+		/* Signature + Public Key Data */
+		sig_pubkey_offset = src_addr + data_size;
+		memcpy((uint8_t *) &payload[i], (uint8_t *) sig_pubkey_offset,
+			src_size - data_size);
+
+		i += (src_size - data_size) / MBOX_WORD_BYTE;
+	}
+
+	status = mailbox_send_cmd(MBOX_JOB_ID,
+			MBOX_FCS_ECDSA_SHA2_DATA_SIGN_VERIFY, payload, i,
+			CMD_CASUAL, (uint32_t *) dst_addr, &resp_len);
+
+	if (is_finalised != 0U) {
+		memset((void *) &fcs_sha2_data_sig_verify_param, 0,
+			sizeof(fcs_crypto_service_data));
+	}
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdsa_get_pubkey_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdsa_get_pubkey_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdsa_get_pubkey_finalize(uint32_t session_id, uint32_t context_id,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	int i;
+	uint32_t crypto_header;
+	uint32_t ret_size;
+	uint32_t payload[FCS_ECDSA_GET_PUBKEY_MAX_WORD_SIZE] = {0U};
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdsa_get_pubkey_param.session_id != session_id ||
+		fcs_ecdsa_get_pubkey_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	ret_size = *dst_size / MBOX_WORD_BYTE;
+
+	crypto_header = ((FCS_CS_FIELD_FLAG_INIT |
+			FCS_CS_FIELD_FLAG_UPDATE |
+			FCS_CS_FIELD_FLAG_FINALIZE) <<
+			FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_ecdsa_get_pubkey_param.crypto_param_size;
+	i = 0;
+	/* Prepare command payload */
+	payload[i] = session_id;
+	i++;
+	payload[i] = context_id;
+	i++;
+	payload[i] = crypto_header;
+	i++;
+	payload[i] = fcs_ecdsa_get_pubkey_param.key_id;
+	i++;
+	payload[i] = (uint32_t) fcs_ecdsa_get_pubkey_param.crypto_param &
+			INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+	i++;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDSA_GET_PUBKEY,
+			payload, i, CMD_CASUAL,
+			(uint32_t *) dst_addr, &ret_size);
+
+	memset((void *) &fcs_ecdsa_get_pubkey_param, 0,
+		sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = ret_size * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_ecdh_request_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint32_t param_size,
+				uint64_t param_data, uint32_t *mbox_error)
+{
+	return intel_fcs_crypto_service_init(session_id, context_id,
+				key_id, param_size, param_data,
+				(void *) &fcs_ecdh_request_param,
+				mbox_error);
+}
+
+int intel_fcs_ecdh_request_finalize(uint32_t session_id, uint32_t context_id,
+				uint32_t src_addr, uint32_t src_size,
+				uint64_t dst_addr, uint32_t *dst_size,
+				uint32_t *mbox_error)
+{
+	int status;
+	uint32_t i;
+	uint32_t payload[FCS_ECDH_REQUEST_CMD_MAX_WORD_SIZE] = {0U};
+	uint32_t resp_len;
+	uintptr_t pubkey;
+
+	if ((dst_size == NULL) || (mbox_error == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (fcs_ecdh_request_param.session_id != session_id ||
+		fcs_ecdh_request_param.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(src_addr, src_size) ||
+		!is_address_in_ddr_range(dst_addr, *dst_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = *dst_size / MBOX_WORD_BYTE;
+
+	/* Prepare command payload */
+	i = 0;
+	/* Crypto header */
+	payload[i] = fcs_ecdh_request_param.session_id;
+	i++;
+	payload[i] = fcs_ecdh_request_param.context_id;
+	i++;
+	payload[i] = fcs_ecdh_request_param.crypto_param_size
+			& FCS_CS_FIELD_SIZE_MASK;
+	payload[i] |= (FCS_CS_FIELD_FLAG_INIT | FCS_CS_FIELD_FLAG_UPDATE
+			| FCS_CS_FIELD_FLAG_FINALIZE)
+			<< FCS_CS_FIELD_FLAG_OFFSET;
+	i++;
+	payload[i] = fcs_ecdh_request_param.key_id;
+	i++;
+	/* Crypto parameters */
+	payload[i] = fcs_ecdh_request_param.crypto_param
+			& INTEL_SIP_SMC_FCS_ECC_ALGO_MASK;
+	i++;
+	/* Public key data */
+	pubkey = src_addr;
+	memcpy((uint8_t *) &payload[i], (uint8_t *) pubkey, src_size);
+	i += src_size / MBOX_WORD_BYTE;
+
+	status = mailbox_send_cmd(MBOX_JOB_ID, MBOX_FCS_ECDH_REQUEST,
+			payload, i, CMD_CASUAL, (uint32_t *) dst_addr,
+			&resp_len);
+
+	memset((void *)&fcs_ecdh_request_param, 0,
+			sizeof(fcs_crypto_service_data));
+
+	if (status < 0) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	*dst_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(dst_addr, *dst_size);
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_aes_crypt_init(uint32_t session_id, uint32_t context_id,
+				uint32_t key_id, uint64_t param_addr,
+				uint32_t param_size, uint32_t *mbox_error)
+{
+	if (mbox_error == NULL) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
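+	/* Cache the AES parameters for the later update/finalize call. */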
+	memset((void *)&fcs_aes_init_payload, 0U, sizeof(fcs_aes_init_payload));
+
+	fcs_aes_init_payload.session_id = session_id;
+	fcs_aes_init_payload.context_id = context_id;
+	fcs_aes_init_payload.param_size = param_size;
+	fcs_aes_init_payload.key_id	= key_id;
+
+	memcpy((uint8_t *) fcs_aes_init_payload.crypto_param,
+		(uint8_t *) param_addr, param_size);
+
+	fcs_aes_init_payload.is_updated = 0;
+
+	*mbox_error = 0;
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+int intel_fcs_aes_crypt_update_finalize(uint32_t session_id,
+				uint32_t context_id, uint64_t src_addr,
+				uint32_t src_size, uint64_t dst_addr,
+				uint32_t dst_size, uint8_t is_finalised,
+				uint32_t *send_id)
+{
+	int status;
+	int i;
+	uint32_t flag;
+	uint32_t crypto_header;
+	uint32_t fcs_aes_crypt_payload[FCS_AES_CMD_MAX_WORD_SIZE];
+
+	if (fcs_aes_init_payload.session_id != session_id ||
+		fcs_aes_init_payload.context_id != context_id) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((!is_8_bytes_aligned(src_addr)) ||
+		(!is_32_bytes_aligned(src_size)) ||
+		(!is_address_in_ddr_range(src_addr, src_size))) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((!is_8_bytes_aligned(dst_addr)) ||
+		(!is_32_bytes_aligned(dst_size))) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if ((dst_size > FCS_AES_MAX_DATA_SIZE ||
+		dst_size < FCS_AES_MIN_DATA_SIZE) ||
+		(src_size > FCS_AES_MAX_DATA_SIZE ||
+		src_size < FCS_AES_MIN_DATA_SIZE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Prepare crypto header */
+	flag = 0;
+	if (fcs_aes_init_payload.is_updated) {
+		fcs_aes_init_payload.param_size = 0;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_INIT;
+	}
+
+	if (is_finalised != 0U) {
+		flag |= FCS_CS_FIELD_FLAG_FINALIZE;
+	} else {
+		flag |= FCS_CS_FIELD_FLAG_UPDATE;
+		fcs_aes_init_payload.is_updated = 1;
+	}
+	crypto_header = (flag << FCS_CS_FIELD_FLAG_OFFSET) |
+			fcs_aes_init_payload.param_size;
+
+	i = 0U;
+	fcs_aes_crypt_payload[i] = session_id;
+	i++;
+	fcs_aes_crypt_payload[i] = context_id;
+	i++;
+	fcs_aes_crypt_payload[i] = crypto_header;
+	i++;
+
+	if ((crypto_header >> FCS_CS_FIELD_FLAG_OFFSET) &
+		FCS_CS_FIELD_FLAG_INIT) {
+		fcs_aes_crypt_payload[i] = fcs_aes_init_payload.key_id;
+		i++;
+
+		memcpy((uint8_t *) &fcs_aes_crypt_payload[i],
+			(uint8_t *) fcs_aes_init_payload.crypto_param,
+			fcs_aes_init_payload.param_size);
+
+		i += fcs_aes_init_payload.param_size / MBOX_WORD_BYTE;
+	}
+
+	fcs_aes_crypt_payload[i] = (uint32_t) src_addr;
+	i++;
+	fcs_aes_crypt_payload[i] = src_size;
+	i++;
+	fcs_aes_crypt_payload[i] = (uint32_t) dst_addr;
+	i++;
+	fcs_aes_crypt_payload[i] = dst_size;
+	i++;
+
+	status = mailbox_send_cmd_async(send_id, MBOX_FCS_AES_CRYPT_REQ,
+					fcs_aes_crypt_payload, i,
+					CMD_INDIRECT);
+
+	if (is_finalised != 0U) {
+		memset((void *)&fcs_aes_init_payload, 0,
+			sizeof(fcs_aes_init_payload));
+	}
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
diff --git a/plat/intel/soc/common/soc/socfpga_mailbox.c b/plat/intel/soc/common/soc/socfpga_mailbox.c
index 8ecd6db..778d4af 100644
--- a/plat/intel/soc/common/soc/socfpga_mailbox.c
+++ b/plat/intel/soc/common/soc/socfpga_mailbox.c
@@ -11,6 +11,8 @@
 #include "socfpga_mailbox.h"
 #include "socfpga_sip_svc.h"
 
+static mailbox_payload_t mailbox_resp_payload;
+static mailbox_container_t mailbox_resp_ctr = {0, 0, &mailbox_resp_payload};
 
 static bool is_mailbox_cmdbuf_full(uint32_t cin)
 {
@@ -171,6 +173,95 @@
 	return MBOX_NO_RESPONSE;
 }
 
+int mailbox_read_response_async(uint32_t *job_id, uint32_t *header,
+				uint32_t *response, unsigned int *resp_len,
+				uint8_t ignore_client_id)
+{
+	uint32_t rin;
+	uint32_t rout;
+	uint32_t resp_data;
+	uint32_t ret_resp_len = 0;
+	uint8_t is_done = 0;
+
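+	/*
+	 * If a previous call left a response partially read, resume it: the
+	 * saved header gives the number of response words still outstanding.
+	 */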
+	if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+		ret_resp_len = MBOX_RESP_LEN(
+				mailbox_resp_ctr.payload->header) -
+				mailbox_resp_ctr.index;
+	}
+
+	if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) == 1U) {
+		mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0U);
+	}
+
+	rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+	rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+	while (rout != rin && !is_done) {
+
+		resp_data = mmio_read_32(MBOX_ENTRY_TO_ADDR(RESP, (rout)++));
+
+		rout %= MBOX_RESP_BUFFER_SIZE;
+		mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+		rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+
+		if ((mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) != 0) {
+			mailbox_resp_ctr.payload->data[mailbox_resp_ctr.index] = resp_data;
+			mailbox_resp_ctr.index++;
+			ret_resp_len--;
+		} else {
+			if (!ignore_client_id) {
+				if (MBOX_RESP_CLIENT_ID(resp_data) != MBOX_ATF_CLIENT_ID) {
+					*resp_len = 0;
+					return MBOX_WRONG_ID;
+				}
+			}
+
+			*job_id = MBOX_RESP_JOB_ID(resp_data);
+			ret_resp_len = MBOX_RESP_LEN(resp_data);
+			mailbox_resp_ctr.payload->header = resp_data;
+			mailbox_resp_ctr.flag |= MBOX_PAYLOAD_FLAG_BUSY;
+		}
+
+		if (ret_resp_len == 0) {
+			is_done = 1;
+		}
+	}
+
+	if (is_done != 0) {
+
+		/* copy header data to input address if applicable */
+		if (header != NULL) {
+			*header = mailbox_resp_ctr.payload->header;
+		}
+
+		/* copy response data to input buffer if applicable */
+		ret_resp_len = MBOX_RESP_LEN(mailbox_resp_ctr.payload->header);
+		if ((ret_resp_len > 0) && (response != NULL) && (resp_len != NULL)) {
+			if (*resp_len > ret_resp_len) {
+				*resp_len = ret_resp_len;
+			}
+
+			memcpy((uint8_t *) response,
+				(uint8_t *) mailbox_resp_ctr.payload->data,
+				*resp_len * MBOX_WORD_BYTE);
+		}
+
+		/* reset async response param */
+		mailbox_resp_ctr.index = 0;
+		mailbox_resp_ctr.flag = 0;
+
+		if (MBOX_RESP_ERR(mailbox_resp_ctr.payload->header) > 0U) {
+			INFO("Error in async response: %x\n",
+				mailbox_resp_ctr.payload->header);
+			return -MBOX_RESP_ERR(mailbox_resp_ctr.payload->header);
+		}
+
+		return MBOX_RET_OK;
+	}
+
+	*resp_len = 0;
+	return (mailbox_resp_ctr.flag & MBOX_PAYLOAD_FLAG_BUSY) ? MBOX_BUSY : MBOX_NO_RESPONSE;
+}
 
 int mailbox_poll_response(uint32_t job_id, uint32_t urgent, uint32_t *response,
 				unsigned int *resp_len)
@@ -294,6 +385,12 @@
 	return MBOX_RET_OK;
 }
 
+int mailbox_send_cmd_async_ext(uint32_t header_cmd, uint32_t *args,
+			unsigned int len)
+{
+	return fill_mailbox_circular_buffer(header_cmd, args, len);
+}
+
 int mailbox_send_cmd_async(uint32_t *job_id, uint32_t cmd, uint32_t *args,
 			  unsigned int len, unsigned int indirect)
 {
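A minimal sketch of how a caller is expected to drain a reply through the new mailbox_read_response_async() path in the hunk above; the retry bound, back-off delay and buffer sizing are illustrative assumptions, not part of the patch.

#include <drivers/delay_timer.h>

#include "socfpga_mailbox.h"

/* Illustrative poll loop; 'resp' must hold up to 'max_words' payload words. */
static int example_read_async_response(uint32_t *resp, unsigned int max_words)
{
	unsigned int job_id = 0U;
	unsigned int resp_len;
	int status = MBOX_NO_RESPONSE;
	int retries = 1000;	/* arbitrary bound for the example */

	while (retries-- > 0) {
		/* resp_len is in/out and is zeroed while no reply is ready. */
		resp_len = max_words;
		status = mailbox_read_response_async(&job_id, NULL, resp,
						     &resp_len, 0);
		if ((status != MBOX_BUSY) && (status != MBOX_NO_RESPONSE)) {
			break;	/* MBOX_RET_OK or a mailbox error code */
		}
		udelay(100);	/* illustrative back-off */
	}

	return status;
}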
diff --git a/plat/intel/soc/common/soc/socfpga_reset_manager.c b/plat/intel/soc/common/soc/socfpga_reset_manager.c
index b0de60e..bb4efab 100644
--- a/plat/intel/soc/common/soc/socfpga_reset_manager.c
+++ b/plat/intel/soc/common/soc/socfpga_reset_manager.c
@@ -4,10 +4,12 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <common/debug.h>
 #include <errno.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
 #include <lib/mmio.h>
 
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_reset_manager.h"
 #include "socfpga_system_manager.h"
@@ -89,58 +91,241 @@
 
 static int poll_idle_status(uint32_t addr, uint32_t mask, uint32_t match)
 {
-	int time_out = 1000;
+	int time_out = 300;
 
 	while (time_out--) {
 		if ((mmio_read_32(addr) & mask) == match) {
 			return 0;
 		}
+		udelay(1000);
 	}
 	return -ETIMEDOUT;
 }
 
-int socfpga_bridges_enable(void)
+static void socfpga_s2f_bridge_mask(uint32_t mask,
+				uint32_t *brg_mask,
+				uint32_t *noc_mask)
 {
-	/* Clear idle request */
-	mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR), ~0);
+	*brg_mask = 0;
+	*noc_mask = 0;
 
-	/* De-assert all bridges */
-	mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), ~0);
+	if ((mask & SOC2FPGA_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, SOC2FPGA);
+		*noc_mask |= IDLE_DATA_SOC2FPGA;
+	}
 
-	/* Wait until idle ack becomes 0 */
-	return poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
-				IDLE_DATA_MASK, 0);
+	if ((mask & LWHPS2FPGA_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, LWHPS2FPGA);
+		*noc_mask |= IDLE_DATA_LWSOC2FPGA;
+	}
 }
 
-int socfpga_bridges_disable(void)
+static void socfpga_f2s_bridge_mask(uint32_t mask,
+				uint32_t *brg_mask,
+				uint32_t *f2s_idlereq,
+				uint32_t *f2s_force_drain,
+				uint32_t *f2s_en,
+				uint32_t *f2s_idleack,
+				uint32_t *f2s_respempty)
 {
-	/* Set idle request */
-	mmio_write_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET), ~0);
+	*brg_mask = 0;
+	*f2s_idlereq = 0;
+	*f2s_force_drain = 0;
+	*f2s_en = 0;
+	*f2s_idleack = 0;
+	*f2s_respempty = 0;
 
-	/* Enable NOC timeout */
-	mmio_setbits_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
-
-	/* Wait until each idle ack bit toggle to 1 */
-	if (poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
-				IDLE_DATA_MASK, IDLE_DATA_MASK))
-		return -ETIMEDOUT;
-
-	/* Wait until each idle status bit toggle to 1 */
-	if (poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
-				IDLE_DATA_MASK, IDLE_DATA_MASK))
-		return -ETIMEDOUT;
-
-	/* Assert all bridges */
 #if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
-	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
-		~(RSTMGR_FIELD(BRG, DDRSCH) | RSTMGR_FIELD(BRG, FPGA2SOC)));
+	if ((mask & FPGA2SOC_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+	}
+	if ((mask & F2SDRAM0_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM0);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+	}
+	if ((mask & F2SDRAM1_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM1);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM1_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM1_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM1_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM1_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM1_RESPEMPTY;
+	}
+	if ((mask & F2SDRAM2_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, F2SSDRAM2);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM2_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM2_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM2_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM2_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM2_RESPEMPTY;
+	}
 #else
-	mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
-		~(RSTMGR_FIELD(BRG, MPFE) | RSTMGR_FIELD(BRG, FPGA2SOC)));
+	if ((mask & FPGA2SOC_MASK) != 0U) {
+		*brg_mask |= RSTMGR_FIELD(BRG, FPGA2SOC);
+		*f2s_idlereq |= FLAGOUTSETCLR_F2SDRAM0_IDLEREQ;
+		*f2s_force_drain |= FLAGOUTSETCLR_F2SDRAM0_FORCE_DRAIN;
+		*f2s_en |= FLAGOUTSETCLR_F2SDRAM0_ENABLE;
+		*f2s_idleack |= FLAGINTSTATUS_F2SDRAM0_IDLEACK;
+		*f2s_respempty |= FLAGINTSTATUS_F2SDRAM0_RESPEMPTY;
+	}
 #endif
+}
 
-	/* Disable NOC timeout */
-	mmio_clrbits_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
+int socfpga_bridges_enable(uint32_t mask)
+{
+	int ret = 0;
+	uint32_t brg_mask = 0;
+	uint32_t noc_mask = 0;
+	uint32_t f2s_idlereq = 0;
+	uint32_t f2s_force_drain = 0;
+	uint32_t f2s_en = 0;
+	uint32_t f2s_idleack = 0;
+	uint32_t f2s_respempty = 0;
 
-	return 0;
+	/* Enable s2f bridge */
+	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+	if (brg_mask != 0U) {
+		/* Clear idle request */
+		mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_CLR),
+				noc_mask);
+
+		/* De-assert all bridges */
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		/* Wait until idle ack becomes 0 */
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+						noc_mask, 0);
+		if (ret < 0) {
+			ERROR("S2F bridge enable: "
+					"Timeout waiting for idle ack\n");
+		}
+	}
+
+	/* Enable f2s bridge */
+	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+						&f2s_force_drain, &f2s_en,
+						&f2s_idleack, &f2s_respempty);
+	if (brg_mask != 0U) {
+		mmio_clrbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_idlereq);
+
+		ret = poll_idle_status(SOCFPGA_F2SDRAMMGR(
+			SIDEBANDMGR_FLAGINSTATUS0), f2s_idleack, 0);
+		if (ret < 0) {
+			ERROR("F2S bridge enable: "
+					"Timeout waiting for idle ack");
+		}
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_force_drain);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+			f2s_en);
+		udelay(5);
+	}
+
+	return ret;
+}
+
+int socfpga_bridges_disable(uint32_t mask)
+{
+	int ret = 0;
+	int timeout = 300;
+	uint32_t brg_mask = 0;
+	uint32_t noc_mask = 0;
+	uint32_t f2s_idlereq = 0;
+	uint32_t f2s_force_drain = 0;
+	uint32_t f2s_en = 0;
+	uint32_t f2s_idleack = 0;
+	uint32_t f2s_respempty = 0;
+
+	/* Disable s2f bridge */
+	socfpga_s2f_bridge_mask(mask, &brg_mask, &noc_mask);
+	if (brg_mask != 0U) {
+		mmio_setbits_32(SOCFPGA_SYSMGR(NOC_IDLEREQ_SET),
+				noc_mask);
+
+		mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 1);
+
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLEACK),
+						noc_mask, noc_mask);
+		if (ret < 0) {
+			ERROR("S2F Bridge disable: "
+					"Timeout waiting for idle ack\n");
+		}
+
+		ret = poll_idle_status(SOCFPGA_SYSMGR(NOC_IDLESTATUS),
+						noc_mask, noc_mask);
+		if (ret < 0) {
+			ERROR("S2F Bridge disable: "
+					"Timeout waiting for idle status\n");
+		}
+
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST), brg_mask);
+
+		mmio_write_32(SOCFPGA_SYSMGR(NOC_TIMEOUT), 0);
+	}
+
+	/* Disable f2s bridge */
+	socfpga_f2s_bridge_mask(mask, &brg_mask, &f2s_idlereq,
+						&f2s_force_drain, &f2s_en,
+						&f2s_idleack, &f2s_respempty);
+	if (brg_mask != 0U) {
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKEN),
+				RSTMGR_HDSKEN_FPGAHSEN);
+
+		mmio_setbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKREQ_FPGAHSREQ);
+
+		poll_idle_status(SOCFPGA_RSTMGR(HDSKACK),
+				RSTMGR_HDSKACK_FPGAHSACK_MASK,
+				RSTMGR_HDSKACK_FPGAHSACK_MASK);
+
+		mmio_clrbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_en);
+		udelay(5);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTSET0),
+				f2s_force_drain);
+		udelay(5);
+
+		do {
+			/* Read response queue status to ensure it is empty */
+			uint32_t idle_status;
+
+			idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+				SIDEBANDMGR_FLAGINSTATUS0));
+			if ((idle_status & f2s_respempty) != 0U) {
+				idle_status = mmio_read_32(SOCFPGA_F2SDRAMMGR(
+					SIDEBANDMGR_FLAGINSTATUS0));
+				if ((idle_status & f2s_respempty) != 0U) {
+					break;
+				}
+			}
+			udelay(1000);
+		} while (timeout-- > 0);
+
+#if PLATFORM_MODEL == PLAT_SOCFPGA_STRATIX10
+		/* Software must never set the FPGA2SOC bit in BRGMODRST */
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask & ~RSTMGR_FIELD(BRG, FPGA2SOC));
+#else
+		mmio_setbits_32(SOCFPGA_RSTMGR(BRGMODRST),
+				brg_mask);
+#endif
+		mmio_clrbits_32(SOCFPGA_RSTMGR(HDSKREQ),
+				RSTMGR_HDSKEQ_FPGAHSREQ);
+
+		mmio_setbits_32(SOCFPGA_F2SDRAMMGR(SIDEBANDMGR_FLAGOUTCLR0),
+				f2s_idlereq);
+	}
+
+	return ret;
 }
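With this rework, callers select the bridges they want to act on through the mask argument; passing ~0 keeps the old behaviour of touching every bridge, as the SiP handler does. A minimal usage sketch (the header providing the bridge mask macros is an assumption):

#include "socfpga_reset_manager.h"	/* assumed location of the bridge masks */

/* Illustrative: enable only the two HPS-to-FPGA bridges. */
static int example_enable_hps_to_fpga(void)
{
	return socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK);
}

/* Illustrative: quiesce everything, including the FPGA-to-SDRAM ports. */
static int example_disable_all_bridges(void)
{
	return socfpga_bridges_disable(~0);
}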
diff --git a/plat/intel/soc/common/socfpga_delay_timer.c b/plat/intel/soc/common/socfpga_delay_timer.c
index 957738c..dcd51e2 100644
--- a/plat/intel/soc/common/socfpga_delay_timer.c
+++ b/plat/intel/soc/common/socfpga_delay_timer.c
@@ -36,7 +36,6 @@
 
 	timer_init(&plat_timer_ops);
 
-	NOTICE("BL31: MPU clock frequency: %d MHz\n", plat_timer_ops.clk_div);
 }
 
 void socfpga_delay_timer_init(void)
diff --git a/plat/intel/soc/common/socfpga_sip_svc.c b/plat/intel/soc/common/socfpga_sip_svc.c
index f22c2ee..f079349 100644
--- a/plat/intel/soc/common/socfpga_sip_svc.c
+++ b/plat/intel/soc/common/socfpga_sip_svc.c
@@ -19,6 +19,7 @@
 /* Total buffer the driver can hold */
 #define FPGA_CONFIG_BUFFER_SIZE 4
 
+static config_type request_type = NO_REQUEST;
 static int current_block, current_buffer;
 static int read_block, max_blocks;
 static uint32_t send_id, rcv_id;
@@ -27,10 +28,8 @@
 
 /* RSU static variables */
 static uint32_t rsu_dcmf_ver[4] = {0};
-
-/* RSU Max Retry */
-static uint32_t rsu_max_retry;
 static uint16_t rsu_dcmf_stat[4] = {0};
+static uint32_t rsu_max_retry;
 
 /*  SiP Service UUID */
 DEFINE_SVC_UUID2(intl_svc_uid,
@@ -63,8 +62,9 @@
 			args[2] = buffer->size - buffer->size_written;
 			current_buffer++;
 			current_buffer %= FPGA_CONFIG_BUFFER_SIZE;
-		} else
+		} else {
 			args[2] = bytes_per_block;
+		}
 
 		buffer->size_written += args[2];
 		mailbox_send_cmd_async(&send_id, MBOX_RECONFIG_DATA, args,
@@ -79,35 +79,48 @@
 
 static int intel_fpga_sdm_write_all(void)
 {
-	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
+	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
 		if (intel_fpga_sdm_write_buffer(
-			&fpga_config_buffers[current_buffer]))
+			&fpga_config_buffers[current_buffer])) {
 			break;
+		}
+	}
 	return 0;
 }
 
-static uint32_t intel_mailbox_fpga_config_isdone(uint32_t query_type)
+static uint32_t intel_mailbox_fpga_config_isdone(void)
 {
 	uint32_t ret;
 
-	if (query_type == 1U) {
-		ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS, false);
-	} else {
-		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS, true);
+	switch (request_type) {
+	case RECONFIGURATION:
+		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+							true);
+		break;
+	case BITSTREAM_AUTH:
+		ret = intel_mailbox_get_config_status(MBOX_RECONFIG_STATUS,
+							false);
+		break;
+	default:
+		ret = intel_mailbox_get_config_status(MBOX_CONFIG_STATUS,
+							false);
+		break;
 	}
 
 	if (ret != 0U) {
 		if (ret == MBOX_CFGSTAT_STATE_CONFIG) {
 			return INTEL_SIP_SMC_STATUS_BUSY;
 		} else {
+			request_type = NO_REQUEST;
 			return INTEL_SIP_SMC_STATUS_ERROR;
 		}
 	}
 
-	if (bridge_disable) {
-		socfpga_bridges_enable();	/* Enable bridge */
+	if (bridge_disable != 0U) {
+		socfpga_bridges_enable(~0);	/* Enable bridge */
 		bridge_disable = false;
 	}
+	request_type = NO_REQUEST;
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -166,6 +179,7 @@
 		if (status != MBOX_NO_RESPONSE &&
 			status != MBOX_TIMEOUT && resp_len != 0) {
 			mailbox_clear_response();
+			request_type = NO_REQUEST;
 			return INTEL_SIP_SMC_STATUS_ERROR;
 		}
 
@@ -174,10 +188,11 @@
 
 	intel_fpga_sdm_write_all();
 
-	if (*count > 0)
+	if (*count > 0) {
 		status = INTEL_SIP_SMC_STATUS_OK;
-	else if (*count == 0)
+	} else if (*count == 0) {
 		status = INTEL_SIP_SMC_STATUS_BUSY;
+	}
 
 	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
 		if (fpga_config_buffers[i].write_requested != 0) {
@@ -186,8 +201,9 @@
 		}
 	}
 
-	if (all_completed == 1)
+	if (all_completed == 1) {
 		return INTEL_SIP_SMC_STATUS_OK;
+	}
 
 	return status;
 }
@@ -200,6 +216,8 @@
 	unsigned int size = 0;
 	unsigned int resp_len = ARRAY_SIZE(response);
 
+	request_type = RECONFIGURATION;
+
 	if (!CONFIG_TEST_FLAG(flag, PARTIAL_CONFIG)) {
 		bridge_disable = true;
 	}
@@ -207,6 +225,7 @@
 	if (CONFIG_TEST_FLAG(flag, AUTHENTICATION)) {
 		size = 1;
 		bridge_disable = false;
+		request_type = BITSTREAM_AUTH;
 	}
 
 	mailbox_clear_response();
@@ -219,6 +238,7 @@
 
 	if (status < 0) {
 		bridge_disable = false;
+		request_type = NO_REQUEST;
 		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
 
@@ -241,7 +261,7 @@
 
 	/* Disable bridge on full reconfiguration */
 	if (bridge_disable) {
-		socfpga_bridges_disable();
+		socfpga_bridges_disable(~0);
 	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -249,9 +269,11 @@
 
 static bool is_fpga_config_buffer_full(void)
 {
-	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
-		if (!fpga_config_buffers[i].write_requested)
+	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+		if (!fpga_config_buffers[i].write_requested) {
 			return false;
+		}
+	}
 	return true;
 }
 
@@ -260,12 +282,15 @@
 	if (!addr && !size) {
 		return true;
 	}
-	if (size > (UINT64_MAX - addr))
+	if (size > (UINT64_MAX - addr)) {
 		return false;
-	if (addr < BL31_LIMIT)
+	}
+	if (addr < BL31_LIMIT) {
 		return false;
-	if (addr + size > DRAM_BASE + DRAM_SIZE)
+	}
+	if (addr + size > DRAM_BASE + DRAM_SIZE) {
 		return false;
+	}
 
 	return true;
 }
@@ -349,8 +374,9 @@
 /* Secure register access */
 uint32_t intel_secure_reg_read(uint64_t reg_addr, uint32_t *retval)
 {
-	if (is_out_of_sec_range(reg_addr))
+	if (is_out_of_sec_range(reg_addr)) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
 
 	*retval = mmio_read_32(reg_addr);
 
@@ -360,8 +386,9 @@
 uint32_t intel_secure_reg_write(uint64_t reg_addr, uint32_t val,
 				uint32_t *retval)
 {
-	if (is_out_of_sec_range(reg_addr))
+	if (is_out_of_sec_range(reg_addr)) {
 		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
 
 	mmio_write_32(reg_addr, val);
 
@@ -385,8 +412,9 @@
 
 static uint32_t intel_rsu_status(uint64_t *respbuf, unsigned int respbuf_sz)
 {
-	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0)
+	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -399,8 +427,9 @@
 
 static uint32_t intel_rsu_notify(uint32_t execution_stage)
 {
-	if (mailbox_hps_stage_notify(execution_stage) < 0)
+	if (mailbox_hps_stage_notify(execution_stage) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
 }
@@ -408,8 +437,9 @@
 static uint32_t intel_rsu_retry_counter(uint32_t *respbuf, uint32_t respbuf_sz,
 					uint32_t *ret_stat)
 {
-	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0)
+	if (mailbox_rsu_status((uint32_t *)respbuf, respbuf_sz) < 0) {
 		return INTEL_SIP_SMC_RSU_ERROR;
+	}
 
 	*ret_stat = respbuf[8];
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -487,19 +517,19 @@
 }
 
 static uint32_t intel_mbox_send_cmd(uint32_t cmd, uint32_t *args,
-				unsigned int len,
-				uint32_t urgent, uint32_t *response,
+				unsigned int len, uint32_t urgent, uint64_t response,
 				unsigned int resp_len, int *mbox_status,
 				unsigned int *len_in_resp)
 {
 	*len_in_resp = 0;
-	*mbox_status = 0;
+	*mbox_status = GENERIC_RESPONSE_ERROR;
 
-	if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len))
+	if (!is_address_in_ddr_range((uint64_t)args, sizeof(uint32_t) * len)) {
 		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
 
 	int status = mailbox_send_cmd(MBOX_JOB_ID, cmd, args, len, urgent,
-				      response, &resp_len);
+					(uint32_t *) response, &resp_len);
 
 	if (status < 0) {
 		*mbox_status = -status;
@@ -508,6 +538,9 @@
 
 	*mbox_status = 0;
 	*len_in_resp = resp_len;
+
+	flush_dcache_range(response, resp_len * MBOX_WORD_BYTE);
+
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
@@ -526,13 +559,73 @@
 	return INTEL_SIP_SMC_STATUS_OK;
 }
 
-/* Miscellaneous HPS services */
-static uint32_t intel_hps_set_bridges(uint64_t enable)
+uint32_t intel_smc_service_completed(uint64_t addr, uint32_t size,
+				uint32_t mode, uint32_t *job_id,
+				uint32_t *ret_size, uint32_t *mbox_error)
 {
-	if (enable != 0U) {
-		socfpga_bridges_enable();
+	int status = 0;
+	uint32_t resp_len = size / MBOX_WORD_BYTE;
+
+	if (resp_len > MBOX_DATA_MAX_LEN) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_address_in_ddr_range(addr, size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (mode == SERVICE_COMPLETED_MODE_ASYNC) {
+		status = mailbox_read_response_async(job_id,
+				NULL, (uint32_t *) addr, &resp_len, 0);
 	} else {
-		socfpga_bridges_disable();
+		status = mailbox_read_response(job_id,
+				(uint32_t *) addr, &resp_len);
+
+		if (status == MBOX_NO_RESPONSE) {
+			status = MBOX_BUSY;
+		}
+	}
+
+	if (status == MBOX_NO_RESPONSE) {
+		return INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+	}
+
+	if (status == MBOX_BUSY) {
+		return INTEL_SIP_SMC_STATUS_BUSY;
+	}
+
+	*ret_size = resp_len * MBOX_WORD_BYTE;
+	flush_dcache_range(addr, *ret_size);
+
+	if (status != MBOX_RET_OK) {
+		*mbox_error = -status;
+		return INTEL_SIP_SMC_STATUS_ERROR;
+	}
+
+	return INTEL_SIP_SMC_STATUS_OK;
+}
+
+/* Miscellaneous HPS services */
+uint32_t intel_hps_set_bridges(uint64_t enable, uint64_t mask)
+{
+	int status = 0;
+
+	if ((enable & SOCFPGA_BRIDGE_ENABLE) != 0U) {
+		if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+			status = socfpga_bridges_enable((uint32_t)mask);
+		} else {
+			status = socfpga_bridges_enable(~0);
+		}
+	} else {
+		if ((enable & SOCFPGA_BRIDGE_HAS_MASK) != 0U) {
+			status = socfpga_bridges_disable((uint32_t)mask);
+		} else {
+			status = socfpga_bridges_disable(~0);
+		}
+	}
+
+	if (status < 0) {
+		return INTEL_SIP_SMC_STATUS_ERROR;
 	}
 
 	return INTEL_SIP_SMC_STATUS_OK;
@@ -542,7 +635,7 @@
  * This function is responsible for handling all SiP calls from the NS world
  */
 
-uintptr_t sip_smc_handler(uint32_t smc_fid,
+uintptr_t sip_smc_handler_v1(uint32_t smc_fid,
 			 u_register_t x1,
 			 u_register_t x2,
 			 u_register_t x3,
@@ -551,14 +644,14 @@
 			 void *handle,
 			 u_register_t flags)
 {
-	uint32_t retval = 0;
+	uint32_t retval = 0, completed_addr[3];
+	uint32_t retval2 = 0;
 	uint32_t mbox_error = 0;
-	uint32_t completed_addr[3];
 	uint64_t retval64, rsu_respbuf[9];
 	int status = INTEL_SIP_SMC_STATUS_OK;
 	int mbox_status;
 	unsigned int len_in_resp;
-	u_register_t x5, x6;
+	u_register_t x5, x6, x7;
 
 	switch (smc_fid) {
 	case SIP_SVC_UID:
@@ -566,7 +659,7 @@
 		SMC_UUID_RET(handle, intl_svc_uid);
 
 	case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE:
-		status = intel_mailbox_fpga_config_isdone(x1);
+		status = intel_mailbox_fpga_config_isdone();
 		SMC_RET4(handle, status, 0, 0, 0);
 
 	case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM:
@@ -681,6 +774,11 @@
 		status = intel_ecc_dbe_notification(x1);
 		SMC_RET1(handle, status);
 
+	case INTEL_SIP_SMC_SERVICE_COMPLETED:
+		status = intel_smc_service_completed(x1, x2, x3, &rcv_id,
+						&len_in_resp, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x1, len_in_resp);
+
 	case INTEL_SIP_SMC_FIRMWARE_VERSION:
 		status = intel_smc_fw_version(&retval);
 		SMC_RET2(handle, status, retval);
@@ -688,15 +786,295 @@
 	case INTEL_SIP_SMC_MBOX_SEND_CMD:
 		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
 		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
-		status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4,
-					     (uint32_t *)x5, x6, &mbox_status,
-					     &len_in_resp);
+		status = intel_mbox_send_cmd(x1, (uint32_t *)x2, x3, x4, x5, x6,
+						&mbox_status, &len_in_resp);
 		SMC_RET3(handle, status, mbox_status, len_in_resp);
 
 	case INTEL_SIP_SMC_GET_USERCODE:
 		status = intel_smc_get_usercode(&retval);
 		SMC_RET2(handle, status, retval);
 
+	case INTEL_SIP_SMC_FCS_CRYPTION:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+
+		if (x1 == FCS_MODE_DECRYPT) {
+			status = intel_fcs_decryption(x2, x3, x4, x5, &send_id);
+		} else if (x1 == FCS_MODE_ENCRYPT) {
+			status = intel_fcs_encryption(x2, x3, x4, x5, &send_id);
+		} else {
+			status = INTEL_SIP_SMC_STATUS_REJECTED;
+		}
+
+		SMC_RET3(handle, status, x4, x5);
+
+	case INTEL_SIP_SMC_FCS_CRYPTION_EXT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+		if (x3 == FCS_MODE_DECRYPT) {
+			status = intel_fcs_decryption_ext(x1, x2, x4, x5, x6,
+					(uint32_t *) &x7, &mbox_error);
+		} else if (x3 == FCS_MODE_ENCRYPT) {
+			status = intel_fcs_encryption_ext(x1, x2, x4, x5, x6,
+					(uint32_t *) &x7, &mbox_error);
+		} else {
+			status = INTEL_SIP_SMC_STATUS_REJECTED;
+		}
+
+		SMC_RET4(handle, status, mbox_error, x6, x7);
+
+	case INTEL_SIP_SMC_FCS_RANDOM_NUMBER:
+		status = intel_fcs_random_number_gen(x1, &retval64,
+							&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x1, retval64);
+
+	case INTEL_SIP_SMC_FCS_RANDOM_NUMBER_EXT:
+		status = intel_fcs_random_number_gen_ext(x1, x2, x3,
+							&send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_SEND_CERTIFICATE:
+		status = intel_fcs_send_cert(x1, x2, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_GET_PROVISION_DATA:
+		status = intel_fcs_get_provision_data(&send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_CNTR_SET_PREAUTH:
+		status = intel_fcs_cntr_set_preauth(x1, x2, x3,
+							&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_HPS_SET_BRIDGES:
+		status = intel_hps_set_bridges(x1, x2);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_HWMON_READTEMP:
+		status = intel_hwmon_readtemp(x1, &retval);
+		SMC_RET2(handle, status, retval);
+
+	case INTEL_SIP_SMC_HWMON_READVOLT:
+		status = intel_hwmon_readvolt(x1, &retval);
+		SMC_RET2(handle, status, retval);
+
+	case INTEL_SIP_SMC_FCS_PSGSIGMA_TEARDOWN:
+		status = intel_fcs_sigma_teardown(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_CHIP_ID:
+		status = intel_fcs_chip_id(&retval, &retval2, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, retval, retval2);
+
+	case INTEL_SIP_SMC_FCS_ATTESTATION_SUBKEY:
+		status = intel_fcs_attestation_subkey(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_ATTESTATION_MEASUREMENTS:
+		status = intel_fcs_get_measurement(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_GET_ATTESTATION_CERT:
+		status = intel_fcs_get_attestation_cert(x1, x2,
+					(uint32_t *) &x3, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x2, x3);
+
+	case INTEL_SIP_SMC_FCS_CREATE_CERT_ON_RELOAD:
+		status = intel_fcs_create_cert_on_reload(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_OPEN_CS_SESSION:
+		status = intel_fcs_open_crypto_service_session(&retval, &mbox_error);
+		SMC_RET3(handle, status, mbox_error, retval);
+
+	case INTEL_SIP_SMC_FCS_CLOSE_CS_SESSION:
+		status = intel_fcs_close_crypto_service_session(x1, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_IMPORT_CS_KEY:
+		status = intel_fcs_import_crypto_service_key(x1, x2, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_EXPORT_CS_KEY:
+		status = intel_fcs_export_crypto_service_key(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_REMOVE_CS_KEY:
+		status = intel_fcs_remove_crypto_service_key(x1, x2,
+					&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_GET_CS_KEY_INFO:
+		status = intel_fcs_get_crypto_service_key_info(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_get_digest_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, false,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_GET_DIGEST_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_get_digest_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, true,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_mac_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, x7,
+					false, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_MAC_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_mac_verify_update_finalize(x1, x2, x3,
+					x4, x5, (uint32_t *) &x6, x7,
+					true, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_sha2_data_sign_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+					x3, x4, x5, (uint32_t *) &x6, false,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIGN_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_sha2_data_sign_update_finalize(x1, x2,
+					x3, x4, x5, (uint32_t *) &x6, true,
+					&mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_hash_sign_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIGN_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_hash_sign_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_hash_sig_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_HASH_SIG_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdsa_hash_sig_verify_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+					x1, x2, x3, x4, x5, (uint32_t *) &x6,
+					x7, false, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_SHA2_DATA_SIG_VERIFY_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+		status = intel_fcs_ecdsa_sha2_data_sig_verify_update_finalize(
+					x1, x2, x3, x4, x5, (uint32_t *) &x6,
+					x7, true, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdsa_get_pubkey_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDSA_GET_PUBKEY_FINALIZE:
+		status = intel_fcs_ecdsa_get_pubkey_finalize(x1, x2, x3,
+					(uint32_t *) &x4, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x3, x4);
+
+	case INTEL_SIP_SMC_FCS_ECDH_REQUEST_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_ecdh_request_init(x1, x2, x3,
+					x4, x5, &mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_ECDH_REQUEST_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_ecdh_request_finalize(x1, x2, x3,
+					 x4, x5, (uint32_t *) &x6, &mbox_error);
+		SMC_RET4(handle, status, mbox_error, x5, x6);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_INIT:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		status = intel_fcs_aes_crypt_init(x1, x2, x3, x4, x5,
+					&mbox_error);
+		SMC_RET2(handle, status, mbox_error);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_UPDATE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+					x5, x6, false, &send_id);
+		SMC_RET1(handle, status);
+
+	case INTEL_SIP_SMC_FCS_AES_CRYPT_FINALIZE:
+		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+		x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+		status = intel_fcs_aes_crypt_update_finalize(x1, x2, x3, x4,
+					x5, x6, true, &send_id);
+		SMC_RET1(handle, status);
+
 	case INTEL_SIP_SMC_GET_ROM_PATCH_SHA384:
 		status = intel_fcs_get_rom_patch_sha384(x1, &retval64,
 							&mbox_error);
@@ -707,24 +1085,33 @@
 					SIP_SVC_VERSION_MAJOR,
 					SIP_SVC_VERSION_MINOR);
 
-	case INTEL_SIP_SMC_HPS_SET_BRIDGES:
-		status = intel_hps_set_bridges(x1);
-		SMC_RET1(handle, status);
-
-	case INTEL_SIP_SMC_HWMON_READTEMP:
-		status = intel_hwmon_readtemp(x1, &retval);
-		SMC_RET2(handle, status, retval);
-
-	case INTEL_SIP_SMC_HWMON_READVOLT:
-		status = intel_hwmon_readvolt(x1, &retval);
-		SMC_RET2(handle, status, retval);
-
 	default:
 		return socfpga_sip_handler(smc_fid, x1, x2, x3, x4,
 			cookie, handle, flags);
 	}
 }
 
+uintptr_t sip_smc_handler(uint32_t smc_fid,
+			 u_register_t x1,
+			 u_register_t x2,
+			 u_register_t x3,
+			 u_register_t x4,
+			 void *cookie,
+			 void *handle,
+			 u_register_t flags)
+{
+	uint32_t cmd = smc_fid & INTEL_SIP_SMC_CMD_MASK;
+
+	if (cmd >= INTEL_SIP_SMC_CMD_V2_RANGE_BEGIN &&
+	    cmd <= INTEL_SIP_SMC_CMD_V2_RANGE_END) {
+		return sip_smc_handler_v2(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	} else {
+		return sip_smc_handler_v1(smc_fid, x1, x2, x3, x4,
+			cookie, handle, flags);
+	}
+}
+
 DECLARE_RT_SVC(
 	socfpga_sip_svc,
 	OEN_SIP_START,
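The reworked bridge service now takes two arguments: x1 carries the enable/disable request plus a flag saying whether x2 holds a per-bridge mask, and without that flag the handler falls back to ~0 (all bridges). A hedged sketch of the EL3-side call, using the flags and mask already shown in this patch:

/*
 * Illustrative only: enabling a single bridge class through the
 * two-argument service handled by intel_hps_set_bridges().
 */
static uint32_t example_enable_soc2fpga_bridge(void)
{
	return intel_hps_set_bridges(SOCFPGA_BRIDGE_ENABLE |
				     SOCFPGA_BRIDGE_HAS_MASK,
				     SOC2FPGA_MASK);
}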
diff --git a/plat/intel/soc/common/socfpga_sip_svc_v2.c b/plat/intel/soc/common/socfpga_sip_svc_v2.c
new file mode 100644
index 0000000..791c714
--- /dev/null
+++ b/plat/intel/soc/common/socfpga_sip_svc_v2.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+
+#include "socfpga_mailbox.h"
+#include "socfpga_sip_svc.h"
+
+static uint32_t intel_v2_mbox_send_cmd(uint32_t req_header,
+				uint32_t *data, uint32_t data_size)
+{
+	uint32_t value;
+	uint32_t len;
+
+	if ((data == NULL) || (data_size == 0)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (data_size > (MBOX_INC_HEADER_MAX_WORD_SIZE * MBOX_WORD_BYTE)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Make sure client id aligns in SMC SiP V2 header and mailbox header */
+	value = (req_header >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
+				INTEL_SIP_SMC_HEADER_CID_MASK;
+
+	if (value != MBOX_RESP_CLIENT_ID(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/* Make sure job id aligns in SMC SiP V2 header and mailbox header */
+	value = (req_header >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
+				INTEL_SIP_SMC_HEADER_JOB_ID_MASK;
+
+	if (value != MBOX_RESP_JOB_ID(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	/*
+	 * Make sure data length aligns in SMC SiP V2 header and
+	 * mailbox header
+	 */
+	len = (data_size / MBOX_WORD_BYTE) - 1;
+
+	if (len != MBOX_RESP_LEN(data[0])) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	return mailbox_send_cmd_async_ext(data[0], &data[1], len);
+}
+
+static uint32_t intel_v2_mbox_poll_resp(uint64_t req_header,
+				uint32_t *data, uint32_t *data_size,
+				uint64_t *resp_header)
+{
+	int status = 0;
+	uint32_t resp_len;
+	uint32_t job_id = 0;
+	uint32_t client_id = 0;
+	uint32_t version;
+
+	if ((data == NULL) || (data_size == NULL) || (resp_header == NULL)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	if (!is_size_4_bytes_aligned(*data_size)) {
+		return INTEL_SIP_SMC_STATUS_REJECTED;
+	}
+
+	resp_len = (*data_size / MBOX_WORD_BYTE) - 1;
+	status = mailbox_read_response_async(&job_id, &data[0], &data[1],
+				&resp_len, 1);
+
+	if (status == MBOX_BUSY) {
+		status = INTEL_SIP_SMC_STATUS_BUSY;
+	} else if (status == MBOX_NO_RESPONSE) {
+		status = INTEL_SIP_SMC_STATUS_NO_RESPONSE;
+	} else {
+		*data_size = 0;
+
+		if (resp_len > 0) {
+			/*
+			 * Fill in the final response length; the length
+			 * includes both the mailbox header and the payload.
+			 */
+			*data_size = (resp_len + 1) * MBOX_WORD_BYTE;
+
+			/* Extract the client id from mailbox header */
+			client_id = MBOX_RESP_CLIENT_ID(data[0]);
+		}
+
+		/*
+		 * Extract SMC SiP V2 protocol version from
+		 * SMC request header
+		 */
+		version = (req_header >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
+				INTEL_SIP_SMC_HEADER_VERSION_MASK;
+
+		/* Fill in SMC SiP V2 protocol response header */
+		*resp_header = 0;
+		*resp_header |= (((uint64_t)job_id) &
+				INTEL_SIP_SMC_HEADER_JOB_ID_MASK) <<
+				INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET;
+		*resp_header |= (((uint64_t)client_id) &
+				INTEL_SIP_SMC_HEADER_CID_MASK) <<
+				INTEL_SIP_SMC_HEADER_CID_OFFSET;
+		*resp_header |= (((uint64_t)version) &
+				INTEL_SIP_SMC_HEADER_VERSION_MASK) <<
+				INTEL_SIP_SMC_HEADER_VERSION_OFFSET;
+	}
+
+	return status;
+}
+
+uintptr_t sip_smc_handler_v2(uint32_t smc_fid,
+				u_register_t x1,
+				u_register_t x2,
+				u_register_t x3,
+				u_register_t x4,
+				void *cookie,
+				void *handle,
+				u_register_t flags)
+{
+	uint32_t retval = 0;
+	uint64_t retval64 = 0;
+	int status = INTEL_SIP_SMC_STATUS_OK;
+
+	switch (smc_fid) {
+	case INTEL_SIP_SMC_V2_GET_SVC_VERSION:
+		SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK, x1,
+				SIP_SVC_VERSION_MAJOR,
+				SIP_SVC_VERSION_MINOR);
+
+	case INTEL_SIP_SMC_V2_REG_READ:
+		status = intel_secure_reg_read(x2, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_REG_WRITE:
+		status = intel_secure_reg_write(x2, (uint32_t)x3, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_REG_UPDATE:
+		status = intel_secure_reg_update(x2, (uint32_t)x3,
+				(uint32_t)x4, &retval);
+		SMC_RET4(handle, status, x1, retval, x2);
+
+	case INTEL_SIP_SMC_V2_HPS_SET_BRIDGES:
+		status = intel_hps_set_bridges(x2, x3);
+		SMC_RET2(handle, status, x1);
+
+	case INTEL_SIP_SMC_V2_MAILBOX_SEND_COMMAND:
+		status = intel_v2_mbox_send_cmd(x1, (uint32_t *)x2, x3);
+		SMC_RET2(handle, status, x1);
+
+	case INTEL_SIP_SMC_V2_MAILBOX_POLL_RESPONSE:
+		status = intel_v2_mbox_poll_resp(x1, (uint32_t *)x2,
+				(uint32_t *) &x3, &retval64);
+		SMC_RET4(handle, status, retval64, x2, x3);
+
+	default:
+		ERROR("%s: unhandled SMC V2 (0x%x)\n", __func__, smc_fid);
+		SMC_RET1(handle, SMC_UNK);
+	}
+}
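The V2 protocol packs the job ID, client ID and protocol version into a single 64-bit header word, and intel_v2_mbox_poll_resp() above builds the response header the same way. A sketch of matching decode helpers (the function names are illustrative, and the header providing the field macros is assumed):

#include <stdint.h>

#include "socfpga_sip_svc.h"	/* assumed location of the header field macros */

static inline uint32_t example_v2_hdr_job_id(uint64_t hdr)
{
	return (uint32_t)((hdr >> INTEL_SIP_SMC_HEADER_JOB_ID_OFFSET) &
			  INTEL_SIP_SMC_HEADER_JOB_ID_MASK);
}

static inline uint32_t example_v2_hdr_client_id(uint64_t hdr)
{
	return (uint32_t)((hdr >> INTEL_SIP_SMC_HEADER_CID_OFFSET) &
			  INTEL_SIP_SMC_HEADER_CID_MASK);
}

static inline uint32_t example_v2_hdr_version(uint64_t hdr)
{
	return (uint32_t)((hdr >> INTEL_SIP_SMC_HEADER_VERSION_OFFSET) &
			  INTEL_SIP_SMC_HEADER_VERSION_MASK);
}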
diff --git a/plat/intel/soc/n5x/include/socfpga_plat_def.h b/plat/intel/soc/n5x/include/socfpga_plat_def.h
index 3ce03dc..4c36f91 100644
--- a/plat/intel/soc/n5x/include/socfpga_plat_def.h
+++ b/plat/intel/soc/n5x/include/socfpga_plat_def.h
@@ -19,6 +19,9 @@
 #define INTEL_SIP_SMC_FPGA_CONFIG_SIZE		0x2000000
 
 /* Register Mapping */
+#define SOCFPGA_CCU_NOC_REG_BASE		U(0xf7000000)
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
+
 #define SOCFPGA_MMC_REG_BASE			U(0xff808000)
 
 #define SOCFPGA_RSTMGR_REG_BASE			U(0xffd11000)
diff --git a/plat/intel/soc/n5x/platform.mk b/plat/intel/soc/n5x/platform.mk
index b72bcc4..953bf0c 100644
--- a/plat/intel/soc/n5x/platform.mk
+++ b/plat/intel/soc/n5x/platform.mk
@@ -38,6 +38,7 @@
 		plat/intel/soc/n5x/bl31_plat_setup.c			\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c             \
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/intel/soc/stratix10/bl2_plat_setup.c b/plat/intel/soc/stratix10/bl2_plat_setup.c
index cca564a..73e3216 100644
--- a/plat/intel/soc/stratix10/bl2_plat_setup.c
+++ b/plat/intel/soc/stratix10/bl2_plat_setup.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
- * Copyright (c) 2019-2021, Intel Corporation. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, Intel Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -18,6 +18,7 @@
 
 #include "qspi/cadence_qspi.h"
 #include "socfpga_emac.h"
+#include "socfpga_f2sdram_manager.h"
 #include "socfpga_handoff.h"
 #include "socfpga_mailbox.h"
 #include "socfpga_private.h"
@@ -79,8 +80,11 @@
 	mailbox_init();
 	s10_mmc_init();
 
-	if (!intel_mailbox_is_fpga_not_ready())
-		socfpga_bridges_enable();
+	if (!intel_mailbox_is_fpga_not_ready()) {
+		socfpga_bridges_enable(SOC2FPGA_MASK | LWHPS2FPGA_MASK |
+					FPGA2SOC_MASK | F2SDRAM0_MASK | F2SDRAM1_MASK |
+					F2SDRAM2_MASK);
+	}
 }
 
 
diff --git a/plat/intel/soc/stratix10/include/s10_noc.h b/plat/intel/soc/stratix10/include/s10_noc.h
deleted file mode 100644
index 3e1e527..0000000
--- a/plat/intel/soc/stratix10/include/s10_noc.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2019, Intel Corporation. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#define AXI_AP				(1<<0)
-#define FPGA2SOC			(1<<16)
-#define MPU				(1<<24)
-#define S10_NOC_PER_SCR_NAND		0xffd21000
-#define S10_NOC_PER_SCR_NAND_DATA	0xffd21004
-#define S10_NOC_PER_SCR_USB0		0xffd2100c
-#define S10_NOC_PER_SCR_USB1		0xffd21010
-#define S10_NOC_PER_SCR_SPI_M0		0xffd2101c
-#define S10_NOC_PER_SCR_SPI_M1		0xffd21020
-#define S10_NOC_PER_SCR_SPI_S0		0xffd21024
-#define S10_NOC_PER_SCR_SPI_S1		0xffd21028
-#define S10_NOC_PER_SCR_EMAC0		0xffd2102c
-#define S10_NOC_PER_SCR_EMAC1		0xffd21030
-#define S10_NOC_PER_SCR_EMAC2		0xffd21034
-#define S10_NOC_PER_SCR_SDMMC		0xffd21040
-#define S10_NOC_PER_SCR_GPIO0		0xffd21044
-#define S10_NOC_PER_SCR_GPIO1		0xffd21048
-#define S10_NOC_PER_SCR_I2C0		0xffd21050
-#define S10_NOC_PER_SCR_I2C1		0xffd21058
-#define S10_NOC_PER_SCR_I2C2		0xffd2105c
-#define S10_NOC_PER_SCR_I2C3		0xffd21060
-#define S10_NOC_PER_SCR_SP_TIMER0	0xffd21064
-#define S10_NOC_PER_SCR_SP_TIMER1	0xffd21068
-#define S10_NOC_PER_SCR_UART0		0xffd2106c
-#define S10_NOC_PER_SCR_UART1		0xffd21070
-
-
-#define S10_NOC_SYS_SCR_DMA_ECC			0xffd21108
-#define S10_NOC_SYS_SCR_EMAC0RX_ECC		0xffd2110c
-#define S10_NOC_SYS_SCR_EMAC0TX_ECC		0xffd21110
-#define S10_NOC_SYS_SCR_EMAC1RX_ECC		0xffd21114
-#define S10_NOC_SYS_SCR_EMAC1TX_ECC		0xffd21118
-#define S10_NOC_SYS_SCR_EMAC2RX_ECC		0xffd2111c
-#define S10_NOC_SYS_SCR_EMAC2TX_ECC		0xffd21120
-#define S10_NOC_SYS_SCR_NAND_ECC		0xffd2112c
-#define S10_NOC_SYS_SCR_NAND_READ_ECC		0xffd21130
-#define S10_NOC_SYS_SCR_NAND_WRITE_ECC		0xffd21134
-#define S10_NOC_SYS_SCR_OCRAM_ECC		0xffd21138
-#define S10_NOC_SYS_SCR_SDMMC_ECC		0xffd21140
-#define S10_NOC_SYS_SCR_USB0_ECC		0xffd21144
-#define S10_NOC_SYS_SCR_USB1_ECC		0xffd21148
-#define S10_NOC_SYS_SCR_CLK_MGR			0xffd2114c
-#define S10_NOC_SYS_SCR_IO_MGR			0xffd21154
-#define S10_NOC_SYS_SCR_RST_MGR			0xffd21158
-#define S10_NOC_SYS_SCR_SYS_MGR			0xffd2115c
-#define S10_NOC_SYS_SCR_OSC0_TIMER		0xffd21160
-#define S10_NOC_SYS_SCR_OSC1_TIMER		0xffd21164
-#define S10_NOC_SYS_SCR_WATCHDOG0		0xffd21168
-#define S10_NOC_SYS_SCR_WATCHDOG1		0xffd2116c
-#define S10_NOC_SYS_SCR_WATCHDOG2		0xffd21170
-#define S10_NOC_SYS_SCR_WATCHDOG3		0xffd21174
-#define S10_NOC_SYS_SCR_DAP			0xffd21178
-#define S10_NOC_SYS_SCR_L4_NOC_PROBES		0xffd21190
-#define S10_NOC_SYS_SCR_L4_NOC_QOS		0xffd21194
-
-#define S10_CCU_NOC_BRIDGE_CPU0_RAM		0xf7004688
-#define S10_CCU_NOC_BRIDGE_IOM_RAM		0xf7004688
diff --git a/plat/intel/soc/stratix10/include/socfpga_plat_def.h b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
index ae4b674..516cc75 100644
--- a/plat/intel/soc/stratix10/include/socfpga_plat_def.h
+++ b/plat/intel/soc/stratix10/include/socfpga_plat_def.h
@@ -19,6 +19,7 @@
 
 /* Register Mapping */
 #define SOCFPGA_CCU_NOC_REG_BASE		0xf7000000
+#define SOCFPGA_F2SDRAMMGR_REG_BASE		U(0xf8024000)
 
 #define SOCFPGA_MMC_REG_BASE                    0xff808000
 
diff --git a/plat/intel/soc/stratix10/platform.mk b/plat/intel/soc/stratix10/platform.mk
index 273b975..5c0b421 100644
--- a/plat/intel/soc/stratix10/platform.mk
+++ b/plat/intel/soc/stratix10/platform.mk
@@ -55,6 +55,10 @@
 		plat/intel/soc/common/drivers/qspi/cadence_qspi.c	\
 		plat/intel/soc/common/drivers/wdt/watchdog.c
 
+include lib/zlib/zlib.mk
+PLAT_INCLUDES	+=	-Ilib/zlib
+BL2_SOURCES	+=	$(ZLIB_SOURCES)
+
 BL31_SOURCES	+=	\
 		drivers/arm/cci/cci.c					\
 		lib/cpus/aarch64/aem_generic.S				\
@@ -64,6 +68,7 @@
 		plat/intel/soc/stratix10/bl31_plat_setup.c	 	\
 		plat/intel/soc/common/socfpga_psci.c			\
 		plat/intel/soc/common/socfpga_sip_svc.c			\
+		plat/intel/soc/common/socfpga_sip_svc_v2.c		\
 		plat/intel/soc/common/socfpga_topology.c		\
 		plat/intel/soc/common/sip/socfpga_sip_ecc.c		\
 		plat/intel/soc/common/sip/socfpga_sip_fcs.c		\
diff --git a/plat/st/common/bl2_io_storage.c b/plat/st/common/bl2_io_storage.c
index 7cd5eb5..b2038bc 100644
--- a/plat/st/common/bl2_io_storage.c
+++ b/plat/st/common/bl2_io_storage.c
@@ -38,6 +38,7 @@
 #include <platform_def.h>
 #include <stm32cubeprogrammer.h>
 #include <stm32mp_fconf_getter.h>
+#include <stm32mp_io_storage.h>
 #include <usb_dfu.h>
 
 /* IO devices */
diff --git a/plat/st/stm32mp1/plat_image_load.c b/plat/st/stm32mp1/plat_image_load.c
index 76af0fc..f68eb38 100644
--- a/plat/st/stm32mp1/plat_image_load.c
+++ b/plat/st/stm32mp1/plat_image_load.c
@@ -4,6 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
+
 #include <common/desc_image_load.h>
 #include <plat/common/platform.h>
 
diff --git a/plat/st/stm32mp1/platform.mk b/plat/st/stm32mp1/platform.mk
index 9e67989..127e318 100644
--- a/plat/st/stm32mp1/platform.mk
+++ b/plat/st/stm32mp1/platform.mk
@@ -320,12 +320,14 @@
 				plat/st/stm32mp1/stm32mp1_security.c
 endif
 
-ifeq (${PSA_FWU_SUPPORT},1)
 include lib/zlib/zlib.mk
+
+ifeq (${PSA_FWU_SUPPORT},1)
 include drivers/fwu/fwu.mk
+endif
+
 
 BL2_SOURCES		+=	$(ZLIB_SOURCES)
-endif
 
 BL2_SOURCES		+=	drivers/io/io_block.c					\
 				drivers/io/io_mtd.c					\
diff --git a/plat/xilinx/common/include/ipi.h b/plat/xilinx/common/include/ipi.h
index 483902e..ac76bf0 100644
--- a/plat/xilinx/common/include/ipi.h
+++ b/plat/xilinx/common/include/ipi.h
@@ -47,7 +47,7 @@
  ********************************************************************/
 
 /* Initialize IPI configuration table */
-void ipi_config_table_init(const struct ipi_config *ipi_table,
+void ipi_config_table_init(const struct ipi_config *ipi_config_table,
 			   uint32_t total_ipi);
 
 /* Validate IPI mailbox access */
diff --git a/plat/xilinx/common/include/plat_startup.h b/plat/xilinx/common/include/plat_startup.h
index 66e7933..6799e21 100644
--- a/plat/xilinx/common/include/plat_startup.h
+++ b/plat/xilinx/common/include/plat_startup.h
@@ -15,8 +15,8 @@
 	FSBL_HANDOFF_TOO_MANY_PARTS
 };
 
-enum fsbl_handoff fsbl_atf_handover(entry_point_info_t *bl32_image_ep_info,
-					entry_point_info_t *bl33_image_ep_info,
+enum fsbl_handoff fsbl_atf_handover(entry_point_info_t *bl32,
+					entry_point_info_t *bl33,
 					uint64_t atf_handoff_addr);
 
 #endif /* PLAT_STARTUP_H */
diff --git a/plat/xilinx/versal/pm_service/pm_api_sys.c b/plat/xilinx/versal/pm_service/pm_api_sys.c
index c7b6047..04258cc 100644
--- a/plat/xilinx/versal/pm_service/pm_api_sys.c
+++ b/plat/xilinx/versal/pm_service/pm_api_sys.c
@@ -496,7 +496,8 @@
 		break;
 	case IOCTL_SET_SGI:
 		/* Get the sgi number */
-		if (pm_register_sgi(arg1) != 0) {
+		ret = pm_register_sgi(arg1, arg2);
+		if (ret != 0) {
 			return PM_RET_ERROR_ARGS;
 		}
 		gicd_write_irouter(gicv3_driver_data->gicd_base,
diff --git a/plat/xilinx/versal/pm_service/pm_defs.h b/plat/xilinx/versal/pm_service/pm_defs.h
index 3785650..9206120 100644
--- a/plat/xilinx/versal/pm_service/pm_defs.h
+++ b/plat/xilinx/versal/pm_service/pm_defs.h
@@ -35,6 +35,7 @@
 
 #define PM_GET_CALLBACK_DATA		0xa01U
 #define PM_GET_TRUSTZONE_VERSION	0xa03U
+#define TF_A_PM_REGISTER_SGI		0xa04U
 
 /* PM API Versions */
 #define PM_API_BASE_VERSION		1U
diff --git a/plat/xilinx/versal/pm_service/pm_svc_main.c b/plat/xilinx/versal/pm_service/pm_svc_main.c
index 75c1268..24b68e7 100644
--- a/plat/xilinx/versal/pm_service/pm_svc_main.c
+++ b/plat/xilinx/versal/pm_service/pm_svc_main.c
@@ -51,6 +51,7 @@
  * pm_register_sgi() - PM register the IPI interrupt
  *
  * @sgi -  SGI number to be used for communication.
+ * @reset -  Reset to invalid SGI when reset=1.
  * @return	On success, the initialization function must return 0.
  *		Any other return value will cause the framework to ignore
  *		the service
@@ -58,9 +59,14 @@
  * Update the SGI number to be used.
  *
  */
-int pm_register_sgi(unsigned int sgi_num)
+int pm_register_sgi(unsigned int sgi_num, unsigned int reset)
 {
-	if ((unsigned int)sgi != (unsigned int)INVALID_SGI) {
+	if (reset == 1U) {
+		sgi = INVALID_SGI;
+		return 0;
+	}
+
+	if (sgi != INVALID_SGI) {
 		return -EBUSY;
 	}
 
@@ -231,6 +237,18 @@
 {
 	switch (api_id) {
 
+	case TF_A_PM_REGISTER_SGI:
+	{
+		int ret;
+
+		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
+		if (ret != 0) {
+			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
+		}
+
+		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
+	}
+
 	case PM_GET_CALLBACK_DATA:
 	{
 		uint32_t result[4] = {0};
diff --git a/plat/xilinx/versal/pm_service/pm_svc_main.h b/plat/xilinx/versal/pm_service/pm_svc_main.h
index 4f8dc2b..2dff5b2 100644
--- a/plat/xilinx/versal/pm_service/pm_svc_main.h
+++ b/plat/xilinx/versal/pm_service/pm_svc_main.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,5 +14,5 @@
 			uint64_t x4, void *cookie, void *handle,
 			uint64_t flags);
 
-int pm_register_sgi(unsigned int sgi_num);
+int pm_register_sgi(unsigned int sgi_num, unsigned int reset);
 #endif /* PM_SVC_MAIN_H */
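With the new reset parameter a caller can release a previously registered SGI instead of being stuck with -EBUSY. A short illustrative sequence (the SGI number is an arbitrary example, not mandated by the patch):

#include <errno.h>

#include "pm_svc_main.h"

static int example_sgi_registration(void)
{
	int ret = pm_register_sgi(14U, 0U);	/* try to register SGI 14 */

	if (ret == -EBUSY) {
		(void)pm_register_sgi(0U, 1U);	/* reset=1 clears the current SGI */
		ret = pm_register_sgi(14U, 0U);	/* retry with the desired SGI */
	}

	return ret;
}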
diff --git a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
index 5ad33cc..6ded2e2 100644
--- a/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
+++ b/plat/xilinx/zynqmp/bl31_zynqmp_setup.c
@@ -33,15 +33,18 @@
  * while BL32 corresponds to the secure image type. A NULL pointer is returned
  * if the image does not exist.
  */
-entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
 {
-	assert(sec_state_is_valid(type));
+	entry_point_info_t *next_image_info;
 
+	assert(sec_state_is_valid(type));
 	if (type == NON_SECURE) {
-		return &bl33_image_ep_info;
+		next_image_info = &bl33_image_ep_info;
+	} else {
+		next_image_info = &bl32_image_ep_info;
 	}
 
-	return &bl32_image_ep_info;
+	return next_image_info;
 }
 
 /*
diff --git a/plat/xilinx/zynqmp/plat_psci.c b/plat/xilinx/zynqmp/plat_psci.c
index 881dfe6..f337cf5 100644
--- a/plat/xilinx/zynqmp/plat_psci.c
+++ b/plat/xilinx/zynqmp/plat_psci.c
@@ -19,9 +19,9 @@
 #include "pm_api_sys.h"
 #include "pm_client.h"
 
-uintptr_t zynqmp_sec_entry;
+static uintptr_t zynqmp_sec_entry;
 
-void zynqmp_cpu_standby(plat_local_state_t cpu_state)
+static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
 {
 	VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);
 
@@ -171,7 +171,7 @@
 	}
 }
 
-int zynqmp_validate_power_state(unsigned int power_state,
+static int zynqmp_validate_power_state(unsigned int power_state,
 				psci_power_state_t *req_state)
 {
 	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
@@ -194,7 +194,7 @@
 	return PSCI_E_SUCCESS;
 }
 
-void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
+static void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
 {
 	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
 	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index 620bf6c..ea8a5d1 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -21,6 +21,10 @@
 
 WORKAROUND_CVE_2017_5715	:=	0
 
+ARM_XLAT_TABLES_LIB_V1         :=      1
+$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1))
+$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1))
+
 ifdef ZYNQMP_ATF_MEM_BASE
     $(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
 
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
index 48b3877..84b239c 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.h
@@ -68,7 +68,7 @@
 /**********************************************************
  * System-level API function declarations
  **********************************************************/
-enum pm_ret_status pm_req_suspend(enum pm_node_id nid,
+enum pm_ret_status pm_req_suspend(enum pm_node_id target,
 				  enum pm_request_ack ack,
 				  unsigned int latency,
 				  unsigned int state);
@@ -78,12 +78,12 @@
 				   unsigned int state,
 				   uintptr_t address);
 
-enum pm_ret_status pm_force_powerdown(enum pm_node_id nid,
+enum pm_ret_status pm_force_powerdown(enum pm_node_id target,
 				      enum pm_request_ack ack);
 
 enum pm_ret_status pm_abort_suspend(enum pm_abort_reason reason);
 
-enum pm_ret_status pm_req_wakeup(enum pm_node_id nid,
+enum pm_ret_status pm_req_wakeup(enum pm_node_id target,
 				 unsigned int set_address,
 				 uintptr_t address,
 				 enum pm_request_ack ack);
@@ -112,7 +112,7 @@
 
 /* Miscellaneous API functions */
 enum pm_ret_status pm_get_api_version(unsigned int *version);
-enum pm_ret_status pm_get_node_status(enum pm_node_id node,
+enum pm_ret_status pm_get_node_status(enum pm_node_id nid,
 				      uint32_t *ret_buff);
 enum pm_ret_status pm_acknowledge_cb(enum pm_node_id nid,
 				     enum pm_ret_status status,
@@ -133,8 +133,8 @@
 enum pm_ret_status pm_fpga_get_status(unsigned int *value);
 
 enum pm_ret_status pm_get_chipid(uint32_t *value);
-enum pm_ret_status pm_secure_rsaaes(uint32_t address_high,
-				    uint32_t address_low,
+enum pm_ret_status pm_secure_rsaaes(uint32_t address_low,
+				    uint32_t address_high,
 				    uint32_t size,
 				    uint32_t flags);
 unsigned int pm_get_shutdown_scope(void);
@@ -157,9 +157,9 @@
 enum pm_ret_status pm_clock_getrate(unsigned int clock_id,
 				    uint64_t *rate);
 enum pm_ret_status pm_clock_setparent(unsigned int clock_id,
-				      unsigned int parent_id);
+				      unsigned int parent_index);
 enum pm_ret_status pm_clock_getparent(unsigned int clock_id,
-				      unsigned int *parent_id);
+				      unsigned int *parent_index);
 void pm_query_data(enum pm_query_id qid, unsigned int arg1, unsigned int arg2,
 		   unsigned int arg3, unsigned int *data);
 enum pm_ret_status pm_sha_hash(uint32_t address_high,
diff --git a/plat/xilinx/zynqmp/sip_svc_setup.c b/plat/xilinx/zynqmp/sip_svc_setup.c
index 4a6095c..4ce9b8a 100644
--- a/plat/xilinx/zynqmp/sip_svc_setup.c
+++ b/plat/xilinx/zynqmp/sip_svc_setup.c
@@ -53,7 +53,7 @@
  * Handler for all SiP SMC calls. Handles standard SIP requests
  * and calls PM SMC handler if the call is for a PM-API function.
  */
-uintptr_t sip_svc_smc_handler(uint32_t smc_fid,
+static uintptr_t sip_svc_smc_handler(uint32_t smc_fid,
 			      u_register_t x1,
 			      u_register_t x2,
 			      u_register_t x3,
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
index 0915d0b..d62be91 100644
--- a/services/std_svc/spm/el3_spmc/spmc.h
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -33,9 +33,29 @@
 /* Align with Hafnium implementation */
 #define INV_SP_ID		0x7FFF
 
-/* FF-A warm boot types. */
-#define FFA_WB_TYPE_S2RAM	0
-#define FFA_WB_TYPE_NOTS2RAM	1
+/* FF-A Related helper macros. */
+#define FFA_ID_MASK			U(0xFFFF)
+#define FFA_PARTITION_ID_SHIFT		U(16)
+#define FFA_FEATURES_BIT31_MASK		U(0x1u << 31)
+#define FFA_FEATURES_RET_REQ_NS_BIT	U(0x1 << 1)
+
+#define FFA_RUN_EP_ID(ep_vcpu_ids) \
+		((ep_vcpu_ids >> FFA_PARTITION_ID_SHIFT) & FFA_ID_MASK)
+#define FFA_RUN_VCPU_ID(ep_vcpu_ids) \
+		(ep_vcpu_ids & FFA_ID_MASK)
+
+#define FFA_PAGE_SIZE (4096)
+#define FFA_RXTX_PAGE_COUNT_MASK 0x1F
+
+/* Ensure that the page size used by TF-A is 4k aligned. */
+CASSERT((PAGE_SIZE % FFA_PAGE_SIZE) == 0, assert_aligned_page_size);
+
+/*
+ * Defines to allow an SP to subscribe for power management messages
+ */
+#define FFA_PM_MSG_SUB_CPU_OFF			U(1 << 0)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND		U(1 << 1)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME	U(1 << 2)
 
 /*
  * Runtime states of an execution context as per the FF-A v1.1 specification.
@@ -146,6 +166,17 @@
 
 	/* Secondary entrypoint. Only valid for a S-EL1 SP. */
 	uintptr_t secondary_ep;
+
+	/*
+	 * Store whether the SP has subscribed to any power management messages.
+	 */
+	uint16_t pwr_mgmt_msgs;
+
+	/*
+	 * Store whether the SP has requested the use of the NS bit for memory
+	 * management transactions if it is using FF-A v1.0.
+	 */
+	bool ns_bit_requested;
 };
 
 /*
@@ -178,9 +209,31 @@
 	uint32_t ffa_version;
 };
 
+/**
+ * Holds information returned for each partition by the FFA_PARTITION_INFO_GET
+ * interface.
+ */
+struct ffa_partition_info_v1_0 {
+	uint16_t ep_id;
+	uint16_t execution_ctx_count;
+	uint32_t properties;
+};
+
+/* Extended structure for v1.1. */
+struct ffa_partition_info_v1_1 {
+	uint16_t ep_id;
+	uint16_t execution_ctx_count;
+	uint32_t properties;
+	uint32_t uuid[4];
+};
+
+/* Reference to power management hooks */
+extern const spd_pm_ops_t spmc_pm;
+
 /* Setup Function for different SP types. */
 void spmc_sp_common_setup(struct secure_partition_desc *sp,
-			  entry_point_info_t *ep_info);
+			  entry_point_info_t *ep_info,
+			  int32_t boot_info_reg);
 void spmc_el1_sp_setup(struct secure_partition_desc *sp,
 		       entry_point_info_t *ep_info);
 void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
@@ -221,4 +274,22 @@
  */
 struct el3_lp_desc *get_el3_lp_array(void);
 
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin);
+
+/*
+ * Helper function to obtain the context of an SP with a given partition ID.
+ */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id);
+
+/*
+ * Helper function to obtain the FF-A version of the calling
+ * partition.
+ */
+uint32_t get_partition_ffa_version(bool secure_origin);
+
+
 #endif /* SPMC_H */
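The FFA_RUN helper macros added above split the combined target argument into its endpoint ID (bits [31:16]) and vCPU ID (bits [15:0]). A one-line decode sketch; the wrapper function is illustrative only:

#include <stdint.h>

#include "spmc.h"

/* Illustrative decode of the FFA_RUN target argument. */
static inline void example_decode_ffa_run_target(uint32_t ep_vcpu_ids,
						 uint16_t *ep_id,
						 uint16_t *vcpu_id)
{
	*ep_id = (uint16_t)FFA_RUN_EP_ID(ep_vcpu_ids);
	*vcpu_id = (uint16_t)FFA_RUN_VCPU_ID(ep_vcpu_ids);
}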
diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk
index 8067c74..aa591d9 100644
--- a/services/std_svc/spm/el3_spmc/spmc.mk
+++ b/services/std_svc/spm/el3_spmc/spmc.mk
@@ -11,7 +11,9 @@
 SPMC_SOURCES	:=	$(addprefix services/std_svc/spm/el3_spmc/,	\
 			spmc_main.c				\
 			spmc_setup.c				\
-			logical_sp.c)
+			logical_sp.c				\
+			spmc_pm.c				\
+			spmc_shared_mem.c)
 
 # Specify platform specific logical partition implementation.
 SPMC_LP_SOURCES  := $(addprefix ${PLAT_DIR}/, \
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
index 35def25..9b8621a 100644
--- a/services/std_svc/spm/el3_spmc/spmc_main.c
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -10,9 +10,11 @@
 #include <arch_helpers.h>
 #include <bl31/bl31.h>
 #include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
 #include <common/debug.h>
 #include <common/fdt_wrappers.h>
 #include <common/runtime_svc.h>
+#include <common/uuid.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/smccc.h>
 #include <lib/utils.h>
@@ -24,9 +26,13 @@
 #include <services/spmc_svc.h>
 #include <services/spmd_svc.h>
 #include "spmc.h"
+#include "spmc_shared_mem.h"
 
 #include <platform_def.h>
 
+/* Declare the maximum number of SPs and EL3 LPs. */
+#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
+
 /*
  * Allocate a secure partition descriptor to describe each SP in the system that
  * does not reside at EL3.
@@ -41,6 +47,11 @@
  */
 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
 
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+					  uint32_t flags,
+					  void *handle,
+					  void *cookie);
+
 /*
  * Helper function to obtain the array storing the EL3
  * Logical Partition descriptors.
@@ -72,7 +83,7 @@
 /* Helper function to get pointer to SP context from its ID. */
 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
 {
-	/* Check for SWd Partitions. */
+	/* Check for Secure World Partitions. */
 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
 		if (sp_desc[i].sp_id == id) {
 			return &(sp_desc[i]);
@@ -81,6 +92,29 @@
 	return NULL;
 }
 
+/*
+ * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
+ * We assume that the first descriptor is reserved for this entity.
+ */
+struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
+{
+	return &(ns_ep_desc[0]);
+}
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin)
+{
+	/* Obtain the RX/TX buffer pair descriptor. */
+	if (secure_origin) {
+		return &(spmc_get_current_sp_ctx()->mailbox);
+	} else {
+		return &(spmc_get_hyp_ctx()->mailbox);
+	}
+}
+
 /******************************************************************************
  * This function returns to the place where spmc_sp_synchronous_entry() was
  * called originally.
@@ -207,13 +241,20 @@
  ******************************************************************************/
 static inline bool direct_msg_validate_arg2(uint64_t x2)
 {
-	/*
-	 * We currently only support partition messages, therefore ensure x2 is
-	 * not set.
-	 */
-	if (x2 != (uint64_t) 0) {
-		VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n", x2);
-		return false;
+	/* Check message type. */
+	if (x2 & FFA_FWK_MSG_BIT) {
+		/* We have a framework message, ensure it is a known message. */
+		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
+			VERBOSE("Invalid message format 0x%lx.\n", x2);
+			return false;
+		}
+	} else {
+		/* We have a partition message, ensure x2 is not set. */
+		if (x2 != (uint64_t) 0) {
+			VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n",
+				x2);
+			return false;
+		}
 	}
 	return true;
 }
@@ -491,6 +532,861 @@
 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
 }
 
+static uint64_t ffa_version_handler(uint32_t smc_fid,
+				    bool secure_origin,
+				    uint64_t x1,
+				    uint64_t x2,
+				    uint64_t x3,
+				    uint64_t x4,
+				    void *cookie,
+				    void *handle,
+				    uint64_t flags)
+{
+	uint32_t requested_version = x1 & FFA_VERSION_MASK;
+
+	if (requested_version & FFA_VERSION_BIT31_MASK) {
+		/* Invalid encoding, return an error. */
+		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
+		/* Execution stops here. */
+	}
+
+	/* Determine the caller to store the requested version. */
+	if (secure_origin) {
+		/*
+		 * Ensure that the SP is reporting the same version as
+		 * specified in its manifest. If these do not match there is
+		 * something wrong with the SP.
+		 * TODO: Should we abort the SP? For now assert this is not
+		 *       case.
+		 *       the case.
+		assert(requested_version ==
+		       spmc_get_current_sp_ctx()->ffa_version);
+	} else {
+		/*
+		 * If this is called by the normal world, record this
+		 * information in its descriptor.
+		 */
+		spmc_get_hyp_ctx()->ffa_version = requested_version;
+	}
+
+	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+					  FFA_VERSION_MINOR));
+}
+
+/*******************************************************************************
+ * Helper function to obtain the FF-A version of the calling partition.
+ ******************************************************************************/
+uint32_t get_partition_ffa_version(bool secure_origin)
+{
+	if (secure_origin) {
+		return spmc_get_current_sp_ctx()->ffa_version;
+	} else {
+		return spmc_get_hyp_ctx()->ffa_version;
+	}
+}
+
+static uint64_t rxtx_map_handler(uint32_t smc_fid,
+				 bool secure_origin,
+				 uint64_t x1,
+				 uint64_t x2,
+				 uint64_t x3,
+				 uint64_t x4,
+				 void *cookie,
+				 void *handle,
+				 uint64_t flags)
+{
+	int ret;
+	uint32_t error_code;
+	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
+	struct mailbox *mbox;
+	uintptr_t tx_address = x1;
+	uintptr_t rx_address = x2;
+	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
+	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
+
+	/*
+	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
+	 * ABI on behalf of a VM and reject it if this is the case.
+	 */
+	if (tx_address == 0 || rx_address == 0) {
+		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Ensure the specified buffers are not the same. */
+	if (tx_address == rx_address) {
+		WARN("TX Buffer must not be the same as RX Buffer.\n");
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Ensure the buffer size is not 0. */
+	if (buf_size == 0U) {
+		WARN("Buffer size must not be 0\n");
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/*
+	 * Ensure the buffer size is a multiple of the translation granule size
+	 * in TF-A.
+	 */
+	if (buf_size % PAGE_SIZE != 0U) {
+		WARN("Buffer size must be aligned to translation granule.\n");
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Obtain the RX/TX buffer pair descriptor. */
+	mbox = spmc_get_mbox_desc(secure_origin);
+
+	spin_lock(&mbox->lock);
+
+	/* Check if buffers have already been mapped. */
+	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
+		WARN("RX/TX Buffers already mapped (%p/%p)\n",
+		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
+		error_code = FFA_ERROR_DENIED;
+		goto err;
+	}
+
+	/* memmap the TX buffer as read only. */
+	ret = mmap_add_dynamic_region(tx_address, /* PA */
+			tx_address, /* VA */
+			buf_size, /* size */
+			mem_atts | MT_RO_DATA); /* attrs */
+	if (ret != 0) {
+		/* Return the correct error code. */
+		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+						FFA_ERROR_INVALID_PARAMETER;
+		WARN("Unable to map TX buffer: %d\n", error_code);
+		goto err;
+	}
+
+	/* memmap the RX buffer as read write. */
+	ret = mmap_add_dynamic_region(rx_address, /* PA */
+			rx_address, /* VA */
+			buf_size, /* size */
+			mem_atts | MT_RW_DATA); /* attrs */
+
+	if (ret != 0) {
+		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+						FFA_ERROR_INVALID_PARAMETER;
+		WARN("Unable to map RX buffer: %d\n", error_code);
+		/* Unmap the TX buffer again. */
+		mmap_remove_dynamic_region(tx_address, buf_size);
+		goto err;
+	}
+
+	mbox->tx_buffer = (void *) tx_address;
+	mbox->rx_buffer = (void *) rx_address;
+	mbox->rxtx_page_count = page_count;
+	spin_unlock(&mbox->lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+	/* Execution stops here. */
+err:
+	spin_unlock(&mbox->lock);
+	return spmc_ffa_error_return(handle, error_code);
+}
+
+static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
+				   bool secure_origin,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+	/*
+	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
+	 * ABI on behalf of a VM and reject it if this is the case.
+	 */
+	if (x1 != 0UL) {
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&mbox->lock);
+
+	/* Check if buffers are currently mapped. */
+	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
+		spin_unlock(&mbox->lock);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Unmap RX Buffer */
+	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
+				       buf_size) != 0) {
+		WARN("Unable to unmap RX buffer!\n");
+	}
+
+	mbox->rx_buffer = 0;
+
+	/* Unmap TX Buffer */
+	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
+				       buf_size) != 0) {
+		WARN("Unable to unmap TX buffer!\n");
+	}
+
+	mbox->tx_buffer = 0;
+	mbox->rxtx_page_count = 0;
+
+	spin_unlock(&mbox->lock);
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Collate the partition information in a v1.1 partition information
+ * descriptor format; this will be converted later if required.
+ */
+static int partition_info_get_handler_v1_1(uint32_t *uuid,
+					   struct ffa_partition_info_v1_1
+						  *partitions,
+					   uint32_t max_partitions,
+					   uint32_t *partition_count)
+{
+	uint32_t index;
+	struct ffa_partition_info_v1_1 *desc;
+	bool null_uuid = is_null_uuid(uuid);
+	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+	/* Deal with Logical Partitions. */
+	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
+			/* Found a matching UUID, populate appropriately. */
+			if (*partition_count >= max_partitions) {
+				return FFA_ERROR_NO_MEMORY;
+			}
+
+			desc = &partitions[*partition_count];
+			desc->ep_id = el3_lp_descs[index].sp_id;
+			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+			desc->properties = el3_lp_descs[index].properties;
+			if (null_uuid) {
+				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
+			}
+			(*partition_count)++;
+		}
+	}
+
+	/* Deal with physical SPs. */
+	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+			/* Found a matching UUID, populate appropriately. */
+			if (*partition_count >= max_partitions) {
+				return FFA_ERROR_NO_MEMORY;
+			}
+
+			desc = &partitions[*partition_count];
+			desc->ep_id = sp_desc[index].sp_id;
+			/*
+			 * Execution context count must match the number of
+			 * cores for S-EL1 SPs.
+			 */
+			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+			desc->properties = sp_desc[index].properties;
+			if (null_uuid) {
+				copy_uuid(desc->uuid, sp_desc[index].uuid);
+			}
+			(*partition_count)++;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Handle the case where the caller only wants the count of partitions
+ * matching a given UUID and does not want the corresponding descriptors
+ * populated.
+ */
+static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
+{
+	uint32_t index = 0;
+	uint32_t partition_count = 0;
+	bool null_uuid = is_null_uuid(uuid);
+	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+	/* Deal with Logical Partitions. */
+	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+		if (null_uuid ||
+		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
+			(partition_count)++;
+		}
+	}
+
+	/* Deal with physical SPs. */
+	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+			(partition_count)++;
+		}
+	}
+	return partition_count;
+}
+
+/*
+ * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
+ * the corresponding descriptor format from the v1.1 descriptor array.
+ */
+static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
+					     *partitions,
+					     struct mailbox *mbox,
+					     int partition_count)
+{
+	uint32_t index;
+	uint32_t buf_size;
+	uint32_t descriptor_size;
+	struct ffa_partition_info_v1_0 *v1_0_partitions =
+		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
+
+	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+	descriptor_size = partition_count *
+			  sizeof(struct ffa_partition_info_v1_0);
+
+	if (descriptor_size > buf_size) {
+		return FFA_ERROR_NO_MEMORY;
+	}
+
+	for (index = 0U; index < partition_count; index++) {
+		v1_0_partitions[index].ep_id = partitions[index].ep_id;
+		v1_0_partitions[index].execution_ctx_count =
+			partitions[index].execution_ctx_count;
+		v1_0_partitions[index].properties =
+			partitions[index].properties;
+	}
+	return 0;
+}
+
+/*
+ * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
+ * v1.0 implementations.
+ */
+static uint64_t partition_info_get_handler(uint32_t smc_fid,
+					   bool secure_origin,
+					   uint64_t x1,
+					   uint64_t x2,
+					   uint64_t x3,
+					   uint64_t x4,
+					   void *cookie,
+					   void *handle,
+					   uint64_t flags)
+{
+	int ret;
+	uint32_t partition_count = 0;
+	uint32_t size = 0;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+	struct mailbox *mbox;
+	uint64_t info_get_flags;
+	bool count_only;
+	uint32_t uuid[4];
+
+	uuid[0] = x1;
+	uuid[1] = x2;
+	uuid[2] = x3;
+	uuid[3] = x4;
+
+	/* Determine if the Partition descriptors should be populated. */
+	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
+	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
+
+	/* Handle the case where we don't need to populate the descriptors. */
+	if (count_only) {
+		partition_count = partition_info_get_handler_count_only(uuid);
+		if (partition_count == 0) {
+			return spmc_ffa_error_return(handle,
+						FFA_ERROR_INVALID_PARAMETER);
+		}
+	} else {
+		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
+
+		/*
+		 * Handle the case where the partition descriptors are required:
+		 * check that the buffers are available and populate the
+		 * appropriate structure version.
+		 */
+
+		/* Obtain the v1.1 format of the descriptors. */
+		ret = partition_info_get_handler_v1_1(uuid, partitions,
+						      MAX_SP_LP_PARTITIONS,
+						      &partition_count);
+
+		/* Check if an error occurred during discovery. */
+		if (ret != 0) {
+			goto err;
+		}
+
+		/* If we didn't find any matches the UUID is unknown. */
+		if (partition_count == 0) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err;
+		}
+
+		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
+		mbox = spmc_get_mbox_desc(secure_origin);
+
+		/*
+		 * If the caller has not bothered registering its RX/TX pair
+		 * then return an error code.
+		 */
+		spin_lock(&mbox->lock);
+		if (mbox->rx_buffer == NULL) {
+			ret = FFA_ERROR_BUSY;
+			goto err_unlock;
+		}
+
+		/* Ensure the RX buffer is currently free. */
+		if (mbox->state != MAILBOX_STATE_EMPTY) {
+			ret = FFA_ERROR_BUSY;
+			goto err_unlock;
+		}
+
+		/* Zero the RX buffer before populating. */
+		(void)memset(mbox->rx_buffer, 0,
+			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
+
+		/*
+		 * Depending on the FF-A version of the requesting partition
+		 * we may need to convert to a v1.0 format otherwise we can copy
+		 * directly.
+		 */
+		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
+			ret = partition_info_populate_v1_0(partitions,
+							   mbox,
+							   partition_count);
+			if (ret != 0) {
+				goto err_unlock;
+			}
+		} else {
+			uint32_t buf_size = mbox->rxtx_page_count *
+					    FFA_PAGE_SIZE;
+
+			/* Ensure the descriptor will fit in the buffer. */
+			size = sizeof(struct ffa_partition_info_v1_1);
+			if (partition_count * size  > buf_size) {
+				ret = FFA_ERROR_NO_MEMORY;
+				goto err_unlock;
+			}
+			memcpy(mbox->rx_buffer, partitions,
+			       partition_count * size);
+		}
+
+		mbox->state = MAILBOX_STATE_FULL;
+		spin_unlock(&mbox->lock);
+	}
+	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
+
+err_unlock:
+	spin_unlock(&mbox->lock);
+err:
+	return spmc_ffa_error_return(handle, ret);
+}
+
+static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
+{
+	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
+}
+
+static uint64_t ffa_features_retrieve_request(bool secure_origin,
+					      uint32_t input_properties,
+					      void *handle)
+{
+	/*
+	 * If we're called by the normal world we don't support any
+	 * additional features.
+	 */
+	if (!secure_origin) {
+		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
+			return spmc_ffa_error_return(handle,
+						     FFA_ERROR_NOT_SUPPORTED);
+		}
+
+	} else {
+		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+		/*
+		 * For a v1.1 SP the NS bit must be set, otherwise the call is
+		 * invalid. For a v1.0 SP, check and store whether it has
+		 * requested the use of the NS bit.
+		 */
+		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
+			if ((input_properties &
+			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
+				return spmc_ffa_error_return(handle,
+						       FFA_ERROR_NOT_SUPPORTED);
+			}
+			return ffa_feature_success(handle,
+						   FFA_FEATURES_RET_REQ_NS_BIT);
+		} else {
+			sp->ns_bit_requested = (input_properties &
+					       FFA_FEATURES_RET_REQ_NS_BIT) !=
+					       0U;
+		}
+		if (sp->ns_bit_requested) {
+			return ffa_feature_success(handle,
+						   FFA_FEATURES_RET_REQ_NS_BIT);
+		}
+	}
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+static uint64_t ffa_features_handler(uint32_t smc_fid,
+				     bool secure_origin,
+				     uint64_t x1,
+				     uint64_t x2,
+				     uint64_t x3,
+				     uint64_t x4,
+				     void *cookie,
+				     void *handle,
+				     uint64_t flags)
+{
+	uint32_t function_id = (uint32_t) x1;
+	uint32_t input_properties = (uint32_t) x2;
+
+	/* Check if a Feature ID was requested. */
+	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
+		/* We currently don't support any additional features. */
+		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+	}
+
+	/*
+	 * Handle the cases where we have separate handlers due to additional
+	 * properties.
+	 */
+	switch (function_id) {
+	case FFA_MEM_RETRIEVE_REQ_SMC32:
+	case FFA_MEM_RETRIEVE_REQ_SMC64:
+		return ffa_features_retrieve_request(secure_origin,
+						     input_properties,
+						     handle);
+	}
+
+	/*
+	 * We don't currently support additional input properties for these
+	 * other ABIs therefore ensure this value is set to 0.
+	 */
+	if (input_properties != 0U) {
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_NOT_SUPPORTED);
+	}
+
+	/* Report if any other FF-A ABI is supported. */
+	switch (function_id) {
+	/* Supported features from both worlds. */
+	case FFA_ERROR:
+	case FFA_SUCCESS_SMC32:
+	case FFA_INTERRUPT:
+	case FFA_SPM_ID_GET:
+	case FFA_ID_GET:
+	case FFA_FEATURES:
+	case FFA_VERSION:
+	case FFA_RX_RELEASE:
+	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+	case FFA_PARTITION_INFO_GET:
+	case FFA_RXTX_MAP_SMC32:
+	case FFA_RXTX_MAP_SMC64:
+	case FFA_RXTX_UNMAP:
+	case FFA_MEM_FRAG_TX:
+	case FFA_MSG_RUN:
+
+		/*
+		 * We are relying on the fact that the other registers
+		 * will be set to 0 as these values align with the
+		 * currently implemented features of the SPMC. If this
+		 * changes this function must be extended to handle
+		 * reporting the additional functionality.
+		 */
+
+		SMC_RET1(handle, FFA_SUCCESS_SMC32);
+		/* Execution stops here. */
+
+	/* Supported ABIs only from the secure world. */
+	case FFA_SECONDARY_EP_REGISTER_SMC64:
+	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+	case FFA_MEM_RELINQUISH:
+	case FFA_MSG_WAIT:
+
+		if (!secure_origin) {
+			return spmc_ffa_error_return(handle,
+				FFA_ERROR_NOT_SUPPORTED);
+		}
+		SMC_RET1(handle, FFA_SUCCESS_SMC32);
+		/* Execution stops here. */
+
+	/* Supported features only from the normal world. */
+	case FFA_MEM_SHARE_SMC32:
+	case FFA_MEM_SHARE_SMC64:
+	case FFA_MEM_LEND_SMC32:
+	case FFA_MEM_LEND_SMC64:
+	case FFA_MEM_RECLAIM:
+	case FFA_MEM_FRAG_RX:
+
+		if (secure_origin) {
+			return spmc_ffa_error_return(handle,
+					FFA_ERROR_NOT_SUPPORTED);
+		}
+		SMC_RET1(handle, FFA_SUCCESS_SMC32);
+		/* Execution stops here. */
+
+	default:
+		return spmc_ffa_error_return(handle,
+					FFA_ERROR_NOT_SUPPORTED);
+	}
+}
+
+static uint64_t ffa_id_get_handler(uint32_t smc_fid,
+				   bool secure_origin,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	if (secure_origin) {
+		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+			 spmc_get_current_sp_ctx()->sp_id);
+	} else {
+		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+			 spmc_get_hyp_ctx()->ns_ep_id);
+	}
+}
+
+/*
+ * Enable an SP to query the ID assigned to the SPMC.
+ */
+static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
+				       bool secure_origin,
+				       uint64_t x1,
+				       uint64_t x2,
+				       uint64_t x3,
+				       uint64_t x4,
+				       void *cookie,
+				       void *handle,
+				       uint64_t flags)
+{
+	assert(x1 == 0UL);
+	assert(x2 == 0UL);
+	assert(x3 == 0UL);
+	assert(x4 == 0UL);
+	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
+	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
+	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
+
+	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
+}
+
+static uint64_t ffa_run_handler(uint32_t smc_fid,
+				bool secure_origin,
+				uint64_t x1,
+				uint64_t x2,
+				uint64_t x3,
+				uint64_t x4,
+				void *cookie,
+				void *handle,
+				uint64_t flags)
+{
+	struct secure_partition_desc *sp;
+	uint16_t target_id = FFA_RUN_EP_ID(x1);
+	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
+	unsigned int idx;
+	unsigned int *rt_state;
+	unsigned int *rt_model;
+
+	/* Can only be called from the normal world. */
+	if (secure_origin) {
+		ERROR("FFA_RUN can only be called from NWd.\n");
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Cannot run a Normal world partition. */
+	if (ffa_is_normal_world_id(target_id)) {
+		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Check that the target SP exists. */
+	sp = spmc_get_sp_ctx(target_id);
+	if (sp == NULL) {
+		ERROR("Unknown partition ID (0x%x).\n", target_id);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	idx = get_ec_index(sp);
+	if (idx != vcpu_id) {
+		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+	rt_state = &((sp->ec[idx]).rt_state);
+	rt_model = &((sp->ec[idx]).rt_model);
+	if (*rt_state == RT_STATE_RUNNING) {
+		ERROR("Partition (0x%x) is already running.\n", target_id);
+		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+	}
+
+	/*
+	 * Sanity check that if the execution context was not waiting then it
+	 * was either in the direct request or the run partition runtime model.
+	 */
+	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
+		assert(*rt_model == RT_MODEL_RUN ||
+		       *rt_model == RT_MODEL_DIR_REQ);
+	}
+
+	/*
+	 * If the context was waiting then update the partition runtime model.
+	 */
+	if (*rt_state == RT_STATE_WAITING) {
+		*rt_model = RT_MODEL_RUN;
+	}
+
+	/*
+	 * Forward the request to the correct SP vCPU after updating
+	 * its state.
+	 */
+	*rt_state = RT_STATE_RUNNING;
+
+	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
+			       handle, cookie, flags, target_id);
+}
+
+static uint64_t rx_release_handler(uint32_t smc_fid,
+				   bool secure_origin,
+				   uint64_t x1,
+				   uint64_t x2,
+				   uint64_t x3,
+				   uint64_t x4,
+				   void *cookie,
+				   void *handle,
+				   uint64_t flags)
+{
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+	spin_lock(&mbox->lock);
+
+	if (mbox->state != MAILBOX_STATE_FULL) {
+		spin_unlock(&mbox->lock);
+		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+	}
+
+	mbox->state = MAILBOX_STATE_EMPTY;
+	spin_unlock(&mbox->lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Perform initial validation on the provided secondary entry point.
+ * For now ensure it does not lie within the BL31 Image or the SP's
+ * RX/TX buffers as these are mapped within EL3.
+ * TODO: perform validation for additional invalid memory regions.
+ */
+static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
+{
+	struct mailbox *mb;
+	uintptr_t buffer_size;
+	uintptr_t sp_rx_buffer;
+	uintptr_t sp_tx_buffer;
+	uintptr_t sp_rx_buffer_limit;
+	uintptr_t sp_tx_buffer_limit;
+
+	mb = &sp->mailbox;
+	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
+	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
+	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
+	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
+	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
+
+	/*
+	 * Check if the entry point lies within BL31, or the
+	 * SP's RX or TX buffer.
+	 */
+	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
+	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
+	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*******************************************************************************
+ * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
+ *  register an entry point for initialization during a secondary cold boot.
+ ******************************************************************************/
+static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
+					    bool secure_origin,
+					    uint64_t x1,
+					    uint64_t x2,
+					    uint64_t x3,
+					    uint64_t x4,
+					    void *cookie,
+					    void *handle,
+					    uint64_t flags)
+{
+	struct secure_partition_desc *sp;
+	struct sp_exec_ctx *sp_ctx;
+
+	/* This request cannot originate from the Normal world. */
+	if (!secure_origin) {
+		WARN("%s: Can only be called from SWd.\n", __func__);
+		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+	}
+
+	/* Get the context of the current SP. */
+	sp = spmc_get_current_sp_ctx();
+	if (sp == NULL) {
+		WARN("%s: Cannot find SP context.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/* Only an S-EL1 SP should be invoking this ABI. */
+	if (sp->runtime_el != S_EL1) {
+		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
+		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+	}
+
+	/* Ensure the SP is in its initialization state. */
+	sp_ctx = spmc_get_sp_ec(sp);
+	if (sp_ctx->rt_model != RT_MODEL_INIT) {
+		WARN("%s: Can only be called during SP initialization.\n",
+		     __func__);
+		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+	}
+
+	/* Perform initial validation of the secondary entry point. */
+	if (validate_secondary_ep(x1, sp)) {
+		WARN("%s: Invalid entry point provided (0x%lx).\n",
+		     __func__, x1);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/*
+	 * Update the secondary entrypoint in SP context.
+	 * We don't need a lock here as during partition initialization there
+	 * will only be a single core online.
+	 */
+	sp->secondary_ep = x1;
+	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
 /*******************************************************************************
  * This function will parse the Secure Partition Manifest. From manifest, it
  * will fetch details for preparing Secure partition image context and secure
@@ -498,7 +1394,8 @@
  ******************************************************************************/
 static int sp_manifest_parse(void *sp_manifest, int offset,
 			     struct secure_partition_desc *sp,
-			     entry_point_info_t *ep_info)
+			     entry_point_info_t *ep_info,
+			     int32_t *boot_info_reg)
 {
 	int32_t ret, node;
 	uint32_t config_32;
@@ -545,6 +1442,23 @@
 	sp->execution_state = config_32;
 
 	ret = fdt_read_uint32(sp_manifest, node,
+			      "messaging-method", &config_32);
+	if (ret != 0) {
+		ERROR("Missing Secure Partition messaging method.\n");
+		return ret;
+	}
+
+	/* Validate this entry, we currently only support direct messaging. */
+	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
+			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
+		WARN("Invalid Secure Partition messaging method (0x%x)\n",
+		     config_32);
+		return -EINVAL;
+	}
+
+	sp->properties = config_32;
+
+	ret = fdt_read_uint32(sp_manifest, node,
 			      "execution-ctx-count", &config_32);
 
 	if (ret != 0) {
@@ -579,6 +1493,39 @@
 		sp->sp_id = config_32;
 	}
 
+	ret = fdt_read_uint32(sp_manifest, node,
+			      "power-management-messages", &config_32);
+	if (ret != 0) {
+		WARN("Missing Power Management Messages entry.\n");
+	} else {
+		/*
+		 * Ensure only the currently supported power messages have
+		 * been requested.
+		 */
+		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
+				  FFA_PM_MSG_SUB_CPU_SUSPEND |
+				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
+			ERROR("Requested unsupported PM messages (%x)\n",
+			      config_32);
+			return -EINVAL;
+		}
+		sp->pwr_mgmt_msgs = config_32;
+	}
+
+	ret = fdt_read_uint32(sp_manifest, node,
+			      "gp-register-num", &config_32);
+	if (ret != 0) {
+		WARN("Missing boot information register.\n");
+	} else {
+		/* Check if a register number between 0-3 is specified. */
+		if (config_32 < 4) {
+			*boot_info_reg = config_32;
+		} else {
+			WARN("Incorrect boot information register (%u).\n",
+			     config_32);
+		}
+	}
+
 	return 0;
 }
 
@@ -594,7 +1541,7 @@
 	uintptr_t manifest_base;
 	uintptr_t manifest_base_align;
 	entry_point_info_t *next_image_ep_info;
-	int32_t ret;
+	int32_t ret, boot_info_reg = -1;
 	struct secure_partition_desc *sp;
 
 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
@@ -653,7 +1600,8 @@
 		       SECURE | EP_ST_ENABLE);
 
 	/* Parse the SP manifest. */
-	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
+	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
+				&boot_info_reg);
 	if (ret != 0) {
 		ERROR("Error in Secure Partition manifest parsing.\n");
 		return ret;
@@ -666,7 +1614,7 @@
 	}
 
 	/* Perform any common initialisation. */
-	spmc_sp_common_setup(sp, next_image_ep_info);
+	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
 
 	/* Perform any initialisation specific to S-EL1 SPs. */
 	spmc_el1_sp_setup(sp, next_image_ep_info);
@@ -817,11 +1765,24 @@
 int32_t spmc_setup(void)
 {
 	int32_t ret;
+	uint32_t flags;
 
 	/* Initialize endpoint descriptors */
 	initalize_sp_descs();
 	initalize_ns_ep_descs();
 
+	/*
+	 * Retrieve the datastore allocated by platform code for tracking
+	 * shared memory requests and zero the region before use.
+	 */
+	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
+					    &spmc_shmem_obj_state.data_size);
+	if (ret != 0) {
+		ERROR("Failed to obtain memory descriptor backing store!\n");
+		return ret;
+	}
+	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
+
 	/* Setup logical SPs. */
 	ret = logical_sp_init();
 	if (ret != 0) {
@@ -843,6 +1804,24 @@
 		return ret;
 	}
 
+	/* Register power management hooks with PSCI */
+	psci_register_spd_pm_hook(&spmc_pm);
+
+	/*
+	 * Register an interrupt handler for S-EL1 interrupts
+	 * when generated during code executing in the
+	 * non-secure state.
+	 */
+	flags = 0;
+	set_interrupt_rm_flag(flags, NON_SECURE);
+	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+					      spmc_sp_interrupt_handler,
+					      flags);
+	if (ret != 0) {
+		ERROR("Failed to register interrupt handler! (%d)\n", ret);
+		panic();
+	}
+
 	/* Register init function for deferred init.  */
 	bl31_register_bl32_init(&sp_init);
 
@@ -866,6 +1845,27 @@
 {
 	switch (smc_fid) {
 
+	case FFA_VERSION:
+		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
+					   x4, cookie, handle, flags);
+
+	case FFA_SPM_ID_GET:
+		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
+					     x3, x4, cookie, handle, flags);
+
+	case FFA_ID_GET:
+		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
+					  x4, cookie, handle, flags);
+
+	case FFA_FEATURES:
+		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
+	case FFA_SECONDARY_EP_REGISTER_SMC64:
+		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
+						   x2, x3, x4, cookie, handle,
+						   flags);
+
 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
@@ -876,6 +1876,24 @@
 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
 					       x3, x4, cookie, handle, flags);
 
+	case FFA_RXTX_MAP_SMC32:
+	case FFA_RXTX_MAP_SMC64:
+		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+					cookie, handle, flags);
+
+	case FFA_RXTX_UNMAP:
+		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
+					  x4, cookie, handle, flags);
+
+	case FFA_PARTITION_INFO_GET:
+		return partition_info_get_handler(smc_fid, secure_origin, x1,
+						  x2, x3, x4, cookie, handle,
+						  flags);
+
+	case FFA_RX_RELEASE:
+		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
+					  x4, cookie, handle, flags);
+
 	case FFA_MSG_WAIT:
 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
 					cookie, handle, flags);
@@ -884,9 +1902,94 @@
 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
 					cookie, handle, flags);
 
+	case FFA_MSG_RUN:
+		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+				       cookie, handle, flags);
+
+	case FFA_MEM_SHARE_SMC32:
+	case FFA_MEM_SHARE_SMC64:
+	case FFA_MEM_LEND_SMC32:
+	case FFA_MEM_LEND_SMC64:
+		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
+					 cookie, handle, flags);
+
+	case FFA_MEM_FRAG_TX:
+		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
+	case FFA_MEM_FRAG_RX:
+		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
+	case FFA_MEM_RETRIEVE_REQ_SMC32:
+	case FFA_MEM_RETRIEVE_REQ_SMC64:
+		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
+						 x3, x4, cookie, handle, flags);
+
+	case FFA_MEM_RELINQUISH:
+		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
+					       x3, x4, cookie, handle, flags);
+
+	case FFA_MEM_RECLAIM:
+		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
+					    x4, cookie, handle, flags);
+
 	default:
 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
 		break;
 	}
 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
 }
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the SPMC. It
+ * validates the interrupt and upon success arranges entry into the SP for
+ * handling the interrupt.
+ ******************************************************************************/
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+					  uint32_t flags,
+					  void *handle,
+					  void *cookie)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	struct sp_exec_ctx *ec;
+	uint32_t linear_id = plat_my_core_pos();
+
+	/* Sanity check for a NULL pointer dereference. */
+	assert(sp != NULL);
+
+	/* Check the security state when the exception was generated. */
+	assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+	/* Panic if not an S-EL1 Partition. */
+	if (sp->runtime_el != S_EL1) {
+		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
+		      linear_id);
+		panic();
+	}
+
+	/* Obtain a reference to the SP execution context. */
+	ec = spmc_get_sp_ec(sp);
+
+	/* Ensure that the execution context is in waiting state else panic. */
+	if (ec->rt_state != RT_STATE_WAITING) {
+		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
+		      linear_id, RT_STATE_WAITING, ec->rt_state);
+		panic();
+	}
+
+	/* Update the runtime model and state of the partition. */
+	ec->rt_model = RT_MODEL_INTR;
+	ec->rt_state = RT_STATE_RUNNING;
+
+	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
+
+	/*
+	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
+	 * populated as the SP can determine this by itself.
+	 */
+	return spmd_smc_switch_state(FFA_INTERRUPT, false,
+				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+				     handle);
+}
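
As a rough illustration of the normal-world side of the RX/TX and discovery ABIs handled above, the sketch below maps a single-page buffer pair and then issues a count-only FFA_PARTITION_INFO_GET. The ffa_smc() conduit is hypothetical, and the function IDs and flag value are taken from the FF-A specification rather than from this patch, so treat them as assumptions to be checked against ffa_svc.h.

#include <stdint.h>

/* Hypothetical SMC conduit; a real client uses its framework's helper. */
struct ffa_ret { uint64_t x0, x1, x2, x3; };
extern struct ffa_ret ffa_smc(uint64_t x0, uint64_t x1, uint64_t x2,
			      uint64_t x3, uint64_t x4, uint64_t x5);

/* Assumed FF-A function IDs and flags (verify against ffa_svc.h). */
#define FFA_SUCCESS_SMC32	0x84000061ULL
#define FFA_RXTX_MAP_SMC64	0xC4000066ULL
#define FFA_PARTITION_INFO_GET	0x84000068ULL
#define FFA_INFO_GET_COUNT_ONLY	(1ULL << 0)	/* w5 flag: return count only */

/* Map a one-page RX/TX pair, then query how many partitions exist (NULL UUID). */
static int discover_partition_count(uintptr_t tx_pa, uintptr_t rx_pa)
{
	struct ffa_ret r;

	r = ffa_smc(FFA_RXTX_MAP_SMC64, tx_pa, rx_pa, 1 /* pages */, 0, 0);
	if (r.x0 != FFA_SUCCESS_SMC32)
		return -1;

	r = ffa_smc(FFA_PARTITION_INFO_GET, 0, 0, 0, 0, FFA_INFO_GET_COUNT_ONLY);
	if (r.x0 != FFA_SUCCESS_SMC32)
		return -1;

	/* The SPMC returns the partition count in w2 (see SMC_RET4 above). */
	return (int)r.x2;
}
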
diff --git a/services/std_svc/spm/el3_spmc/spmc_pm.c b/services/std_svc/spm/el3_spmc/spmc_pm.c
new file mode 100644
index 0000000..d25344c
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_pm.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * spmc_build_pm_message
+ *
+ * Builds an SPMC to SP direct message request.
+ ******************************************************************************/
+static void spmc_build_pm_message(gp_regs_t *gpregs,
+				  unsigned long long message,
+				  uint8_t pm_msg_type,
+				  uint16_t sp_id)
+{
+	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+	write_ctx_reg(gpregs, CTX_GPREG_X1,
+		      (FFA_SPMC_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+		      sp_id);
+	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_FWK_MSG_BIT |
+		      (pm_msg_type & FFA_FWK_MSG_MASK));
+	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SP to initialise S-EL1.
+ ******************************************************************************/
+static void spmc_cpu_on_finish_handler(u_register_t unused)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	struct sp_exec_ctx *ec;
+	unsigned int linear_id = plat_my_core_pos();
+	entry_point_info_t sec_ec_ep_info = {0};
+	uint64_t rc;
+
+	/* Sanity check for a NULL pointer dereference. */
+	assert(sp != NULL);
+
+	/* Initialize entry point information for the SP. */
+	SET_PARAM_HEAD(&sec_ec_ep_info, PARAM_EP, VERSION_1,
+		       SECURE | EP_ST_ENABLE);
+
+	/*
+	 * Check if the primary execution context registered an entry point;
+	 * otherwise bail out early.
+	 * TODO: Add support for boot reason in manifest to allow jumping to
+	 * entrypoint into the primary execution context.
+	 */
+	if (sp->secondary_ep == 0) {
+		WARN("%s: No secondary ep on core%u\n", __func__, linear_id);
+		return;
+	}
+
+	sec_ec_ep_info.pc = sp->secondary_ep;
+
+	/*
+	 * Setup and initialise the SP execution context on this physical cpu.
+	 */
+	spmc_el1_sp_setup(sp, &sec_ec_ep_info);
+	spmc_sp_common_ep_commit(sp, &sec_ec_ep_info);
+
+	/* Obtain a reference to the SP execution context. */
+	ec = spmc_get_sp_ec(sp);
+
+	/*
+	 * TODO: Should we do some PM related state tracking of the SP execution
+	 * context here?
+	 */
+
+	/* Update the runtime model and state of the partition. */
+	ec->rt_model = RT_MODEL_INIT;
+	ec->rt_state = RT_STATE_RUNNING;
+
+	INFO("SP (0x%x) init start on core%u.\n", sp->sp_id, linear_id);
+
+	rc = spmc_sp_synchronous_entry(ec);
+	if (rc != 0ULL) {
+		ERROR("%s failed (%lu) on CPU%u\n", __func__, rc, linear_id);
+	}
+
+	/* Update the runtime state of the partition. */
+	ec->rt_state = RT_STATE_WAITING;
+
+	VERBOSE("CPU %u on!\n", linear_id);
+}
+/*******************************************************************************
+ * Helper function to send a FF-A power management message to an SP.
+ ******************************************************************************/
+static int32_t spmc_send_pm_msg(uint8_t pm_msg_type,
+				unsigned long long psci_event)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	struct sp_exec_ctx *ec;
+	gp_regs_t *gpregs_ctx;
+	unsigned int linear_id = plat_my_core_pos();
+	u_register_t resp;
+	uint64_t rc;
+
+	/* Obtain a reference to the SP execution context. */
+	ec = spmc_get_sp_ec(sp);
+
+	/*
+	 * TODO: Should we do some PM related state tracking of the SP execution
+	 * context here?
+	 */
+
+	/*
+	 * Build an SPMC to SP direct message request.
+	 * Note that x4-x6 should be populated with the original PSCI arguments.
+	 */
+	spmc_build_pm_message(get_gpregs_ctx(&ec->cpu_ctx),
+			      psci_event,
+			      pm_msg_type,
+			      sp->sp_id);
+
+	/* Sanity check partition state. */
+	assert(ec->rt_state == RT_STATE_WAITING);
+
+	/* Update the runtime model and state of the partition. */
+	ec->rt_model = RT_MODEL_DIR_REQ;
+	ec->rt_state = RT_STATE_RUNNING;
+
+	rc = spmc_sp_synchronous_entry(ec);
+	if (rc != 0ULL) {
+		ERROR("%s failed (%lu) on CPU%u.\n", __func__, rc, linear_id);
+		assert(false);
+		return -EINVAL;
+	}
+
+	/*
+	 * Validate we receive an expected response from the SP.
+	 * TODO: We don't currently support aborting an SP in the scenario
+	 * where it is misbehaving so assert these conditions are not
+	 * met for now.
+	 */
+	gpregs_ctx = get_gpregs_ctx(&ec->cpu_ctx);
+
+	/* Expect a direct message response from the SP. */
+	resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X0);
+	if (resp != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+		ERROR("%s invalid SP response (%lx).\n", __func__, resp);
+		assert(false);
+		return -EINVAL;
+	}
+
+	/* Ensure the sender and receiver are populated correctly. */
+	resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X1);
+	if (!(ffa_endpoint_source(resp) == sp->sp_id &&
+	      ffa_endpoint_destination(resp) == FFA_SPMC_ID)) {
+		ERROR("%s invalid src/dst response (%lx).\n", __func__, resp);
+		assert(false);
+		return -EINVAL;
+	}
+
+	/* Expect a PM message response from the SP. */
+	resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X2);
+	if ((resp & FFA_FWK_MSG_BIT) == 0U ||
+	    ((resp & FFA_FWK_MSG_MASK) != FFA_PM_MSG_PM_RESP)) {
+		ERROR("%s invalid PM response (%lx).\n", __func__, resp);
+		assert(false);
+		return -EINVAL;
+	}
+
+	/* Update the runtime state of the partition. */
+	ec->rt_state = RT_STATE_WAITING;
+
+	/* Return the status code returned by the SP */
+	return read_ctx_reg(gpregs_ctx, CTX_GPREG_X3);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_finish_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_finish_handler(u_register_t unused)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	unsigned int linear_id = plat_my_core_pos();
+	int32_t rc;
+
+	/* Sanity check for a NULL pointer dereference. */
+	assert(sp != NULL);
+
+	/*
+	 * Check if the SP has subscribed for this power management message.
+	 * If not then we don't have anything else to do here.
+	 */
+	if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME) == 0U) {
+		goto exit;
+	}
+
+	rc = spmc_send_pm_msg(FFA_PM_MSG_WB_REQ, FFA_WB_TYPE_NOTS2RAM);
+	if (rc < 0) {
+		ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+		return;
+	}
+
+exit:
+	VERBOSE("CPU %u resumed!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_handler(u_register_t unused)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	unsigned int linear_id = plat_my_core_pos();
+	int32_t rc;
+
+	/* Sanity check for a NULL pointer dereference. */
+	assert(sp != NULL);
+
+	/*
+	 * Check if the SP has subscribed for this power management message.
+	 * If not then we don't have anything else to do here.
+	 */
+	if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND) == 0U) {
+		goto exit;
+	}
+
+	rc = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_SUSPEND_AARCH64);
+	if (rc < 0) {
+		ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+		return;
+	}
+exit:
+	VERBOSE("CPU %u suspend!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmc_cpu_off_handler(u_register_t unused)
+{
+	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+	unsigned int linear_id = plat_my_core_pos();
+	int32_t ret = 0;
+
+	/* Sanity check for a NULL pointer dereference. */
+	assert(sp != NULL);
+
+	/*
+	 * Check if the SP has subscribed for this power management message.
+	 * If not then we don't have anything else to do here.
+	 */
+	if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_OFF) == 0U) {
+		goto exit;
+	}
+
+	ret = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
+	if (ret < 0) {
+		ERROR("%s failed (%d) on CPU%u\n", __func__, ret, linear_id);
+		return ret;
+	}
+
+exit:
+	VERBOSE("CPU %u off!\n", linear_id);
+	return ret;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Core to perform any bookkeeping before
+ * PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmc_pm = {
+	.svc_on_finish = spmc_cpu_on_finish_handler,
+	.svc_off = spmc_cpu_off_handler,
+	.svc_suspend = spmc_cpu_suspend_handler,
+	.svc_suspend_finish = spmc_cpu_suspend_finish_handler
+};
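
The response checks in spmc_send_pm_msg() expect the SP to answer with a direct response whose x1 swaps source and destination and whose x2 carries a framework PM response. A self-contained sketch of that validation follows; the SPMC ID and framework-message encodings used here are placeholders, not the values from ffa_svc.h, and the x0 check on the response function ID is omitted.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder encodings for illustration; the real FFA_SPMC_ID,
 * FFA_FWK_MSG_* and FFA_PM_MSG_PM_RESP values live in ffa_svc.h. */
#define SPMC_ID		0x8000U
#define FWK_MSG_BIT	(1ULL << 31)
#define FWK_MSG_MASK	0xFFULL
#define PM_MSG_PM_RESP	0x2ULL

static inline uint16_t ep_source(uint64_t x1)      { return (uint16_t)(x1 >> 16); }
static inline uint16_t ep_destination(uint64_t x1) { return (uint16_t)(x1 & 0xFFFFU); }

/* Validate an SP's reply to a PM framework message, mirroring the checks
 * performed after spmc_sp_synchronous_entry() returns. */
static bool pm_response_is_valid(uint16_t sp_id, uint64_t x1, uint64_t x2)
{
	if ((ep_source(x1) != sp_id) || (ep_destination(x1) != SPMC_ID))
		return false;

	return ((x2 & FWK_MSG_BIT) != 0ULL) &&
	       ((x2 & FWK_MSG_MASK) == PM_MSG_PM_RESP);
}
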
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
index 7b23c9e..8ebae28 100644
--- a/services/std_svc/spm/el3_spmc/spmc_setup.c
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -10,19 +10,139 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <common/debug.h>
+#include <common/fdt_wrappers.h>
 #include <context.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/utils.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
 #include <plat/common/common_def.h>
 #include <plat/common/platform.h>
 #include <services/ffa_svc.h>
 #include "spm_common.h"
 #include "spmc.h"
+#include <tools_share/firmware_image_package.h>
 
 #include <platform_def.h>
 
 /*
+ * Statically allocate a page of memory for passing boot information to an SP.
+ */
+static uint8_t ffa_boot_info_mem[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * This function creates an initialization descriptor in the memory reserved
+ * for passing boot information to an SP. It then copies the partition manifest
+ * into this region and ensures that its reference in the initialization
+ * descriptor is updated.
+ */
+static void spmc_create_boot_info(entry_point_info_t *ep_info,
+				  struct secure_partition_desc *sp)
+{
+	struct ffa_boot_info_header *boot_header;
+	struct ffa_boot_info_desc *boot_descriptor;
+	uintptr_t manifest_addr;
+
+	/*
+	 * Calculate the maximum size of the manifest that can be accommodated
+	 * in the boot information memory region.
+	 */
+	const unsigned int
+	max_manifest_sz = sizeof(ffa_boot_info_mem) -
+			  (sizeof(struct ffa_boot_info_header) +
+			   sizeof(struct ffa_boot_info_desc));
+
+	/*
+	 * The current implementation only supports the FF-A v1.1
+	 * implementation of the boot protocol, therefore check
+	 * that a v1.0 SP has not requested use of the protocol.
+	 */
+	if (sp->ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		ERROR("FF-A boot protocol not supported for v1.0 clients\n");
+		return;
+	}
+
+	/*
+	 * Check if the manifest will fit into the boot info memory region else
+	 * Check if the manifest will fit into the boot info memory region,
+	 * otherwise bail out.
+	if (ep_info->args.arg1 > max_manifest_sz) {
+		WARN("Unable to copy manifest into boot information. ");
+		WARN("Max sz = %u bytes. Manifest sz = %lu bytes\n",
+		     max_manifest_sz, ep_info->args.arg1);
+		return;
+	}
+
+	/* Zero the memory region before populating. */
+	memset(ffa_boot_info_mem, 0, PAGE_SIZE);
+
+	/*
+	 * Populate the ffa_boot_info_header at the start of the boot info
+	 * region.
+	 */
+	boot_header = (struct ffa_boot_info_header *) ffa_boot_info_mem;
+
+	/* Position the ffa_boot_info_desc after the ffa_boot_info_header. */
+	boot_header->offset_boot_info_desc =
+					sizeof(struct ffa_boot_info_header);
+	boot_descriptor = (struct ffa_boot_info_desc *)
+			  (ffa_boot_info_mem +
+			   boot_header->offset_boot_info_desc);
+
+	/*
+	 * We must use the FF-A version corresponding to the version implemented
+	 * by the SP. Currently this can only be v1.1.
+	 */
+	boot_header->version = sp->ffa_version;
+
+	/* Populate the boot information header. */
+	boot_header->size_boot_info_desc = sizeof(struct ffa_boot_info_desc);
+
+	/* Set the signature "0xFFA". */
+	boot_header->signature = FFA_INIT_DESC_SIGNATURE;
+
+	/* Set the count. Currently 1 since only the manifest is specified. */
+	boot_header->count_boot_info_desc = 1;
+
+	/* Populate the boot information descriptor for the manifest. */
+	boot_descriptor->type =
+		FFA_BOOT_INFO_TYPE(FFA_BOOT_INFO_TYPE_STD) |
+		FFA_BOOT_INFO_TYPE_ID(FFA_BOOT_INFO_TYPE_ID_FDT);
+
+	boot_descriptor->flags =
+		FFA_BOOT_INFO_FLAG_NAME(FFA_BOOT_INFO_FLAG_NAME_UUID) |
+		FFA_BOOT_INFO_FLAG_CONTENT(FFA_BOOT_INFO_FLAG_CONTENT_ADR);
+
+	/*
+	 * Copy the manifest into boot info region after the boot information
+	 * descriptor.
+	 */
+	boot_descriptor->size_boot_info = (uint32_t) ep_info->args.arg1;
+
+	manifest_addr = (uintptr_t) (ffa_boot_info_mem +
+				     boot_header->offset_boot_info_desc +
+				     boot_header->size_boot_info_desc);
+
+	memcpy((void *) manifest_addr, (void *) ep_info->args.arg0,
+	       boot_descriptor->size_boot_info);
+
+	boot_descriptor->content = manifest_addr;
+
+	/* Calculate the size of the total boot info blob. */
+	boot_header->size_boot_info_blob = boot_header->offset_boot_info_desc +
+					   boot_descriptor->size_boot_info +
+					   (boot_header->count_boot_info_desc *
+					    boot_header->size_boot_info_desc);
+
+	INFO("SP boot info @ 0x%lx, size: %u bytes.\n",
+	     (uintptr_t) ffa_boot_info_mem,
+	     boot_header->size_boot_info_blob);
+	INFO("SP manifest @ 0x%lx, size: %u bytes.\n",
+	     boot_descriptor->content,
+	     boot_descriptor->size_boot_info);
+}
+
+/*
  * We are assuming that the index of the execution
  * context used is the linear index of the current physical cpu.
  */
@@ -44,6 +164,12 @@
 				DISABLE_ALL_EXCEPTIONS);
 
 	/*
+	 * TF-A Implementation defined behaviour to provide the linear
+	 * core ID in the x4 register.
+	 */
+	ep_info->args.arg4 = (uintptr_t) plat_my_core_pos();
+
+	/*
 	 * Check whether setup is being performed for the primary or a secondary
 	 * execution context. In the latter case, indicate to the SP that this
 	 * is a warm boot.
@@ -62,7 +188,8 @@
 
 /* Common initialisation for all SPs. */
 void spmc_sp_common_setup(struct secure_partition_desc *sp,
-			  entry_point_info_t *ep_info)
+			  entry_point_info_t *ep_info,
+			  int32_t boot_info_reg)
 {
 	uint16_t sp_id;
 
@@ -90,11 +217,50 @@
 	 */
 	assert(sp->runtime_el == S_EL1);
 
-	/*
-	 * Clear the general purpose registers. These should be populated as
-	 * required.
-	 */
-	zeromem(&ep_info->args, sizeof(ep_info->args));
+	/* Check if the SP wants to use the FF-A boot protocol. */
+	if (boot_info_reg >= 0) {
+		/*
+		 * Create a boot information descriptor and copy the partition
+		 * manifest into the reserved memory region for consumption by
+		 * the SP.
+		 */
+		spmc_create_boot_info(ep_info, sp);
+
+		/*
+		 * We have consumed what we need from ep args so we can now
+		 * zero them before we start populating with new information
+		 * specifically for the SP.
+		 */
+		zeromem(&ep_info->args, sizeof(ep_info->args));
+
+		/*
+		 * Pass the address of the boot information in the
+		 * boot_info_reg.
+		 */
+		switch (boot_info_reg) {
+		case 0:
+			ep_info->args.arg0 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 1:
+			ep_info->args.arg1 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 2:
+			ep_info->args.arg2 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		case 3:
+			ep_info->args.arg3 = (uintptr_t) ffa_boot_info_mem;
+			break;
+		default:
+			ERROR("Invalid value for \"gp-register-num\" %d.\n",
+			      boot_info_reg);
+		}
+	} else {
+		/*
+		 * We don't need any of the information that was populated
+		 * in ep_args so we can clear them.
+		 */
+		zeromem(&ep_info->args, sizeof(ep_info->args));
+	}
 }
 
 /*
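
On the SP side, the boot information blob built by spmc_create_boot_info() is located through the register named by "gp-register-num" and walked to reach the copied manifest. The sketch below shows that walk; the struct layouts are assumptions standing in for the ffa_boot_info_header and ffa_boot_info_desc definitions, which live elsewhere in the tree.

#include <stdint.h>

/* Assumed field layout standing in for struct ffa_boot_info_header and
 * struct ffa_boot_info_desc; real code must use the ffa_svc.h definitions. */
struct boot_info_header {
	uint32_t signature;		/* 0xFFA */
	uint32_t version;
	uint32_t size_boot_info_blob;
	uint32_t size_boot_info_desc;
	uint32_t count_boot_info_desc;
	uint32_t offset_boot_info_desc;
};

struct boot_info_desc {
	char name[16];
	uint8_t type;
	uint8_t reserved;
	uint16_t flags;
	uint32_t size_boot_info;
	uint64_t content;		/* address of the copied manifest DT */
};

/* Given the blob address received in the chosen GP register, return the
 * address of the partition manifest (the first and only descriptor). */
static uint64_t boot_info_get_manifest(uintptr_t blob)
{
	const struct boot_info_header *hdr = (const struct boot_info_header *)blob;
	const struct boot_info_desc *desc =
		(const struct boot_info_desc *)(blob + hdr->offset_boot_info_desc);

	return desc->content;
}
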
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.c b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
new file mode 100644
index 0000000..1602981
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
@@ -0,0 +1,1812 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <errno.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/object_pool.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include <platform_def.h>
+
+/**
+ * struct spmc_shmem_obj - Shared memory object.
+ * @desc_size:      Size of @desc.
+ * @desc_filled:    Size of @desc already received.
+ * @in_use:         Number of clients that have called ffa_mem_retrieve_req
+ *                  without a matching ffa_mem_relinquish call.
+ * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
+ */
+struct spmc_shmem_obj {
+	size_t desc_size;
+	size_t desc_filled;
+	size_t in_use;
+	struct ffa_mtd desc;
+};
+
+/*
+ * Declare our data structure to store the metadata of memory share requests.
+ * The main datastore is allocated on a per platform basis to ensure enough
+ * storage can be made available.
+ * The address of the data store will be populated by the SPMC during its
+ * initialization.
+ */
+
+struct spmc_shmem_obj_state spmc_shmem_obj_state = {
+	/* Set start value for handle so top 32 bits are needed quickly. */
+	.next_handle = 0xffffffc0U,
+};
+
+/**
+ * spmc_shmem_obj_size - Convert from descriptor size to object size.
+ * @desc_size:  Size of struct ffa_memory_region_descriptor object.
+ *
+ * Return: Size of struct spmc_shmem_obj object.
+ */
+static size_t spmc_shmem_obj_size(size_t desc_size)
+{
+	return desc_size + offsetof(struct spmc_shmem_obj, desc);
+}
+
+/**
+ * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
+ * @state:      Global state.
+ * @desc_size:  Size of struct ffa_memory_region_descriptor object that
+ *              allocated object will hold.
+ *
+ * Return: Pointer to newly allocated object, or %NULL if there is not enough
+ *         space left. The returned pointer is only valid while @state is
+ *         locked; to use it again after unlocking @state,
+ *         spmc_shmem_obj_lookup must be called.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
+{
+	struct spmc_shmem_obj *obj;
+	size_t free = state->data_size - state->allocated;
+
+	if (state->data == NULL) {
+		ERROR("Missing shmem datastore!\n");
+		return NULL;
+	}
+
+	if (spmc_shmem_obj_size(desc_size) > free) {
+		WARN("%s(0x%zx) failed, free 0x%zx\n",
+		     __func__, desc_size, free);
+		return NULL;
+	}
+	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
+	obj->desc = (struct ffa_mtd) {0};
+	obj->desc_size = desc_size;
+	obj->desc_filled = 0;
+	obj->in_use = 0;
+	state->allocated += spmc_shmem_obj_size(desc_size);
+	return obj;
+}
+
+/**
+ * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
+ * @state:      Global state.
+ * @obj:        Object to free.
+ *
+ * Release memory used by @obj. Other objects may move, so on return all
+ * pointers to struct spmc_shmem_obj object should be considered invalid, not
+ * just @obj.
+ *
+ * The current implementation always compacts the remaining objects to simplify
+ * the allocator and to avoid fragmentation.
+ */
+
+static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
+				  struct spmc_shmem_obj *obj)
+{
+	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
+	uint8_t *shift_dest = (uint8_t *)obj;
+	uint8_t *shift_src = shift_dest + free_size;
+	size_t shift_size = state->allocated - (shift_src - state->data);
+
+	if (shift_size != 0U) {
+		memmove(shift_dest, shift_src, shift_size);
+	}
+	state->allocated -= free_size;
+}
+
+/**
+ * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
+ * @state:      Global state.
+ * @handle:     Unique handle of object to return.
+ *
+ * Return: struct spmc_shmem_obj object with handle matching @handle.
+ *         %NULL, if no object in @state->data has a matching handle.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
+{
+	uint8_t *curr = state->data;
+
+	while (curr - state->data < state->allocated) {
+		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+		if (obj->desc.handle == handle) {
+			return obj;
+		}
+		curr += spmc_shmem_obj_size(obj->desc_size);
+	}
+	return NULL;
+}
+
+/**
+ * spmc_shmem_obj_get_next - Get the next memory object from an offset.
+ * @state:      Global state.
+ * @offset:     Offset used to track which objects have previously been
+ *              returned.
+ *
+ * Return: the next struct spmc_shmem_obj object from the provided
+ *	   offset.
+ *	   %NULL, if there are no more objects.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
+{
+	uint8_t *curr = state->data + *offset;
+
+	if (curr - state->data < state->allocated) {
+		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+		*offset += spmc_shmem_obj_size(obj->desc_size);
+
+		return obj;
+	}
+	return NULL;
+}
+
+/*******************************************************************************
+ * FF-A memory descriptor helper functions.
+ ******************************************************************************/
+/**
+ * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
+ *                           client's FF-A version.
+ * @desc:         The memory transaction descriptor.
+ * @index:        The index of the emad element to be accessed.
+ * @ffa_version:  FF-A version of the provided structure.
+ * @emad_size:    Will be populated with the size of the returned emad
+ *                descriptor.
+ * Return: A pointer to the requested emad structure.
+ */
+static void *
+spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
+			uint32_t ffa_version, size_t *emad_size)
+{
+	uint8_t *emad;
+	/*
+	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
+	 * format, otherwise assume it is a v1.1 format.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		/* Cast our descriptor to the v1.0 format. */
+		struct ffa_mtd_v1_0 *mtd_v1_0 =
+					(struct ffa_mtd_v1_0 *) desc;
+		emad = (uint8_t *)  &(mtd_v1_0->emad);
+		*emad_size = sizeof(struct ffa_emad_v1_0);
+	} else {
+		if (!is_aligned(desc->emad_offset, 16)) {
+			WARN("Emad offset is not aligned.\n");
+			return NULL;
+		}
+		emad = ((uint8_t *) desc + desc->emad_offset);
+		*emad_size = desc->emad_size;
+	}
+	return (emad + (*emad_size * index));
+}
+
+/**
+ * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
+ *				 FF-A version of the descriptor.
+ * @obj:    Object containing ffa_memory_region_descriptor.
+ *
+ * Return: struct ffa_comp_mrd object corresponding to the composite memory
+ *	   region descriptor.
+ */
+static struct ffa_comp_mrd *
+spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
+{
+	size_t emad_size;
+	/*
+	 * The comp_mrd_offset field of the emad descriptor remains consistent
+	 * between FF-A versions therefore we can use the v1.0 descriptor here
+	 * in all cases.
+	 */
+	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
+							     ffa_version,
+							     &emad_size);
+	/* Ensure the emad array was found. */
+	if (emad == NULL) {
+		return NULL;
+	}
+
+	/* Ensure the composite descriptor offset is aligned. */
+	if (!is_aligned(emad->comp_mrd_offset, 8)) {
+		WARN("Unaligned composite memory region descriptor offset.\n");
+		return NULL;
+	}
+
+	return (struct ffa_comp_mrd *)
+	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
+}
+
+/**
+ * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
+ * @obj:    Object containing ffa_memory_region_descriptor.
+ *
+ * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
+ */
+static size_t
+spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
+				    uint32_t ffa_version)
+{
+	struct ffa_comp_mrd *comp_mrd;
+
+	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
+	if (comp_mrd == NULL) {
+		return 0;
+	}
+	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+}
+
+/*
+ * Compare two memory regions to determine if any range overlaps with another
+ * ongoing memory transaction.
+ */
+static bool
+overlapping_memory_regions(struct ffa_comp_mrd *region1,
+			   struct ffa_comp_mrd *region2)
+{
+	uint64_t region1_start;
+	uint64_t region1_size;
+	uint64_t region1_end;
+	uint64_t region2_start;
+	uint64_t region2_size;
+	uint64_t region2_end;
+
+	assert(region1 != NULL);
+	assert(region2 != NULL);
+
+	if (region1 == region2) {
+		return true;
+	}
+
+	/*
+	 * Check each memory region in the request against existing
+	 * transactions.
+	 */
+	for (size_t i = 0; i < region1->address_range_count; i++) {
+
+		region1_start = region1->address_range_array[i].address;
+		region1_size =
+			region1->address_range_array[i].page_count *
+			PAGE_SIZE_4KB;
+		region1_end = region1_start + region1_size;
+
+		for (size_t j = 0; j < region2->address_range_count; j++) {
+
+			region2_start = region2->address_range_array[j].address;
+			region2_size =
+				region2->address_range_array[j].page_count *
+				PAGE_SIZE_4KB;
+			region2_end = region2_start + region2_size;
+
+			/*
+			 * Half-open interval overlap check; this also covers
+			 * the case where one range fully contains the other.
+			 */
+			if ((region1_start < region2_end) &&
+			    (region2_start < region1_end)) {
+				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
+				     region1_start, region1_end,
+				     region2_start, region2_end);
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
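As a quick illustration of the interval check above, consider two hypothetical
single-range composites where the second transaction starts one page into the
first; the addresses below are made up for this sketch:

    /* Illustrative only: build two tiny composites and compare them. */
    static void example_overlap(void)
    {
            /*
             * uint64_t storage guarantees alignment and enough room for one
             * ffa_comp_mrd followed by one ffa_cons_mrd.
             */
            static uint64_t buf1[8], buf2[8];
            struct ffa_comp_mrd *a = (struct ffa_comp_mrd *)buf1;
            struct ffa_comp_mrd *b = (struct ffa_comp_mrd *)buf2;

            b->address_range_count = 1;
            b->address_range_array[0].address = 0x80000000ULL; /* made up */
            b->address_range_array[0].page_count = 2;          /* 8KB */

            a->address_range_count = 1;
            a->address_range_array[0].address = 0x80001000ULL; /* inside b */
            a->address_range_array[0].page_count = 1;          /* 4KB */

            /* The second range starts inside the first, so this holds. */
            assert(overlapping_memory_regions(a, b));
    }
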
+/*******************************************************************************
+ * FF-A v1.0 Memory Descriptor Conversion Helpers.
+ ******************************************************************************/
+/**
+ * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
+ *                                     converted descriptor.
+ * @orig:       The original v1.0 memory transaction descriptor.
+ * @desc_size:  The size of the original v1.0 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.1 format.
+ */
+static size_t
+spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
+{
+	size_t size = 0;
+	struct ffa_comp_mrd *mrd;
+	struct ffa_emad_v1_0 *emad_array = orig->emad;
+
+	/* Get the size of the v1.1 descriptor. */
+	size += sizeof(struct ffa_mtd);
+
+	/* Add the size of the emad descriptors. */
+	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+	/* Add the size of the composite mrds. */
+	size += sizeof(struct ffa_comp_mrd);
+
+	/* Add the size of the constituent mrds. */
+	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+	      emad_array[0].comp_mrd_offset);
+
+	/* Check the calculated address is within the memory descriptor. */
+	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+		return 0;
+	}
+	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	return size;
+}
+
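For example, for a hypothetical v1.0 transaction with a single borrower and two
constituent address ranges, the converted size computed above reduces to:

    /* Worked example: 1 emad, 1 composite descriptor, 2 constituent ranges. */
    size_t v1_1_size = sizeof(struct ffa_mtd) +
                       (1U * sizeof(struct ffa_emad_v1_0)) +
                       sizeof(struct ffa_comp_mrd) +
                       (2U * sizeof(struct ffa_cons_mrd));
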
+/**
+ * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
+ *                                     converted descriptor.
+ * @orig:       The original v1.1 memory transaction descriptor.
+ * @desc_size:  The size of the original v1.1 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.0 format.
+ */
+static size_t
+spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
+{
+	size_t size = 0;
+	struct ffa_comp_mrd *mrd;
+	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
+					   ((uint8_t *) orig +
+					    orig->emad_offset);
+
+	/* Get the size of the v1.0 descriptor. */
+	size += sizeof(struct ffa_mtd_v1_0);
+
+	/* Add the size of the v1.0 emad descriptors. */
+	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+	/* Add the size of the composite mrds. */
+	size += sizeof(struct ffa_comp_mrd);
+
+	/* Add the size of the constituent mrds. */
+	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+	      emad_array[0].comp_mrd_offset);
+
+	/* Check the calculated address is within the memory descriptor. */
+	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+		return 0;
+	}
+	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	return size;
+}
+
+/**
+ * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
+ * @out_obj:	The shared memory object to populate the converted descriptor.
+ * @orig:	The shared memory object containing the v1.0 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
+				     struct spmc_shmem_obj *orig)
+{
+	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
+	struct ffa_mtd *out = &out_obj->desc;
+	struct ffa_emad_v1_0 *emad_array_in;
+	struct ffa_emad_v1_0 *emad_array_out;
+	struct ffa_comp_mrd *mrd_in;
+	struct ffa_comp_mrd *mrd_out;
+
+	size_t mrd_in_offset;
+	size_t mrd_out_offset;
+	size_t mrd_size = 0;
+
+	/* Populate the new descriptor format from the v1.0 struct. */
+	out->sender_id = mtd_orig->sender_id;
+	out->memory_region_attributes = mtd_orig->memory_region_attributes;
+	out->flags = mtd_orig->flags;
+	out->handle = mtd_orig->handle;
+	out->tag = mtd_orig->tag;
+	out->emad_count = mtd_orig->emad_count;
+	out->emad_size = sizeof(struct ffa_emad_v1_0);
+
+	/*
+	 * We will locate the emad descriptors directly after the ffa_mtd
+	 * struct. This will be 8-byte aligned.
+	 */
+	out->emad_offset = sizeof(struct ffa_mtd);
+
+	emad_array_in = mtd_orig->emad;
+	emad_array_out = (struct ffa_emad_v1_0 *)
+			 ((uint8_t *) out + out->emad_offset);
+
+	/* Copy across the emad structs. */
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		memcpy(&emad_array_out[i], &emad_array_in[i],
+		       sizeof(struct ffa_emad_v1_0));
+	}
+
+	/* Place the mrd descriptors after the end of the emad descriptors.*/
+	mrd_in_offset = emad_array_in->comp_mrd_offset;
+	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
+	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+	/* Add the size of the composite memory region descriptor. */
+	mrd_size += sizeof(struct ffa_comp_mrd);
+
+	/* Find the mrd descriptor. */
+	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+	/* Add the size of the constituent memory region descriptors. */
+	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	/*
+	 * Update the offset in the emads by the delta between the input and
+	 * output addresses.
+	 */
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		emad_array_out[i].comp_mrd_offset =
+			emad_array_in[i].comp_mrd_offset +
+			(mrd_out_offset - mrd_in_offset);
+	}
+
+	/* Verify that we stay within bound of the memory descriptors. */
+	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+		ERROR("%s: Invalid mrd structure.\n", __func__);
+		return false;
+	}
+
+	/* Copy the mrd descriptors directly. */
+	memcpy(mrd_out, mrd_in, mrd_size);
+
+	return true;
+}
+
+/**
+ * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
+ *                                v1.0 memory object.
+ * @out_obj:    The shared memory object to populate the v1.0 descriptor.
+ * @orig:       The shared memory object containing the v1.1 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
+			     struct spmc_shmem_obj *orig)
+{
+	struct ffa_mtd *mtd_orig = &orig->desc;
+	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
+	struct ffa_emad_v1_0 *emad_in;
+	struct ffa_emad_v1_0 *emad_array_in;
+	struct ffa_emad_v1_0 *emad_array_out;
+	struct ffa_comp_mrd *mrd_in;
+	struct ffa_comp_mrd *mrd_out;
+
+	size_t mrd_in_offset;
+	size_t mrd_out_offset;
+	size_t emad_out_array_size;
+	size_t mrd_size = 0;
+
+	/* Populate the v1.0 descriptor format from the v1.1 struct. */
+	out->sender_id = mtd_orig->sender_id;
+	out->memory_region_attributes = mtd_orig->memory_region_attributes;
+	out->flags = mtd_orig->flags;
+	out->handle = mtd_orig->handle;
+	out->tag = mtd_orig->tag;
+	out->emad_count = mtd_orig->emad_count;
+
+	/* Determine the location of the emad array in both descriptors. */
+	emad_array_in = (struct ffa_emad_v1_0 *)
+			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
+	emad_array_out = out->emad;
+
+	/* Copy across the emad structs. */
+	emad_in = emad_array_in;
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		memcpy(&emad_array_out[i], emad_in,
+		       sizeof(struct ffa_emad_v1_0));
+
+		emad_in +=  mtd_orig->emad_size;
+	}
+
+	/* Place the mrd descriptors after the end of the emad descriptors. */
+	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
+
+	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
+			  emad_out_array_size;
+
+	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+	mrd_in_offset = mtd_orig->emad_offset +
+			(mtd_orig->emad_size * mtd_orig->emad_count);
+
+	/* Add the size of the composite memory region descriptor. */
+	mrd_size += sizeof(struct ffa_comp_mrd);
+
+	/* Find the mrd descriptor. */
+	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+	/* Add the size of the constituent memory region descriptors. */
+	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+	/*
+	 * Update the offset in the emads by the delta between the input and
+	 * output addresses.
+	 */
+	emad_in = emad_array_in;
+
+	for (unsigned int i = 0U; i < out->emad_count; i++) {
+		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
+						    (mrd_out_offset -
+						     mrd_in_offset);
+		emad_in +=  mtd_orig->emad_size;
+	}
+
+	/* Verify that we stay within bound of the memory descriptors. */
+	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+		ERROR("%s: Invalid mrd structure.\n", __func__);
+		return false;
+	}
+
+	/* Copy the mrd descriptors directly. */
+	memcpy(mrd_out, mrd_in, mrd_size);
+
+	return true;
+}
+
+/**
+ * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
+ *                                     the v1.0 format and populates the
+ *                                     provided buffer.
+ * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
+ * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
+ * @buf_size:	    Size of the buffer to populate.
+ * @offset:	    The offset of the converted descriptor to copy.
+ * @copy_size:	    Will be populated with the number of bytes copied.
+ * @v1_0_desc_size: Will be populated with the total size of the v1.0
+ *                  descriptor.
+ *
+ * Return: 0 if conversion and population succeeded.
+ * Note: This function invalidates the reference to @orig_obj, therefore
+ * `spmc_shmem_obj_lookup` must be called if further usage is required.
+ */
+static uint32_t
+spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
+				 size_t buf_size, size_t offset,
+				 size_t *copy_size, size_t *v1_0_desc_size)
+{
+		struct spmc_shmem_obj *v1_0_obj;
+
+		/* Calculate the size that the v1.0 descriptor will require. */
+		*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
+					&orig_obj->desc, orig_obj->desc_size);
+
+		if (*v1_0_desc_size == 0) {
+			ERROR("%s: cannot determine size of descriptor.\n",
+			      __func__);
+			return FFA_ERROR_INVALID_PARAMETER;
+		}
+
+		/* Get a new obj to store the v1.0 descriptor. */
+		v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
+						*v1_0_desc_size);
+
+		if (!v1_0_obj) {
+			return FFA_ERROR_NO_MEMORY;
+		}
+
+		/* Perform the conversion from v1.1 to v1.0. */
+		if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
+			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+			return FFA_ERROR_INVALID_PARAMETER;
+		}
+
+		*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
+		memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
+
+		/*
+		 * We're finished with the v1.0 descriptor for now so free it.
+		 * Note that this will invalidate any references to the v1.1
+		 * descriptor.
+		 */
+		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+
+		return 0;
+}
+
+/**
+ * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
+ * @obj:	  Object containing ffa_memory_region_descriptor.
+ * @ffa_version:  FF-A version of the provided descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
+ * offset or count is invalid.
+ */
+static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
+				uint32_t ffa_version)
+{
+	uint32_t comp_mrd_offset = 0;
+
+	if (obj->desc.emad_count == 0U) {
+		WARN("%s: unsupported attribute desc count %u.\n",
+		     __func__, obj->desc.emad_count);
+		return -EINVAL;
+	}
+
+	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
+		size_t size;
+		size_t count;
+		size_t expected_size;
+		size_t total_page_count;
+		size_t emad_size;
+		size_t desc_size;
+		size_t header_emad_size;
+		uint32_t offset;
+		struct ffa_comp_mrd *comp;
+		struct ffa_emad_v1_0 *emad;
+
+		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
+					       ffa_version, &emad_size);
+		if (emad == NULL) {
+			WARN("%s: invalid emad structure.\n", __func__);
+			return -EINVAL;
+		}
+
+		/*
+		 * Validate the calculated emad address resides within the
+		 * descriptor.
+		 */
+		if ((uintptr_t) emad >=
+		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
+			WARN("Invalid emad access.\n");
+			return -EINVAL;
+		}
+
+		offset = emad->comp_mrd_offset;
+
+		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+			desc_size =  sizeof(struct ffa_mtd_v1_0);
+		} else {
+			desc_size =  sizeof(struct ffa_mtd);
+		}
+
+		header_emad_size = desc_size +
+			(obj->desc.emad_count * emad_size);
+
+		if (offset < header_emad_size) {
+			WARN("%s: invalid object, offset %u < header + emad %zu\n",
+			     __func__, offset, header_emad_size);
+			return -EINVAL;
+		}
+
+		size = obj->desc_size;
+
+		if (offset > size) {
+			WARN("%s: invalid object, offset %u > total size %zu\n",
+			     __func__, offset, obj->desc_size);
+			return -EINVAL;
+		}
+		size -= offset;
+
+		if (size < sizeof(struct ffa_comp_mrd)) {
+			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
+			     __func__, offset, obj->desc_size);
+			return -EINVAL;
+		}
+		size -= sizeof(struct ffa_comp_mrd);
+
+		count = size / sizeof(struct ffa_cons_mrd);
+
+		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
+
+		if (comp == NULL) {
+			WARN("%s: invalid comp_mrd offset\n", __func__);
+			return -EINVAL;
+		}
+
+		if (comp->address_range_count != count) {
+			WARN("%s: invalid object, desc count %u != %zu\n",
+			     __func__, comp->address_range_count, count);
+			return -EINVAL;
+		}
+
+		expected_size = offset + sizeof(*comp) +
+				spmc_shmem_obj_ffa_constituent_size(obj,
+								    ffa_version);
+
+		if (expected_size != obj->desc_size) {
+			WARN("%s: invalid object, computed size %zu != size %zu\n",
+			       __func__, expected_size, obj->desc_size);
+			return -EINVAL;
+		}
+
+		if (obj->desc_filled < obj->desc_size) {
+			/*
+			 * The whole descriptor has not yet been received.
+			 * Skip final checks.
+			 */
+			return 0;
+		}
+
+		/*
+		 * The offset provided to the composite memory region descriptor
+		 * should be consistent across endpoint descriptors. Store the
+		 * first entry and compare against subsequent entries.
+		 */
+		if (comp_mrd_offset == 0) {
+			comp_mrd_offset = offset;
+		} else {
+			if (comp_mrd_offset != offset) {
+				ERROR("%s: mismatching offsets provided, %u != %u\n",
+				       __func__, offset, comp_mrd_offset);
+				return -EINVAL;
+			}
+		}
+
+		total_page_count = 0;
+
+		for (size_t i = 0; i < count; i++) {
+			total_page_count +=
+				comp->address_range_array[i].page_count;
+		}
+		if (comp->total_page_count != total_page_count) {
+			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
+			     __func__, comp->total_page_count,
+			total_page_count);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/**
+ * spmc_shmem_check_state_obj - Check if the descriptor describes memory
+ *				regions that are currently involved in an
+ *				existing memory transaction. This implies that
+ *				the memory is not in a valid state for lending.
+ * @obj:    Object containing ffa_memory_region_descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if invalid memory state.
+ */
+static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
+				      uint32_t ffa_version)
+{
+	size_t obj_offset = 0;
+	struct spmc_shmem_obj *inflight_obj;
+
+	struct ffa_comp_mrd *other_mrd;
+	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
+								  ffa_version);
+
+	if (requested_mrd == NULL) {
+		return -EINVAL;
+	}
+
+	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+					       &obj_offset);
+
+	while (inflight_obj != NULL) {
+		/*
+		 * Don't compare the transaction to itself or to partially
+		 * transmitted descriptors.
+		 */
+		if ((obj->desc.handle != inflight_obj->desc.handle) &&
+		    (obj->desc_size == obj->desc_filled)) {
+			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
+								ffa_version);
+			if (other_mrd == NULL) {
+				return -EINVAL;
+			}
+			if (overlapping_memory_regions(requested_mrd,
+						       other_mrd)) {
+				return -EINVAL;
+			}
+		}
+
+		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+						       &obj_offset);
+	}
+	return 0;
+}
+
+static long spmc_ffa_fill_desc(struct mailbox *mbox,
+			       struct spmc_shmem_obj *obj,
+			       uint32_t fragment_length,
+			       ffa_mtd_flag32_t mtd_flag,
+			       uint32_t ffa_version,
+			       void *smc_handle)
+{
+	int ret;
+	size_t emad_size;
+	uint32_t handle_low;
+	uint32_t handle_high;
+	struct ffa_emad_v1_0 *emad;
+	struct ffa_emad_v1_0 *other_emad;
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
+		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
+		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	/* Ensure the fragment fits in the remaining descriptor space. */
+	if (fragment_length > obj->desc_size - obj->desc_filled) {
+		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
+		     fragment_length, obj->desc_size - obj->desc_filled);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
+	       (uint8_t *) mbox->tx_buffer, fragment_length);
+
+	/* Ensure that the sender ID resides in the normal world. */
+	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
+		WARN("%s: Invalid sender ID 0x%x.\n",
+		     __func__, obj->desc.sender_id);
+		ret = FFA_ERROR_DENIED;
+		goto err_arg;
+	}
+
+	/* Ensure the NS bit is set to 0. */
+	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	/*
+	 * We don't currently support any optional flags so ensure none are
+	 * requested.
+	 */
+	if (obj->desc.flags != 0U && mtd_flag != 0U &&
+	    (obj->desc.flags != mtd_flag)) {
+		WARN("%s: invalid memory transaction flags %u != %u\n",
+		     __func__, obj->desc.flags, mtd_flag);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_arg;
+	}
+
+	if (obj->desc_filled == 0U) {
+		/* First fragment, descriptor header has been copied */
+		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
+		obj->desc.flags |= mtd_flag;
+	}
+
+	obj->desc_filled += fragment_length;
+	ret = spmc_shmem_check_obj(obj, ffa_version);
+	if (ret != 0) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_bad_desc;
+	}
+
+	handle_low = (uint32_t)obj->desc.handle;
+	handle_high = obj->desc.handle >> 32;
+
+	if (obj->desc_filled != obj->desc_size) {
+		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
+			 handle_high, obj->desc_filled,
+			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
+	}
+
+	/* The full descriptor has been received, perform any final checks. */
+
+	/*
+	 * If a partition ID resides in the secure world validate that the
+	 * partition ID is for a known partition. Ignore any partition ID
+	 * belonging to the normal world as it is assumed the Hypervisor will
+	 * have validated these.
+	 */
+	for (size_t i = 0; i < obj->desc.emad_count; i++) {
+		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_bad_desc;
+		}
+
+		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
+
+		if (ffa_is_secure_world_id(ep_id)) {
+			if (spmc_get_sp_ctx(ep_id) == NULL) {
+				WARN("%s: Invalid receiver id 0x%x\n",
+				     __func__, ep_id);
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+		}
+	}
+
+	/* Ensure partition IDs are not duplicated. */
+	for (size_t i = 0; i < obj->desc.emad_count; i++) {
+		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_bad_desc;
+		}
+		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
+			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
+							     ffa_version,
+							     &emad_size);
+			if (other_emad == NULL) {
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+
+			if (emad->mapd.endpoint_id ==
+				other_emad->mapd.endpoint_id) {
+				WARN("%s: Duplicated endpoint id 0x%x\n",
+				     __func__, emad->mapd.endpoint_id);
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_bad_desc;
+			}
+		}
+	}
+
+	ret = spmc_shmem_check_state_obj(obj, ffa_version);
+	if (ret) {
+		ERROR("%s: invalid memory region descriptor.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_bad_desc;
+	}
+
+	/*
+	 * Everything checks out, if the sender was using FF-A v1.0, convert
+	 * the descriptor format to use the v1.1 structures.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		struct spmc_shmem_obj *v1_1_obj;
+		uint64_t mem_handle;
+
+		/* Calculate the size that the v1.1 descriptor will require. */
+		size_t v1_1_desc_size =
+		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
+						      fragment_length);
+
+		if (v1_1_desc_size == 0U) {
+			ERROR("%s: cannot determine size of descriptor.\n",
+			      __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_arg;
+		}
+
+		/* Get a new obj to store the v1.1 descriptor. */
+		v1_1_obj =
+		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
+
+		if (v1_1_obj == NULL) {
+			ret = FFA_ERROR_NO_MEMORY;
+			goto err_arg;
+		}
+
+		/* Perform the conversion from v1.0 to v1.1. */
+		v1_1_obj->desc_size = v1_1_desc_size;
+		v1_1_obj->desc_filled = v1_1_desc_size;
+		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
+			ERROR("%s: Could not convert mtd!\n", __func__);
+			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_arg;
+		}
+
+		/*
+		 * We're finished with the v1.0 descriptor so free it
+		 * and continue our checks with the new v1.1 descriptor.
+		 */
+		mem_handle = obj->desc.handle;
+		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+		if (obj == NULL) {
+			ERROR("%s: Failed to find converted descriptor.\n",
+			     __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			return spmc_ffa_error_return(smc_handle, ret);
+		}
+	}
+
+	/* Allow for platform specific operations to be performed. */
+	ret = plat_spmc_shmem_begin(&obj->desc);
+	if (ret != 0) {
+		goto err_arg;
+	}
+
+	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
+		 0, 0, 0);
+
+err_bad_desc:
+err_arg:
+	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+	return spmc_ffa_error_return(smc_handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
+ * @client:             Client state.
+ * @total_length:       Total length of shared memory descriptor.
+ * @fragment_length:    Length of fragment of shared memory descriptor passed in
+ *                      this call.
+ * @address:            Not supported, must be 0.
+ * @page_count:         Not supported, must be 0.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
+ * to share or lend memory from non-secure os to secure os (with no stream
+ * endpoints).
+ *
+ * Return: 0 on success, error code on failure.
+ */
+long spmc_ffa_mem_send(uint32_t smc_fid,
+			bool secure_origin,
+			uint64_t total_length,
+			uint32_t fragment_length,
+			uint64_t address,
+			uint32_t page_count,
+			void *cookie,
+			void *handle,
+			uint64_t flags)
+{
+	long ret;
+	struct spmc_shmem_obj *obj;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	ffa_mtd_flag32_t mtd_flag;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+	if (address != 0U || page_count != 0U) {
+		WARN("%s: custom memory region for message not supported.\n",
+		     __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (secure_origin) {
+		WARN("%s: unsupported share direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	/*
+	 * Check if the descriptor is smaller than the v1.0 descriptor. The
+	 * descriptor cannot be smaller than this structure.
+	 */
+	if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
+		WARN("%s: bad first fragment size %u < %zu\n",
+		     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
+		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
+	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
+		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
+	} else {
+		WARN("%s: invalid memory management operation.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
+	if (obj == NULL) {
+		ret = FFA_ERROR_NO_MEMORY;
+		goto err_unlock;
+	}
+
+	spin_lock(&mbox->lock);
+	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
+				 ffa_version, handle);
+	spin_unlock(&mbox->lock);
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return ret;
+
+err_unlock:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
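To make the fragmentation protocol above concrete, a hypothetical normal-world
sender loop could look as follows. This is not part of the patch: smc_ffa(),
copy_next_fragment() and struct ffa_ret are assumed helpers, and the
FFA_MEM_SHARE_SMC32 FID is assumed to come from ffa_svc.h; only the register
protocol mirrors the handlers above.

    /* Hypothetical NWd-side pseudo-driver for a fragmented FFA_MEM_SHARE. */
    struct ffa_ret { uint64_t a0, a1, a2, a3, a4; };
    extern struct ffa_ret smc_ffa(uint32_t fid, uint64_t a1, uint64_t a2,
                                  uint64_t a3, uint64_t a4);
    extern uint32_t copy_next_fragment(void *tx_buf, size_t bytes_sent);

    static uint64_t share_memory(void *tx_buf, uint32_t total_len,
                                 uint32_t first_frag_len)
    {
            struct ffa_ret r = smc_ffa(FFA_MEM_SHARE_SMC32, total_len,
                                       first_frag_len, 0, 0);

            /* While the SPMC answers FFA_MEM_FRAG_RX, send more fragments. */
            while (r.a0 == FFA_MEM_FRAG_RX) {
                    uint32_t frag_len = copy_next_fragment(tx_buf, r.a3);

                    r = smc_ffa(FFA_MEM_FRAG_TX, r.a1 /* handle lo */,
                                r.a2 /* handle hi */, frag_len,
                                r.a4 /* sender id */);
            }

            /* On FFA_SUCCESS the 64-bit handle comes back in w2/w3. */
            return r.a2 | (r.a3 << 32);
    }
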
+/**
+ * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
+ * @client:             Client state.
+ * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
+ * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
+ * @fragment_length:    Length of fragments transmitted.
+ * @sender_id:          Vmid of sender in bits [31:16]
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint64_t handle_low,
+			  uint64_t handle_high,
+			  uint32_t fragment_length,
+			  uint32_t sender_id,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags)
+{
+	long ret;
+	uint32_t desc_sender_id;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+	struct spmc_shmem_obj *obj;
+	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+	if (obj == NULL) {
+		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+		     __func__, mem_handle);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock;
+	}
+
+	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+	if (sender_id != desc_sender_id) {
+		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+		     sender_id, desc_sender_id);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock;
+	}
+
+	if (obj->desc_filled == obj->desc_size) {
+		WARN("%s: object desc already filled, %zu\n", __func__,
+		     obj->desc_filled);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock;
+	}
+
+	spin_lock(&mbox->lock);
+	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
+				 handle);
+	spin_unlock(&mbox->lock);
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return ret;
+
+err_unlock:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
+ *				      if the caller implements a version greater
+ *				      than FF-A 1.0 or if they have requested
+ *				      the functionality.
+ *				      TODO: We are assuming that the caller is
+ *				      an SP. To support retrieval from the
+ *				      normal world this function will need to be
+ *				      expanded accordingly.
+ * @resp:       Descriptor populated in the caller's RX buffer.
+ * @sp_ctx:     Context of the calling SP.
+ */
+void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
+			 struct secure_partition_desc *sp_ctx)
+{
+	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
+	    sp_ctx->ns_bit_requested) {
+		/*
+		 * Currently memory senders must reside in the normal
+		 * world, and we do not have the functionality to change
+		 * the state of memory dynamically. Therefore we can always set
+		 * the NS bit to 1.
+		 */
+		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
+	}
+}
+
+/**
+ * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
+ * @smc_fid:            FID of SMC
+ * @total_length:       Total length of retrieve request descriptor if this is
+ *                      the first call. Otherwise (unsupported) must be 0.
+ * @fragment_length:    Length of fragment of retrieve request descriptor passed
+ *                      in this call. Only @fragment_length == @total_length
+ *                      is supported by this implementation.
+ * @address:            Not supported, must be 0.
+ * @page_count:         Not supported, must be 0.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      FFA_MEM_RETRIEVE_RESP.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
+ * Used by secure os to retrieve memory already shared by non-secure os.
+ * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
+ * the client must call FFA_MEM_FRAG_RX until the full response has been
+ * received.
+ *
+ * Return: @handle on success, error code on failure.
+ */
+long
+spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint32_t total_length,
+			  uint32_t fragment_length,
+			  uint64_t address,
+			  uint32_t page_count,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags)
+{
+	int ret;
+	size_t buf_size;
+	size_t copy_size = 0;
+	size_t min_desc_size;
+	size_t out_desc_size = 0;
+
+	/*
+	 * Currently we are only accessing fields that are the same in both the
+	 * v1.0 and v1.1 mtd struct, therefore we can use a v1.1 struct directly
+	 * here. We only need to validate against the appropriate struct size.
+	 */
+	struct ffa_mtd *resp;
+	const struct ffa_mtd *req;
+	struct spmc_shmem_obj *obj = NULL;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+	if (!secure_origin) {
+		WARN("%s: unsupported retrieve req direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (address != 0U || page_count != 0U) {
+		WARN("%s: custom memory region not supported.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&mbox->lock);
+
+	req = mbox->tx_buffer;
+	resp = mbox->rx_buffer;
+	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (mbox->state != MAILBOX_STATE_EMPTY) {
+		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock_mailbox;
+	}
+
+	if (fragment_length != total_length) {
+		WARN("%s: fragmented retrieve request not supported.\n",
+		     __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (req->emad_count == 0U) {
+		WARN("%s: unsupported attribute desc count %u.\n",
+		     __func__, req->emad_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	/* Determine the appropriate minimum descriptor size. */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		min_desc_size = sizeof(struct ffa_mtd_v1_0);
+	} else {
+		min_desc_size = sizeof(struct ffa_mtd);
+	}
+	if (total_length < min_desc_size) {
+		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
+		     min_desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (obj->desc_filled != obj->desc_size) {
+		WARN("%s: incomplete object desc filled %zu < size %zu\n",
+		     __func__, obj->desc_filled, obj->desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
+		WARN("%s: wrong sender id 0x%x != 0x%x\n",
+		     __func__, req->sender_id, obj->desc.sender_id);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
+		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
+		     __func__, req->tag, obj->desc.tag);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
+		WARN("%s: mismatch of endpoint counts %u != %u\n",
+		     __func__, req->emad_count, obj->desc.emad_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	/* Ensure the NS bit is set to 0 in the request. */
+	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (req->flags != 0U) {
+		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
+		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
+			/*
+			 * If the retrieve request specifies the memory
+			 * transaction type, ensure it matches what we expect.
+			 */
+			WARN("%s: wrong mem transaction flags %x != %x\n",
+			__func__, req->flags, obj->desc.flags);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
+		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
+			/*
+			 * Current implementation does not support donate and
+			 * it supports no other flags.
+			 */
+			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	/* Validate that the provided emad offset and structure are valid. */
+	for (size_t i = 0; i < req->emad_count; i++) {
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+
+		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			WARN("%s: invalid emad structure.\n", __func__);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		if ((uintptr_t) emad >= (uintptr_t)
+					((uint8_t *) req + total_length)) {
+			WARN("Invalid emad access.\n");
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	/*
+	 * Validate all the endpoints match in the case of multiple
+	 * borrowers. We don't mandate that the order of the borrowers
+	 * must match in the descriptors, therefore check whether the
+	 * endpoints match in any order.
+	 */
+	for (size_t i = 0; i < req->emad_count; i++) {
+		bool found = false;
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+		struct ffa_emad_v1_0 *other_emad;
+
+		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+					       &emad_size);
+		if (emad == NULL) {
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+
+		for (size_t j = 0; j < obj->desc.emad_count; j++) {
+			other_emad = spmc_shmem_obj_get_emad(
+					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
+					&emad_size);
+
+			if (other_emad == NULL) {
+				ret = FFA_ERROR_INVALID_PARAMETER;
+				goto err_unlock_all;
+			}
+
+			if (req->emad_count &&
+			    emad->mapd.endpoint_id ==
+			    other_emad->mapd.endpoint_id) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			WARN("%s: invalid receiver id (0x%x).\n",
+			     __func__, emad->mapd.endpoint_id);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	mbox->state = MAILBOX_STATE_FULL;
+
+	if (req->emad_count != 0U) {
+		obj->in_use++;
+	}
+
+	/*
+	 * If the caller is v1.0 convert the descriptor, otherwise copy
+	 * directly.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
+							&copy_size,
+							&out_desc_size);
+		if (ret != 0U) {
+			ERROR("%s: Failed to process descriptor.\n", __func__);
+			goto err_unlock_all;
+		}
+	} else {
+		copy_size = MIN(obj->desc_size, buf_size);
+		out_desc_size = obj->desc_size;
+
+		memcpy(resp, &obj->desc, copy_size);
+	}
+
+	/* Set the NS bit in the response if applicable. */
+	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	spin_unlock(&mbox->lock);
+
+	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
+		 copy_size, 0, 0, 0, 0, 0);
+
+err_unlock_all:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+	spin_unlock(&mbox->lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
+ * @client:             Client state.
+ * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
+ * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
+ * @fragment_offset:    Byte offset in descriptor to resume at.
+ * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
+ *                      hypervisor. 0 otherwise.
+ * @smc_handle:         Handle passed to smc call. Used to return
+ *                      FFA_MEM_FRAG_TX.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint32_t handle_low,
+			  uint32_t handle_high,
+			  uint32_t fragment_offset,
+			  uint32_t sender_id,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags)
+{
+	int ret;
+	void *src;
+	size_t buf_size;
+	size_t copy_size;
+	size_t full_copy_size;
+	uint32_t desc_sender_id;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+	struct spmc_shmem_obj *obj;
+	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+	if (!secure_origin) {
+		WARN("%s: can only be called from swld.\n",
+		     __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+	if (obj == NULL) {
+		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+		     __func__, mem_handle);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+	if (sender_id != 0U && sender_id != desc_sender_id) {
+		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+		     sender_id, desc_sender_id);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	if (fragment_offset >= obj->desc_size) {
+		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
+		     __func__, fragment_offset, obj->desc_size);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_shmem;
+	}
+
+	spin_lock(&mbox->lock);
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (mbox->state != MAILBOX_STATE_EMPTY) {
+		WARN("%s: RX Buffer is full!\n", __func__);
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock_all;
+	}
+
+	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+	mbox->state = MAILBOX_STATE_FULL;
+
+	/*
+	 * If the caller is v1.0 convert the descriptor, otherwise copy
+	 * directly.
+	 */
+	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+		size_t out_desc_size;
+
+		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
+							buf_size,
+							fragment_offset,
+							&copy_size,
+							&out_desc_size);
+		if (ret != 0U) {
+			ERROR("%s: Failed to process descriptor.\n", __func__);
+			goto err_unlock_all;
+		}
+	} else {
+		full_copy_size = obj->desc_size - fragment_offset;
+		copy_size = MIN(full_copy_size, buf_size);
+
+		src = &obj->desc;
+
+		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
+	}
+
+	spin_unlock(&mbox->lock);
+	spin_unlock(&spmc_shmem_obj_state.lock);
+
+	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
+		 copy_size, sender_id, 0, 0, 0);
+
+err_unlock_all:
+	spin_unlock(&mbox->lock);
+err_unlock_shmem:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
+ * @client:             Client state.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
+ * Used by the secure os to release memory previously shared by the
+ * non-secure os.
+ *
+ * The handle to release must be in the client's (secure os's) transmit buffer.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+			    bool secure_origin,
+			    uint32_t handle_low,
+			    uint32_t handle_high,
+			    uint32_t fragment_offset,
+			    uint32_t sender_id,
+			    void *cookie,
+			    void *handle,
+			    uint64_t flags)
+{
+	int ret;
+	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+	struct spmc_shmem_obj *obj;
+	const struct ffa_mem_relinquish_descriptor *req;
+
+	if (!secure_origin) {
+		WARN("%s: unsupported relinquish direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&mbox->lock);
+
+	if (mbox->rxtx_page_count == 0U) {
+		WARN("%s: buffer pair not registered.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	req = mbox->tx_buffer;
+
+	if (req->flags != 0U) {
+		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	if (req->endpoint_count == 0) {
+		WARN("%s: endpoint count cannot be 0.\n", __func__);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_mailbox;
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	if (obj->desc.emad_count != req->endpoint_count) {
+		WARN("%s: mismatch of endpoint count %u != %u\n", __func__,
+		     obj->desc.emad_count, req->endpoint_count);
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+
+	/* Validate requested endpoint IDs match descriptor. */
+	for (size_t i = 0; i < req->endpoint_count; i++) {
+		bool found = false;
+		size_t emad_size;
+		struct ffa_emad_v1_0 *emad;
+
+		for (unsigned int j = 0; j < obj->desc.emad_count; j++) {
+			emad = spmc_shmem_obj_get_emad(&obj->desc, j,
+							MAKE_FFA_VERSION(1, 1),
+							&emad_size);
+			if (req->endpoint_array[i] ==
+			    emad->mapd.endpoint_id) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			WARN("%s: Invalid endpoint ID (0x%x).\n",
+			     __func__, req->endpoint_array[i]);
+			ret = FFA_ERROR_INVALID_PARAMETER;
+			goto err_unlock_all;
+		}
+	}
+
+	if (obj->in_use == 0U) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock_all;
+	}
+	obj->in_use--;
+
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	spin_unlock(&mbox->lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock_all:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+	spin_unlock(&mbox->lock);
+	return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
+ * @client:         Client state.
+ * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
+ * @handle_high:    Unique handle of shared memory object to reclaim.
+ *                  Bit[63:32].
+ * @flags:          Unsupported, ignored.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
+ * Used by the non-secure os to reclaim memory previously shared with the
+ * secure os.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+			 bool secure_origin,
+			 uint32_t handle_low,
+			 uint32_t handle_high,
+			 uint32_t mem_flags,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	int ret;
+	struct spmc_shmem_obj *obj;
+	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+	if (secure_origin) {
+		WARN("%s: unsupported reclaim direction.\n", __func__);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	if (mem_flags != 0U) {
+		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
+		return spmc_ffa_error_return(handle,
+					     FFA_ERROR_INVALID_PARAMETER);
+	}
+
+	spin_lock(&spmc_shmem_obj_state.lock);
+
+	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+	if (obj == NULL) {
+		ret = FFA_ERROR_INVALID_PARAMETER;
+		goto err_unlock;
+	}
+	if (obj->in_use != 0U) {
+		ret = FFA_ERROR_DENIED;
+		goto err_unlock;
+	}
+
+	/* Allow for platform specific operations to be performed. */
+	ret = plat_spmc_shmem_reclaim(&obj->desc);
+	if (ret != 0) {
+		goto err_unlock;
+	}
+
+	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+	spin_unlock(&spmc_shmem_obj_state.lock);
+
+	SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock:
+	spin_unlock(&spmc_shmem_obj_state.lock);
+	return spmc_ffa_error_return(handle, ret);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.h b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
new file mode 100644
index 0000000..839f7a1
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_SHARED_MEM_H
+#define SPMC_SHARED_MEM_H
+
+#include <services/el3_spmc_ffa_memory.h>
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ *         Id of shared memory object to relinquish.
+ * @flags:
+ *         If bit 0 is set, clear memory after unmapping from the borrower.
+ *         Must be 0 for share. Bit[1]: Time slicing. Not supported, must be 0.
+ *         All other bits are reserved 0.
+ * @endpoint_count:
+ *         Number of entries in @endpoint_array.
+ * @endpoint_array:
+ *         Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+	uint64_t handle;
+	uint32_t flags;
+	uint32_t endpoint_count;
+	ffa_endpoint_id16_t endpoint_array[];
+};
+CASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16,
+	assert_ffa_mem_relinquish_descriptor_size_mismatch);
+
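To illustrate how this descriptor is consumed, an SP could fill its TX buffer
as follows before invoking FFA_MEM_RELINQUISH; the tx_buf pointer and endpoint
id are placeholders for this sketch, not part of the patch:

    /* Illustrative only: relinquish a handle on behalf of one endpoint. */
    static void fill_relinquish_desc(void *tx_buf, uint64_t mem_handle,
                                     ffa_endpoint_id16_t own_id)
    {
            struct ffa_mem_relinquish_descriptor *rel = tx_buf;

            rel->handle = mem_handle;
            rel->flags = 0U;            /* No zeroing, no time slicing. */
            rel->endpoint_count = 1U;
            rel->endpoint_array[0] = own_id;
    }
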
+/**
+ * struct spmc_shmem_obj_state - Global state.
+ * @data:           Backing store for spmc_shmem_obj objects.
+ * @data_size:      The size allocated for the backing store.
+ * @allocated:      Number of bytes allocated in @data.
+ * @next_handle:    Handle used for next allocated object.
+ * @lock:           Lock protecting all state in this file.
+ */
+struct spmc_shmem_obj_state {
+	uint8_t *data;
+	size_t data_size;
+	size_t allocated;
+	uint64_t next_handle;
+	spinlock_t lock;
+};
+
+extern struct spmc_shmem_obj_state spmc_shmem_obj_state;
+extern int plat_spmc_shmem_begin(struct ffa_mtd *desc);
+extern int plat_spmc_shmem_reclaim(struct ffa_mtd *desc);
+
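A platform that needs no additional bookkeeping around memory transactions
could satisfy these two hooks trivially; a minimal sketch, not part of this
patch:

    /* Hypothetical no-op platform hooks: accept every share and reclaim. */
    int plat_spmc_shmem_begin(struct ffa_mtd *desc)
    {
            (void)desc;
            return 0;
    }

    int plat_spmc_shmem_reclaim(struct ffa_mtd *desc)
    {
            (void)desc;
            return 0;
    }
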
+long spmc_ffa_mem_send(uint32_t smc_fid,
+		       bool secure_origin,
+		       uint64_t total_length,
+		       uint32_t fragment_length,
+		       uint64_t address,
+		       uint32_t page_count,
+		       void *cookie,
+		       void *handle,
+		       uint64_t flags);
+
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint64_t handle_low,
+			  uint64_t handle_high,
+			  uint32_t fragment_length,
+			  uint32_t sender_id,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags);
+
+long spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+			       bool secure_origin,
+			       uint32_t total_length,
+			       uint32_t fragment_length,
+			       uint64_t address,
+			       uint32_t page_count,
+			       void *cookie,
+			       void *handle,
+			       uint64_t flags);
+
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+			  bool secure_origin,
+			  uint32_t handle_low,
+			  uint32_t handle_high,
+			  uint32_t fragment_offset,
+			  uint32_t sender_id,
+			  void *cookie,
+			  void *handle,
+			  uint64_t flags);
+
+
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+			    bool secure_origin,
+			    uint32_t handle_low,
+			    uint32_t handle_high,
+			    uint32_t fragment_offset,
+			    uint32_t sender_id,
+			    void *cookie,
+			    void *handle,
+			    uint64_t flags);
+
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+			 bool secure_origin,
+			 uint32_t handle_low,
+			 uint32_t handle_high,
+			 uint32_t mem_flags,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags);
+
+#endif /* SPMC_SHARED_MEM_H */
diff --git a/services/std_svc/spm/spm_mm/spm_mm.mk b/services/std_svc/spm/spm_mm/spm_mm.mk
index 78ef0c9..f6691c3 100644
--- a/services/std_svc/spm/spm_mm/spm_mm.mk
+++ b/services/std_svc/spm/spm_mm/spm_mm.mk
@@ -16,6 +16,9 @@
 ifeq (${ENABLE_SME_FOR_NS},1)
         $(error "Error: SPM_MM is not compatible with ENABLE_SME_FOR_NS")
 endif
+ifeq (${CTX_INCLUDE_FPREGS},0)
+        $(warning "Warning: SPM_MM: CTX_INCLUDE_FPREGS is set to 0")
+endif
 
 SPM_MM_SOURCES	:=	$(addprefix services/std_svc/spm/spm_mm/,	\
 			${ARCH}/spm_mm_shim_exceptions.S		\
diff --git a/services/std_svc/spm/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
index e71e65b..8525cd2 100644
--- a/services/std_svc/spm/spm_mm/spm_mm_main.c
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -190,6 +190,14 @@
 	uint64_t rc;
 	sp_context_t *sp_ptr = &sp_ctx;
 
+#if CTX_INCLUDE_FPREGS
+	/*
+	 * The SP runs to completion, so there is no secure-context FP state to
+	 * restore. Save the FP registers of the non-secure context only.
+	 */
+	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
 	/* Wait until the Secure Partition is idle and set it to busy. */
 	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
 
@@ -208,6 +216,14 @@
 	assert(sp_ptr->state == SP_STATE_BUSY);
 	sp_state_set(sp_ptr, SP_STATE_IDLE);
 
+#if CTX_INCLUDE_FPREGS
+	/*
+	 * The SP runs to completion, so there is no secure-context FP state to
+	 * save. Restore the non-secure world's FP registers only.
+	 */
+	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
 	return rc;
 }
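
The two hunks above bracket the synchronous call into the Secure Partition: the SP runs to completion, so only the non-secure world's FP/SIMD state needs to be preserved across it. Condensed, the pattern looks like the following sketch (sp_synchronous_entry is an illustrative stand-in for the actual SP entry path):

#if CTX_INCLUDE_FPREGS
	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
#endif
	rc = sp_synchronous_entry(sp_ptr);	/* illustrative stand-in for the SP entry */
#if CTX_INCLUDE_FPREGS
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
#endif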
 
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index 5b131cd..e388784 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -626,7 +626,8 @@
 		 * If caller is secure and SPMC was initialized,
 		 * return FFA_VERSION of SPMD.
 		 * If caller is non secure and SPMC was initialized,
-		 * return SPMC's version.
+		 * forward to the EL3 SPMC if enabled, otherwise return
+		 * the SPMC version if implemented at a lower EL.
 		 * Sanity check to "input_version".
 		 * If the EL3 SPMC is enabled, ignore the SPMC state as
 		 * this is not used.
@@ -635,6 +636,17 @@
 		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
 			ret = FFA_ERROR_NOT_SUPPORTED;
 		} else if (!secure_origin) {
+			if (is_spmc_at_el3()) {
+				/*
+				 * Forward the call directly to the EL3 SPMC;
+				 * there is no need to wrap it in a direct
+				 * request.
+				 */
+				return spmd_smc_forward(smc_fid, secure_origin,
+							x1, x2, x3, x4, cookie,
+							handle, flags);
+			}
+
 			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
 			uint64_t rc;
 
@@ -672,7 +684,7 @@
 			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
 				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
 			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
-				(SPMD_FWK_MSG_BIT |
+				(FFA_FWK_MSG_BIT |
 				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
 				ERROR("Failed to forward FFA_VERSION\n");
 				ret = FFA_ERROR_NOT_SUPPORTED;
@@ -863,6 +875,8 @@
 	case FFA_MEM_RETRIEVE_RESP:
 	case FFA_MEM_RELINQUISH:
 	case FFA_MEM_RECLAIM:
+	case FFA_MEM_FRAG_TX:
+	case FFA_MEM_FRAG_RX:
 	case FFA_SUCCESS_SMC32:
 	case FFA_SUCCESS_SMC64:
 		/*
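
For a non-secure FFA_VERSION caller, the handling added above reduces to: hand the raw SMC to the SPMC when it lives at EL3, otherwise wrap the query in an SPMD-to-SPMC framework direct request and validate the response. A condensed sketch of that decision (helper names as used in the hunk; the lower-EL path is summarised in the comment):

	if (is_spmc_at_el3()) {
		/* EL3 SPMC: no framework direct request is needed. */
		return spmd_smc_forward(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	/*
	 * SPMC at a lower EL: send SPMD_FWK_MSG_FFA_VERSION_REQ and check that
	 * the response carries FFA_FWK_MSG_BIT | SPMD_FWK_MSG_FFA_VERSION_RESP.
	 */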
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
index b719161..a2704dd 100644
--- a/services/std_svc/spmd/spmd_pm.c
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -123,7 +123,7 @@
 
 	/* Build an SPMD to SPMC direct message request. */
 	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx),
-				SPMD_FWK_MSG_PSCI, PSCI_CPU_OFF);
+				FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
 
 	rc = spmd_spm_core_sync_entry(ctx);
 	if (rc != 0ULL) {
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index 4c298c9..07fecb6 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -59,8 +59,6 @@
 #define FFA_NS_ENDPOINT_ID			U(0)
 
 /* Define SPMD target function IDs for framework messages to the SPMC */
-#define SPMD_FWK_MSG_BIT			BIT(31)
-#define SPMD_FWK_MSG_PSCI			U(0)
 #define SPMD_FWK_MSG_FFA_VERSION_REQ		U(0x8)
 #define SPMD_FWK_MSG_FFA_VERSION_RESP		U(0x9)
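
These function ids are carried in framework direct messages: as the FFA_VERSION response check in spmd_main.c above shows, w2/x2 holds FFA_FWK_MSG_BIT ORed with a message id (e.g. FFA_FWK_MSG_PSCI for the CPU_OFF notification in spmd_pm.c). A receiver-side sketch of that encoding, assuming the FF-A definitions are in scope (the helper itself is illustrative, not existing code):

/* Illustrative only: msg_w2 is the value received in w2/x2. */
static bool is_framework_message(uint32_t msg_w2)
{
	return (msg_w2 & FFA_FWK_MSG_BIT) != 0U;
}

/* (msg_w2 & ~FFA_FWK_MSG_BIT) then selects the handler, e.g. FFA_FWK_MSG_PSCI. */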
 
diff --git a/tools/cert_create/Makefile b/tools/cert_create/Makefile
index 77d2007..ca548b8 100644
--- a/tools/cert_create/Makefile
+++ b/tools/cert_create/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -62,7 +62,14 @@
 # Make soft links and include from local directory otherwise wrong headers
 # could get pulled in from firmware tree.
 INC_DIR += -I ./include -I ${PLAT_INCLUDE} -I ${OPENSSL_DIR}/include
-LIB_DIR := -L ${OPENSSL_DIR}/lib
+
+# Include the library directories where the OpenSSL libraries are located.
+# For a normal installation (i.e. when ${OPENSSL_DIR} = /usr or
+# /usr/local), the libraries are located under the ${OPENSSL_DIR}/lib/
+# directory. However, for a local build of OpenSSL, the built libraries are
+# located under the main project directory (i.e. ${OPENSSL_DIR}, not
+# ${OPENSSL_DIR}/lib/).
+LIB_DIR := -L ${OPENSSL_DIR}/lib -L ${OPENSSL_DIR}
 LIB := -lssl -lcrypto
 
 HOSTCC ?= gcc
diff --git a/tools/cert_create/src/cert.c b/tools/cert_create/src/cert.c
index 4b35d73..67ae1d6 100644
--- a/tools/cert_create/src/cert.c
+++ b/tools/cert_create/src/cert.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -39,7 +39,7 @@
 	if (!btmp)
 		return 0;
 
-	if (!BN_pseudo_rand(btmp, SERIAL_RAND_BITS, 0, 0))
+	if (!BN_rand(btmp, SERIAL_RAND_BITS, 0, 0))
 		goto error;
 	if (ai && !BN_to_ASN1_INTEGER(btmp, ai))
 		goto error;
diff --git a/tools/cert_create/src/key.c b/tools/cert_create/src/key.c
index 6435975..2857a3b 100644
--- a/tools/cert_create/src/key.c
+++ b/tools/cert_create/src/key.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,69 +40,25 @@
 
 static int key_create_rsa(key_t *key, int key_bits)
 {
-	BIGNUM *e;
-	RSA *rsa = NULL;
-
-	e = BN_new();
-	if (e == NULL) {
-		printf("Cannot create RSA exponent\n");
-		goto err;
-	}
-
-	if (!BN_set_word(e, RSA_F4)) {
-		printf("Cannot assign RSA exponent\n");
-		goto err;
-	}
-
-	rsa = RSA_new();
+	EVP_PKEY *rsa = EVP_RSA_gen(key_bits);
 	if (rsa == NULL) {
-		printf("Cannot create RSA key\n");
-		goto err;
-	}
-
-	if (!RSA_generate_key_ex(rsa, key_bits, e, NULL)) {
 		printf("Cannot generate RSA key\n");
-		goto err;
+		return 0;
 	}
-
-	if (!EVP_PKEY_assign_RSA(key->key, rsa)) {
-		printf("Cannot assign RSA key\n");
-		goto err;
-	}
-
-	BN_free(e);
+	key->key = rsa;
 	return 1;
-err:
-	RSA_free(rsa);
-	BN_free(e);
-	return 0;
 }
 
 #ifndef OPENSSL_NO_EC
 static int key_create_ecdsa(key_t *key, int key_bits)
 {
-	EC_KEY *ec;
-
-	ec = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+	EVP_PKEY *ec = EVP_EC_gen("prime256v1");
 	if (ec == NULL) {
-		printf("Cannot create EC key\n");
-		goto err;
-	}
-	if (!EC_KEY_generate_key(ec)) {
 		printf("Cannot generate EC key\n");
-		goto err;
+		return 0;
 	}
-	EC_KEY_set_flags(ec, EC_PKEY_NO_PARAMETERS);
-	EC_KEY_set_asn1_flag(ec, OPENSSL_EC_NAMED_CURVE);
-	if (!EVP_PKEY_assign_EC_KEY(key->key, ec)) {
-		printf("Cannot assign EC key\n");
-		goto err;
-	}
-
+	key->key = ec;
 	return 1;
-err:
-	EC_KEY_free(ec);
-	return 0;
 }
 #endif /* OPENSSL_NO_EC */
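
EVP_RSA_gen() and EVP_EC_gen() are OpenSSL 3.0 one-shot helpers; on toolchains where they are unavailable, the same result can be obtained through the generic EVP_PKEY_CTX key-generation flow. A rough sketch of the RSA case, not part of this patch, with error handling abbreviated:

#include <openssl/evp.h>
#include <openssl/rsa.h>

static EVP_PKEY *gen_rsa_explicit(int key_bits)
{
	EVP_PKEY *pkey = NULL;
	EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);

	if (ctx == NULL ||
	    EVP_PKEY_keygen_init(ctx) <= 0 ||
	    EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, key_bits) <= 0 ||
	    EVP_PKEY_keygen(ctx, &pkey) <= 0) {
		pkey = NULL;	/* generation failed */
	}
	EVP_PKEY_CTX_free(ctx);	/* safe on NULL */
	return pkey;
}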
 
diff --git a/tools/cert_create/src/sha.c b/tools/cert_create/src/sha.c
index 3d977fb..06ef360 100644
--- a/tools/cert_create/src/sha.c
+++ b/tools/cert_create/src/sha.c
@@ -1,26 +1,38 @@
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <openssl/sha.h>
 #include <stdio.h>
 #include "debug.h"
 #include "key.h"
+#include <openssl/evp.h>
+#include <openssl/obj_mac.h>
 
 #define BUFFER_SIZE	256
 
+static int get_algorithm_nid(int hash_alg)
+{
+	int nids[] = {NID_sha256, NID_sha384, NID_sha512};
+	if (hash_alg < 0 || hash_alg >= sizeof(nids) / sizeof(*nids)) {
+		return NID_undef;
+	}
+	return nids[hash_alg];
+}
+
 int sha_file(int md_alg, const char *filename, unsigned char *md)
 {
 	FILE *inFile;
-	SHA256_CTX shaContext;
-	SHA512_CTX sha512Context;
+	EVP_MD_CTX *mdctx;
+	const EVP_MD *md_type;
 	int bytes;
+	int alg_nid;
+	unsigned int total_bytes;
 	unsigned char data[BUFFER_SIZE];
 
 	if ((filename == NULL) || (md == NULL)) {
-		ERROR("%s(): NULL argument\n", __FUNCTION__);
+		ERROR("%s(): NULL argument\n", __func__);
 		return 0;
 	}
 
@@ -30,26 +42,37 @@
 		return 0;
 	}
 
-	if (md_alg == HASH_ALG_SHA384) {
-		SHA384_Init(&sha512Context);
-		while ((bytes = fread(data, 1, BUFFER_SIZE, inFile)) != 0) {
-			SHA384_Update(&sha512Context, data, bytes);
-		}
-		SHA384_Final(md, &sha512Context);
-	} else if (md_alg == HASH_ALG_SHA512) {
-		SHA512_Init(&sha512Context);
-		while ((bytes = fread(data, 1, BUFFER_SIZE, inFile)) != 0) {
-			SHA512_Update(&sha512Context, data, bytes);
-		}
-		SHA512_Final(md, &sha512Context);
-	} else {
-		SHA256_Init(&shaContext);
-		while ((bytes = fread(data, 1, BUFFER_SIZE, inFile)) != 0) {
-			SHA256_Update(&shaContext, data, bytes);
-		}
-		SHA256_Final(md, &shaContext);
+	mdctx = EVP_MD_CTX_new();
+	if (mdctx == NULL) {
+		fclose(inFile);
+		ERROR("%s(): Could not create EVP MD context\n", __func__);
+		return 0;
 	}
 
+	alg_nid = get_algorithm_nid(md_alg);
+	if (alg_nid == NID_undef) {
+		ERROR("%s(): Invalid hash algorithm\n", __func__);
+		goto err;
+	}
+
+	md_type = EVP_get_digestbynid(alg_nid);
+	if (EVP_DigestInit_ex(mdctx, md_type, NULL) == 0) {
+		ERROR("%s(): Could not initialize EVP MD digest\n", __func__);
+		goto err;
+	}
+
+	while ((bytes = fread(data, 1, BUFFER_SIZE, inFile)) != 0) {
+		EVP_DigestUpdate(mdctx, data, bytes);
+	}
+	EVP_DigestFinal_ex(mdctx, md, &total_bytes);
+
 	fclose(inFile);
+	EVP_MD_CTX_free(mdctx);
 	return 1;
+
+err:
+	fclose(inFile);
+	EVP_MD_CTX_free(mdctx);
+	return 0;
 }
+
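
The EVP_DigestInit_ex()/EVP_DigestUpdate()/EVP_DigestFinal_ex() sequence above is the generic streaming pattern; the same flow works for in-memory data. A minimal sketch hashing a buffer with SHA-256 (the function name and calling convention are illustrative; md must hold at least 32 bytes):

#include <openssl/evp.h>

static int sha256_buffer(const void *buf, size_t len, unsigned char *md)
{
	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
	unsigned int md_len = 0;
	int ok = 0;

	if (ctx != NULL &&
	    EVP_DigestInit_ex(ctx, EVP_sha256(), NULL) == 1 &&
	    EVP_DigestUpdate(ctx, buf, len) == 1 &&
	    EVP_DigestFinal_ex(ctx, md, &md_len) == 1) {
		ok = 1;
	}
	EVP_MD_CTX_free(ctx);	/* safe on NULL */
	return ok;	/* 1 on success, matching sha_file()'s convention */
}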
diff --git a/tools/encrypt_fw/Makefile b/tools/encrypt_fw/Makefile
index 96dff23..60bd8ea 100644
--- a/tools/encrypt_fw/Makefile
+++ b/tools/encrypt_fw/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019-2020, Linaro Limited. All rights reserved.
+# Copyright (c) 2019-2022, Linaro Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -39,7 +39,14 @@
 # Make soft links and include from local directory otherwise wrong headers
 # could get pulled in from firmware tree.
 INC_DIR := -I ./include -I ../../include/tools_share -I ${OPENSSL_DIR}/include
-LIB_DIR := -L ${OPENSSL_DIR}/lib
+
+# Include the library directories where the OpenSSL libraries are located.
+# For a normal installation (i.e. when ${OPENSSL_DIR} = /usr or
+# /usr/local), the libraries are located under the ${OPENSSL_DIR}/lib/
+# directory. However, for a local build of OpenSSL, the built libraries are
+# located under the main project directory (i.e. ${OPENSSL_DIR}, not
+# ${OPENSSL_DIR}/lib/).
+LIB_DIR := -L ${OPENSSL_DIR}/lib -L ${OPENSSL_DIR}
 LIB := -lssl -lcrypto
 
 HOSTCC ?= gcc
diff --git a/tools/fiptool/Makefile b/tools/fiptool/Makefile
index 7c2a083..e6aeba9 100644
--- a/tools/fiptool/Makefile
+++ b/tools/fiptool/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -22,7 +22,14 @@
 else
   HOSTCCFLAGS += -O2
 endif
-LDLIBS := -L${OPENSSL_DIR}/lib -lcrypto
+
+# Include the library directories where the OpenSSL libraries are located.
+# For a normal installation (i.e. when ${OPENSSL_DIR} = /usr or
+# /usr/local), the libraries are located under the ${OPENSSL_DIR}/lib/
+# directory. However, for a local build of OpenSSL, the built libraries are
+# located under the main project directory (i.e. ${OPENSSL_DIR}, not
+# ${OPENSSL_DIR}/lib/).
+LDLIBS := -L${OPENSSL_DIR}/lib -L${OPENSSL_DIR} -lcrypto
 
 ifeq (${V},0)
   Q := @