lib: sm: FF-A messages now carry Trusty SM calls

If FFA_MSG_WAIT is supported by the SPMC, Trusty API calls are encapsulated
within FF-A direct messages; otherwise Trusty falls back to legacy SPD SMCs.

Bug: 284057071
Signed-off-by: Ben Horgan <ben.horgan@arm.com>
Change-Id: I7abee0c070f4d45ed7518691c0f0c332ce5e813f
diff --git a/lib/sm/include/lib/sm.h b/lib/sm/include/lib/sm.h
index ce4be93..5c96b3f 100644
--- a/lib/sm/include/lib/sm.h
+++ b/lib/sm/include/lib/sm.h
@@ -92,6 +92,12 @@
 status_t sm_intc_fiq_enter(void);
 enum handler_return sm_intc_enable_interrupts(void);
 
+/*
+ * Ring the doorbell (or an equivalent interrupt) on the primary scheduler
+ * so that it enqueues a NOP call, giving Trusty cycles to run.
+ */
+void sm_intc_raise_doorbell_irq(void);
+
 /* Get the argument block passed in by the bootloader */
 status_t sm_get_boot_args(void** boot_argsp, size_t* args_sizep);
 
diff --git a/lib/sm/include/lib/sm/smcall.h b/lib/sm/include/lib/sm/smcall.h
index 0b1c539..8486a11 100644
--- a/lib/sm/include/lib/sm/smcall.h
+++ b/lib/sm/include/lib/sm/smcall.h
@@ -162,4 +162,66 @@
 #define SMC_SC_HANDLE_QL_TIPC_DEV_CMD SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 32)
 #define SMC_FC_HANDLE_QL_TIPC_DEV_CMD SMC_FASTCALL_NR(SMC_ENTITY_TRUSTED_OS, 32)
 
+/**
+ * TRUSTY_FFA_MSG_RUN_FASTCALL - Run a Trusty fastcall synchronously.
+ *
+ * @r3: The value of %TRUSTY_FFA_MSG_RUN_FASTCALL.
+ * @r4: The fid of the Trusty fastcall.
+ * @r5: The 1st argument of the fastcall.
+ * @r6: The 2nd argument of the fastcall.
+ * @r7: The 3rd argument of the fastcall.
+ *
+ * Execute a Trusty fastcall synchronously with interrupts disabled,
+ * blocking until it completes and returning its result directly
+ * as a direct message response.
+ */
+#define TRUSTY_FFA_MSG_RUN_FASTCALL (0)
+
+/**
+ * TRUSTY_FFA_MSG_QUEUE_STDCALL - Asynchronously queue a Trusty stdcall.
+ *
+ * @r3: The value of %TRUSTY_FFA_MSG_QUEUE_STDCALL.
+ * @r4: The fid of the Trusty stdcall.
+ * @r5: The 1st argument of the stdcall.
+ * @r6: The 2nd argument of the stdcall.
+ * @r7: The 3rd argument of the stdcall.
+ *
+ * Queue a Trusty stdcall asynchronously for execution in the stdcall thread.
+ * The non-secure world should assign cycles to Trusty separately and
+ * call %TRUSTY_FFA_MSG_GET_STDCALL_RET to check if the call completed.
+ *
+ * Returns 0 on success, or %SM_ERR_BUSY if Trusty has another queued stdcall.
+ */
+#define TRUSTY_FFA_MSG_QUEUE_STDCALL (1)
+
+/**
+ * TRUSTY_FFA_MSG_GET_STDCALL_RET - Get the result of a Trusty stdcall.
+ *
+ * @r3: The value of %TRUSTY_FFA_MSG_GET_STDCALL_RET; [out] the result of the call.
+ *
+ * The non-secure world should call this interface to
+ * retrieve the result of a previously queued stdcall.
+ * The request will return %SM_ERR_CPU_IDLE if the stdcall is still running.
+ */
+#define TRUSTY_FFA_MSG_GET_STDCALL_RET (2)
+
+/**
+ * TRUSTY_FFA_MSG_RUN_NOPCALL - Run the Trusty handler for a nopcall.
+ *
+ * @r3: The value of %TRUSTY_FFA_MSG_RUN_NOPCALL.
+ * @r4: The 1st argument of the nopcall.
+ * @r5: The 2nd argument of the nopcall.
+ * @r6: The 3rd argument of the nopcall.
+ *
+ * Execute a Trusty nopcall handler synchronously with interrupts disabled,
+ * blocking until it completes and returning its result directly
+ * as a direct message response. If Trusty should get more cycles to run
+ * the second half of the nopcall (triggered by the handler), it should
+ * signal the primary scheduler to enqueue a Trusty NOP.
+ *
+ * Returns 0 in @r3 on success, or one of the libsm error codes
+ * in case of failure.
+ */
+#define TRUSTY_FFA_MSG_RUN_NOPCALL (3)
+
 #endif /* __LIB_SM_SMCALL_H */
diff --git a/lib/sm/rules.mk b/lib/sm/rules.mk
index c1b1bb8..ba64870 100644
--- a/lib/sm/rules.mk
+++ b/lib/sm/rules.mk
@@ -28,6 +28,10 @@
 GLOBAL_DEFINES += \
 	WITH_LIB_SM=1 \
 
+ifeq (true,$(call TOBOOL,$(LIB_SM_WITH_FFA_LOOP)))
+MODULE_DEFINES += LIB_SM_WITH_FFA_LOOP=1
+endif
+
 GLOBAL_INCLUDES += \
 	$(LOCAL_DIR)/include
 
diff --git a/lib/sm/sm.c b/lib/sm/sm.c
index cec2df6..f19f7af 100644
--- a/lib/sm/sm.c
+++ b/lib/sm/sm.c
@@ -21,11 +21,14 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <arch/mp.h>
 #include <err.h>
+#include <interface/arm_ffa/arm_ffa.h>
 #include <kernel/event.h>
 #include <kernel/mutex.h>
 #include <kernel/thread.h>
 #include <kernel/vm.h>
+#include <lib/arm_ffa/arm_ffa.h>
 #include <lib/heap.h>
 #include <lib/sm.h>
 #include <lib/sm/sm_err.h>
@@ -64,6 +67,13 @@
 static spin_lock_t sm_api_version_lock;
 static atomic_bool platform_halted;
 
+#if LIB_SM_WITH_FFA_LOOP
+static bool sm_use_ffa = true;
+static atomic_bool sm_ffa_valid_call;
+#else
+static bool sm_use_ffa = false;
+#endif
+
 static event_t nsirqevent[SMP_MAX_CPUS];
 static thread_t* nsirqthreads[SMP_MAX_CPUS];
 static thread_t* nsidlethreads[SMP_MAX_CPUS];
@@ -80,6 +90,8 @@
 extern smc32_handler_t sm_nopcall_table[];
 extern smc32_handler_t sm_fastcall_table[];
 
+static long sm_get_stdcall_ret(ext_mem_obj_id_t);
+
 long smc_sm_api_version(struct smc32_args* args) {
     uint32_t api_version = args->params[0];
 
@@ -213,7 +225,13 @@
     event_signal(&stdcallstate.event, false);
 
 restart_stdcall:
-    stdcallstate.active_cpu = cpu;
+    if (!sm_use_ffa) {
+        /*
+         * On FF-A the stdcall result is fetched via a separate direct
+         * message (possibly on another CPU), so active_cpu is not tracked.
+         */
+        stdcallstate.active_cpu = cpu;
+    }
     ret = 0;
 
 err:
@@ -222,7 +240,196 @@
     return ret;
 }
 
+#if LIB_SM_WITH_FFA_LOOP
+static long sm_ffa_handle_direct_req(long ret, struct smc_ret8* regs) {
+    struct smc32_args args;
+    uint16_t client_id = (regs->r1 >> 16) & 0xFFFFU;
+
+    switch (regs->r3) {
+    case TRUSTY_FFA_MSG_RUN_FASTCALL:
+        if (SMC_IS_SMC64(regs->r4)) {
+            return SM_ERR_NOT_SUPPORTED;
+        }
+        if (!SMC_IS_FASTCALL(regs->r4)) {
+            dprintf(CRITICAL, "Synchronous message is not a fastcall: %lx\n",
+                    regs->r4);
+            return SM_ERR_INVALID_PARAMETERS;
+        }
+
+        args.smc_nr = regs->r4;
+        args.params[0] = regs->r5;
+        args.params[1] = regs->r6;
+        args.params[2] = regs->r7;
+        args.client_id = client_id;
+        return sm_fastcall_table[SMC_ENTITY(args.smc_nr)](&args);
+
+    case TRUSTY_FFA_MSG_QUEUE_STDCALL:
+        if (SMC_IS_SMC64(regs->r4)) {
+            return SM_ERR_NOT_SUPPORTED;
+        }
+        if (SMC_IS_FASTCALL(regs->r4)) {
+            dprintf(CRITICAL, "Asynchronous message is a fastcall: %lx\n",
+                    regs->r4);
+            return SM_ERR_INVALID_PARAMETERS;
+        }
+
+        args.smc_nr = regs->r4;
+        args.params[0] = regs->r5;
+        args.params[1] = regs->r6;
+        args.params[2] = regs->r7;
+        args.client_id = client_id;
+
+        ret = sm_queue_stdcall(&args);
+        if (!ret) {
+            /* Ring the doorbell on the host so it queues a Trusty NOP */
+            sm_intc_raise_doorbell_irq();
+        }
+        return ret;
+
+    case TRUSTY_FFA_MSG_GET_STDCALL_RET:
+        return sm_get_stdcall_ret((ext_mem_obj_id_t)client_id);
+
+    case TRUSTY_FFA_MSG_RUN_NOPCALL:
+        args.smc_nr = SMC_SC_NOP;
+        args.params[0] = regs->r4;
+        args.params[1] = regs->r5;
+        args.params[2] = regs->r6;
+        args.client_id = client_id;
+
+#if !ARM_MERGE_FIQ_IRQ
+#error "FF-A libsm requires ARM_MERGE_FIQ_IRQ"
+#endif
+        ret = sm_nopcall_table[SMC_ENTITY(args.params[0])](&args);
+        if (!ret) {
+            /* Ring the doorbell on the host so it queues a Trusty NOP */
+            sm_intc_raise_doorbell_irq();
+        }
+        return ret;
+
+    default:
+        dprintf(CRITICAL,
+                "Unsupported FF-A message from client %" PRIu16 ": %lx\n",
+                client_id, regs->r3);
+        return SM_ERR_NOT_SUPPORTED;
+    }
+}
+
+static void sm_ffa_loop(long ret, struct smc32_args* args) {
+    struct smc_ret8 regs = {0};
+    enum arm_ffa_init_state ffa_init_state = arm_ffa_init_state();
+
+    if (atomic_load(&platform_halted)) {
+        regs = arm_ffa_call_error(FFA_ERROR_ABORTED);
+    } else if (ffa_init_state == ARM_FFA_INIT_UNINIT) {
+        panic("FF-A not initialized before main loop\n");
+    } else if (ffa_init_state == ARM_FFA_INIT_FAILED) {
+        TRACEF("FF-A failed to initialize, "
+               "falling back to legacy SPD SMCs\n");
+        sm_use_ffa = false;
+        return;
+    } else {
+        /*
+         * Linux will check the shadow priority next and
+         * give us more cycles if it's anything other than IDLE
+         */
+        LTRACEF_LEVEL(5, "Calling FFA_MSG_WAIT (%ld)\n", ret);
+        regs = arm_ffa_call_msg_wait();
+    }
+
+    while (true) {
+        LTRACEF_LEVEL(5, "Incoming FF-A SMC (%lx)\n", regs.r0);
+        switch ((uint32_t)regs.r0) {
+        case SMC_FC_FFA_MSG_SEND_DIRECT_REQ:
+        case SMC_FC64_FFA_MSG_SEND_DIRECT_REQ:
+            if (atomic_load(&platform_halted)) {
+                /* Return to NS since we have nothing to do */
+                regs = arm_ffa_call_error(FFA_ERROR_ABORTED);
+                break;
+            }
+            atomic_store(&sm_ffa_valid_call, true);
+
+            if (regs.r2 & (1U << 31)) {
+                /* TODO: support framework messages */
+                dprintf(CRITICAL, "Unhandled FF-A framework message: %lx\n",
+                        regs.r2 & 0xFFU);
+                regs = arm_ffa_call_error(FFA_ERROR_NOT_SUPPORTED);
+                break;
+            }
+
+            ret = sm_ffa_handle_direct_req(ret, &regs);
+
+            LTRACEF_LEVEL(5, "Calling FFA_MSG_SEND_DIRECT_RESP (%ld)\n", ret);
+            regs = arm_ffa_msg_send_direct_resp(&regs, (ulong)ret, 0, 0, 0, 0);
+            break;
+
+        case SMC_FC_FFA_RUN:
+            if (atomic_load(&platform_halted)) {
+                /* Return to NS since we have nothing to do */
+                regs = arm_ffa_call_error(FFA_ERROR_ABORTED);
+                break;
+            }
+            atomic_store(&sm_ffa_valid_call, true);
+
+            args->smc_nr = SMC_SC_NOP;
+            args->params[0] = args->params[1] = args->params[2] = 0;
+            return;
+
+        case SMC_FC_FFA_INTERRUPT:
+            atomic_store(&sm_ffa_valid_call, true);
+            sm_intc_fiq_enter();
+            /*
+             * sm_intc_fiq_enter rings the doorbell,
+             * so we do not need to do it again here.
+             */
+            regs = arm_ffa_call_msg_wait();
+            break;
+
+        case SMC_FC_FFA_ERROR:
+            if (atomic_load(&platform_halted)) {
+                /*
+                 * Loop forever if we halted and
+                 * got back here from FFA_ERROR_ABORTED,
+                 * there is not much else we can do
+                 */
+                break;
+            }
+            if ((int32_t)regs.r2 == FFA_ERROR_NOT_SUPPORTED &&
+                !atomic_load(&sm_ffa_valid_call)) {
+                TRACEF("Using legacy SPD SMCs\n");
+                sm_use_ffa = false;
+                return;
+            }
+            panic("Received FFA_ERROR from SPMC: (%lx, %lx)\n", regs.r1,
+                  regs.r2);
+
+        case SMC_UNKNOWN:
+            if (atomic_load(&sm_ffa_valid_call)) {
+                /* We already got a valid FF-A call earlier */
+                panic("Received SMC_UNKNOWN from SPMC\n");
+            }
+            TRACEF("Using legacy SPD SMCs\n");
+            sm_use_ffa = false;
+            return;
+
+        default:
+            dprintf(CRITICAL, "Unhandled FF-A SMC: %lx\n", regs.r0);
+            regs = arm_ffa_call_error(FFA_ERROR_NOT_SUPPORTED);
+        }
+    }
+}
+#endif
+
 static void sm_sched_nonsecure_fiq_loop(long ret, struct smc32_args* args) {
+#if LIB_SM_WITH_FFA_LOOP
+    if (sm_use_ffa) {
+        sm_ffa_loop(ret, args);
+        /* Check again in case we switched to the legacy SPD SMCs */
+        if (sm_use_ffa) {
+            return;
+        }
+    }
+#endif
+
     while (true) {
         if (atomic_load(&platform_halted)) {
             ret = SM_ERR_PANIC;
@@ -261,6 +468,7 @@
             LTRACEF_LEVEL(3, "cpu %d, got nop\n", cpu);
             ret = sm_nopcall_table[SMC_ENTITY(args.params[0])](&args);
         } else {
+            DEBUG_ASSERT(!sm_use_ffa);
             ret = sm_queue_stdcall(&args);
         }
     } while (ret);
@@ -311,20 +519,26 @@
 }
 
 /* must be called with irqs disabled */
-static long sm_get_stdcall_ret(void) {
+static long sm_get_stdcall_ret(ext_mem_obj_id_t client_id) {
     long ret;
     uint cpu = arch_curr_cpu_num();
 
     spin_lock(&stdcallstate.lock);
 
-    if (stdcallstate.active_cpu != (int)cpu) {
+    if (!sm_use_ffa && stdcallstate.active_cpu != (int)cpu) {
         dprintf(CRITICAL, "%s: stdcallcpu, a%d != curr-cpu %d, l%d, i%d\n",
                 __func__, stdcallstate.active_cpu, cpu, stdcallstate.last_cpu,
                 stdcallstate.initial_cpu);
         ret = SM_ERR_INTERNAL_FAILURE;
         goto err;
     }
-    stdcallstate.last_cpu = stdcallstate.active_cpu;
+    if (stdcallstate.args.client_id != client_id) {
+        dprintf(CRITICAL, "%s: stdcallcpu, client %" PRIx64 " != %" PRIx64 "\n",
+                __func__, stdcallstate.args.client_id, client_id);
+        ret = SM_ERR_NOT_ALLOWED;
+        goto err;
+    }
+    stdcallstate.last_cpu = (int)cpu;
     stdcallstate.active_cpu = -1;
 
     if (stdcallstate.done) {
@@ -383,7 +597,7 @@
         cpu = enter_smcall_critical_section();
 
         if (cpu == stdcallstate.active_cpu)
-            ret = sm_get_stdcall_ret();
+            ret = sm_get_stdcall_ret(stdcallstate.args.client_id);
         else
             ret = SM_ERR_NOP_DONE;
 
@@ -494,6 +708,7 @@
 void sm_handle_fiq(void) {
     uint32_t expected_return;
     struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);
+    DEBUG_ASSERT(!sm_use_ffa);
     if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_RESTART_FIQ)) {
         sm_sched_nonsecure_fiq_loop(SM_ERR_FIQ_INTERRUPTED, &args);
         expected_return = SMC_SC_RESTART_FIQ;