Update kernel to ab/13436148

Change-Id: Id861d984a1bc466dca9bbfa2ae3d70dbe56ef816
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..656c0d0
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+
+load("//build/kernel/kleaf:kernel.bzl", "kernel_module")
+
+filegroup(
+    name = "headers",
+    srcs = glob([
+        "include/**/*.h",
+    ]),
+    visibility = [
+        "//private/devices/google:__subpackages__",
+        "//private/google-modules/aoc:__pkg__",
+        "//private/google-modules/hdcp/samsung:__pkg__",
+        "//private/google-modules/soc/gs:__pkg__",
+    ],
+)
+
+filegroup(
+    name = "trusty.kconfig",
+    srcs = [
+        "drivers/trusty/Kconfig",
+    ],
+    visibility = [
+        "//private/devices/google:__subpackages__",
+        "//private/google-modules/soc/gs:__pkg__",
+    ],
+)
+
+kernel_module(
+    name = "trusty",
+    srcs = glob([
+        "drivers/trusty/*.c",
+        "drivers/trusty/*.h",
+        "drivers/trusty/*.S",
+    ]) + [
+        "Kbuild",
+        "drivers/trusty/Kbuild",
+        "//private/google-modules/soc/gs:gs_soc_headers",
+        "//private/google-modules/trusty:headers",
+    ],
+    outs = [
+        "trusty-core.ko",
+        "trusty-ipc.ko",
+        "trusty-log.ko",
+        "trusty-test.ko",
+        "trusty-virtio.ko",
+    ],
+    kernel_build = "//private/devices/google/common:kernel",
+    visibility = [
+        "//private/devices/google:__subpackages__",
+        "//private/google-modules/hdcp/samsung:__pkg__",
+        "//private/google-modules/soc/gs:__pkg__",
+    ],
+)
diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
new file mode 100644
index 0000000..cbb545a
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
@@ -0,0 +1,67 @@
+Trusty irq interface
+
+Trusty requires non-secure irqs to be forwarded to the secure OS.
+
+Required properties:
+- compatible: "android,trusty-irq-v1"
+
+Optional properties:
+
+- interrupt-templates: is an optional property that works together
+  with "interrupt-ranges" to specify secure side to kernel IRQs mapping.
+
+  It is a list of entries, each one of which defines a group of interrupts
+  having common properties, and has the following format:
+    < phandle irq_id_pos [templ_data]>
+      phandle - phandle of interrupt controller this template is for
+      irq_id_pos - the position of irq id in interrupt specifier array
+                   for interrupt controller referenced by phandle.
+      templ_data - is an array of u32 values (could be empty) in the same
+                   format as interrupt specifier for interrupt controller
+                   referenced by phandle but with omitted irq id field.
+
+- interrupt-ranges: list of entries that specifies secure side to kernel
+  IRQs mapping.
+
+  Each entry in the "interrupt-ranges" list has the following format:
+    <beg end templ_idx>
+      beg - first entry in this range
+      end - last entry in this range
+      templ_idx  - index of entry in "interrupt-templates" property
+                   that must be used as a template for all interrupts
+                   in this range
+
+- ipi-range: optional mapping of a linear range of trusty IRQs to a linear range
+  of IPIs (inter-processor interrupts).  This has the following format:
+    <beg end ipi_base>
+      beg - first trusty IRQ number that is an IPI
+      end - last trusty IRQ number that is an IPI
+      ipi_base - IPI number of 'beg'
+
+Example:
+{
+	gic: interrupt-controller@50041000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		...
+	};
+	...
+	trusty {
+		compatible = "android,trusty-smc-v1";
+		ranges;
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		irq {
+			compatible = "android,trusty-irq-v1";
+			interrupt-templates = <&gic 1 GIC_PPI 0>,
+					      <&gic 1 GIC_SPI 0>;
+			interrupt-ranges = <16  31 0>,
+					   <32 223 1>;
+			ipi-range = <8 15 8>;
+		};
+	};
+}
+
+Must be a child of the node that provides the trusty std/fast call interface.
diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
new file mode 100644
index 0000000..1b39ad3
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
@@ -0,0 +1,6 @@
+Trusty smc interface
+
+Trusty is running in secure mode on the same (arm) cpu(s) as the current os.
+
+Required properties:
+- compatible: "android,trusty-smc-v1"
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..7bb445c
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+subdir-ccflags-y += -I$(srctree)/$(src)/include
+
+obj-y += drivers/trusty/
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..65946e6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,9 @@
+M ?= $(shell pwd)
+
+KBASE_PATH_RELATIVE = $(M)
+
+EXTRA_CFLAGS += -Werror
+
+modules modules_install clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 \
+	EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" $(@)
diff --git a/drivers/trusty/Kbuild b/drivers/trusty/Kbuild
new file mode 100644
index 0000000..1efdd13
--- /dev/null
+++ b/drivers/trusty/Kbuild
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for trusty components
+#
+
+# Needed for the trace points
+ccflags-y += -I$(srctree)/$(src)/
+
+obj-$(CONFIG_TRUSTY)		+= trusty-core.o
+trusty-core-objs		+= trusty.o trusty-mem.o trusty-sched-share.o
+trusty-core-$(CONFIG_ARM)	+= trusty-smc-arm.o
+trusty-core-$(CONFIG_ARM64)	+= trusty-smc-arm64.o
+trusty-core-$(CONFIG_TRUSTY_IRQ)+= trusty-irq.o
+obj-$(CONFIG_TRUSTY_LOG)	+= trusty-log.o
+obj-$(CONFIG_TRUSTY_TEST)	+= trusty-test.o
+obj-$(CONFIG_TRUSTY_VIRTIO)	+= trusty-virtio.o
+obj-$(CONFIG_TRUSTY_VIRTIO_IPC)	+= trusty-ipc.o
diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig
new file mode 100644
index 0000000..92b164a
--- /dev/null
+++ b/drivers/trusty/Kconfig
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Trusty driver
+#
+
+menu "Trusty driver"
+
+config TRUSTY
+	tristate "Trusty core driver"
+	depends on ARM || ARM64
+	help
+	  Trusty is a secure OS that provides a Trusted Execution Environment
+	  (TEE) for Android.  Trusty runs on the same processor as Linux but is
+	  isolated from the rest of the system by both hardware and software.
+
+	  This option enables the core part of the Linux kernel driver for
+	  Trusty.  This doesn't do much by itself; you'll need to enable some of
+	  the sub-modules too.
+
+	  If you build this as a module, it will be called trusty-core.
+
+if TRUSTY
+
+config TRUSTY_IRQ
+	bool "Trusty IRQ support"
+	default y
+	help
+	  Enable forwarding of IRQs from Linux to Trusty.  This driver retrieves
+	  from Trusty a list of IRQs that Trusty uses, and it registers handlers
+	  for them which notify Trusty that the IRQ has been received.
+
+	  If you build the trusty core driver as a module, this will be part of the
+	  trusty-core module.
+
+	  Usually this is needed for Trusty to work, so say 'y'.
+
+config TRUSTY_LOG
+	tristate "Trusty log support"
+	default y
+	help
+	  Print log messages generated by the secure OS to the Linux kernel log.
+
+	  While this module is loaded, messages are retrieved and printed after
+	  each call into Trusty, and also during Linux kernel panics.
+
+	  If you build this as a module, it will be called trusty-log.
+
+config TRUSTY_TEST
+	tristate "Trusty stdcall test"
+	default y
+	help
+	  Allow running tests of the Trusty stdcall interface.  Running these
+	  tests is initiated by userspace writing to a sysfs file.
+
+	  This depends on having a test service running on the Trusty side.
+
+	  If you build this as a module, it will be called trusty-test.
+
+config TRUSTY_VIRTIO
+	tristate "Trusty virtio support"
+	select VIRTIO
+	default y
+	help
+	  Enable the Trusty virtio driver, which is responsible for management
+	  and interaction with virtio devices exposed by Trusty.  This driver
+	  requests the virtio device descriptors from Trusty, then parses them
+	  and adds the corresponding virtio devices.
+
+	  If you build this as a module, it will be called trusty-virtio.
+
+config TRUSTY_VIRTIO_IPC
+	tristate "Trusty Virtio IPC driver"
+	depends on TRUSTY_VIRTIO
+	default y
+	help
+	  Enable support for communicating with Trusty services.
+
+	  If you build this as a module, it will be called trusty-ipc.
+
+config TRUSTY_DMA_BUF_FFA_TAG
+	bool "Availability of trusty_dma_buf_get_ffa_tag"
+	default n
+	help
+	  Whether trusty_dma_buf_get_ffa_tag is provided on this platform.
+	  Providing this function will allow the platform to select what tag
+	  should be passed to the SPM when attempting to transfer the buffer
+	  to secure world. The value passed here is implementation defined and
+	  may depend on your SPM.
+
+	  If set to N, a default implementation which returns 0 will be used.
+
+config TRUSTY_DMA_BUF_SHARED_MEM_ID
+	bool "Availability of trusty_dma_buf_get_shared_mem_id"
+	default n
+	help
+	  Whether trusty_dma_buf_get_shared_mem_id is provided on this platform.
+	  Providing this function allows the platform to manage memory
+	  transaction life cycle of DMA bufs independently of Trusty IPC driver.
+	  The latter can query trusty_shared_mem_id_t value allocated for a
+	  given DMA buf using trusty_dma_buf_get_shared_mem_id interface.
+
+	  If set to N, a default implementation which does not allocate any IDs
+	  will be used.
+
+config TRUSTY_CRASH_IS_PANIC
+	bool "When trusty panics, then panic the kernel"
+	help
+	 This option will treat Trusty panics as fatal.  This is useful if
+	 your system cannot recover from Trusty panic/halt and you require
+	 the system to reboot to recover.
+
+	 If N, it will continue to run the kernel, but trusty operations will
+	 return errors.
+
+endif # TRUSTY
+
+endmenu
diff --git a/drivers/trusty/trusty-ipc-trace.h b/drivers/trusty/trusty-ipc-trace.h
new file mode 100644
index 0000000..60d6338
--- /dev/null
+++ b/drivers/trusty/trusty-ipc-trace.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM trusty
+
+#if !defined(_TRUSTY_IPC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRUSTY_IPC_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <uapi/linux/trusty/ipc.h>
+#include <linux/trusty/trusty_ipc.h>
+
+/* One of the requirements of the checkpatch.pl script is to have "parentheses
+ * to shield macro arguments from the effects of operator precedence."
+ * Otherwise the checkpatch.pl script will throw the following checkpatch error:
+ *
+ * "ERROR: Macros with complex values should be enclosed in parentheses"
+ *
+ * Hence extra parentheses are used to avoid the above checkpatch error.
+ * And the extra parentheses are removed using DELETE_PAREN to avoid
+ * compilation errors.
+ */
+#define _DELETE_PAREN(args...)	args
+#define DELETE_PAREN(arg)	_DELETE_PAREN(_DELETE_PAREN arg)
+
+#define TIPC_CHANNEL_STATE_LIST (	\
+	tipc_state(DISCONNECTED)	\
+	tipc_state(CONNECTING)		\
+	tipc_state(CONNECTED)		\
+	tipc_state_end(STALE)		\
+	)
+
+#undef tipc_state
+#undef tipc_state_end
+
+#define tipc_state_define_enum(x)	(TRACE_DEFINE_ENUM(TIPC_##x);)
+#define tipc_state(x)			DELETE_PAREN(tipc_state_define_enum(x))
+#define tipc_state_end(x)		DELETE_PAREN(tipc_state_define_enum(x))
+
+DELETE_PAREN(TIPC_CHANNEL_STATE_LIST)
+
+#undef tipc_state
+#undef tipc_state_end
+
+#define tipc_state(x)		{ TIPC_##x, #x },
+#define tipc_state_end(x)	{ TIPC_##x, #x }
+
+#define tipc_channel_state_name(x)	\
+	__print_symbolic(x, DELETE_PAREN(TIPC_CHANNEL_STATE_LIST))
+
+TRACE_EVENT(trusty_ipc_connect,
+	TP_PROTO(struct tipc_chan *chan, const char *port),
+	TP_ARGS(chan, port),
+	TP_STRUCT__entry(
+		__field(u32, chan)
+		__string(port, port)
+		__field(int, state)
+	),
+	TP_fast_assign(
+		__entry->chan = chan ? chan->local : ~0U;
+		__assign_str(port, port);
+		__entry->state = chan ? chan->state : 0;
+	),
+	TP_printk("chan=%u port=%s state=%s", __entry->chan, __get_str(port),
+		tipc_channel_state_name(__entry->state))
+);
+
+TRACE_EVENT(trusty_ipc_connect_end,
+	TP_PROTO(struct tipc_chan *chan, int err),
+	TP_ARGS(chan, err),
+	TP_STRUCT__entry(
+		__field(u32, chan)
+		__field(int, err)
+		__field(int, state)
+	),
+	TP_fast_assign(
+		__entry->chan = chan ? chan->local : ~0U;
+		__entry->err = err;
+		__entry->state = chan ? chan->state : 0;
+	),
+	TP_printk("chan=%u err=%d state=%s", __entry->chan, __entry->err,
+		tipc_channel_state_name(__entry->state))
+);
+
+#define TIPC_CHANNEL_EVENT_LIST (	\
+	tipc_event(CONNECTED)		\
+	tipc_event(DISCONNECTED)	\
+	tipc_event_end(SHUTDOWN)	\
+	)
+
+#undef tipc_event
+#undef tipc_event_end
+
+#define tipc_event_define_enum(x)	(TRACE_DEFINE_ENUM(TIPC_CHANNEL_##x);)
+#define tipc_event(x)			DELETE_PAREN(tipc_event_define_enum(x))
+#define tipc_event_end(x)		DELETE_PAREN(tipc_event_define_enum(x))
+
+DELETE_PAREN(TIPC_CHANNEL_EVENT_LIST)
+
+#undef tipc_event
+#undef tipc_event_end
+
+#define tipc_event(x)		{ TIPC_CHANNEL_##x, #x },
+#define tipc_event_end(x)	{ TIPC_CHANNEL_##x, #x }
+
+#define tipc_channel_event_name(x)	\
+	__print_symbolic(x, DELETE_PAREN(TIPC_CHANNEL_EVENT_LIST))
+
+TRACE_EVENT(trusty_ipc_handle_event,
+	TP_PROTO(struct tipc_chan *chan, u32 event_id),
+	TP_ARGS(chan, event_id),
+	TP_STRUCT__entry(
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+		__field(u32, event_id)
+	),
+	TP_fast_assign(
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+		__entry->event_id = event_id;
+	),
+	TP_printk("chan=%u srv_name=%s event=%s", __entry->chan, __entry->srv_name,
+		tipc_channel_event_name(__entry->event_id))
+);
+
+TRACE_EVENT(trusty_ipc_write,
+	TP_PROTO(struct tipc_chan *chan,
+		int len_or_err,
+		struct tipc_msg_buf *txbuf,
+		struct trusty_shm *shm),
+	TP_ARGS(chan, len_or_err, txbuf, shm),
+	TP_STRUCT__entry(
+		__field(int, len_or_err)
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+		__field(u64, buf_id)
+		__field(size_t, shm_cnt)
+		__dynamic_array(int, kind_shm, txbuf ? txbuf->shm_cnt : 0)
+	),
+	TP_fast_assign(
+		size_t x;
+
+		__entry->len_or_err = len_or_err;
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+		__entry->buf_id = txbuf ? txbuf->buf_id : ~0ULL;
+		__entry->shm_cnt = txbuf ? txbuf->shm_cnt : 0;
+		if (shm) {
+			for (x = 0; x < __entry->shm_cnt; x++)
+				*((int *)__get_dynamic_array(kind_shm) + x) = shm[x].transfer;
+		}
+	),
+	TP_printk("len_or_err=%d chan=%u srv_name=%s buf_id=0x%llx shm_cnt=%zu kind_shm=%s",
+		__entry->len_or_err, __entry->chan, __entry->srv_name, __entry->buf_id,
+		__entry->shm_cnt, __print_array(__get_dynamic_array(kind_shm),
+		__get_dynamic_array_len(kind_shm) / sizeof(int), sizeof(int)))
+);
+
+TRACE_EVENT(trusty_ipc_read,
+	TP_PROTO(struct tipc_chan *chan),
+	TP_ARGS(chan),
+	TP_STRUCT__entry(
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+	),
+	TP_fast_assign(
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+	),
+	TP_printk("chan=%u srv_name=%s", __entry->chan, __entry->srv_name)
+);
+
+TRACE_EVENT(trusty_ipc_read_end,
+	TP_PROTO(struct tipc_chan *chan,
+		int len_or_err,
+		trusty_shared_mem_id_t buf_id,
+		size_t shm_cnt),
+	TP_ARGS(chan, len_or_err, buf_id, shm_cnt),
+	TP_STRUCT__entry(
+		__field(int, len_or_err)
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+		__field(u64, buf_id)
+		__field(size_t, shm_cnt)
+	),
+	TP_fast_assign(
+		__entry->len_or_err = len_or_err;
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+		__entry->buf_id = buf_id;
+		__entry->shm_cnt = shm_cnt;
+	),
+	TP_printk("len_or_err=%d chan=%u srv_name=%s buf_id=0x%llx shm_cnt=%zu",
+		__entry->len_or_err, __entry->chan, __entry->srv_name,
+		__entry->buf_id, __entry->shm_cnt)
+);
+
+TRACE_EVENT(trusty_ipc_poll,
+	TP_PROTO(struct tipc_chan *chan,
+		unsigned int poll_mask),
+	TP_ARGS(chan, poll_mask),
+	TP_STRUCT__entry(
+		__field(unsigned int, poll_mask)
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+	),
+	TP_fast_assign(
+		__entry->poll_mask = poll_mask;
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+	),
+	TP_printk("poll_mask=%u chan=%u srv_name=%s",
+		__entry->poll_mask, __entry->chan, __entry->srv_name)
+);
+
+/*
+ * tracepoint when a message buffer is received from trusty
+ * and is waiting for its HAL consumer to read it
+ */
+TRACE_EVENT(trusty_ipc_rx,
+	TP_PROTO(struct tipc_chan *chan, struct tipc_msg_buf *rxbuf),
+	TP_ARGS(chan, rxbuf),
+	TP_STRUCT__entry(
+		__field(u32, chan)
+		__array(char, srv_name, MAX_SRV_NAME_LEN)
+		__field(u64, buf_id)
+	),
+	TP_fast_assign(
+		__entry->chan = chan ? chan->local : ~0U;
+		memcpy(__entry->srv_name, chan ? chan->srv_name : "", MAX_SRV_NAME_LEN);
+		__entry->buf_id = rxbuf ? rxbuf->buf_id : ~0ULL;
+	),
+	TP_printk("chan=%u srv_name=%s buf_id=0x%llx", __entry->chan,
+		__entry->srv_name, __entry->buf_id)
+);
+#endif /* _TRUSTY_IPC_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trusty-ipc-trace
+#include <trace/define_trace.h>
diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c
new file mode 100644
index 0000000..b75d8ff
--- /dev/null
+++ b/drivers/trusty/trusty-ipc.c
@@ -0,0 +1,2325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/aio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/dma-buf.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+#include <linux/file.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/trusty_ipc.h>
+
+#include <uapi/linux/trusty/ipc.h>
+
+#include "trusty-ipc-trace.h"
+
+#define MAX_DEVICES			4
+
+#define REPLY_TIMEOUT			5000
+#define TXBUF_TIMEOUT			15000
+
+#define MAX_SRV_NAME_LEN		256
+#define MAX_DEV_NAME_LEN		32
+
+#define DEFAULT_MSG_BUF_SIZE		PAGE_SIZE
+#define DEFAULT_MSG_BUF_ALIGN		PAGE_SIZE
+
+#define TIPC_CTRL_ADDR			53
+#define TIPC_ANY_ADDR			0xFFFFFFFF
+
+#define TIPC_MIN_LOCAL_ADDR		1024
+
+#ifdef CONFIG_COMPAT
+#define TIPC_IOC32_CONNECT	_IOW(TIPC_IOC_MAGIC, 0x80, compat_uptr_t)
+#endif
+
+struct tipc_virtio_dev;
+
+struct tipc_dev_config {
+	u32 msg_buf_max_size;
+	u32 msg_buf_alignment;
+	char dev_name[MAX_DEV_NAME_LEN];
+} __packed;
+
+struct tipc_shm {
+	trusty_shared_mem_id_t obj_id;
+	u64 size;
+	u64 tag;
+};
+
+struct tipc_msg_hdr {
+	u32 src;
+	u32 dst;
+	u16 reserved;
+	u16 shm_cnt;
+	u16 len;
+	u16 flags;
+	u8 data[];
+} __packed;
+
+enum tipc_ctrl_msg_types {
+	TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
+	TIPC_CTRL_MSGTYPE_GO_OFFLINE,
+	TIPC_CTRL_MSGTYPE_CONN_REQ,
+	TIPC_CTRL_MSGTYPE_CONN_RSP,
+	TIPC_CTRL_MSGTYPE_DISC_REQ,
+	TIPC_CTRL_MSGTYPE_RELEASE,
+};
+
+struct tipc_ctrl_msg {
+	u32 type;
+	u32 body_len;
+	u8  body[];
+} __packed;
+
+struct tipc_conn_req_body {
+	char name[MAX_SRV_NAME_LEN];
+} __packed;
+
+struct tipc_conn_rsp_body {
+	u32 target;
+	u32 status;
+	u32 remote;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+} __packed;
+
+struct tipc_disc_req_body {
+	u32 target;
+} __packed;
+
+struct tipc_release_body {
+	trusty_shared_mem_id_t id;
+} __packed;
+
+struct tipc_cdev_node {
+	struct cdev cdev;
+	struct device *dev;
+	unsigned int minor;
+};
+
+enum tipc_device_state {
+	VDS_OFFLINE = 0,
+	VDS_ONLINE,
+	VDS_DEAD,
+};
+
+struct tipc_virtio_dev {
+	struct kref refcount;
+	struct mutex lock; /* protects access to this device */
+	struct virtio_device *vdev;
+	struct virtqueue *rxvq;
+	struct virtqueue *txvq;
+	unsigned int msg_buf_cnt;
+	unsigned int msg_buf_max_cnt;
+	size_t msg_buf_max_sz;
+	unsigned int free_msg_buf_cnt;
+	struct list_head free_buf_list;
+	wait_queue_head_t sendq;
+	struct idr addr_idr;
+	enum tipc_device_state state;
+	struct tipc_cdev_node cdev_node;
+	/* protects shared_handles, dev lock never acquired while held */
+	struct mutex shared_handles_lock;
+	struct rb_root shared_handles;
+	char   cdev_name[MAX_DEV_NAME_LEN];
+};
+
+enum tipc_chan_state {
+	TIPC_DISCONNECTED = 0,
+	TIPC_CONNECTING,
+	TIPC_CONNECTED,
+	TIPC_STALE,
+};
+
+struct tipc_chan {
+	struct mutex lock; /* protects channel state  */
+	struct kref refcount;
+	enum tipc_chan_state state;
+	struct tipc_virtio_dev *vds;
+	const struct tipc_chan_ops *ops;
+	void *ops_arg;
+	u32 remote;
+	u32 local;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+	char srv_name[MAX_SRV_NAME_LEN];
+};
+
+struct tipc_shared_handle {
+	struct rb_node node;
+	struct tipc_shm tipc;
+	struct tipc_virtio_dev *vds;
+	struct dma_buf *dma_buf;
+	bool shared;
+	/*
+	 * Following fields are only used if dma_buf does not own a
+	 * trusty_shared_mem_id_t.
+	 */
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+};
+
+static struct class *tipc_class;
+static unsigned int tipc_major;
+
+static struct virtio_device *default_vdev;
+
+static DEFINE_IDR(tipc_devices);
+static DEFINE_MUTEX(tipc_devices_lock);
+
+static u64 (*dma_buf_get_ffa_tag)(struct dma_buf *dma_buf) = NULL;
+static int (*dma_buf_get_shared_mem_id)(struct dma_buf *dma_buf,
+	trusty_shared_mem_id_t *id) = NULL;
+
+static inline u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf)
+{
+	return dma_buf_get_ffa_tag ? dma_buf_get_ffa_tag(dma_buf) : 0;
+}
+
+static int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
+					    trusty_shared_mem_id_t *id)
+{
+	return  dma_buf_get_shared_mem_id ?
+		dma_buf_get_shared_mem_id(dma_buf, id) : -ENODATA;
+}
+
+static int _match_any(int id, void *p, void *data)
+{
+	return id;
+}
+
+static int _match_data(int id, void *p, void *data)
+{
+	return (p == data);
+}
+
+static void *_alloc_shareable_mem(size_t sz, gfp_t gfp)
+{
+	return alloc_pages_exact(sz, gfp);
+}
+
+static void _free_shareable_mem(size_t sz, void *va)
+{
+	free_pages_exact(va, sz);
+}
+
+static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds,
+					      bool share_write)
+{
+	int ret;
+	struct tipc_msg_buf *mb;
+	size_t sz = ALIGN(vds->msg_buf_max_sz, PAGE_SIZE);
+	pgprot_t pgprot = share_write ? PAGE_KERNEL : PAGE_KERNEL_RO;
+
+	/* allocate tracking structure */
+	mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL);
+	if (!mb)
+		return NULL;
+
+	/* allocate buffer that can be shared with secure world */
+	mb->buf_va = _alloc_shareable_mem(sz, GFP_KERNEL);
+	if (!mb->buf_va)
+		goto err_alloc;
+
+	sg_init_one(&mb->sg, mb->buf_va, sz);
+	ret = trusty_share_memory_compat(vds->vdev->dev.parent->parent,
+					 &mb->buf_id, &mb->sg, 1, pgprot);
+	if (ret) {
+		dev_err(&vds->vdev->dev, "trusty_share_memory failed: %d\n",
+			ret);
+		goto err_share;
+	}
+
+	mb->buf_sz = sz;
+	mb->shm_cnt = 0;
+
+	return mb;
+
+err_share:
+	_free_shareable_mem(sz, mb->buf_va);
+err_alloc:
+	kfree(mb);
+	return NULL;
+}
+
+static void vds_free_msg_buf(struct tipc_virtio_dev *vds,
+			     struct tipc_msg_buf *mb)
+{
+	int ret;
+
+	ret = trusty_reclaim_memory(vds->vdev->dev.parent->parent, mb->buf_id,
+				    &mb->sg, 1);
+	if (WARN_ON(ret)) {
+		dev_err(&vds->vdev->dev,
+			"trusty_revoke_memory failed: %d txbuf %lld\n",
+			ret, mb->buf_id);
+
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+		_free_shareable_mem(mb->buf_sz, mb->buf_va);
+	}
+	kfree(mb);
+}
+
+static void vds_free_msg_buf_list(struct tipc_virtio_dev *vds,
+				  struct list_head *list)
+{
+	struct tipc_msg_buf *mb = NULL;
+
+	mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	while (mb) {
+		list_del(&mb->node);
+		vds_free_msg_buf(vds, mb);
+		mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	}
+}
+
+static inline void mb_reset(struct tipc_msg_buf *mb)
+{
+	mb->wpos = 0;
+	mb->rpos = 0;
+}
+
+static inline void mb_reset_read(struct tipc_msg_buf *mb)
+{
+	mb->rpos = 0;
+}
+
+static void _free_vds(struct kref *kref)
+{
+	struct tipc_virtio_dev *vds =
+		container_of(kref, struct tipc_virtio_dev, refcount);
+	/*
+	 * If this WARN triggers, we're leaking remote memory references.
+	 *
+	 * No need to lock shared_handles_lock. All references to this lock
+	 * should already be gone by this point, since we are freeing it in this
+	 * function.
+	 */
+	WARN_ON(!RB_EMPTY_ROOT(&vds->shared_handles));
+	kfree(vds);
+}
+
+static void _free_chan(struct kref *kref)
+{
+	struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount);
+
+	if (ch->ops && ch->ops->handle_release)
+		ch->ops->handle_release(ch->ops_arg);
+
+	kref_put(&ch->vds->refcount, _free_vds);
+	kfree(ch);
+}
+
+static bool _put_txbuf_locked(struct tipc_virtio_dev *vds,
+			      struct tipc_msg_buf *mb)
+{
+	list_add_tail(&mb->node, &vds->free_buf_list);
+	return vds->free_msg_buf_cnt++ == 0;
+}
+
+static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	if (vds->state != VDS_ONLINE)
+		return  ERR_PTR(-ENODEV);
+
+	if (vds->free_msg_buf_cnt) {
+		/* take it out of free list */
+		mb = list_first_entry(&vds->free_buf_list,
+				      struct tipc_msg_buf, node);
+		list_del(&mb->node);
+		mb->shm_cnt = 0;
+		vds->free_msg_buf_cnt--;
+	} else {
+		if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt)
+			return ERR_PTR(-EAGAIN);
+
+		/* try to allocate it */
+		mb = vds_alloc_msg_buf(vds, false);
+		if (!mb)
+			return ERR_PTR(-ENOMEM);
+
+		vds->msg_buf_cnt++;
+	}
+	return mb;
+}
+
+static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	mutex_lock(&vds->lock);
+	mb = _get_txbuf_locked(vds);
+	mutex_unlock(&vds->lock);
+
+	return mb;
+}
+
+static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb)
+{
+	mutex_lock(&vds->lock);
+	_put_txbuf_locked(vds, mb);
+	wake_up_interruptible(&vds->sendq);
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds,
+					  long timeout)
+{
+	struct tipc_msg_buf *mb;
+
+	mb = _vds_get_txbuf(vds);
+
+	if ((PTR_ERR(mb) == -EAGAIN) && timeout) {
+		DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+		timeout = msecs_to_jiffies(timeout);
+		add_wait_queue(&vds->sendq, &wait);
+		for (;;) {
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+					     timeout);
+			if (!timeout) {
+				mb = ERR_PTR(-ETIMEDOUT);
+				break;
+			}
+
+			if (signal_pending(current)) {
+				mb = ERR_PTR(-ERESTARTSYS);
+				break;
+			}
+
+			mb = _vds_get_txbuf(vds);
+			if (PTR_ERR(mb) != -EAGAIN)
+				break;
+		}
+		remove_wait_queue(&vds->sendq, &wait);
+	}
+
+	if (IS_ERR(mb))
+		return mb;
+
+	if (WARN_ON(!mb))
+		return ERR_PTR(-EINVAL);
+
+	/* reset and reserve space for message header */
+	mb_reset(mb);
+	mb_put_data(mb, sizeof(struct tipc_msg_hdr));
+
+	return mb;
+}
+
+static int vds_queue_txbuf(struct tipc_virtio_dev *vds,
+			   struct tipc_msg_buf *mb)
+{
+	int err;
+	struct scatterlist sg;
+	bool need_notify = false;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		sg_init_one(&sg, mb, mb->wpos);
+		err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL);
+		need_notify = virtqueue_kick_prepare(vds->txvq);
+	} else {
+		err = -ENODEV;
+	}
+	mutex_unlock(&vds->lock);
+
+	if (need_notify)
+		virtqueue_notify(vds->txvq);
+
+	return err;
+}
+
+static int vds_add_channel(struct tipc_virtio_dev *vds,
+			   struct tipc_chan *chan)
+{
+	int ret;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		ret = idr_alloc(&vds->addr_idr, chan,
+				TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1,
+				GFP_KERNEL);
+		if (ret > 0) {
+			chan->local = ret;
+			kref_get(&chan->refcount);
+			ret = 0;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+	mutex_unlock(&vds->lock);
+
+	return ret;
+}
+
+static void vds_del_channel(struct tipc_virtio_dev *vds,
+			    struct tipc_chan *chan)
+{
+	mutex_lock(&vds->lock);
+	if (chan->local) {
+		idr_remove(&vds->addr_idr, chan->local);
+		chan->local = 0;
+		chan->remote = 0;
+		kref_put(&chan->refcount, _free_chan);
+	}
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds,
+					    u32 addr)
+{
+	int id;
+	struct tipc_chan *chan = NULL;
+
+	mutex_lock(&vds->lock);
+	if (addr == TIPC_ANY_ADDR) {
+		id = idr_for_each(&vds->addr_idr, _match_any, NULL);
+		if (id > 0)
+			chan = idr_find(&vds->addr_idr, id);
+	} else {
+		chan = idr_find(&vds->addr_idr, addr);
+	}
+	if (chan)
+		kref_get(&chan->refcount);
+	mutex_unlock(&vds->lock);
+
+	return chan;
+}
+
+static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds,
+					    const struct tipc_chan_ops *ops,
+					    void *ops_arg)
+{
+	int ret;
+	struct tipc_chan *chan = NULL;
+
+	if (!vds)
+		return ERR_PTR(-ENOENT);
+
+	if (!ops)
+		return ERR_PTR(-EINVAL);
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	kref_get(&vds->refcount);
+	chan->vds = vds;
+	chan->ops = ops;
+	chan->ops_arg = ops_arg;
+	mutex_init(&chan->lock);
+	kref_init(&chan->refcount);
+	chan->state = TIPC_DISCONNECTED;
+
+	ret = vds_add_channel(vds, chan);
+	if (ret) {
+		kfree(chan);
+		kref_put(&vds->refcount, _free_vds);
+		return ERR_PTR(ret);
+	}
+
+	return chan;
+}
+
+static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst)
+{
+	struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr));
+
+	hdr->src = src;
+	hdr->dst = dst;
+	hdr->len = mb_avail_data(mb);
+	hdr->flags = 0;
+	hdr->shm_cnt = mb->shm_cnt;
+	hdr->reserved = 0;
+}
+
+static int tipc_shared_handle_new(struct tipc_shared_handle **shared_handle,
+				  struct tipc_virtio_dev *vds)
+{
+	struct tipc_shared_handle *out = kzalloc(sizeof(*out), GFP_KERNEL);
+
+	if (!out)
+		return -ENOMEM;
+
+	out->vds = vds;
+	*shared_handle = out;
+
+	return 0;
+}
+
+static struct device *tipc_shared_handle_dev(struct tipc_shared_handle
+					     *shared_handle)
+{
+	return shared_handle->vds->vdev->dev.parent->parent;
+}
+
+static bool is_same_memory_region(struct tipc_shared_handle *h1,
+				  struct tipc_shared_handle *h2)
+{
+	return h1->tipc.obj_id == h2->tipc.obj_id &&
+			h1->tipc.size == h2->tipc.size &&
+			h1->tipc.tag == h2->tipc.tag &&
+			h1->dma_buf == h2->dma_buf &&
+			h1->shared == h2->shared;
+}
+
+static bool dma_buf_owns_shared_mem_id(struct tipc_shared_handle *h)
+{
+	/* h->shared is true only if dma_buf did not own an shared memory ID */
+	return !h->shared;
+}
+
+/*
+ * Insert @new_handle into the per-device rbtree of active shared handles,
+ * keyed by tipc.obj_id.  Duplicate IDs are tolerated (and expected) when
+ * the dma_buf owns the ID; otherwise a duplicate is a driver bug and the
+ * insertion is skipped with a warning.
+ */
+static void tipc_shared_handle_register(struct tipc_shared_handle
+					*new_handle)
+{
+	struct tipc_virtio_dev *vds = new_handle->vds;
+	struct rb_node **new;
+	struct rb_node *parent = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	/* standard rbtree descent to find the insertion point */
+	new = &vds->shared_handles.rb_node;
+	while (*new) {
+		struct tipc_shared_handle *handle =
+			rb_entry(*new, struct tipc_shared_handle, node);
+		parent = *new;
+		/*
+		 * An obj_id can be registered multiple times if it's owned by a
+		 * dma_buf, because in this case we use the same obj_id across
+		 * multiple memory transfer operations.
+		 */
+		if (handle->tipc.obj_id == new_handle->tipc.obj_id) {
+			if (dma_buf_owns_shared_mem_id(new_handle)) {
+				WARN_ON(!is_same_memory_region(handle,
+							       new_handle));
+			} else {
+				WARN(1, "This handle is already registered");
+				goto already_registered;
+			}
+		}
+
+		/* equal keys descend right, so duplicates are permitted */
+		if (handle->tipc.obj_id > new_handle->tipc.obj_id)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_handle->node, parent, new);
+	rb_insert_color(&new_handle->node, &vds->shared_handles);
+
+already_registered:
+	mutex_unlock(&vds->shared_handles_lock);
+}
+
+/*
+ * Look up the shared handle registered under @obj_id, remove it from the
+ * rbtree and return it.  Returns NULL if no matching handle is tracked.
+ * The caller takes over ownership of the returned handle.
+ */
+static struct tipc_shared_handle *tipc_shared_handle_take(struct tipc_virtio_dev
+							  *vds,
+							  trusty_shared_mem_id_t
+							  obj_id)
+{
+	struct rb_node *node;
+	struct tipc_shared_handle *out = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	node = vds->shared_handles.rb_node;
+	while (node) {
+		struct tipc_shared_handle *handle =
+			rb_entry(node, struct tipc_shared_handle, node);
+		if (obj_id == handle->tipc.obj_id) {
+			/* detach from the tree before handing it back */
+			rb_erase(node, &vds->shared_handles);
+			out = handle;
+			break;
+		} else if (obj_id < handle->tipc.obj_id) {
+			node = node->rb_left;
+		} else {
+			node = node->rb_right;
+		}
+	}
+
+	mutex_unlock(&vds->shared_handles_lock);
+
+	return out;
+}
+
+/*
+ * Release all resources held by @shared_handle: reclaim the memory from
+ * Trusty (if this driver shared it), unmap/detach/put the dma_buf and
+ * free the handle.  Returns 0 on success.  If Trusty refuses to give the
+ * memory back, the handle is intentionally leaked and the error returned,
+ * since freeing it could corrupt memory still in use outside Linux.
+ */
+static int tipc_shared_handle_drop(struct tipc_shared_handle *shared_handle)
+{
+	int ret;
+	struct tipc_virtio_dev *vds = shared_handle->vds;
+	struct device *dev = tipc_shared_handle_dev(shared_handle);
+
+	if (shared_handle->shared) {
+		/*
+		 * If this warning fires, it means this shared handle was still
+		 * in the set of active handles. This shouldn't happen (calling
+		 * code should ensure it is out if the tree) but this serves as
+		 * an extra check before it is released.
+		 *
+		 * However, the take itself should clean this incorrect state up
+		 * by removing the handle from the tree.
+		 *
+		 * This warning is only applicable when registering a handle
+		 * multiple times is not allowed, i.e. when dma_buf doesn't own
+		 * the handle.
+		 */
+		WARN_ON(tipc_shared_handle_take(vds,
+						shared_handle->tipc.obj_id));
+
+		ret = trusty_reclaim_memory(dev,
+					    shared_handle->tipc.obj_id,
+					    shared_handle->sgt->sgl,
+					    shared_handle->sgt->orig_nents);
+		if (ret) {
+			/*
+			 * We can't safely release this, it may still be in
+			 * use outside Linux.
+			 */
+			dev_warn(dev, "Failed to drop handle, leaking...\n");
+			return ret;
+		}
+	}
+
+	/* each stage is optional: the handle may have failed part-way
+	 * through dn_share_fd(), so only undo what was actually set up
+	 */
+	if (shared_handle->sgt)
+		dma_buf_unmap_attachment(shared_handle->attach,
+					 shared_handle->sgt, DMA_BIDIRECTIONAL);
+	if (shared_handle->attach)
+		dma_buf_detach(shared_handle->dma_buf, shared_handle->attach);
+	if (shared_handle->dma_buf)
+		dma_buf_put(shared_handle->dma_buf);
+
+	kfree(shared_handle);
+
+	return 0;
+}
+
+/*****************************************************************************/
+
+/*
+ * Create a new tipc channel on the virtio device that @dev belongs to,
+ * or on the default device when @dev is NULL.  @ops/@ops_arg receive
+ * channel callbacks.  Returns the channel or an ERR_PTR (-ENOENT when
+ * no default device exists).
+ */
+struct tipc_chan *tipc_create_channel(struct device *dev,
+				      const struct tipc_chan_ops *ops,
+				      void *ops_arg)
+{
+	struct virtio_device *vd;
+	struct tipc_chan *chan;
+	struct tipc_virtio_dev *vds;
+
+	mutex_lock(&tipc_devices_lock);
+	if (dev) {
+		vd = container_of(dev, struct virtio_device, dev);
+	} else {
+		vd = default_vdev;
+		if (!vd) {
+			mutex_unlock(&tipc_devices_lock);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+	vds = vd->priv;
+	/* pin vds while we create the channel outside the device lock */
+	kref_get(&vds->refcount);
+	mutex_unlock(&tipc_devices_lock);
+
+	chan = vds_create_channel(vds, ops, ops_arg);
+	/* the channel holds its own reference; drop our temporary one */
+	kref_put(&vds->refcount, _free_vds);
+	return chan;
+}
+EXPORT_SYMBOL(tipc_create_channel);
+
+/* Allocate a message buffer suitable for the channel's rx path. */
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan)
+{
+	return vds_alloc_msg_buf(chan->vds, true);
+}
+EXPORT_SYMBOL(tipc_chan_get_rxbuf);
+
+/* Return an rx message buffer obtained with tipc_chan_get_rxbuf(). */
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_free_msg_buf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_rxbuf);
+
+/*
+ * Get a tx buffer, waiting up to @timeout ms for one to become
+ * available.  Returns an ERR_PTR on timeout or device failure.
+ */
+struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan,
+						 long timeout)
+{
+	return vds_get_txbuf(chan->vds, timeout);
+}
+EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout);
+
+/* Return an unused tx buffer obtained with tipc_chan_get_txbuf_timeout(). */
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_put_txbuf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_txbuf);
+
+/*
+ * Queue @mb for transmission on @chan.  Only valid on a connected
+ * channel; on success the buffer is owned by the tx queue.  Returns
+ * -ENOTCONN / -ESHUTDOWN / -EBADFD depending on channel state.
+ */
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	int err;
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_CONNECTED:
+		/* stamp src/dst addresses before handing to the virtqueue */
+		fill_msg_hdr(mb, chan->local, chan->remote);
+		err = vds_queue_txbuf(chan->vds, mb);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+			       __func__, err);
+		}
+		break;
+	case TIPC_DISCONNECTED:
+	case TIPC_CONNECTING:
+		err = -ENOTCONN;
+		break;
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+	}
+	mutex_unlock(&chan->lock);
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_queue_msg);
+
+
+/*
+ * Send a connection request for service @name on @chan.  The request is
+ * asynchronous: on success the channel moves to TIPC_CONNECTING and the
+ * result arrives later as a control message.  Re-connecting to the same
+ * service while connecting/connected returns 0; a different service
+ * returns -EALREADY or -EISCONN.
+ */
+int tipc_chan_connect(struct tipc_chan *chan, const char *name)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_conn_req_body *body;
+	struct tipc_msg_buf *txbuf;
+
+	trace_trusty_ipc_connect(chan, name);
+
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	/* reserve space for connection request control message */
+	msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+	body = (struct tipc_conn_req_body *)msg->body;
+
+	/* fill message */
+	msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ;
+	msg->body_len  = sizeof(*body);
+
+	/* copy with explicit NUL termination; over-long names are truncated */
+	strncpy(body->name, name, sizeof(body->name));
+	body->name[sizeof(body->name)-1] = '\0';
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_DISCONNECTED:
+		/* save service name we are connecting to */
+		strcpy(chan->srv_name, body->name);
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		} else {
+			chan->state = TIPC_CONNECTING;
+			txbuf = NULL; /* prevents discarding buffer */
+		}
+		break;
+	case TIPC_CONNECTED:
+	case TIPC_CONNECTING:
+		/* check if we are trying to connect to the same service */
+		if (strcmp(chan->srv_name, body->name) == 0)
+			err = 0;
+		else
+			if (chan->state == TIPC_CONNECTING)
+				err = -EALREADY; /* in progress */
+			else
+				err = -EISCONN;  /* already connected */
+		break;
+
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+		break;
+	}
+	mutex_unlock(&chan->lock);
+
+	/* txbuf is non-NULL whenever it was not handed to the tx queue */
+	if (txbuf)
+		tipc_chan_put_txbuf(chan, txbuf); /* discard it */
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_connect);
+
+/*
+ * Shut down @chan: if it is connected/connecting, send a disconnect
+ * request to the remote side.  The channel is unconditionally marked
+ * TIPC_STALE afterwards.  Returns -ENOTCONN if there was nothing to
+ * disconnect, otherwise the queueing result.
+ */
+int tipc_chan_shutdown(struct tipc_chan *chan)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_disc_req_body *body;
+	struct tipc_msg_buf *txbuf = NULL;
+
+	/* get tx buffer */
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	mutex_lock(&chan->lock);
+	if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) {
+		/* reserve space for disconnect request control message */
+		msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+		body = (struct tipc_disc_req_body *)msg->body;
+
+		msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ;
+		msg->body_len = sizeof(*body);
+		body->target = chan->remote;
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		}
+	} else {
+		err = -ENOTCONN;
+	}
+	/* channel is unusable from now on regardless of the outcome */
+	chan->state = TIPC_STALE;
+	mutex_unlock(&chan->lock);
+
+	if (err) {
+		/* release buffer */
+		tipc_chan_put_txbuf(chan, txbuf);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_shutdown);
+
+/*
+ * Remove @chan from its device and drop the caller's reference; the
+ * channel is freed once all outstanding references are released.
+ */
+void tipc_chan_destroy(struct tipc_chan *chan)
+{
+	vds_del_channel(chan->vds, chan);
+	kref_put(&chan->refcount, _free_chan);
+}
+EXPORT_SYMBOL(tipc_chan_destroy);
+
+/***************************************************************************/
+
+/* Per-open-file state for the /dev/trusty-ipc-* character device. */
+struct tipc_dn_chan {
+	int state; /* TIPC_* channel state; protected by lock */
+	struct mutex lock; /* protects rx_msg_queue list and channel state */
+	struct tipc_chan *chan; /* underlying tipc channel */
+	wait_queue_head_t readq; /* readers sleep here for incoming data */
+	struct completion reply_comp; /* fired on connect/disconnect reply */
+	struct list_head rx_msg_queue; /* received, not-yet-read buffers */
+};
+
+/*
+ * Wait up to @timeout ms for the remote side to answer a connection
+ * request.  Returns 0 on success (connected, or disconnected with
+ * queued data still readable), -ETIMEDOUT on timeout (channel marked
+ * stale), a negative value if interrupted, or -ENOTCONN/-EIO.
+ */
+static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
+{
+	int ret;
+
+	ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
+					msecs_to_jiffies(timeout));
+	/* negative: interrupted by a signal; report as-is */
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dn->lock);
+	if (!ret) {
+		/* no reply from remote */
+		dn->state = TIPC_STALE;
+		ret = -ETIMEDOUT;
+	} else {
+		/* got reply */
+		if (dn->state == TIPC_CONNECTED)
+			ret = 0;
+		else if (dn->state == TIPC_DISCONNECTED)
+			if (!list_empty(&dn->rx_msg_queue))
+				ret = 0;
+			else
+				ret = -ENOTCONN;
+		else
+			ret = -EIO;
+	}
+	mutex_unlock(&dn->lock);
+
+	return ret;
+}
+
+/*
+ * Channel rx callback: take ownership of @rxbuf, queue it for readers
+ * and return a replacement buffer for the virtqueue.  If no replacement
+ * can be allocated (or the channel is not connected), the incoming
+ * buffer itself is handed back, effectively discarding the message.
+ */
+static struct tipc_msg_buf *dn_handle_msg(void *data,
+					  struct tipc_msg_buf *rxbuf)
+{
+	struct tipc_dn_chan *dn = data;
+	struct tipc_msg_buf *newbuf = rxbuf;
+
+	mutex_lock(&dn->lock);
+	if (dn->state == TIPC_CONNECTED) {
+		/* buffer received from trusty */
+		trace_trusty_ipc_rx(dn->chan, rxbuf);
+		/* get new buffer */
+		newbuf = tipc_chan_get_rxbuf(dn->chan);
+		if (newbuf) {
+			/* queue an old buffer and return a new one */
+			list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
+			wake_up_interruptible(&dn->readq);
+		} else {
+			/*
+			 * return an old buffer effectively discarding
+			 * incoming message
+			 */
+			dev_err(&dn->chan->vds->vdev->dev,
+				"%s: discard incoming message\n", __func__);
+			newbuf = rxbuf;
+		}
+	}
+	mutex_unlock(&dn->lock);
+
+	return newbuf;
+}
+
+/* Channel event: mark the node connected and wake connect() waiters. */
+static void dn_connected(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+	dn->state = TIPC_CONNECTED;
+
+	/* complete all pending  */
+	complete(&dn->reply_comp);
+
+	mutex_unlock(&dn->lock);
+}
+
+/*
+ * Channel event: mark the node disconnected, wake connect() waiters
+ * and all readers so they can observe the state change.
+ */
+static void dn_disconnected(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+	dn->state = TIPC_DISCONNECTED;
+
+	/* complete all pending  */
+	complete(&dn->reply_comp);
+
+	/* wakeup all readers */
+	wake_up_interruptible_all(&dn->readq);
+
+	mutex_unlock(&dn->lock);
+}
+
+/*
+ * Channel event (or release path): mark the node stale and wake
+ * everyone blocked on it; no further traffic is possible.
+ */
+static void dn_shutdown(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+
+	/* set state to STALE */
+	dn->state = TIPC_STALE;
+
+	/* complete all pending  */
+	complete(&dn->reply_comp);
+
+	/* wakeup all readers */
+	wake_up_interruptible_all(&dn->readq);
+
+	mutex_unlock(&dn->lock);
+}
+
+/* Channel event callback: dispatch TIPC_CHANNEL_* events to handlers. */
+static void dn_handle_event(void *data, int event)
+{
+	struct tipc_dn_chan *dn = data;
+	trace_trusty_ipc_handle_event(dn->chan, event);
+
+	switch (event) {
+	case TIPC_CHANNEL_SHUTDOWN:
+		dn_shutdown(dn);
+		break;
+
+	case TIPC_CHANNEL_DISCONNECTED:
+		dn_disconnected(dn);
+		break;
+
+	case TIPC_CHANNEL_CONNECTED:
+		dn_connected(dn);
+		break;
+
+	default:
+		dev_err(&dn->chan->vds->vdev->dev,
+			"%s: unhandled event %d\n", __func__, event);
+		break;
+	}
+}
+
+/* Channel release callback: free the tipc_dn_chan allocated in tipc_open(). */
+static void dn_handle_release(void *data)
+{
+	kfree(data);
+}
+
+/* Channel callbacks used by the character-device node interface. */
+static const struct tipc_chan_ops _dn_ops = {
+	.handle_msg = dn_handle_msg,
+	.handle_event = dn_handle_event,
+	.handle_release = dn_handle_release,
+};
+
+/* Map an embedded cdev / cdev_node back to its containing structure. */
+#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev)
+#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node)
+
+/*
+ * Verify that @cdn is still registered in tipc_devices and, if so,
+ * return its tipc_virtio_dev with an extra reference held.  Returns
+ * NULL if the device has gone away.
+ */
+static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn)
+{
+	int ret;
+	struct tipc_virtio_dev *vds = NULL;
+
+	mutex_lock(&tipc_devices_lock);
+	/* _match_data makes idr_for_each return nonzero iff cdn is present */
+	ret = idr_for_each(&tipc_devices, _match_data, cdn);
+	if (ret) {
+		vds = cdn_to_vds(cdn);
+		kref_get(&vds->refcount);
+	}
+	mutex_unlock(&tipc_devices_lock);
+	return vds;
+}
+
+/*
+ * Character device open: allocate a tipc_dn_chan node, create a channel
+ * on the backing virtio device and stash the node in file private data.
+ * The node starts in TIPC_DISCONNECTED until an explicit connect ioctl.
+ */
+static int tipc_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	struct tipc_virtio_dev *vds;
+	struct tipc_dn_chan *dn;
+	struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev);
+
+	vds = _dn_lookup_vds(cdn);
+	if (!vds) {
+		ret = -ENOENT;
+		goto err_vds_lookup;
+	}
+
+	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+	if (!dn) {
+		ret = -ENOMEM;
+		goto err_alloc_chan;
+	}
+
+	mutex_init(&dn->lock);
+	init_waitqueue_head(&dn->readq);
+	init_completion(&dn->reply_comp);
+	INIT_LIST_HEAD(&dn->rx_msg_queue);
+
+	dn->state = TIPC_DISCONNECTED;
+
+	dn->chan = vds_create_channel(vds, &_dn_ops, dn);
+	if (IS_ERR(dn->chan)) {
+		ret = PTR_ERR(dn->chan);
+		goto err_create_chan;
+	}
+
+	filp->private_data = dn;
+	/* the channel holds its own vds reference; drop the lookup ref */
+	kref_put(&vds->refcount, _free_vds);
+	return 0;
+
+err_create_chan:
+	kfree(dn);
+err_alloc_chan:
+	kref_put(&vds->refcount, _free_vds);
+err_vds_lookup:
+	return ret;
+}
+
+
+/*
+ * TIPC_IOC_CONNECT: copy the service name from user space, send a
+ * connection request and block until the remote replies (or times out).
+ */
+static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name)
+{
+	int ret;
+	char name[MAX_SRV_NAME_LEN];
+
+	/* copy in service name from user space */
+	ret = strncpy_from_user(name, usr_name, sizeof(name));
+	if (ret < 0)
+		return ret;
+	/* strncpy_from_user returning the full size means no NUL was seen */
+	if (ret == sizeof(name))
+		return -ENAMETOOLONG;
+
+	/* send connect request */
+	ret = tipc_chan_connect(dn->chan, name);
+	if (ret)
+		goto err_handle;
+
+	/* and wait for reply */
+	ret = dn_wait_for_reply(dn, REPLY_TIMEOUT);
+
+err_handle:
+	trace_trusty_ipc_connect_end(dn->chan, ret);
+	return ret;
+}
+
+/*
+ * Prepare the memory behind @fd (a dma_buf fd) for transfer to Trusty.
+ *
+ * For TRUSTY_SHARE/TRUSTY_LEND the buffer is attached, mapped and
+ * transferred, producing a new shared memory ID.  For
+ * TRUSTY_SEND_SECURE the dma_buf must already carry a preallocated
+ * mem_id.  On success *@out holds a handle not yet registered in the
+ * device's handle tree; on failure all partial state is dropped.
+ */
+static int dn_share_fd(struct tipc_dn_chan *dn, int fd,
+		       enum transfer_kind transfer_kind,
+		       struct tipc_shared_handle **out)
+{
+	int ret = 0;
+	struct tipc_shared_handle *shared_handle = NULL;
+	struct file *file = NULL;
+	struct device *dev = &dn->chan->vds->vdev->dev;
+	bool writable = false;
+	pgprot_t prot;
+	u64 tag = 0;
+	trusty_shared_mem_id_t mem_id;
+	bool lend;
+
+	if (dn->state != TIPC_CONNECTED) {
+		dev_dbg(dev, "Tried to share fd while not connected\n");
+		return -ENOTCONN;
+	}
+
+	file = fget(fd);
+	if (!file) {
+		dev_dbg(dev, "Invalid fd (%d)\n", fd);
+		return -EBADF;
+	}
+
+	if (!(file->f_mode & FMODE_READ)) {
+		dev_dbg(dev, "Cannot create write-only mapping\n");
+		fput(file);
+		return -EACCES;
+	}
+
+	/* derive the Trusty-side protection from the fd's access mode */
+	writable = file->f_mode & FMODE_WRITE;
+	prot = writable ? PAGE_KERNEL : PAGE_KERNEL_RO;
+	fput(file);
+	file = NULL;
+
+	ret = tipc_shared_handle_new(&shared_handle, dn->chan->vds);
+	if (ret)
+		return ret;
+
+	shared_handle->dma_buf = dma_buf_get(fd);
+	if (IS_ERR(shared_handle->dma_buf)) {
+		ret = PTR_ERR(shared_handle->dma_buf);
+		shared_handle->dma_buf = NULL;
+		dev_dbg(dev, "Unable to get dma buf from fd (%d)\n", ret);
+		goto cleanup_handle;
+	}
+
+	tag = trusty_dma_buf_get_ffa_tag(shared_handle->dma_buf);
+	ret = trusty_dma_buf_get_shared_mem_id(shared_handle->dma_buf, &mem_id);
+	/*
+	 * Buffers with a preallocated mem_id should only be sent to Trusty
+	 * using TRUSTY_SEND_SECURE. And conversely, TRUSTY_SEND_SECURE should
+	 * only be used to send buffers with preallocated mem_id.
+	 */
+	if (!ret) {
+		/* Use shared memory ID owned by dma_buf */
+		if (transfer_kind != TRUSTY_SEND_SECURE) {
+			dev_err(dev, "transfer_kind: %d, must be TRUSTY_SEND_SECURE\n",
+				transfer_kind);
+			ret = -EINVAL;
+			goto cleanup_handle;
+		}
+		goto mem_id_allocated;
+	}
+
+	if (ret != -ENODATA) {
+		dev_err(dev, "dma_buf can't be transferred (%d)\n", ret);
+		goto cleanup_handle;
+	}
+
+	if (transfer_kind == TRUSTY_SEND_SECURE) {
+		/* ret is -ENODATA here and is propagated to the caller */
+		dev_err(dev, "No mem ID for TRUSTY_SEND_SECURE\n");
+		goto cleanup_handle;
+	}
+	lend = (transfer_kind == TRUSTY_LEND);
+
+	shared_handle->attach = dma_buf_attach(shared_handle->dma_buf, dev);
+	if (IS_ERR(shared_handle->attach)) {
+		ret = PTR_ERR(shared_handle->attach);
+		shared_handle->attach = NULL;
+		dev_dbg(dev, "Unable to attach to dma_buf (%d)\n", ret);
+		goto cleanup_handle;
+	}
+
+	shared_handle->sgt = dma_buf_map_attachment(shared_handle->attach,
+						    DMA_BIDIRECTIONAL);
+	if (IS_ERR(shared_handle->sgt)) {
+		ret = PTR_ERR(shared_handle->sgt);
+		shared_handle->sgt = NULL;
+		dev_dbg(dev, "Failed to match attachment (%d)\n", ret);
+		goto cleanup_handle;
+	}
+
+	ret = trusty_transfer_memory(tipc_shared_handle_dev(shared_handle),
+				     &mem_id, shared_handle->sgt->sgl,
+				     shared_handle->sgt->orig_nents, prot, tag,
+				     lend);
+
+	if (ret < 0) {
+		dev_dbg(dev, "Transferring memory failed: %d\n", ret);
+		/*
+		 * The handle now has a sgt containing the pages, so we no
+		 * longer need to clean up the pages directly.
+		 */
+		goto cleanup_handle;
+	}
+	/* mark so the drop path knows to reclaim the memory from Trusty */
+	shared_handle->shared = true;
+
+mem_id_allocated:
+	shared_handle->tipc.obj_id = mem_id;
+	shared_handle->tipc.size = shared_handle->dma_buf->size;
+	shared_handle->tipc.tag = tag;
+	*out = shared_handle;
+	return 0;
+
+cleanup_handle:
+	tipc_shared_handle_drop(shared_handle);
+	return ret;
+}
+
+/*
+ * Copy the user data described by @iter into @txbuf.  Returns the
+ * number of bytes copied, -EMSGSIZE if the message does not fit, or
+ * -EFAULT on a failed user copy.
+ */
+static ssize_t txbuf_write_iter(struct tipc_msg_buf *txbuf,
+				struct iov_iter *iter)
+{
+	size_t data_len = iov_iter_count(iter);
+
+	/* reject a message that cannot fit in the tx buffer */
+	if (data_len > mb_avail_space(txbuf))
+		return -EMSGSIZE;
+
+	/* reserve space in the buffer, then fill it from the iterator */
+	if (copy_from_iter(mb_put_data(txbuf, data_len), data_len, iter) !=
+	    data_len)
+		return -EFAULT;
+
+	return data_len;
+}
+
+/*
+ * Append the wire representation (struct tipc_shm) of @shm_cnt shared
+ * handles to @txbuf and bump the buffer's handle count.  Returns the
+ * number of bytes appended or -EMSGSIZE if they do not fit.
+ */
+static ssize_t txbuf_write_handles(struct tipc_msg_buf *txbuf,
+				   struct tipc_shared_handle **shm_handles,
+				   size_t shm_cnt)
+{
+	size_t idx;
+
+	/* message length */
+	size_t len = shm_cnt * sizeof(struct tipc_shm);
+
+	/* check available space */
+	if (len > mb_avail_space(txbuf))
+		return -EMSGSIZE;
+
+	/* copy over handles */
+	for (idx = 0; idx < shm_cnt; idx++) {
+		memcpy(mb_put_data(txbuf, sizeof(struct tipc_shm)),
+		       &shm_handles[idx]->tipc,
+		       sizeof(struct tipc_shm));
+	}
+
+	txbuf->shm_cnt += shm_cnt;
+
+	return len;
+}
+
+/*
+ * TIPC_IOC_SEND_MSG: copy in the request, share/lend any referenced
+ * memory fds with Trusty, build a tx buffer from the user iovec and
+ * queue it on the channel.  Returns the number of data bytes sent on
+ * success or a negative error code.
+ */
+static long filp_send_ioctl(struct file *filp,
+			    const struct tipc_send_msg_req __user *arg)
+{
+	struct tipc_send_msg_req req;
+	struct iovec fast_iovs[UIO_FASTIOV];
+	struct iovec *iov = fast_iovs;
+	struct iov_iter iter;
+	struct trusty_shm *shm = NULL;
+	struct tipc_shared_handle **shm_handles = NULL;
+	int shm_idx = 0;
+	int release_idx;
+	struct tipc_dn_chan *dn = filp->private_data;
+	struct tipc_virtio_dev *vds = dn->chan->vds;
+	struct device *dev = &vds->vdev->dev;
+	long timeout = TXBUF_TIMEOUT;
+	struct tipc_msg_buf *txbuf = NULL;
+	long ret = 0;
+	ssize_t data_len = 0;
+	ssize_t shm_len = 0;
+
+	if (copy_from_user(&req, arg, sizeof(req)))
+		return -EFAULT;
+
+	/* the wire header carries shm_cnt in a u16 */
+	if (req.shm_cnt > U16_MAX)
+		return -E2BIG;
+
+	shm = kmalloc_array(req.shm_cnt, sizeof(*shm), GFP_KERNEL);
+	if (!shm)
+		return -ENOMEM;
+
+	shm_handles = kmalloc_array(req.shm_cnt, sizeof(*shm_handles),
+				    GFP_KERNEL);
+	if (!shm_handles) {
+		ret = -ENOMEM;
+		goto shm_handles_alloc_failed;
+	}
+
+	if (copy_from_user(shm, u64_to_user_ptr(req.shm),
+			   req.shm_cnt * sizeof(struct trusty_shm))) {
+		ret = -EFAULT;
+		goto load_shm_args_failed;
+	}
+
+	ret = import_iovec(WRITE, u64_to_user_ptr(req.iov), req.iov_cnt,
+			   ARRAY_SIZE(fast_iovs), &iov, &iter);
+	if (ret < 0) {
+		dev_dbg(dev, "Failed to import iovec\n");
+		goto iov_import_failed;
+	}
+
+	for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++) {
+		switch (shm[shm_idx].transfer) {
+		case TRUSTY_SHARE:
+		case TRUSTY_LEND:
+		case TRUSTY_SEND_SECURE:
+			break;
+		default:
+			dev_err(dev, "Unknown transfer type: 0x%x\n",
+				shm[shm_idx].transfer);
+			/*
+			 * ret still holds the non-negative import_iovec()
+			 * result here; without this assignment an unknown
+			 * transfer type would be reported to user space as
+			 * success.
+			 */
+			ret = -EINVAL;
+			goto shm_share_failed;
+		}
+		ret = dn_share_fd(dn, shm[shm_idx].fd, shm[shm_idx].transfer,
+				  &shm_handles[shm_idx]);
+		if (ret) {
+			dev_dbg(dev, "Forwarding memory failed\n");
+			goto shm_share_failed;
+		}
+	}
+
+	if (filp->f_flags & O_NONBLOCK)
+		timeout = 0;
+
+	txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
+	if (IS_ERR(txbuf)) {
+		dev_dbg(dev, "Failed to get txbuffer\n");
+		ret = PTR_ERR(txbuf);
+		goto get_txbuf_failed;
+	}
+
+	data_len = txbuf_write_iter(txbuf, &iter);
+	if (data_len < 0) {
+		ret = data_len;
+		goto txbuf_write_failed;
+	}
+
+	shm_len = txbuf_write_handles(txbuf, shm_handles, req.shm_cnt);
+	if (shm_len < 0) {
+		ret = shm_len;
+		goto txbuf_write_failed;
+	}
+
+	/*
+	 * These need to be added to the index before queueing the message.
+	 * As soon as the message is sent, we may receive a message back from
+	 * Trusty saying it's no longer in use, and the shared_handle needs
+	 * to be there when that happens.
+	 */
+	for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++)
+		tipc_shared_handle_register(shm_handles[shm_idx]);
+
+	trace_trusty_ipc_write(dn->chan, ret, NULL, NULL);
+
+	ret = tipc_chan_queue_msg(dn->chan, txbuf);
+
+	if (ret)
+		goto queue_failed;
+
+	ret = data_len;
+
+common_cleanup:
+	/* import_iovec() sets iov to NULL when the fast array was used */
+	kfree(iov);
+iov_import_failed:
+load_shm_args_failed:
+	kfree(shm_handles);
+shm_handles_alloc_failed:
+	kfree(shm);
+
+	if (ret)
+		trace_trusty_ipc_write(dn->chan, ret, txbuf, shm);
+	return ret;
+
+queue_failed:
+	/* Trusty never saw the message; pull the handles back out */
+	for (release_idx = 0; release_idx < req.shm_cnt; release_idx++)
+		tipc_shared_handle_take(vds,
+					shm_handles[release_idx]->tipc.obj_id);
+txbuf_write_failed:
+	tipc_chan_put_txbuf(dn->chan, txbuf);
+get_txbuf_failed:
+shm_share_failed:
+	/* drop handles created so far; shm_idx is the first NOT created */
+	for (shm_idx--; shm_idx >= 0; shm_idx--)
+		tipc_shared_handle_drop(shm_handles[shm_idx]);
+
+	goto common_cleanup;
+}
+
+/* Native ioctl entry point: dispatch CONNECT and SEND_MSG requests. */
+static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct tipc_dn_chan *dn = filp->private_data;
+
+	switch (cmd) {
+	case TIPC_IOC_CONNECT:
+		return dn_connect_ioctl(dn, (char __user *)arg);
+	case TIPC_IOC_SEND_MSG:
+		return filp_send_ioctl(filp,
+				       (const struct tipc_send_msg_req __user *)
+				       arg);
+	default:
+		dev_dbg(&dn->chan->vds->vdev->dev,
+			"Unhandled ioctl cmd: 0x%x\n", cmd);
+		return -ENOTTY;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat ioctl: translate the compat command and pointer, then
+ * forward to tipc_ioctl().  Only CONNECT has a compat variant here.
+ */
+static long tipc_compat_ioctl(struct file *filp,
+			      unsigned int cmd, unsigned long arg)
+{
+	struct tipc_dn_chan *dn = filp->private_data;
+
+	switch (cmd) {
+	case TIPC_IOC32_CONNECT:
+		cmd = TIPC_IOC_CONNECT;
+		break;
+	default:
+		dev_dbg(&dn->chan->vds->vdev->dev,
+			"Unhandled compat ioctl command: 0x%x\n", cmd);
+		return -ENOTTY;
+	}
+	return tipc_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+/*
+ * Wait condition for readers: progress is possible when data is queued
+ * or the channel has left the CONNECTED state (so the wait must end
+ * and the caller can report the state-based error).
+ */
+static inline bool _got_rx(struct tipc_dn_chan *dn)
+{
+	return dn->state != TIPC_CONNECTED || !list_empty(&dn->rx_msg_queue);
+}
+
+/*
+ * read() path: dequeue the oldest received message and copy it to the
+ * user iovec.  Blocks (unless O_NONBLOCK) while the channel is
+ * connected and the queue is empty; a disconnected/stale channel
+ * yields -ENOTCONN/-ESHUTDOWN.  A message larger than the iovec
+ * returns -EMSGSIZE and stays queued.
+ */
+static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	ssize_t ret;
+	size_t len;
+	trusty_shared_mem_id_t buf_id = ~0ULL;
+	size_t shm_cnt = 0;
+	struct tipc_msg_buf *mb = NULL;
+	struct file *filp = iocb->ki_filp;
+	struct tipc_dn_chan *dn = filp->private_data;
+
+	mutex_lock(&dn->lock);
+
+	trace_trusty_ipc_read(dn->chan);
+
+	while (list_empty(&dn->rx_msg_queue)) {
+		if (dn->state != TIPC_CONNECTED) {
+			if (dn->state == TIPC_CONNECTING)
+				ret = -ENOTCONN;
+			else if (dn->state == TIPC_DISCONNECTED)
+				ret = -ENOTCONN;
+			else if (dn->state == TIPC_STALE)
+				ret = -ESHUTDOWN;
+			else
+				ret = -EBADFD;
+			goto out;
+		}
+
+		/* drop the lock while sleeping so rx can make progress */
+		mutex_unlock(&dn->lock);
+
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(dn->readq, _got_rx(dn)))
+			return -ERESTARTSYS;
+
+		/* re-acquire and re-check: state may have changed meanwhile */
+		mutex_lock(&dn->lock);
+	}
+
+	mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);
+
+	buf_id = mb->buf_id;
+	shm_cnt = mb->shm_cnt;
+	len = mb_avail_data(mb);
+	if (len > iov_iter_count(iter)) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* message fully consumed: unlink and recycle the buffer */
+	ret = len;
+	list_del(&mb->node);
+	tipc_chan_put_rxbuf(dn->chan, mb);
+
+out:
+	trace_trusty_ipc_read_end(dn->chan, ret, buf_id, shm_cnt);
+	mutex_unlock(&dn->lock);
+	return ret;
+}
+
+/*
+ * write() path: grab a tx buffer (non-blocking if O_NONBLOCK), fill it
+ * from the user iovec and queue it on the channel.  Returns the number
+ * of bytes written or a negative error.
+ */
+static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct file *filp = iocb->ki_filp;
+	struct tipc_dn_chan *dn = filp->private_data;
+	long timeout = TXBUF_TIMEOUT;
+	struct tipc_msg_buf *txbuf = NULL;
+	ssize_t ret = 0;
+	ssize_t len = 0;
+
+	if (filp->f_flags & O_NONBLOCK)
+		timeout = 0;
+
+	txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
+
+	if (IS_ERR(txbuf)) {
+		ret = PTR_ERR(txbuf);
+		goto exit_out;
+	}
+
+	len = txbuf_write_iter(txbuf, iter);
+	if (len < 0) {
+		ret = len;
+		goto err_out;
+	}
+
+	/* queue message */
+	ret = tipc_chan_queue_msg(dn->chan, txbuf);
+	if (ret)
+		goto err_out;
+
+	trace_trusty_ipc_write(dn->chan, len, txbuf, NULL);
+
+	return len;
+
+err_out:
+	/* buffer was never queued; hand it back to the pool */
+	tipc_chan_put_txbuf(dn->chan, txbuf);
+exit_out:
+	trace_trusty_ipc_write(dn->chan, ret, NULL, NULL);
+	return ret;
+}
+
+/*
+ * poll() path: readable when messages are queued, always writable,
+ * EPOLLERR once the channel is no longer connected.
+ */
+static __poll_t tipc_poll(struct file *filp, poll_table *wait)
+{
+	__poll_t mask = 0;
+	struct tipc_dn_chan *dn = filp->private_data;
+
+	mutex_lock(&dn->lock);
+
+	poll_wait(filp, &dn->readq, wait);
+
+	/* Writes always succeed for now */
+	mask |= EPOLLOUT | EPOLLWRNORM;
+
+	if (!list_empty(&dn->rx_msg_queue))
+		mask |= EPOLLIN | EPOLLRDNORM;
+
+	if (dn->state != TIPC_CONNECTED)
+		mask |= EPOLLERR;
+
+	mutex_unlock(&dn->lock);
+
+	trace_trusty_ipc_poll(dn->chan, mask);
+	return mask;
+}
+
+
+/*
+ * Character device release: tear down the node's channel.  The
+ * tipc_dn_chan itself is freed later via the channel's
+ * handle_release callback (dn_handle_release).
+ */
+static int tipc_release(struct inode *inode, struct file *filp)
+{
+	struct tipc_dn_chan *dn = filp->private_data;
+
+	dn_shutdown(dn);
+
+	/* free all pending buffers */
+	vds_free_msg_buf_list(dn->chan->vds, &dn->rx_msg_queue);
+
+	/* shutdown channel  */
+	tipc_chan_shutdown(dn->chan);
+
+	/* and destroy it */
+	tipc_chan_destroy(dn->chan);
+
+	return 0;
+}
+
+/* File operations for the trusty-ipc character device nodes. */
+static const struct file_operations tipc_fops = {
+	.open		= tipc_open,
+	.release	= tipc_release,
+	.unlocked_ioctl	= tipc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= tipc_compat_ioctl,
+#endif
+	.read_iter	= tipc_read_iter,
+	.write_iter	= tipc_write_iter,
+	.poll		= tipc_poll,
+	.owner		= THIS_MODULE,
+};
+
+/*****************************************************************************/
+
+/* Forward @event to the channel's owner; event 0 means "no event". */
+static void chan_trigger_event(struct tipc_chan *chan, int event)
+{
+	if (event)
+		chan->ops->handle_event(chan->ops_arg, event);
+}
+
+/* Detach and free every buffer still sitting unused on virtqueue @vq. */
+static void _cleanup_vq(struct tipc_virtio_dev *vds, struct virtqueue *vq)
+{
+	struct tipc_msg_buf *mb;
+
+	while ((mb = virtqueue_detach_unused_buf(vq)) != NULL)
+		vds_free_msg_buf(vds, mb);
+}
+
+/*
+ * Allocate a minor number, register the character device and create the
+ * "trusty-ipc-<name>" device node under @parent.  On failure each step
+ * is unwound in reverse order.
+ */
+static int _create_cdev_node(struct device *parent,
+			     struct tipc_cdev_node *cdn,
+			     const char *name)
+{
+	int ret;
+	dev_t devt;
+
+	if (!name) {
+		dev_dbg(parent, "%s: cdev name has to be provided\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* allocate minor */
+	ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES, GFP_KERNEL);
+	if (ret < 0) {
+		dev_dbg(parent, "%s: failed (%d) to get id\n",
+			__func__, ret);
+		return ret;
+	}
+
+	cdn->minor = ret;
+	cdev_init(&cdn->cdev, &tipc_fops);
+	cdn->cdev.owner = THIS_MODULE;
+
+	/* Add character device */
+	devt = MKDEV(tipc_major, cdn->minor);
+	ret = cdev_add(&cdn->cdev, devt, 1);
+	if (ret) {
+		dev_dbg(parent, "%s: cdev_add failed (%d)\n",
+			__func__, ret);
+		goto err_add_cdev;
+	}
+
+	/* Create a device node */
+	cdn->dev = device_create(tipc_class, parent,
+				 devt, NULL, "trusty-ipc-%s", name);
+	if (IS_ERR(cdn->dev)) {
+		ret = PTR_ERR(cdn->dev);
+		dev_dbg(parent, "%s: device_create failed: %d\n",
+			__func__, ret);
+		goto err_device_create;
+	}
+
+	return 0;
+
+err_device_create:
+	cdn->dev = NULL;
+	cdev_del(&cdn->cdev);
+err_add_cdev:
+	idr_remove(&tipc_devices, cdn->minor);
+	return ret;
+}
+
+/*
+ * Publish the device: make it the default vdev if none exists yet, and
+ * create its character device node (once) when a cdev name was
+ * configured.  Each published role pins a vds reference.
+ */
+static void create_cdev_node(struct tipc_virtio_dev *vds,
+			     struct tipc_cdev_node *cdn)
+{
+	int err;
+
+	mutex_lock(&tipc_devices_lock);
+
+	if (!default_vdev) {
+		kref_get(&vds->refcount);
+		default_vdev = vds->vdev;
+	}
+
+	/* only create the node once and only if a name is configured */
+	if (vds->cdev_name[0] && !cdn->dev) {
+		kref_get(&vds->refcount);
+		err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name);
+		if (err) {
+			dev_err(&vds->vdev->dev,
+				"failed (%d) to create cdev node\n", err);
+			kref_put(&vds->refcount, _free_vds);
+		}
+	}
+	mutex_unlock(&tipc_devices_lock);
+}
+
+/*
+ * Undo create_cdev_node(): remove the device node and minor, and clear
+ * the default-vdev slot if this device held it.  Each removed role
+ * drops the vds reference taken when it was published.
+ */
+static void destroy_cdev_node(struct tipc_virtio_dev *vds,
+			      struct tipc_cdev_node *cdn)
+{
+	mutex_lock(&tipc_devices_lock);
+	if (cdn->dev) {
+		device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor));
+		cdev_del(&cdn->cdev);
+		idr_remove(&tipc_devices, cdn->minor);
+		cdn->dev = NULL;
+		kref_put(&vds->refcount, _free_vds);
+	}
+
+	if (default_vdev == vds->vdev) {
+		default_vdev = NULL;
+		kref_put(&vds->refcount, _free_vds);
+	}
+
+	mutex_unlock(&tipc_devices_lock);
+}
+
+/*
+ * Handle the GO_ONLINE control message: move the device out of the
+ * OFFLINE state and publish its character device node.
+ */
+static void _go_online(struct tipc_virtio_dev *vds)
+{
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_OFFLINE)
+		vds->state = VDS_ONLINE;
+	mutex_unlock(&vds->lock);
+
+	create_cdev_node(vds, &vds->cdev_node);
+
+	dev_info(&vds->vdev->dev, "is online\n");
+}
+
+/*
+ * Handle the GO_OFFLINE control message: mark the device offline, wake
+ * tx waiters, force-shut every channel and remove the device node.
+ * A no-op if the device was not online.
+ */
+static void _go_offline(struct tipc_virtio_dev *vds)
+{
+	struct tipc_chan *chan;
+
+	/* change state to OFFLINE */
+	mutex_lock(&vds->lock);
+	if (vds->state != VDS_ONLINE) {
+		mutex_unlock(&vds->lock);
+		return;
+	}
+	vds->state = VDS_OFFLINE;
+	mutex_unlock(&vds->lock);
+
+	/* wakeup all waiters */
+	wake_up_interruptible_all(&vds->sendq);
+
+	/* shutdown all channels */
+	while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) {
+		mutex_lock(&chan->lock);
+		chan->state = TIPC_STALE;
+		chan->remote = 0;
+		chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN);
+		mutex_unlock(&chan->lock);
+		/* drop the reference taken by vds_lookup_channel */
+		kref_put(&chan->refcount, _free_chan);
+	}
+
+	/* shutdown device node */
+	destroy_cdev_node(vds, &vds->cdev_node);
+
+	dev_info(&vds->vdev->dev, "is offline\n");
+}
+
+/*
+ * Handle a CONN_RSP control message: complete the pending connection on
+ * the target channel, recording the remote address and message limits
+ * on success, or flagging it disconnected on failure.  Responses for
+ * channels not in TIPC_CONNECTING are ignored.
+ */
+static void _handle_conn_rsp(struct tipc_virtio_dev *vds,
+			     struct tipc_conn_rsp_body *rsp, size_t len)
+{
+	struct tipc_chan *chan;
+
+	if (sizeof(*rsp) != len) {
+		dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n",
+			__func__, len);
+		return;
+	}
+
+	dev_dbg(&vds->vdev->dev,
+		"%s: connection response: for addr 0x%x: status %d remote addr 0x%x\n",
+		__func__, rsp->target, rsp->status, rsp->remote);
+
+	/* Lookup channel */
+	chan = vds_lookup_channel(vds, rsp->target);
+	if (chan) {
+		mutex_lock(&chan->lock);
+		if (chan->state == TIPC_CONNECTING) {
+			if (!rsp->status) {
+				chan->state = TIPC_CONNECTED;
+				chan->remote = rsp->remote;
+				chan->max_msg_cnt = rsp->max_msg_cnt;
+				chan->max_msg_size = rsp->max_msg_size;
+				chan_trigger_event(chan,
+						   TIPC_CHANNEL_CONNECTED);
+			} else {
+				chan->state = TIPC_DISCONNECTED;
+				chan->remote = 0;
+				chan_trigger_event(chan,
+						   TIPC_CHANNEL_DISCONNECTED);
+			}
+		}
+		mutex_unlock(&chan->lock);
+		/* drop the reference taken by vds_lookup_channel */
+		kref_put(&chan->refcount, _free_chan);
+	}
+}
+
+/*
+ * Handle a DISC_REQ control message: the remote side is disconnecting
+ * the target channel.  Moves the channel to TIPC_DISCONNECTED and
+ * notifies its owner; already-disconnected channels are left alone.
+ */
+static void _handle_disc_req(struct tipc_virtio_dev *vds,
+			     struct tipc_disc_req_body *req, size_t len)
+{
+	struct tipc_chan *chan;
+
+	if (sizeof(*req) != len) {
+		dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n",
+			__func__, len);
+		return;
+	}
+
+	dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n",
+		__func__, req->target);
+
+	chan = vds_lookup_channel(vds, req->target);
+	if (chan) {
+		mutex_lock(&chan->lock);
+		if (chan->state == TIPC_CONNECTED ||
+			chan->state == TIPC_CONNECTING) {
+			chan->state = TIPC_DISCONNECTED;
+			chan->remote = 0;
+			chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED);
+		}
+		mutex_unlock(&chan->lock);
+		/* drop the reference taken by vds_lookup_channel */
+		kref_put(&chan->refcount, _free_chan);
+	}
+}
+
+/*
+ * Handle a RELEASE control message: Trusty is done with the shared
+ * memory identified by req->id, so take the matching handle out of the
+ * tree and drop it.  If dropping fails the handle is re-registered so a
+ * later (real) release can still find it.
+ */
+static void _handle_release(struct tipc_virtio_dev *vds,
+			    struct tipc_release_body *req, size_t len)
+{
+	struct tipc_shared_handle *handle = NULL;
+	struct device *dev = &vds->vdev->dev;
+	int ret = 0;
+
+	if (len < sizeof(*req)) {
+		dev_err(dev, "Received undersized release control message\n");
+		return;
+	}
+
+	handle = tipc_shared_handle_take(vds, req->id);
+	if (!handle) {
+		dev_err(dev,
+			"Received release control message for untracked handle: 0x%llx\n",
+			req->id);
+		return;
+	}
+
+	ret = tipc_shared_handle_drop(handle);
+
+	if (ret) {
+		dev_err(dev,
+			"Failed to release handle 0x%llx upon request: (%d)\n",
+			req->id, ret);
+		/*
+		 * Put the handle back in case we got a spurious release now and
+		 * get a real one later. This path should not happen, we're
+		 * just trying to be robust.
+		 */
+		tipc_shared_handle_register(handle);
+	}
+}
+
+/*
+ * Dispatch a control message addressed to TIPC_CTRL_ADDR.  @data/@len
+ * is the raw payload, @src the sender's address.  Malformed lengths
+ * (header plus declared body not matching the buffer) are dropped.
+ */
+static void _handle_ctrl_msg(struct tipc_virtio_dev *vds,
+			     void *data, int len, u32 src)
+{
+	struct tipc_ctrl_msg *msg = data;
+
+	if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) {
+		dev_err(&vds->vdev->dev,
+			"%s: Invalid message length ( %d vs. %d)\n",
+			__func__, (int)(sizeof(*msg) + msg->body_len), len);
+		return;
+	}
+
+	dev_dbg(&vds->vdev->dev,
+		"%s: Incoming ctrl message: src 0x%x type %d len %d\n",
+		__func__, src, msg->type, msg->body_len);
+
+	switch (msg->type) {
+	case TIPC_CTRL_MSGTYPE_GO_ONLINE:
+		_go_online(vds);
+		break;
+
+	case TIPC_CTRL_MSGTYPE_GO_OFFLINE:
+		_go_offline(vds);
+		break;
+
+	case TIPC_CTRL_MSGTYPE_CONN_RSP:
+		_handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body,
+				 msg->body_len);
+		break;
+
+	case TIPC_CTRL_MSGTYPE_DISC_REQ:
+		_handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body,
+				 msg->body_len);
+		break;
+
+	case TIPC_CTRL_MSGTYPE_RELEASE:
+		_handle_release(vds, (struct tipc_release_body *)msg->body,
+				msg->body_len);
+		/* was mis-indented one level out; now aligned with its case */
+		break;
+
+	default:
+		dev_warn(&vds->vdev->dev,
+			 "%s: Unexpected message type: %d\n",
+			 __func__, msg->type);
+	}
+}
+
+/*
+ * handle_dropped_chan_msg() - reclaim shared handles from a dropped message
+ * @vds: owning tipc virtio device
+ * @mb:  the dropped message buffer (read cursor positioned past the header)
+ * @msg: the already-parsed message header for @mb
+ *
+ * A dropped channel message may still carry msg->shm_cnt trailing
+ * struct tipc_shm records; each is looked up and dropped so the shared
+ * memory is not leaked. If the header itself is inconsistent the
+ * records cannot be trusted and nothing is recovered.
+ */
+static void handle_dropped_chan_msg(struct tipc_virtio_dev *vds,
+				    struct tipc_msg_buf *mb,
+				    struct tipc_msg_hdr *msg)
+{
+	int shm_idx;
+	struct tipc_shm *shm;
+	struct tipc_shared_handle *shared_handle;
+	struct device *dev = &vds->vdev->dev;
+	size_t len;
+
+	if (msg->len < msg->shm_cnt * sizeof(*shm)) {
+		dev_err(dev, "shm_cnt does not fit in dropped message");
+		/* The message is corrupt, so we can't recover resources */
+		return;
+	}
+
+	len = msg->len - msg->shm_cnt * sizeof(*shm);
+	/* skip normal data */
+	(void)mb_get_data(mb, len);
+
+	/* the shm records sit at the tail of the payload */
+	for (shm_idx = 0; shm_idx < msg->shm_cnt; shm_idx++) {
+		shm = mb_get_data(mb, sizeof(*shm));
+		shared_handle = tipc_shared_handle_take(vds, shm->obj_id);
+		if (shared_handle) {
+			if (tipc_shared_handle_drop(shared_handle))
+				dev_err(dev,
+					"Failed to drop handle found in dropped buffer");
+		} else {
+			dev_err(dev,
+				"Found handle in dropped buffer which was not registered to tipc device...");
+		}
+	}
+}
+
+/*
+ * handle_dropped_mb() - recover resources from a message buffer the other
+ * side dropped. Control-addressed messages carry no shared-memory records,
+ * so only channel-addressed messages need further processing.
+ */
+static void handle_dropped_mb(struct tipc_virtio_dev *vds,
+			      struct tipc_msg_buf *mb)
+{
+	struct tipc_msg_hdr *hdr;
+
+	mb_reset_read(mb);
+	hdr = mb_get_data(mb, sizeof(*hdr));
+	if (hdr->dst == TIPC_CTRL_ADDR)
+		return;
+
+	handle_dropped_chan_msg(vds, mb, hdr);
+}
+
+/*
+ * _handle_rxbuf() - process one received buffer from the rx virtqueue
+ * @vds:   owning tipc virtio device
+ * @rxbuf: the buffer returned by virtqueue_get_buf()
+ * @rxlen: number of bytes the device wrote into @rxbuf
+ *
+ * Validates framing, then either dispatches the message to the control
+ * endpoint or hands it to the destination channel's ->handle_msg()
+ * callback (which may return a replacement buffer). Whatever buffer we
+ * end up with is re-queued on the rx virtqueue.
+ *
+ * Return: 0 on success, negative errno if the buffer could not be
+ * re-queued (or the channel callback returned no buffer).
+ */
+static int _handle_rxbuf(struct tipc_virtio_dev *vds,
+			 struct tipc_msg_buf *rxbuf, size_t rxlen)
+{
+	int err;
+	struct scatterlist sg;
+	struct tipc_msg_hdr *msg;
+	struct device *dev = &vds->vdev->dev;
+
+	/* message sanity check */
+	if (rxlen > rxbuf->buf_sz) {
+		dev_warn(dev, "inbound msg is too big: %zd\n", rxlen);
+		goto drop_it;
+	}
+
+	if (rxlen < sizeof(*msg)) {
+		dev_warn(dev, "inbound msg is too short: %zd\n", rxlen);
+		goto drop_it;
+	}
+
+	/* reset buffer and put data  */
+	mb_reset(rxbuf);
+	mb_put_data(rxbuf, rxlen);
+
+	/* get message header */
+	msg = mb_get_data(rxbuf, sizeof(*msg));
+	if (mb_avail_data(rxbuf) != msg->len) {
+		dev_warn(dev, "inbound msg length mismatch: (%zu vs. %d)\n",
+			 mb_avail_data(rxbuf), msg->len);
+		goto drop_it;
+	}
+
+	dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d, shm_cnt: %d\n",
+		msg->src, msg->dst, msg->len, msg->flags, msg->reserved,
+		msg->shm_cnt);
+
+	/* message directed to control endpoint is a special case */
+	if (msg->dst == TIPC_CTRL_ADDR) {
+		_handle_ctrl_msg(vds, msg->data, msg->len, msg->src);
+	} else {
+		struct tipc_chan *chan = NULL;
+		/* Lookup channel */
+		chan = vds_lookup_channel(vds, msg->dst);
+		if (chan) {
+			/* handle it; callback may swap in a fresh buffer */
+			rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf);
+			kref_put(&chan->refcount, _free_chan);
+			if (WARN_ON(!rxbuf))
+				return -EINVAL;
+		}
+	}
+
+drop_it:
+	/* add the buffer back to the virtqueue */
+	sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
+	err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
+	if (err < 0) {
+		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * _rxvq_cb() - rx virtqueue callback; drains all completed rx buffers
+ * @rxvq: the receive virtqueue
+ *
+ * Each completed buffer is processed and re-queued by _handle_rxbuf();
+ * a kick is issued afterwards so the other side learns buffers are
+ * available again. Processing stops early if re-queueing fails.
+ */
+static void _rxvq_cb(struct virtqueue *rxvq)
+{
+	unsigned int len;
+	struct tipc_msg_buf *mb;
+	unsigned int msg_cnt = 0;
+	struct tipc_virtio_dev *vds = rxvq->vdev->priv;
+
+	while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) {
+		if (_handle_rxbuf(vds, mb, len))
+			break;
+		msg_cnt++;
+	}
+
+	/* tell the other side that we added rx buffers */
+	if (msg_cnt)
+		virtqueue_kick(rxvq);
+}
+
+/*
+ * _txvq_cb() - tx virtqueue callback; reclaims completed tx buffers
+ * @txvq: the transmit virtqueue
+ *
+ * Returns each completed buffer to the free list. A negative completion
+ * length marks a buffer the other side dropped; its shared-memory
+ * handles are recovered first. Waiting senders are woken if the free
+ * list gained space.
+ */
+static void _txvq_cb(struct virtqueue *txvq)
+{
+	unsigned int len;
+	struct tipc_msg_buf *mb;
+	bool need_wakeup = false;
+	struct tipc_virtio_dev *vds = txvq->vdev->priv;
+
+	/* detach all buffers */
+	mutex_lock(&vds->lock);
+	while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) {
+		/* negative len signals a dropped buffer */
+		if ((int)len < 0)
+			handle_dropped_mb(vds, mb);
+		need_wakeup |= _put_txbuf_locked(vds, mb);
+	}
+	mutex_unlock(&vds->lock);
+
+	if (need_wakeup) {
+		/* wake up potential senders waiting for a tx buffer */
+		wake_up_interruptible_all(&vds->sendq);
+	}
+}
+
+/*
+ * tipc_virtio_probe() - set up state for a newly discovered tipc virtio dev
+ * @vdev: the virtio device being probed
+ *
+ * Allocates and initializes the device state, reads the (optional)
+ * device configuration, creates the rx/tx virtqueues, and pre-fills the
+ * rx queue with message buffers. The device starts in VDS_OFFLINE and
+ * goes online when the remote sends TIPC_CTRL_MSGTYPE_GO_ONLINE.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int tipc_virtio_probe(struct virtio_device *vdev)
+{
+	int err, i;
+	struct tipc_virtio_dev *vds;
+	struct tipc_dev_config config;
+	struct virtqueue *vqs[2];
+	vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb};
+	static const char * const vq_names[] = { "rx", "tx" };
+
+	vds = kzalloc(sizeof(*vds), GFP_KERNEL);
+	if (!vds)
+		return -ENOMEM;
+
+	vds->vdev = vdev;
+
+	mutex_init(&vds->lock);
+	mutex_init(&vds->shared_handles_lock);
+	kref_init(&vds->refcount);
+	init_waitqueue_head(&vds->sendq);
+	INIT_LIST_HEAD(&vds->free_buf_list);
+	idr_init(&vds->addr_idr);
+	vds->shared_handles = RB_ROOT;
+	/*
+	 * NOTE(review): inherits the DMA mask from the grandparent device;
+	 * assumes parent->parent exists and has a valid dma_mask — confirm
+	 * against the platform topology this driver binds to.
+	 */
+	dma_coerce_mask_and_coherent(&vds->vdev->dev,
+				     *vds->vdev->dev.parent->parent->dma_mask);
+
+	/* set default max message size and alignment */
+	memset(&config, 0, sizeof(config));
+	config.msg_buf_max_size  = DEFAULT_MSG_BUF_SIZE;
+	config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN;
+
+	/* get configuration if present */
+	vdev->config->get(vdev, 0, &config, sizeof(config));
+
+	/* copy dev name (NUL termination is forced on the next line) */
+	strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name));
+	vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0';
+
+	/* find tx virtqueues (rx and tx and in this order) */
+	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL,
+				     NULL);
+	if (err)
+		goto err_find_vqs;
+
+	vds->rxvq = vqs[0];
+	vds->txvq = vqs[1];
+
+	/* save max buffer size and count */
+	vds->msg_buf_max_sz = config.msg_buf_max_size;
+	vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq);
+
+	/* set up the receive buffers */
+	for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) {
+		struct scatterlist sg;
+		struct tipc_msg_buf *rxbuf;
+
+		rxbuf = vds_alloc_msg_buf(vds, true);
+		if (!rxbuf) {
+			dev_err(&vdev->dev, "failed to allocate rx buffer\n");
+			err = -ENOMEM;
+			goto err_free_rx_buffers;
+		}
+
+		sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
+		err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
+		WARN_ON(err); /* sanity check; this can't really happen */
+	}
+
+	vdev->priv = vds;
+	vds->state = VDS_OFFLINE;
+
+	dev_dbg(&vdev->dev, "%s: done\n", __func__);
+	return 0;
+
+err_free_rx_buffers:
+	_cleanup_vq(vds, vds->rxvq);
+err_find_vqs:
+	kref_put(&vds->refcount, _free_vds);
+	return err;
+}
+
+/*
+ * tipc_virtio_remove() - tear down a tipc virtio device
+ * @vdev: the virtio device being removed
+ *
+ * Takes the device offline, marks the state VDS_DEAD under the lock so
+ * concurrent users see the device is gone, resets the virtio device,
+ * then frees queues, buffers, and the address idr. The final reference
+ * on the state is released via kref_put().
+ */
+static void tipc_virtio_remove(struct virtio_device *vdev)
+{
+	struct tipc_virtio_dev *vds = vdev->priv;
+
+	_go_offline(vds);
+
+	mutex_lock(&vds->lock);
+	vds->state = VDS_DEAD;
+	vds->vdev = NULL;
+	mutex_unlock(&vds->lock);
+
+	vdev->config->reset(vdev);
+
+	idr_destroy(&vds->addr_idr);
+
+	_cleanup_vq(vds, vds->rxvq);
+	_cleanup_vq(vds, vds->txvq);
+	vds_free_msg_buf_list(vds, &vds->free_buf_list);
+
+	/* NOTE(review): vds->vdev was just NULLed above; this relies on
+	 * @vdev == the old vds->vdev — verify del_vqs argument intent. */
+	vdev->config->del_vqs(vds->vdev);
+
+	kref_put(&vds->refcount, _free_vds);
+}
+
+// TODO (b/207176288) This needs to be sent upstream
+#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */
+/* match any vendor for the trusty-ipc virtio device id */
+static const struct virtio_device_id tipc_virtio_id_table[] = {
+	{ VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+/* no optional virtio feature bits are negotiated */
+static const unsigned int features[] = {
+	0,
+};
+
+static struct virtio_driver virtio_tipc_driver = {
+	.feature_table	= features,
+	.feature_table_size = ARRAY_SIZE(features),
+	.driver.name	= KBUILD_MODNAME,
+	.driver.owner	= THIS_MODULE,
+	.id_table	= tipc_virtio_id_table,
+	.probe		= tipc_virtio_probe,
+	.remove		= tipc_virtio_remove,
+};
+
+/*
+ * tipc_init() - module init: reserve chrdev region, create device class,
+ * and register the virtio driver. Undoes each step in reverse order on
+ * failure.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __init tipc_init(void)
+{
+	int ret;
+	dev_t dev;
+
+	ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME);
+	if (ret) {
+		pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* remember the major so per-channel char devices can be created */
+	tipc_major = MAJOR(dev);
+	tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
+	if (IS_ERR(tipc_class)) {
+		ret = PTR_ERR(tipc_class);
+		pr_err("%s: class_create failed: %d\n", __func__, ret);
+		goto err_class_create;
+	}
+
+	ret = register_virtio_driver(&virtio_tipc_driver);
+	if (ret) {
+		pr_err("failed to register virtio driver: %d\n", ret);
+		goto err_register_virtio_drv;
+	}
+
+	return 0;
+
+err_register_virtio_drv:
+	class_destroy(tipc_class);
+
+err_class_create:
+	unregister_chrdev_region(dev, MAX_DEVICES);
+	return ret;
+}
+
+/*
+ * tipc_exit() - module exit: unwind tipc_init() in reverse order
+ * (virtio driver, class, then the chrdev region).
+ */
+static void __exit tipc_exit(void)
+{
+	unregister_virtio_driver(&virtio_tipc_driver);
+	class_destroy(tipc_class);
+	unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES);
+}
+
+/*
+ * trusty_register_func_for_dma_buf() - install dma-buf helper callbacks
+ * @get_ffa_tag:       returns the FF-A tag for a dma_buf
+ * @get_shared_mem_id: looks up the trusty shared-memory id for a dma_buf
+ *
+ * Stores the two callbacks in module-level function pointers for later
+ * use by the ipc path. No locking; callers are expected to register
+ * before the pointers are consumed.
+ */
+void trusty_register_func_for_dma_buf(
+	u64 (*get_ffa_tag)(struct dma_buf *dma_buf),
+	int (*get_shared_mem_id)(struct dma_buf *dma_buf,
+		trusty_shared_mem_id_t *id))
+{
+	dma_buf_get_shared_mem_id = get_shared_mem_id;
+	dma_buf_get_ffa_tag = get_ffa_tag;
+}
+EXPORT_SYMBOL_GPL(trusty_register_func_for_dma_buf);
+
+/* We need to init this early */
+subsys_initcall(tipc_init);
+module_exit(tipc_exit);
+
+/* instantiate the tracepoints declared in trusty-ipc-trace.h */
+#define CREATE_TRACE_POINTS
+#include "trusty-ipc-trace.h"
+
+/* NOTE(review): MODULE_DEVICE_TABLE usually takes a bus type ("virtio")
+ * as its first argument; confirm "tipc" is intentional here. */
+MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table);
+MODULE_DESCRIPTION("Trusty IPC driver");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/trusty/trusty-irq-trace.h b/drivers/trusty/trusty-irq-trace.h
new file mode 100644
index 0000000..3df61b8
--- /dev/null
+++ b/drivers/trusty/trusty-irq-trace.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Google, Inc.
+ *
+ * Tracepoint definitions for the trusty-irq driver. This header follows
+ * the standard kernel tracepoint pattern: it may be included multiple
+ * times (TRACE_HEADER_MULTI_READ) and is instantiated where
+ * CREATE_TRACE_POINTS is defined before inclusion.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM trusty
+
+#if !defined(_TRUSTY_IRQ_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRUSTY_IRQ_TRACE_H
+
+#include <linux/tracepoint.h>
+
+/* fired from the irq handler; records only the Linux irq number */
+TRACE_EVENT(trusty_irq,
+	TP_PROTO(int irq),
+	TP_ARGS(irq),
+	TP_STRUCT__entry(
+		__field(int, irq)
+	),
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+	TP_printk("irq=%d", __entry->irq)
+);
+
+#endif /* _TRUSTY_IRQ_TRACE_H */
+
+/* this header lives next to its user, so include path is "." */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trusty-irq-trace
+#include <trace/define_trace.h>
diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c
new file mode 100644
index 0000000..caa24ef
--- /dev/null
+++ b/drivers/trusty/trusty-irq.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Google, Inc.
+ */
+
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/trusty/trusty.h>
+
+#include "trusty-irq.h"
+#include "trusty-irq-trace.h"
+
+/* per-interrupt bookkeeping; linked into an irqset via @node */
+struct trusty_irq {
+	struct trusty_irq_state *is;
+	struct hlist_node node;
+	/* Linux irq number */
+	unsigned int irq;
+	/* true if registered with request_percpu_irq() */
+	bool percpu;
+	/* true while the irq is enabled on this cpu */
+	bool enable;
+	/* doorbell irqs are never masked/queued by the handler */
+	bool doorbell;
+	/* backpointer to the per-cpu allocation this entry belongs to */
+	struct trusty_irq __percpu *percpu_ptr;
+};
+
+/* two-list irq set: "pending" = masked, awaiting re-enable on next
+ * trusty call; "inactive" = currently enabled (or fully disabled) */
+struct trusty_irq_irqset {
+	struct hlist_head pending;
+	struct hlist_head inactive;
+};
+
+/* per-device driver state */
+struct trusty_irq_state {
+	struct device *dev;
+	struct device *trusty_dev;
+	/* non-percpu irqs; protected by @normal_irqs_lock */
+	struct trusty_irq_irqset normal_irqs;
+	spinlock_t normal_irqs_lock;
+	/* per-cpu irq sets; accessed with irqs disabled on the local cpu */
+	struct trusty_irq_irqset __percpu *percpu_irqs;
+	struct notifier_block trusty_call_notifier;
+	struct hlist_node cpuhp_node;
+};
+
+/* dynamic cpuhp slot allocated in trusty_irq_driver_init() */
+static int trusty_irq_cpuhp_slot = -1;
+
+/*
+ * trusty_irq_enable_pending_irqs() - re-enable all irqs parked on the
+ * pending list and move them back to the inactive list
+ * @is:     driver state (used for logging)
+ * @irqset: the irq set to drain
+ * @percpu: selects enable_percpu_irq() vs enable_irq()
+ *
+ * Called just before entering Trusty so that interrupts masked by the
+ * handler are unmasked again. Caller must hold the appropriate
+ * protection for @irqset (normal_irqs_lock, or local irqs disabled for
+ * a per-cpu set).
+ */
+static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is,
+					   struct trusty_irq_irqset *irqset,
+					   bool percpu)
+{
+	struct hlist_node *n;
+	struct trusty_irq *trusty_irq;
+
+	hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
+		dev_dbg(is->dev,
+			"%s: enable pending irq %d, percpu %d, cpu %d\n",
+			__func__, trusty_irq->irq, percpu, smp_processor_id());
+		if (percpu)
+			enable_percpu_irq(trusty_irq->irq, 0);
+		else
+			enable_irq(trusty_irq->irq);
+		hlist_del(&trusty_irq->node);
+		hlist_add_head(&trusty_irq->node, &irqset->inactive);
+	}
+}
+
+/*
+ * trusty_irq_enable_irqset() - enable every inactive irq in @irqset
+ * @is:     driver state (used for logging)
+ * @irqset: the irq set to enable
+ *
+ * Uses enable_percpu_irq() unconditionally, so this is only suitable
+ * for per-cpu irq sets (its sole caller passes this_cpu_ptr of
+ * percpu_irqs). Irqs already marked enabled are skipped with a warning.
+ */
+static void trusty_irq_enable_irqset(struct trusty_irq_state *is,
+				      struct trusty_irq_irqset *irqset)
+{
+	struct trusty_irq *trusty_irq;
+
+	hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
+		if (trusty_irq->enable) {
+			dev_warn(is->dev,
+				 "%s: percpu irq %d already enabled, cpu %d\n",
+				 __func__, trusty_irq->irq, smp_processor_id());
+			continue;
+		}
+		dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n",
+			__func__, trusty_irq->irq, smp_processor_id());
+		enable_percpu_irq(trusty_irq->irq, 0);
+		trusty_irq->enable = true;
+	}
+}
+
+/*
+ * trusty_irq_disable_irqset() - disable every irq in @irqset
+ * @is:     driver state (used for logging)
+ * @irqset: the irq set to disable
+ *
+ * Disables both inactive (currently enabled) and pending (already
+ * masked by the handler) entries; pending entries are moved back to the
+ * inactive list since they no longer await re-enable. Handles both
+ * per-cpu and normal irqs via the per-entry percpu flag.
+ */
+static void trusty_irq_disable_irqset(struct trusty_irq_state *is,
+				      struct trusty_irq_irqset *irqset)
+{
+	struct hlist_node *n;
+	struct trusty_irq *trusty_irq;
+
+	hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
+		if (!trusty_irq->enable) {
+			dev_warn(is->dev,
+				 "irq %d already disabled, percpu %d, cpu %d\n",
+				 trusty_irq->irq, trusty_irq->percpu,
+				 smp_processor_id());
+			continue;
+		}
+		dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n",
+			__func__, trusty_irq->irq, trusty_irq->percpu,
+			smp_processor_id());
+		trusty_irq->enable = false;
+		if (trusty_irq->percpu)
+			disable_percpu_irq(trusty_irq->irq);
+		else
+			disable_irq_nosync(trusty_irq->irq);
+	}
+	hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
+		/* pending irqs were already masked by the handler, so only
+		 * the bookkeeping needs updating here */
+		if (!trusty_irq->enable) {
+			dev_warn(is->dev,
+				 "pending irq %d already disabled, percpu %d, cpu %d\n",
+				 trusty_irq->irq, trusty_irq->percpu,
+				 smp_processor_id());
+		}
+		dev_dbg(is->dev,
+			"%s: disable pending irq %d, percpu %d, cpu %d\n",
+			__func__, trusty_irq->irq, trusty_irq->percpu,
+			smp_processor_id());
+		trusty_irq->enable = false;
+		hlist_del(&trusty_irq->node);
+		hlist_add_head(&trusty_irq->node, &irqset->inactive);
+	}
+}
+
+/*
+ * trusty_irq_call_notify() - trusty call notifier callback
+ * @nb:     embedded notifier block (container of trusty_irq_state)
+ * @action: notifier action; only TRUSTY_CALL_PREPARE is handled
+ * @data:   unused
+ *
+ * Runs with interrupts disabled just before a call into Trusty;
+ * re-enables any irqs the handler masked (normal and local per-cpu
+ * sets) so Trusty can observe them.
+ */
+static int trusty_irq_call_notify(struct notifier_block *nb,
+				  unsigned long action, void *data)
+{
+	struct trusty_irq_state *is;
+
+	if (WARN_ON(!irqs_disabled()))
+		return NOTIFY_DONE;
+
+	if (action != TRUSTY_CALL_PREPARE)
+		return NOTIFY_DONE;
+
+	is = container_of(nb, struct trusty_irq_state, trusty_call_notifier);
+
+	spin_lock(&is->normal_irqs_lock);
+	trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false);
+	spin_unlock(&is->normal_irqs_lock);
+	/* local irqs are off, so this_cpu access is safe without the lock */
+	trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true);
+
+	return NOTIFY_OK;
+}
+
+/*
+ * trusty_irq_handler() - Linux-side handler for irqs forwarded to Trusty
+ * @irq:  the Linux irq number
+ * @data: the struct trusty_irq registered for this line
+ *
+ * Non-doorbell irqs are masked here and parked on the owning irqset's
+ * pending list; they are re-enabled by the call notifier just before
+ * the next entry into Trusty. In all cases a nop is enqueued so the
+ * Trusty scheduler runs and can service the interrupt.
+ */
+static irqreturn_t trusty_irq_handler(int irq, void *data)
+{
+	struct trusty_irq *trusty_irq = data;
+	struct trusty_irq_state *is = trusty_irq->is;
+	struct trusty_irq_irqset *irqset;
+
+	/* fixed: the "percpu" field previously printed trusty_irq->irq */
+	dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n",
+		__func__, irq, trusty_irq->percpu, smp_processor_id(),
+		trusty_irq->enable);
+	trace_trusty_irq(irq);
+
+	if (!trusty_irq->doorbell) {
+		if (trusty_irq->percpu) {
+			disable_percpu_irq(irq);
+			irqset = this_cpu_ptr(is->percpu_irqs);
+		} else {
+			disable_irq_nosync(irq);
+			irqset = &is->normal_irqs;
+		}
+
+		spin_lock(&is->normal_irqs_lock);
+		if (trusty_irq->enable) {
+			hlist_del(&trusty_irq->node);
+			hlist_add_head(&trusty_irq->node, &irqset->pending);
+		}
+		spin_unlock(&is->normal_irqs_lock);
+	}
+
+	trusty_enqueue_nop(is->trusty_dev, NULL);
+
+	dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * trusty_irq_cpu_up() - cpuhp online callback
+ * @cpu:  the cpu coming online (this callback runs on it)
+ * @node: embedded cpuhp node (container of trusty_irq_state)
+ *
+ * Enables this cpu's per-cpu irq set and pokes the Trusty scheduler.
+ * Return: always 0.
+ */
+static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node)
+{
+	unsigned long irq_flags;
+	struct trusty_irq_state *is;
+
+	is = container_of(node, struct trusty_irq_state, cpuhp_node);
+
+	dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
+
+	local_irq_save(irq_flags);
+	trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs));
+	local_irq_restore(irq_flags);
+
+	/*
+	 * Temporary workaround blindly enqueuing work to force trusty scheduler
+	 * to run after a cpu suspend.
+	 * Root causing the workqueue being inappropriately empty
+	 * (e.g. loss of an IPI) may make this workaround unnecessary
+	 * in the future.
+	 */
+	trusty_enqueue_nop(is->trusty_dev, NULL);
+
+	return 0;
+}
+
+/*
+ * trusty_irq_cpu_down() - cpuhp offline callback
+ * @cpu:  the cpu going offline (this callback runs on it)
+ * @node: embedded cpuhp node (container of trusty_irq_state)
+ *
+ * Disables this cpu's per-cpu irq set. Return: always 0.
+ */
+static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node)
+{
+	unsigned long irq_flags;
+	struct trusty_irq_state *is;
+
+	is = container_of(node, struct trusty_irq_state, cpuhp_node);
+
+	dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
+
+	local_irq_save(irq_flags);
+	trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs));
+	local_irq_restore(irq_flags);
+
+	return 0;
+}
+
+/*
+ * trusty_irq_map_ipi() - map a Trusty irq id to a Linux irq if it is an IPI
+ * @is:  driver state (provides of_node with the "ipi-range" property)
+ * @irq: Trusty interrupt id
+ *
+ * Reads "ipi-range" = <beg end ipi_base> from the device node; if @irq
+ * falls within [beg, end], creates an of mapping on the interrupt
+ * parent at ipi_base + (irq - beg).
+ *
+ * Return: the Linux irq number, -ENODATA if @irq is not an IPI (or the
+ * property is absent), -ENXIO if no interrupt parent is found, or
+ * -EINVAL if the mapping fails.
+ */
+static int trusty_irq_map_ipi(struct trusty_irq_state *is, int irq)
+{
+	int ret;
+	u32 ipi_range[3];
+	struct device_node *gic;
+	struct of_phandle_args oirq = {};
+	u32 beg, end, ipi_base;
+
+	ret = of_property_read_u32_array(is->dev->of_node, "ipi-range",
+					 ipi_range, ARRAY_SIZE(ipi_range));
+	if (ret != 0)
+		return -ENODATA;
+	beg = ipi_range[0];
+	end = ipi_range[1];
+	ipi_base = ipi_range[2];
+
+	if (irq < beg || irq > end)
+		return -ENODATA;
+
+	gic = of_irq_find_parent(is->dev->of_node);
+	if (!gic)
+		return -ENXIO;
+
+	oirq.np = gic;
+	oirq.args_count = 1;
+	oirq.args[0] = ipi_base + (irq - beg);
+
+	ret = irq_create_of_mapping(&oirq);
+
+	of_node_put(gic);
+	/* irq_create_of_mapping() returns 0 on failure */
+	return (!ret) ? -EINVAL : ret;
+}
+
+/*
+ * trusty_irq_create_irq_mapping() - translate a Trusty irq id to a Linux irq
+ * @is:  driver state (provides the of_node with range/template properties)
+ * @irq: Trusty interrupt id
+ *
+ * Resolution order:
+ *   1. IPIs are mapped via trusty_irq_map_ipi().
+ *   2. Without an "interrupt-ranges" property, @irq is returned as-is
+ *      (legacy behavior for systems without IRQ domains).
+ *   3. Otherwise the matching <base end template-index> triple is found
+ *      in "interrupt-ranges", the referenced template from
+ *      "interrupt-templates" is expanded into an interrupt specifier,
+ *      and an of mapping is created.
+ *
+ * Return: the Linux irq number, or a negative errno.
+ */
+static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq)
+{
+	int ret;
+	int index;
+	u32 irq_pos;
+	u32 templ_idx;
+	u32 range_base;
+	u32 range_end;
+	struct of_phandle_args oirq;
+
+	/* check if this is an IPI (inter-processor interrupt) */
+	ret = trusty_irq_map_ipi(is, irq);
+	if (ret != -ENODATA)
+		return ret;
+
+	/* check if "interrupt-ranges" property is present */
+	if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) {
+		/* fallback to old behavior to be backward compatible with
+		 * systems that do not need IRQ domains.
+		 */
+		return irq;
+	}
+
+	/* find irq range; the read failing past the last triple ends the
+	 * search with that error */
+	for (index = 0;; index += 3) {
+		ret = of_property_read_u32_index(is->dev->of_node,
+						 "interrupt-ranges",
+						 index, &range_base);
+		if (ret)
+			return ret;
+
+		ret = of_property_read_u32_index(is->dev->of_node,
+						 "interrupt-ranges",
+						 index + 1, &range_end);
+		if (ret)
+			return ret;
+
+		if (irq >= range_base && irq <= range_end)
+			break;
+	}
+
+	/*  read the rest of range entry: template index and irq_pos */
+	ret = of_property_read_u32_index(is->dev->of_node,
+					 "interrupt-ranges",
+					 index + 2, &templ_idx);
+	if (ret)
+		return ret;
+
+	/* read irq template */
+	ret = of_parse_phandle_with_args(is->dev->of_node,
+					 "interrupt-templates",
+					 "#interrupt-cells",
+					 templ_idx, &oirq);
+	if (ret)
+		return ret;
+
+	WARN_ON(!oirq.np);
+	WARN_ON(!oirq.args_count);
+
+	/*
+	 * An IRQ template is a non empty array of u32 values describing group
+	 * of interrupts having common properties. The u32 entry with index
+	 * zero contains the position of irq_id in interrupt specifier array
+	 * followed by data representing interrupt specifier array with irq id
+	 * field omitted, so to convert irq template to interrupt specifier
+	 * array we have to move down one slot the first irq_pos entries and
+	 * replace the resulting gap with real irq id.
+	 */
+	irq_pos = oirq.args[0];
+
+	if (irq_pos >= oirq.args_count) {
+		dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos);
+		/* NOTE(review): oirq.np from of_parse_phandle_with_args()
+		 * looks unreleased on this path and below — confirm whether
+		 * an of_node_put(oirq.np) is needed. */
+		return -EINVAL;
+	}
+
+	for (index = 1; index <= irq_pos; index++)
+		oirq.args[index - 1] = oirq.args[index];
+
+	oirq.args[irq_pos] = irq - range_base;
+
+	ret = irq_create_of_mapping(&oirq);
+
+	/* irq_create_of_mapping() returns 0 on failure */
+	return (!ret) ? -EINVAL : ret;
+}
+
+/*
+ * trusty_irq_init_normal_irq() - register one non-percpu Trusty irq
+ * @is:   driver state
+ * @tirq: Trusty interrupt id
+ *
+ * Maps @tirq to a Linux irq, allocates its bookkeeping entry, links it
+ * into the normal_irqs inactive list, and requests the irq. The entry
+ * is added to the list before request_irq() so the handler can find it
+ * immediately; it is unlinked and freed on failure.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq)
+{
+	int ret;
+	int irq;
+	unsigned long irq_flags;
+	struct trusty_irq *trusty_irq;
+
+	dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+	irq = trusty_irq_create_irq_mapping(is, tirq);
+	if (irq < 0) {
+		dev_err(is->dev,
+			"trusty_irq_create_irq_mapping failed (%d)\n", irq);
+		return irq;
+	}
+
+	trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
+	if (!trusty_irq)
+		return -ENOMEM;
+
+	trusty_irq->is = is;
+	trusty_irq->irq = irq;
+	trusty_irq->enable = true;
+
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+
+	ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
+			  "trusty", trusty_irq);
+	if (ret) {
+		dev_err(is->dev, "request_irq failed %d\n", ret);
+		goto err_request_irq;
+	}
+	return 0;
+
+err_request_irq:
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	hlist_del(&trusty_irq->node);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+	kfree(trusty_irq);
+	return ret;
+}
+
+/*
+ * trusty_irq_init_per_cpu_irq() - register one per-cpu Trusty irq
+ * @is:   driver state
+ * @tirq: Trusty interrupt id
+ * @type: TRUSTY_IRQ_TYPE_PER_CPU or TRUSTY_IRQ_TYPE_DOORBELL
+ *
+ * Maps @tirq to a Linux irq, allocates per-cpu bookkeeping entries,
+ * links each cpu's entry into that cpu's inactive irqset, and requests
+ * the irq with request_percpu_irq(). On failure all entries are
+ * unlinked and the per-cpu allocation freed.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq,
+				       unsigned int type)
+{
+	int ret;
+	int irq;
+	unsigned int cpu;
+	struct trusty_irq __percpu *trusty_irq_handler_data;
+
+	dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+	irq = trusty_irq_create_irq_mapping(is, tirq);
+	/* NOTE(review): sibling trusty_irq_init_normal_irq() checks
+	 * irq < 0; here irq == 0 returns 0, which the caller treats as
+	 * success — confirm the asymmetry is intentional. */
+	if (irq <= 0) {
+		dev_err(is->dev,
+			"trusty_irq_create_irq_mapping failed (%d)\n", irq);
+		return irq;
+	}
+
+	trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
+	if (!trusty_irq_handler_data)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct trusty_irq *trusty_irq;
+		struct trusty_irq_irqset *irqset;
+
+		trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+		irqset = per_cpu_ptr(is->percpu_irqs, cpu);
+
+		trusty_irq->is = is;
+		hlist_add_head(&trusty_irq->node, &irqset->inactive);
+		trusty_irq->irq = irq;
+		trusty_irq->percpu = true;
+		trusty_irq->doorbell = type == TRUSTY_IRQ_TYPE_DOORBELL;
+		trusty_irq->percpu_ptr = trusty_irq_handler_data;
+	}
+
+	ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
+				 trusty_irq_handler_data);
+	if (ret) {
+		dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
+		goto err_request_percpu_irq;
+	}
+
+	return 0;
+
+err_request_percpu_irq:
+	for_each_possible_cpu(cpu) {
+		struct trusty_irq *trusty_irq;
+
+		trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+		hlist_del(&trusty_irq->node);
+	}
+
+	free_percpu(trusty_irq_handler_data);
+	return ret;
+}
+
+/*
+ * trusty_smc_get_next_irq() - ask Trusty for the next irq of @type with
+ * id >= @min_irq, via the SMC_FC_GET_NEXT_IRQ fast call.
+ *
+ * Return: the Trusty irq id, or a negative value when none remain.
+ */
+static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
+				   unsigned long min_irq, unsigned int type)
+{
+	int next_irq;
+
+	next_irq = trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
+				      min_irq, type, 0);
+	return next_irq;
+}
+
+/*
+ * trusty_irq_init_one() - fetch and initialize the next irq of a given type
+ * @is:   driver state
+ * @irq:  lower bound for the next Trusty irq id to query
+ * @type: TRUSTY_IRQ_TYPE_* selector
+ *
+ * Queries Trusty for the next irq >= @irq and registers it. A failed
+ * registration is only warned about; enumeration continues so one bad
+ * irq does not block the rest.
+ *
+ * Return: next query lower bound (irq + 1), or negative when the
+ * enumeration is exhausted.
+ */
+static int trusty_irq_init_one(struct trusty_irq_state *is,
+			       int irq, unsigned int type)
+{
+	int ret;
+
+	irq = trusty_smc_get_next_irq(is, irq, type);
+	if (irq < 0)
+		return irq;
+
+	if (type != TRUSTY_IRQ_TYPE_NORMAL)
+		ret = trusty_irq_init_per_cpu_irq(is, irq, type);
+	else
+		ret = trusty_irq_init_normal_irq(is, irq);
+
+	if (ret) {
+		dev_warn(is->dev,
+			 "failed to initialize irq %d, irq will be ignored\n",
+			 irq);
+	}
+
+	return irq + 1;
+}
+
+/*
+ * trusty_irq_free_irqs() - release every registered irq and its bookkeeping
+ * @is: driver state
+ *
+ * Frees all normal irqs, then walks the local cpu's per-cpu inactive
+ * list to find each per-cpu irq; freeing one releases the entry on
+ * every cpu through the shared percpu_ptr allocation.
+ */
+static void trusty_irq_free_irqs(struct trusty_irq_state *is)
+{
+	struct trusty_irq *irq;
+	struct hlist_node *n;
+	unsigned int cpu;
+
+	hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) {
+		dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq);
+		free_irq(irq->irq, irq);
+		hlist_del(&irq->node);
+		kfree(irq);
+	}
+	hlist_for_each_entry_safe(irq, n,
+				  &this_cpu_ptr(is->percpu_irqs)->inactive,
+				  node) {
+		struct trusty_irq __percpu *trusty_irq_handler_data;
+
+		dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq);
+		trusty_irq_handler_data = irq->percpu_ptr;
+		free_percpu_irq(irq->irq, trusty_irq_handler_data);
+		for_each_possible_cpu(cpu) {
+			struct trusty_irq *irq_tmp;
+
+			irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu);
+			hlist_del(&irq_tmp->node);
+		}
+		free_percpu(trusty_irq_handler_data);
+	}
+}
+
+/*
+ * trusty_irq_probe() - platform probe for the trusty-irq device
+ * @pdev: the platform device (child of the trusty core device)
+ *
+ * Allocates driver state, registers the trusty call notifier, then
+ * enumerates and registers all per-cpu, normal, and doorbell irqs
+ * advertised by Trusty, and finally attaches to the cpuhp slot so
+ * per-cpu irqs track cpu online/offline. Fully unwinds on failure.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int trusty_irq_probe(struct platform_device *pdev)
+{
+	int ret;
+	int irq;
+	unsigned long irq_flags;
+	struct trusty_irq_state *is;
+
+	is = kzalloc(sizeof(*is), GFP_KERNEL);
+	if (!is) {
+		ret = -ENOMEM;
+		goto err_alloc_is;
+	}
+
+	is->dev = &pdev->dev;
+	/* the parent platform device is the trusty core device */
+	is->trusty_dev = is->dev->parent;
+	spin_lock_init(&is->normal_irqs_lock);
+	is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset);
+	if (!is->percpu_irqs) {
+		ret = -ENOMEM;
+		goto err_alloc_pending_percpu_irqs;
+	}
+
+	platform_set_drvdata(pdev, is);
+
+	is->trusty_call_notifier.notifier_call = trusty_irq_call_notify;
+	ret = trusty_call_notifier_register(is->trusty_dev,
+					    &is->trusty_call_notifier);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"failed to register trusty call notifier\n");
+		goto err_trusty_call_notifier_register;
+	}
+
+	/* each loop runs until Trusty reports no more irqs of that type */
+	for (irq = 0; irq >= 0;)
+		irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_PER_CPU);
+	for (irq = 0; irq >= 0;)
+		irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_NORMAL);
+	for (irq = 0; irq >= 0;)
+		irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_DOORBELL);
+
+	ret = cpuhp_state_add_instance(trusty_irq_cpuhp_slot, &is->cpuhp_node);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n",
+			ret);
+		goto err_add_cpuhp_instance;
+	}
+
+	return 0;
+
+err_add_cpuhp_instance:
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	trusty_irq_disable_irqset(is, &is->normal_irqs);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+	trusty_irq_free_irqs(is);
+	trusty_call_notifier_unregister(is->trusty_dev,
+					&is->trusty_call_notifier);
+err_trusty_call_notifier_register:
+	free_percpu(is->percpu_irqs);
+err_alloc_pending_percpu_irqs:
+	kfree(is);
+err_alloc_is:
+	return ret;
+}
+
+/*
+ * trusty_irq_remove() - platform remove; unwinds trusty_irq_probe()
+ * @pdev: the platform device being removed
+ *
+ * Detaches from the cpuhp slot, disables and frees all irqs,
+ * unregisters the call notifier, and frees the driver state.
+ *
+ * Return: 0 on success, or the cpuhp removal error (state then stays
+ * allocated).
+ */
+static int trusty_irq_remove(struct platform_device *pdev)
+{
+	int ret;
+	unsigned long irq_flags;
+	struct trusty_irq_state *is = platform_get_drvdata(pdev);
+
+	ret = cpuhp_state_remove_instance(trusty_irq_cpuhp_slot,
+					  &is->cpuhp_node);
+	if (WARN_ON(ret))
+		return ret;
+
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	trusty_irq_disable_irqset(is, &is->normal_irqs);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+
+	trusty_irq_free_irqs(is);
+
+	trusty_call_notifier_unregister(is->trusty_dev,
+					&is->trusty_call_notifier);
+	free_percpu(is->percpu_irqs);
+	kfree(is);
+
+	return 0;
+}
+
+/* devicetree match table for the trusty-irq node */
+static const struct of_device_id trusty_test_of_match[] = {
+	{ .compatible = "android,trusty-irq-v1", },
+	{},
+};
+
+/* NOTE(review): MODULE_DEVICE_TABLE normally takes a bus type ("of");
+ * confirm "trusty" is intentional here. */
+MODULE_DEVICE_TABLE(trusty, trusty_test_of_match);
+
+static struct platform_driver trusty_irq_driver = {
+	.probe = trusty_irq_probe,
+	.remove = trusty_irq_remove,
+	.driver	= {
+		.name = "trusty-irq",
+		.of_match_table = trusty_test_of_match,
+	},
+};
+
+/*
+ * trusty_irq_driver_init() - register the trusty-irq platform driver
+ *
+ * Allocates a dynamic cpuhp slot for per-cpu irq management, then
+ * registers the platform driver; the slot is released again if driver
+ * registration fails.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int __init trusty_irq_driver_init(void)
+{
+	int ret;
+
+	/* allocate dynamic cpuhp state slot */
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      "trusty-irq:cpu:online",
+				      trusty_irq_cpu_up,
+				      trusty_irq_cpu_down);
+	if (ret < 0)
+		return ret;
+	trusty_irq_cpuhp_slot = ret;
+
+	/* Register platform driver */
+	ret = platform_driver_register(&trusty_irq_driver);
+	if (ret < 0)
+		goto err_driver_register;
+
+	return ret;
+
+err_driver_register:
+	/* undo cpuhp slot allocation */
+	cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
+	trusty_irq_cpuhp_slot = -1;
+
+	return ret;
+}
+
+/*
+ * trusty_irq_driver_exit() - unregister the driver and release the
+ * cpuhp slot allocated by trusty_irq_driver_init().
+ */
+void trusty_irq_driver_exit(void)
+{
+	platform_driver_unregister(&trusty_irq_driver);
+	cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
+	trusty_irq_cpuhp_slot = -1;
+}
+
+/* instantiate the tracepoints declared in trusty-irq-trace.h */
+#define CREATE_TRACE_POINTS
+#include "trusty-irq-trace.h"
diff --git a/drivers/trusty/trusty-irq.h b/drivers/trusty/trusty-irq.h
new file mode 100644
index 0000000..d3444f9
--- /dev/null
+++ b/drivers/trusty/trusty-irq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Google, Inc.
+ *
+ * Init/exit entry points for the trusty-irq platform driver. When
+ * CONFIG_TRUSTY_IRQ is disabled both calls become no-ops so callers
+ * need no conditional compilation.
+ */
+
+#if !defined(_TRUSTY_IRQ_H)
+#define _TRUSTY_IRQ_H
+
+#ifdef CONFIG_TRUSTY_IRQ
+int __init trusty_irq_driver_init(void);
+void trusty_irq_driver_exit(void);
+#else
+static inline int trusty_irq_driver_init(void) { return 0; }
+static inline void trusty_irq_driver_exit(void) {}
+#endif
+
+#endif /* _TRUSTY_IRQ_H */
diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c
new file mode 100644
index 0000000..e57f085
--- /dev/null
+++ b/drivers/trusty/trusty-log.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Google, Inc.
+ */
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/log2.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/seq_file.h>
+#include <linux/panic_notifier.h>
+#include <asm/page.h>
+#include "trusty-log.h"
+
+/*
+ * Rationale for the chosen default log buffer size:
+ *  - the log buffer shall contain unthrottled Trusty crash dump.
+ *  - the register list portion of a crash dump is about 1KB
+ *  - the memory-around-registers portion of a crash dump can be up to 12 KB
+ *  - an average size backtrace is about 1 KB
+ *  - average length of non-crash trusty logs during boot is about 85 characters
+ *  - a crash dump with 50 lines of context therefore requires up to 18 KB
+ *  - buffer size needs to be power-of-two number of bytes
+ *  - rounding up to power of two from 18 KB gives 32 KB
+ *  The log size can be adjusted by setting the "trusty_log.log_size" parameter
+ *  on the kernel command line. The specified value will be adjusted as needed.
+ */
+
+#define TRUSTY_LOG_DEFAULT_SIZE (32768)
+#define TRUSTY_LOG_MIN_SIZE (PAGE_SIZE / 2)
+#define TRUSTY_LOG_MAX_SIZE (1 * 1024 * 1024 * 1024)
+#define TRUSTY_LINE_BUFFER_SIZE (256)
+
+static size_t log_size_param = TRUSTY_LOG_DEFAULT_SIZE;
+
+/*
+ * Set handler for the "trusty_log.log_size" module parameter.
+ *
+ * The value is parsed with memparse() (so "64K"-style suffixes work),
+ * clamped to [TRUSTY_LOG_MIN_SIZE, TRUSTY_LOG_MAX_SIZE] and rounded
+ * down to a power of two, since the ring-buffer index masking in this
+ * driver assumes a power-of-two size.  Never fails: out-of-range
+ * requests are silently adjusted.
+ */
+static int trusty_log_size_set(const char *val, const struct kernel_param *kp)
+{
+	unsigned long long requested = memparse(val, NULL);
+
+	if (requested < TRUSTY_LOG_MIN_SIZE)
+		requested = TRUSTY_LOG_MIN_SIZE;
+	if (requested > TRUSTY_LOG_MAX_SIZE)
+		requested = TRUSTY_LOG_MAX_SIZE;
+	requested = rounddown_pow_of_two(requested);
+	log_size_param = requested;
+	return 0;
+}
+
+/* Get handler for "log_size": report the (possibly adjusted) size. */
+static int trusty_log_size_get(char *buffer, const struct kernel_param *kp)
+{
+	return scnprintf(buffer, PAGE_SIZE, "%zu\n", log_size_param);
+}
+
+module_param_call(log_size, trusty_log_size_set, trusty_log_size_get, NULL,
+		  0644);
+
+/*
+ * The "log_to_dmesg" parameter can have three values: "never", "always",
+ * and "until_first_reader". "never" indicates Trusty logs will never be
+ * copied to the linux kernel log, while "always" indicates they will always
+ * be copied to the linux kernel log. In both cases Trusty logs can still
+ * be read from the /dev/trusty-logX virtual file. In the case of "always"
+ * that may mean logs show up duplicated in logcat.
+ * The third option, "until_first_reader", copies Trusty logs to the linux
+ * kernel log, but only until /dev/trusty-logX is first opened. After that
+ * Trusty logs will no longer be copied to the kernel log and are only
+ * available from /dev/trusty-logX.
+ */
+
+/* Policy for mirroring Trusty log lines into the kernel log (dmesg). */
+enum log_to_dmesg_options {
+	NEVER,
+	ALWAYS,
+	UNTIL_FIRST_READER
+};
+
+/* User-visible names, indexed by enum log_to_dmesg_options. */
+static const char * const log_to_dmesg_opt_names[] = {
+	"never", "always", "until_first_reader"
+};
+
+/* Currently selected policy; defaults to "never". */
+static int log_to_dmesg_param = NEVER;
+
+/* Set handler for "log_to_dmesg": accepts one of the names above. */
+static int trusty_log_mode_set(const char *val, const struct kernel_param *kp)
+{
+	/* sysfs_match_string() returns the matching index or -EINVAL */
+	int i = sysfs_match_string(log_to_dmesg_opt_names, val);
+	if (i < 0)
+		return i;
+
+	log_to_dmesg_param = i;
+	return 0;
+}
+
+/*
+ * Get handler for "log_to_dmesg": prints all option names on one line
+ * with the currently selected one in [brackets], sysfs-choice style,
+ * e.g. "[never] always until_first_reader ".
+ */
+static int trusty_log_mode_get(char *buffer, const struct kernel_param *kp)
+{
+	int i;
+
+	/*
+	 * Output buffer is PAGE_SIZE, of which we'll use only 35 bytes,
+	 * so bounds checks are not necessary in the following code.
+	 */
+	*buffer = 0;
+	for (i = 0; i < ARRAY_SIZE(log_to_dmesg_opt_names); i++) {
+		if (log_to_dmesg_param == i)
+			strcat(buffer, "[");
+		strcat(buffer, log_to_dmesg_opt_names[i]);
+		if (log_to_dmesg_param == i)
+			strcat(buffer, "]");
+		strcat(buffer, " ");
+	}
+	strcat(buffer, "\n");
+	return strlen(buffer);
+}
+
+module_param_call(log_to_dmesg, trusty_log_mode_set, trusty_log_mode_get, NULL,
+		  0644);
+/*
+ * If we log too much and a UART or other slow source is connected, we can stall
+ * out another thread which is doing printk.
+ *
+ * Trusty crash logs are currently ~16 lines, so 100 should include context and
+ * the crash most of the time.
+ */
+static struct ratelimit_state trusty_log_rate_limit =
+	RATELIMIT_STATE_INIT("trusty_log", 1 * HZ, 100);
+
+module_param_named(log_ratelimit_burst, trusty_log_rate_limit.burst, int, 0644);
+module_param_named(log_ratelimit_interval, trusty_log_rate_limit.interval, int,
+		   0644);
+
+/*
+ * The kernel panic notifier will unconditionally dump the trusty logs when
+ * called, if this is set.  Otherwise the kernel panic notifier will only dump
+ * Trusty logs if Trusty has itself panicked.
+ */
+static bool trusty_log_force_on_panic = false;
+
+module_param_named(log_force_on_panic, trusty_log_force_on_panic, bool, 0644);
+
+/**
+ * struct trusty_log_sfile - trusty log misc device state
+ *
+ * @misc:          misc device created for the trusty log virtual file
+ * @device_name:   misc device name following the convention
+ *                 "trusty-<name><id>"; backing storage for @misc.name
+ */
+struct trusty_log_sfile {
+	struct miscdevice misc;
+	char device_name[64];
+};
+
+/**
+ * struct trusty_log_sink_state - trusty log sink state
+ *
+ * @get:              current read unwrapped index
+ * @last_successful_next:
+ *                    index for the next line after the last successful get
+ * @trusty_panicked:  trusty panic status at the start of the sink iteration
+ *                    (only used for kernel log sink)
+ * @sfile:            seq_file used for sinking to a virtual file (misc device);
+ *                    set to NULL for the kernel log sink.
+ * @ignore_overflow:  ignore_overflow used to coalesce overflow messages and
+ *                    avoid reporting an overflow when sinking the oldest
+ *                    line to the virtual file (only used for virtual file sink)
+ *
+ * A sink state structure is used for both the kernel log sink
+ * and the virtual device sink.
+ * An instance of the sink state structure is dynamically created
+ * for each read iteration of the trusty log virtual file (misc device).
+ *
+ */
+struct trusty_log_sink_state {
+	u32 get;			/* unwrapped read index */
+	u32 last_successful_next;	/* next index after last emitted line */
+	bool trusty_panicked;		/* panic status (kernel log sink only) */
+
+	/* virtual file sink specific attributes */
+	struct seq_file *sfile;		/* NULL for the kernel log sink */
+	bool ignore_overflow;		/* coalesce overflow reports */
+};
+
+/**
+ * struct trusty_log_state - per-device driver state for trusty-log
+ *
+ * @dev:           the trusty-log platform device
+ * @trusty_dev:    parent trusty device; used for std calls and notifiers
+ * @log_sfile:     misc-device (/dev/trusty-logN) state
+ * @refcount:      keeps this state alive while readers hold the misc device
+ * @lock:          see inline comment below
+ * @log:           log ring buffer shared with the secure side
+ * @klog_sink:     sink used when mirroring log lines into dmesg
+ * @log_num_pages: number of pages backing @log
+ * @sg:            scatterlist describing the shared pages
+ * @log_pages_shared_mem_id: id returned when sharing @log with Trusty
+ * @call_notifier: runs after each trusty call returns
+ * @panic_notifier: dumps logs from the kernel panic notifier chain
+ * @line_buffer:   staging buffer for one log line
+ * @poll_waiters:  waitqueue for poll() on the misc device
+ * @wake_up_lock:  see inline comment below
+ * @last_wake_put: last put index for which waiters were woken
+ * @have_first_reader: set once /dev/trusty-logN has been opened
+ * @registered:    misc device registration succeeded
+ */
+struct trusty_log_state {
+	struct device *dev;
+	struct device *trusty_dev;
+	struct trusty_log_sfile log_sfile;
+	struct kref refcount;
+
+	/*
+	 * This lock is here to ensure only one consumer will read
+	 * from the log ring buffer at a time.
+	 */
+	spinlock_t lock;
+	struct log_rb *log;
+	struct trusty_log_sink_state klog_sink;
+
+	u32 log_num_pages;
+	struct scatterlist *sg;
+	trusty_shared_mem_id_t log_pages_shared_mem_id;
+
+	struct notifier_block call_notifier;
+	struct notifier_block panic_notifier;
+	char line_buffer[TRUSTY_LINE_BUFFER_SIZE];
+	wait_queue_head_t poll_waiters;
+	/* this lock protects access to wake_put */
+	spinlock_t wake_up_lock;
+	u32 last_wake_put;
+	bool have_first_reader;
+	bool registered;
+};
+
+/* Wrapping u32 addition; ring-buffer indices use modular arithmetic. */
+static inline u32 u32_add_overflow(u32 a, u32 b)
+{
+	u32 sum;
+
+	/*
+	 * Wrap-around is intentional: only the truncated (modular) sum
+	 * matters for log-buffer indices, so the overflow flag reported
+	 * by check_add_overflow() is deliberately ignored.
+	 */
+	(void)check_add_overflow(a, b, &sum);
+	return sum;
+}
+
+/* Wrapping u32 subtraction; ring-buffer indices use modular arithmetic. */
+static inline u32 u32_sub_overflow(u32 a, u32 b)
+{
+	u32 diff;
+
+	/*
+	 * Wrap-around is intentional: only the truncated (modular)
+	 * subtraction matters for log-buffer indices, so the overflow
+	 * flag reported by check_sub_overflow() is deliberately ignored.
+	 */
+	(void)check_sub_overflow(a, b, &diff);
+	return diff;
+}
+
+/*
+ * log_read_line() - copy one log line out of the shared ring buffer
+ * @s:    log state; provides the ring buffer and s->line_buffer
+ * @put:  producer index; upper bound for how far to read
+ * @get:  unwrapped index to start reading from
+ *
+ * Copies bytes into s->line_buffer until a '\n' has been included,
+ * @put is reached, or the line buffer is full; the result is always
+ * NUL-terminated.  Indices are wrapped with (log->sz - 1), which
+ * requires log->sz to be a power of two.
+ *
+ * Return: number of characters copied (excluding the NUL).
+ */
+static int log_read_line(struct trusty_log_state *s, u32 put, u32 get)
+{
+	struct log_rb *log = s->log;
+	int i;
+	char c = '\0';
+	size_t max_to_read =
+		min_t(size_t,
+		      u32_sub_overflow(put, get),
+		      sizeof(s->line_buffer) - 1);
+	size_t mask = log->sz - 1;
+
+	for (i = 0; i < max_to_read && c != '\n';) {
+		c = log->data[get & mask];
+		s->line_buffer[i++] = c;
+		get = u32_add_overflow(get, 1);
+	}
+	s->line_buffer[i] = '\0';
+
+	return i;
+}
+
+/**
+ * trusty_log_has_data() - check whether the sink has unread log data
+ * @s:         Current log state.
+ * @sink:      trusty_log_sink_state holding the get index on a given sink
+ *
+ * Return: true when the producer's put index differs from this sink's
+ * read (get) index, i.e. at least one unread byte is available.
+ */
+static bool trusty_log_has_data(struct trusty_log_state *s,
+				struct trusty_log_sink_state *sink)
+{
+	return s->log->put != sink->get;
+}
+
+/**
+ * trusty_log_start() - initialize the sink iteration either to kernel log
+ * or to secondary log_sfile
+ * @s:         Current log state.
+ * @sink:      trusty_log_sink_state holding the get index on a given sink
+ * @index:     Unwrapped ring buffer index from where iteration shall start
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int trusty_log_start(struct trusty_log_state *s,
+			    struct trusty_log_sink_state *sink,
+			    u32 index)
+{
+	struct log_rb *log;
+
+	if (WARN_ON(!s))
+		return -EINVAL;
+
+	log = s->log;
+	/* index masking in log_read_line() relies on a power-of-two size */
+	if (WARN_ON(!is_power_of_2(log->sz)))
+		return -EINVAL;
+
+	sink->get = index;
+	return 0;
+}
+
+/**
+ * trusty_log_show() - sink log entry at current iteration
+ * @s:         Current log state.
+ * @sink:      trusty_log_sink_state holding the get index on a given sink
+ *
+ * Reads one line starting at sink->get and emits it either to the
+ * seq_file (virtual file sink) or to dmesg (kernel log sink),
+ * advancing sink->get past it.  If the producer overran the region
+ * being read, the line is discarded, "log overflow." is reported
+ * instead, and sink->get is resynchronized to the oldest valid entry
+ * (alloc - log->sz).
+ */
+static void trusty_log_show(struct trusty_log_state *s,
+			    struct trusty_log_sink_state *sink)
+{
+	struct log_rb *log = s->log;
+	u32 alloc, put, get;
+	int read_chars;
+
+	/* klog_sink must never carry a seq_file; flag the mismatch */
+	if (sink->sfile && sink == &s->klog_sink)
+		dev_warn(s->dev, "klog_sink has seq_file\n");
+
+	/*
+	 * For this ring buffer, at any given point, alloc >= put >= get.
+	 * The producer side of the buffer is not locked, so the put and alloc
+	 * pointers must be read in a defined order (put before alloc) so
+	 * that the above condition is maintained. A read barrier is needed
+	 * to make sure the hardware and compiler keep the reads ordered.
+	 */
+	get = sink->get;
+	put = log->put;
+
+	/* Make sure that the read of put occurs before the read of log data */
+	rmb();
+
+	/* Read a line from the log */
+	read_chars = log_read_line(s, put, get);
+
+	/* Force the loads from log_read_line to complete. */
+	rmb();
+	alloc = log->alloc;
+
+	/*
+	 * Discard the line that was just read if the data could
+	 * have been corrupted by the producer.
+	 */
+	if (u32_sub_overflow(alloc, get) > log->sz) {
+		/*
+		 * this condition is acceptable in the case of the sfile sink
+		 * when attempting to read the oldest entry (at alloc-log->sz)
+		 * which may be overrun by a new one when ring buffer write
+		 * index wraps around.
+		 * So the overrun is not reported in case the oldest line
+		 * was being read.
+		 */
+		if (sink->sfile) {
+			if (!sink->ignore_overflow)
+				seq_puts(sink->sfile, "log overflow.\n");
+			/* coalesce subsequent contiguous overflows. */
+			sink->ignore_overflow = true;
+		} else {
+			dev_err(s->dev, "log overflow.\n");
+		}
+		sink->get = u32_sub_overflow(alloc, log->sz);
+		return;
+	}
+	/* compute next line index */
+	sink->get = u32_add_overflow(get, read_chars);
+	/* once a line is valid, ignore_overflow must be disabled */
+	sink->ignore_overflow = false;
+	if (sink->sfile) {
+		seq_printf(sink->sfile, "%s", s->line_buffer);
+	} else {
+		if (sink->trusty_panicked ||
+		    __ratelimit(&trusty_log_rate_limit)) {
+			dev_info(s->dev, "%s", s->line_buffer);
+			/* next line after last successful get */
+			sink->last_successful_next = sink->get;
+		}
+	}
+}
+
+/*
+ * seq_file .start: allocate a fresh sink for this read iteration.
+ *
+ * *pos == 0 means "start of file": begin at the oldest line still in
+ * the buffer (alloc - sz once the buffer has wrapped).  Any other pos
+ * holds the unwrapped get index stashed by trusty_log_seq_next().
+ *
+ * Returns the sink, NULL at EOF, or an ERR_PTR on failure.
+ */
+static void *trusty_log_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+	struct log_rb *log;
+	struct trusty_log_sink_state *log_sfile_sink;
+	u32 index;
+	int rc;
+
+	if (WARN_ON(!pos))
+		return ERR_PTR(-EINVAL);
+
+	lb = sfile->private;
+	if (WARN_ON(!lb))
+		return ERR_PTR(-EINVAL);
+
+	log_sfile_sink = kzalloc(sizeof(*log_sfile_sink), GFP_KERNEL);
+	if (!log_sfile_sink)
+		return ERR_PTR(-ENOMEM);
+
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+	log_sfile_sink->sfile = sfile;
+	log = s->log;
+	if (*pos == 0) {
+		/* start at the oldest line */
+		index = 0;
+		if (log->alloc > log->sz)
+			index = u32_sub_overflow(log->alloc, log->sz);
+	} else {
+		/*
+		 * '*pos>0': pos hold the 32bits unwrapped index from where
+		 * to start iterating
+		 */
+		index = (u32)*pos;
+	}
+	pr_debug("%s start=%u\n", __func__, index);
+
+	log_sfile_sink->ignore_overflow = true;
+	rc = trusty_log_start(s, log_sfile_sink, index);
+	if (rc < 0)
+		goto free_sink;
+
+	if (!trusty_log_has_data(s, log_sfile_sink))
+		goto free_sink;
+
+	return log_sfile_sink;
+
+free_sink:
+	pr_debug("%s kfree\n", __func__);
+	kfree(log_sfile_sink);
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+
+/*
+ * seq_file .next: advance the iteration, or end it.
+ *
+ * Stores the sink's next unwrapped get index into *pos with bit 32
+ * set, so a legitimate get value of zero is distinguishable from the
+ * "start of file" pos of zero.  Frees the sink and returns NULL (EOF)
+ * or an ERR_PTR when iteration cannot continue.
+ */
+static void *trusty_log_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+	struct trusty_log_sink_state *log_sfile_sink = v;
+	int rc = 0;
+
+	if (WARN_ON(!log_sfile_sink))
+		return ERR_PTR(-EINVAL);
+
+	lb = sfile->private;
+	if (WARN_ON(!lb)) {
+		rc = -EINVAL;
+		goto end_of_iter;
+	}
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+
+	if (WARN_ON(!pos)) {
+		rc = -EINVAL;
+		goto end_of_iter;
+	}
+	/*
+	 * When starting a virtual file sink, the start function is invoked
+	 * with a pos argument which value is set to zero.
+	 * Subsequent starts are invoked with pos being set to
+	 * the unwrapped read index (get).
+	 * Upon u32 wraparound, the get index could be reset to zero.
+	 * Thus a msb is used to distinguish the `get` zero value
+	 * from the `start of file` zero value.
+	 *
+	 * Use 1ULL here: on 32-bit kernels unsigned long is 32 bits
+	 * wide, so (1UL << 32) is undefined behaviour, while loff_t is
+	 * always 64-bit.
+	 */
+	*pos = (1ULL << 32) + log_sfile_sink->get;
+	if (!trusty_log_has_data(s, log_sfile_sink))
+		goto end_of_iter;
+
+	return log_sfile_sink;
+
+end_of_iter:
+	pr_debug("%s kfree\n", __func__);
+	kfree(log_sfile_sink);
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+
+/* seq_file .stop: free the sink if the next callback has not already. */
+static void trusty_log_seq_stop(struct seq_file *sfile, void *v)
+{
+	/*
+	 * When iteration completes or on error, the next callback frees
+	 * the sink structure and returns NULL/error-code.
+	 * In that case stop (being invoked with void* v set to the last next
+	 * return value) would be invoked with v == NULL or error code.
+	 * When user space stops the iteration earlier than the end
+	 * (in case of user-space memory allocation limit for example)
+	 * then the stop function receives a non NULL get pointer
+	 * and is in charge of freeing the sink structure.
+	 */
+	struct trusty_log_sink_state *log_sfile_sink = v;
+
+	/* nothing to do - sink structure already freed */
+	if (IS_ERR_OR_NULL(log_sfile_sink))
+		return;
+
+	kfree(log_sfile_sink);
+
+	pr_debug("%s kfree\n", __func__);
+}
+
+/* seq_file .show: emit one log line into the seq_file via the sink. */
+static int trusty_log_seq_show(struct seq_file *sfile, void *v)
+{
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+	struct trusty_log_sink_state *log_sfile_sink = v;
+
+	if (WARN_ON(!log_sfile_sink))
+		return -EINVAL;
+
+	lb = sfile->private;
+	if (WARN_ON(!lb))
+		return -EINVAL;
+
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+
+	trusty_log_show(s, log_sfile_sink);
+	return 0;
+}
+
+/*
+ * trusty_dump_logs() - drain all available log data into the kernel log
+ *
+ * If Trusty has panicked, restart from last_successful_next so lines
+ * previously suppressed by rate limiting are printed too.  Callers
+ * normally hold s->lock; the panic notifier deliberately skips the
+ * lock (see trusty_log_panic_notify()).
+ */
+static void trusty_dump_logs(struct trusty_log_state *s)
+{
+	u32 start;
+	int rc;
+	/*
+	 * note: klog_sink.get and last_successful_next
+	 * initialized to zero by kzalloc
+	 */
+	s->klog_sink.trusty_panicked = trusty_get_panic_status(s->trusty_dev);
+
+	start = s->klog_sink.trusty_panicked ?
+			s->klog_sink.last_successful_next :
+			s->klog_sink.get;
+	rc = trusty_log_start(s, &s->klog_sink, start);
+	if (rc < 0)
+		return;
+
+	while (trusty_log_has_data(s, &s->klog_sink))
+		trusty_log_show(s, &s->klog_sink);
+}
+
+/*
+ * Trusty call notifier: runs after every trusty call returns.
+ *
+ * Wakes poll() waiters when the producer's put index has moved, and,
+ * when the log_to_dmesg policy requires it, mirrors new log lines into
+ * dmesg under s->lock.
+ */
+static int trusty_log_call_notify(struct notifier_block *nb,
+				  unsigned long action, void *data)
+{
+	struct trusty_log_state *s;
+	unsigned long flags;
+	u32 cur_put;
+
+	if (action != TRUSTY_CALL_RETURNED)
+		return NOTIFY_DONE;
+
+	s = container_of(nb, struct trusty_log_state, call_notifier);
+	spin_lock_irqsave(&s->wake_up_lock, flags);
+	cur_put = s->log->put;
+	if (cur_put != s->last_wake_put) {
+		s->last_wake_put = cur_put;
+		wake_up_all(&s->poll_waiters);
+	}
+	spin_unlock_irqrestore(&s->wake_up_lock, flags);
+	if (log_to_dmesg_param == ALWAYS ||
+	    (log_to_dmesg_param == UNTIL_FIRST_READER &&
+	     !s->have_first_reader)) {
+		spin_lock_irqsave(&s->lock, flags);
+		trusty_dump_logs(s);
+		spin_unlock_irqrestore(&s->lock, flags);
+	}
+	return NOTIFY_OK;
+}
+
+/*
+ * Kernel panic notifier: dump Trusty logs into dmesg when Trusty has
+ * itself panicked, or unconditionally when log_force_on_panic is set.
+ */
+static int trusty_log_panic_notify(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct trusty_log_state *s;
+
+	/*
+	 * Don't grab the spin lock to hold up the panic notifier, even
+	 * though this is racy.
+	 */
+	s = container_of(nb, struct trusty_log_state, panic_notifier);
+
+	if (trusty_log_force_on_panic ||
+	    trusty_get_panic_status(s->trusty_dev)) {
+		/* add the missing '\n' so the printk record is terminated */
+		dev_info(s->dev, "panic notifier - trusty version %s\n",
+			 trusty_version_str_get(s->trusty_dev));
+		trusty_dump_logs(s);
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * seq_file iterator ops for /dev/trusty-logN.  Only referenced from
+ * this file (trusty_log_sfile_dev_open()), hence static.
+ */
+static const struct seq_operations trusty_log_seq_ops = {
+	.start = trusty_log_seq_start,
+	.stop = trusty_log_seq_stop,
+	.next = trusty_log_seq_next,
+	.show = trusty_log_seq_show,
+};
+
+/*
+ * open() for /dev/trusty-logN: bind a seq_file iterator to this device.
+ *
+ * Marks that a reader exists (which ends "until_first_reader" dmesg
+ * mirroring) and takes a reference so the state outlives the fd.
+ */
+static int trusty_log_sfile_dev_open(struct inode *inode, struct file *file)
+{
+	struct trusty_log_sfile *ls;
+	struct trusty_log_state *s;
+	struct seq_file *sfile;
+	int rc;
+
+	/*
+	 * file->private_data contains a pointer to the misc_device struct
+	 * passed to misc_register()
+	 */
+	if (WARN_ON(!file->private_data))
+		return -EINVAL;
+
+	ls = container_of(file->private_data, struct trusty_log_sfile, misc);
+
+	/*
+	 * seq_open uses file->private_data to store the seq_file associated
+	 * with the struct file, but it must be NULL when seq_open is called
+	 */
+	file->private_data = NULL;
+	rc = seq_open(file, &trusty_log_seq_ops);
+	if (rc < 0)
+		return rc;
+
+	sfile = file->private_data;
+	if (WARN_ON(!sfile))
+		return -EINVAL;
+
+	sfile->private = ls;
+	s = container_of(ls, struct trusty_log_state, log_sfile);
+	s->have_first_reader = true;
+	kref_get(&s->refcount);
+	return 0;
+}
+
+/*
+ * poll() for /dev/trusty-logN: reports EPOLLIN when the producer's put
+ * index has moved past what this reader has consumed.
+ */
+static unsigned int trusty_log_sfile_dev_poll(struct file *filp,
+					      struct poll_table_struct *wait)
+{
+	struct seq_file *sfile;
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+	struct log_rb *log;
+
+	/*
+	 * trusty_log_sfile_dev_open() pointed filp->private_data to a
+	 * seq_file, and that seq_file->private to the trusty_log_sfile
+	 * field of a trusty_log_state
+	 */
+	sfile = filp->private_data;
+	lb = sfile->private;
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+
+	if (!s->registered) {
+		dev_err(s->dev, "invalid poll fd\n");
+		/*
+		 * poll() handlers return an event mask, not an errno;
+		 * a negative value would be misread as a mask, so flag
+		 * the invalid fd with EPOLLNVAL instead of -EINVAL.
+		 */
+		return EPOLLNVAL;
+	}
+
+	poll_wait(filp, &s->poll_waiters, wait);
+	log = s->log;
+
+	/*
+	 * Userspace has read up to sfile->index so far. Update klog_sink
+	 * to indicate that, so that we don't end up dumping the entire
+	 * Trusty log in case of panic. Only do this when not logging to
+	 * klog_sink, since logging to klog_sink already updates this.
+	 */
+	if (log_to_dmesg_param != ALWAYS)
+		s->klog_sink.last_successful_next = (u32)sfile->index;
+
+	if (log->put != (u32)sfile->index) {
+		/* data ready to read */
+		return EPOLLIN | EPOLLRDNORM;
+	}
+	/* no data available, go to sleep */
+	return 0;
+}
+
+static void trusty_log_cleanup(struct kref *ref);
+
+/* release() for /dev/trusty-logN: drop the reference taken in open(). */
+static int trusty_log_sfile_dev_release(struct inode *inode,
+					struct file *filp)
+{
+	struct seq_file *sfile;
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+
+	sfile = filp->private_data;
+	lb = sfile->private;
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+
+	kref_put(&s->refcount, trusty_log_cleanup);
+
+	seq_release(inode, filp);
+	return 0;
+}
+
+/*
+ * read() for /dev/trusty-logN: reject reads once the misc device has
+ * been unregistered, otherwise defer to seq_read().
+ */
+static ssize_t trusty_log_sfile_dev_read(struct file *filp, char __user *buf,
+				  size_t size, loff_t *ppos)
+{
+	struct seq_file *sfile;
+	struct trusty_log_sfile *lb;
+	struct trusty_log_state *s;
+
+	sfile = filp->private_data;
+	lb = sfile->private;
+	s = container_of(lb, struct trusty_log_state, log_sfile);
+
+	if (!s->registered) {
+		dev_err(s->dev, "invalid read fd\n");
+		return -EINVAL;
+	}
+
+	return seq_read(filp, buf, size, ppos);
+}
+
+/* file_operations for the /dev/trusty-logN misc device */
+static const struct file_operations log_sfile_dev_operations = {
+	.owner = THIS_MODULE,
+	.open = trusty_log_sfile_dev_open,
+	.poll = trusty_log_sfile_dev_poll,
+	.read = trusty_log_sfile_dev_read,
+	.release = trusty_log_sfile_dev_release,
+};
+
+/*
+ * Create the /dev/trusty-log<id> misc device for this log state.
+ *
+ * Return: 0 on success, negative errno from misc_register() otherwise.
+ */
+static int trusty_log_sfile_register(struct trusty_log_state *s)
+{
+	int ret;
+	struct trusty_log_sfile *ls = &s->log_sfile;
+
+	/* ls is the address of an embedded member; cannot be NULL */
+	if (WARN_ON(!ls))
+		return -EINVAL;
+
+	snprintf(ls->device_name, sizeof(ls->device_name),
+		 "trusty-log%d", s->dev->id);
+	ls->misc.minor = MISC_DYNAMIC_MINOR;
+	ls->misc.name = ls->device_name;
+	ls->misc.fops = &log_sfile_dev_operations;
+
+	ret = misc_register(&ls->misc);
+	if (ret) {
+		dev_err(s->dev,
+			"log_sfile error while doing misc_register ret=%d\n",
+			ret);
+		return ret;
+	}
+	s->registered = true;
+	dev_info(s->dev, "/dev/%s registered\n",
+		 ls->device_name);
+	return 0;
+}
+
+/* Remove the misc device and clear the "registered" flag. */
+static void trusty_log_sfile_unregister(struct trusty_log_state *s)
+{
+	struct trusty_log_sfile *ls = &s->log_sfile;
+
+	misc_deregister(&ls->misc);
+	if (s->dev) {
+		dev_info(s->dev, "/dev/%s unregistered\n",
+			 ls->misc.name);
+		s->registered = false;
+	}
+}
+
+/*
+ * Query the secure side for trusty-log support with
+ * SMC_SC_SHARED_LOG_VERSION; require an exact API version match.
+ *
+ * Return: true if logging is supported.
+ */
+static bool trusty_supports_logging(struct device *device)
+{
+	int result;
+
+	result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION,
+				   TRUSTY_LOG_API_VERSION, 0, 0);
+	if (result == SM_ERR_UNDEFINED_SMC) {
+		dev_info(device, "trusty-log not supported on secure side.\n");
+		return false;
+	} else if (result < 0) {
+		dev_err(device,
+			"trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n",
+			result);
+		return false;
+	}
+
+	if (result != TRUSTY_LOG_API_VERSION) {
+		dev_info(device, "unsupported api version: %d, supported: %d\n",
+			 result, TRUSTY_LOG_API_VERSION);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * trusty_log_init() - allocate the log buffer and wire it up to Trusty
+ *
+ * Allocates driver state plus a page-aligned vmalloc-backed ring
+ * buffer, shares the buffer with the secure side (SMC_SC_SHARED_LOG_ADD),
+ * then registers the call notifier, the panic notifier and the
+ * /dev/trusty-logN misc device.  The error path unwinds in reverse
+ * order; note that the err_share_memory label sits inside the else
+ * branch of the reclaim check so the buffer is freed only when Trusty
+ * no longer (or never did) reference it.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int trusty_log_init(struct platform_device *pdev)
+{
+	struct trusty_log_state *s;
+	struct scatterlist *sg;
+	unsigned char *mem;
+	int i;
+	int result;
+	trusty_shared_mem_id_t mem_id;
+	int log_size;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		result = -ENOMEM;
+		goto error_alloc_state;
+	}
+
+	spin_lock_init(&s->lock);
+	s->dev = &pdev->dev;
+	s->trusty_dev = s->dev->parent;
+
+	/* room for the log_rb header plus the requested data size */
+	s->log_num_pages = DIV_ROUND_UP(log_size_param + sizeof(struct log_rb),
+					PAGE_SIZE);
+	s->sg = kcalloc(s->log_num_pages, sizeof(*s->sg), GFP_KERNEL);
+	if (!s->sg) {
+		result = -ENOMEM;
+		goto error_alloc_sg;
+	}
+
+	log_size = s->log_num_pages * PAGE_SIZE;
+	mem = vzalloc(log_size);
+	if (!mem) {
+		result = -ENOMEM;
+		goto error_alloc_log;
+	}
+
+	s->log = (struct log_rb *)mem;
+
+	sg_init_table(s->sg, s->log_num_pages);
+	for_each_sg(s->sg, sg, s->log_num_pages, i) {
+		struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE));
+
+		if (!pg) {
+			result = -ENOMEM;
+			goto err_share_memory;
+		}
+		sg_set_page(sg, pg, PAGE_SIZE, 0);
+	}
+	/*
+	 * This will fail for Trusty api version < TRUSTY_API_VERSION_MEM_OBJ
+	 * if s->log_num_pages > 1
+	 * Use trusty_share_memory_compat instead of trusty_share_memory in case
+	 * s->log_num_pages == 1 and api version < TRUSTY_API_VERSION_MEM_OBJ,
+	 * In that case SMC_SC_SHARED_LOG_ADD expects a different value than
+	 * what trusty_share_memory returns
+	 */
+	result = trusty_share_memory_compat(s->trusty_dev, &mem_id, s->sg,
+					    s->log_num_pages, PAGE_KERNEL);
+	if (result) {
+		dev_err(s->dev, "trusty_share_memory failed: %d\n", result);
+		goto err_share_memory;
+	}
+	s->log_pages_shared_mem_id = mem_id;
+
+	result = trusty_std_call32(s->trusty_dev,
+				   SMC_SC_SHARED_LOG_ADD,
+				   (u32)(mem_id), (u32)(mem_id >> 32),
+				   log_size);
+	if (result < 0) {
+		dev_err(s->dev,
+			"trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d 0x%llx\n",
+			result, mem_id);
+		goto error_std_call;
+	}
+
+	init_waitqueue_head(&s->poll_waiters);
+	spin_lock_init(&s->wake_up_lock);
+
+	s->call_notifier.notifier_call = trusty_log_call_notify;
+	result = trusty_call_notifier_register(s->trusty_dev,
+					       &s->call_notifier);
+	if (result < 0) {
+		dev_err(&pdev->dev,
+			"failed to register trusty call notifier\n");
+		goto error_call_notifier;
+	}
+
+	s->panic_notifier.notifier_call = trusty_log_panic_notify;
+	result = atomic_notifier_chain_register(&panic_notifier_list,
+						&s->panic_notifier);
+	if (result < 0) {
+		dev_err(&pdev->dev,
+			"failed to register panic notifier\n");
+		goto error_panic_notifier;
+	}
+
+	result = trusty_log_sfile_register(s);
+	if (result < 0) {
+		dev_err(&pdev->dev, "failed to register log_sfile\n");
+		goto error_log_sfile;
+	}
+
+	kref_init(&s->refcount);
+	platform_set_drvdata(pdev, s);
+
+	return 0;
+
+error_log_sfile:
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &s->panic_notifier);
+error_panic_notifier:
+	trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
+error_call_notifier:
+	trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
+			  (u32)mem_id, (u32)(mem_id >> 32), 0);
+error_std_call:
+	if (WARN_ON(trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
+					  s->log_num_pages))) {
+		dev_err(&pdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
+			result, mem_id);
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+		/* jumped to directly when the buffer was never shared */
+err_share_memory:
+		vfree(s->log);
+	}
+error_alloc_log:
+	kfree(s->sg);
+error_alloc_sg:
+	kfree(s);
+error_alloc_state:
+	return result;
+}
+
+/*
+ * Probe: verify the secure side supports logging, then initialize.
+ * On failure, retry once with the minimum log size before giving up.
+ */
+static int trusty_log_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	if (!trusty_supports_logging(pdev->dev.parent))
+		return -ENXIO;
+
+	rc = trusty_log_init(pdev);
+	if (rc && log_size_param > TRUSTY_LOG_MIN_SIZE) {
+		dev_warn(&pdev->dev, "init failed, retrying with 1-page log\n");
+		log_size_param = TRUSTY_LOG_MIN_SIZE;
+		rc = trusty_log_init(pdev);
+	}
+	return rc;
+}
+
+/*
+ * Remove: unregister the misc device, then drop the probe-time
+ * reference; trusty_log_cleanup() runs when the last reader closes.
+ */
+static int trusty_log_remove(struct platform_device *pdev)
+{
+	struct trusty_log_state *s = platform_get_drvdata(pdev);
+
+	trusty_log_sfile_unregister(s);
+	kref_put(&s->refcount, trusty_log_cleanup);
+	return 0;
+}
+
+/*
+ * Final teardown, invoked when the last kref is dropped: detach both
+ * notifiers, ask Trusty to remove and reclaim the shared log buffer,
+ * and free everything (leaking the buffer if reclaim fails, since
+ * freeing it would then be unsafe).
+ */
+static void trusty_log_cleanup(struct kref *ref)
+{
+	int result;
+	struct trusty_log_state *s;
+	trusty_shared_mem_id_t mem_id;
+
+	s = container_of(ref, struct trusty_log_state, refcount);
+	dev_info(s->dev, "log_cleanup\n");
+	mem_id = s->log_pages_shared_mem_id;
+
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &s->panic_notifier);
+	trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
+
+	result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
+				   (u32)mem_id, (u32)(mem_id >> 32), 0);
+	if (result) {
+		dev_err(s->dev,
+			"trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n",
+			result);
+	}
+	result = trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
+				       s->log_num_pages);
+	if (WARN_ON(result)) {
+		dev_err(s->dev,
+			"trusty failed to remove shared memory: %d\n", result);
+	} else {
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+		vfree(s->log);
+	}
+	kfree(s->sg);
+	kfree(s);
+}
+
+static const struct of_device_id trusty_log_of_match[] = {
+	{ .compatible = "android,trusty-log-v1", },
+	{},
+};
+
+/*
+ * Use the "of" table type so modpost emits proper OF modaliases for
+ * module autoloading; "trusty" is not a device-table type modpost
+ * understands.  The table is also renamed from the copy-pasted
+ * "trusty_test_of_match" to match this driver.
+ */
+MODULE_DEVICE_TABLE(of, trusty_log_of_match);
+
+static struct platform_driver trusty_log_driver = {
+	.probe = trusty_log_probe,
+	.remove = trusty_log_remove,
+	.driver = {
+		.name = "trusty-log",
+		.of_match_table = trusty_log_of_match,
+	},
+};
+
+module_platform_driver(trusty_log_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty logging driver");
diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h
new file mode 100644
index 0000000..7b5e609
--- /dev/null
+++ b/drivers/trusty/trusty-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2015 Google, Inc.
+ *
+ * Trusty also has a copy of this header.  Please keep the copies in sync.
+ */
+#ifndef _TRUSTY_LOG_H_
+#define _TRUSTY_LOG_H_
+
+/*
+ * Ring buffer that supports one secure producer thread and one
+ * linux side consumer thread.  The consumer relies on the invariant
+ * alloc >= put and on sz being a power of two (indices wrap mod sz).
+ */
+struct log_rb {
+	volatile uint32_t alloc;	/* producer reserve index (alloc >= put) */
+	volatile uint32_t put;		/* producer publish index */
+	uint32_t sz;			/* data size in bytes; power of two */
+	volatile char data[];		/* log bytes, indexed mod sz */
+} __packed;
+
+/* SMCs for the shared-log protocol (logging entity). */
+#define SMC_SC_SHARED_LOG_VERSION	SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0)
+#define SMC_SC_SHARED_LOG_ADD		SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1)
+#define SMC_SC_SHARED_LOG_RM		SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2)
+
+/* Version negotiated via SMC_SC_SHARED_LOG_VERSION. */
+#define TRUSTY_LOG_API_VERSION	1
+
+#endif
+
diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c
new file mode 100644
index 0000000..8a36029
--- /dev/null
+++ b/drivers/trusty/trusty-mem.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Google, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/trusty/arm_ffa.h>
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/smcall.h>
+
+#define MEM_ATTR_STRONGLY_ORDERED (0x00U)
+#define MEM_ATTR_DEVICE (0x04U)
+#define MEM_ATTR_NORMAL_NON_CACHEABLE (0x44U)
+#define MEM_ATTR_NORMAL_WRITE_THROUGH (0xAAU)
+#define MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE (0xEEU)
+#define MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE (0xFFU)
+
+#define ATTR_RDONLY (1U << 7)
+#define ATTR_INNER_SHAREABLE (3U << 8)
+
+/*
+ * get_mem_attr() - derive the MEM_ATTR_* byte for a mapping
+ * @page:   unused in the body; kept for the caller's convenience
+ * @pgprot: page protection bits of the intended mapping
+ *
+ * On arm64 and ARM-LPAE the attribute index encoded in @pgprot selects
+ * the corresponding 8-bit field from the MAIR register; on classic ARM
+ * the L_PTE_MT_* memory-type field is translated directly.  Other
+ * architectures return 0.
+ *
+ * Return: attribute byte (0-255), or -EINVAL for an unknown type.
+ */
+static int get_mem_attr(struct page *page, pgprot_t pgprot)
+{
+#if defined(CONFIG_ARM64)
+	u64 mair;
+	unsigned int attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
+
+	asm ("mrs %0, mair_el1\n" : "=&r" (mair));
+	return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM_LPAE)
+	u32 mair;
+	unsigned int attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
+
+	/* indices 4-7 live in the second remap register (NMRR/MAIR1) */
+	if (attr_index >= 4) {
+		attr_index -= 4;
+		asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
+	} else {
+		asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
+	}
+	return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM)
+	/* check memory type */
+	switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
+	case L_PTE_MT_WRITEALLOC:
+		return MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE;
+
+	case L_PTE_MT_BUFFERABLE:
+		return MEM_ATTR_NORMAL_NON_CACHEABLE;
+
+	case L_PTE_MT_WRITEBACK:
+		return MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE;
+
+	case L_PTE_MT_WRITETHROUGH:
+		return MEM_ATTR_NORMAL_WRITE_THROUGH;
+
+	case L_PTE_MT_UNCACHED:
+		return MEM_ATTR_STRONGLY_ORDERED;
+
+	case L_PTE_MT_DEV_SHARED:
+	case L_PTE_MT_DEV_NONSHARED:
+		return MEM_ATTR_DEVICE;
+
+	default:
+		return -EINVAL;
+	}
+#else
+	return 0;
+#endif
+}
+
+/*
+ * trusty_encode_page_info() - describe a page for sharing with Trusty
+ * @inf:    output; receives the physical address plus FF-A and legacy
+ *          "compat" attribute encodings
+ * @page:   the page to describe
+ * @pgprot: protection/attributes of the kernel mapping of @page
+ *
+ * Translates the architectural memory attributes (via get_mem_attr())
+ * into FF-A memory attribute/permission fields, and also builds the
+ * legacy compat encoding: low 48 bits of the pte | mem_attr << 48.
+ *
+ * Return: 0 on success, -EINVAL for NULL args or unsupported attributes.
+ */
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+			    struct page *page, pgprot_t pgprot)
+{
+	int mem_attr;
+	u64 pte;
+	u8 ffa_mem_attr;
+	u8 ffa_mem_perm = 0;
+
+	if (!inf || !page)
+		return -EINVAL;
+
+	/* get physical address */
+	pte = (u64)page_to_phys(page);
+
+	/* get memory attributes */
+	mem_attr = get_mem_attr(page, pgprot);
+	if (mem_attr < 0)
+		return mem_attr;
+
+	switch (mem_attr) {
+	case MEM_ATTR_STRONGLY_ORDERED:
+		ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE;
+		break;
+
+	case MEM_ATTR_DEVICE:
+		ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE;
+		break;
+
+	case MEM_ATTR_NORMAL_NON_CACHEABLE:
+		ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED;
+		break;
+
+	case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE:
+	case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE:
+		ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	inf->paddr = pte;
+
+	/* add other attributes */
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
+	pte |= pgprot_val(pgprot);
+#elif defined(CONFIG_ARM)
+	if (pgprot_val(pgprot) & L_PTE_RDONLY)
+		pte |= ATTR_RDONLY;
+	if (pgprot_val(pgprot) & L_PTE_SHARED)
+		pte |= ATTR_INNER_SHAREABLE; /* inner shareable */
+#endif
+
+	if (!(pte & ATTR_RDONLY))
+		ffa_mem_perm |= FFA_MEM_PERM_RW;
+	else
+		ffa_mem_perm |= FFA_MEM_PERM_RO;
+
+	if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE)
+		ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE;
+
+	inf->ffa_mem_attr = ffa_mem_attr;
+	inf->ffa_mem_perm = ffa_mem_perm;
+	inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) |
+			   ((u64)mem_attr << 48);
+	return 0;
+}
diff --git a/drivers/trusty/trusty-sched-share-api.h b/drivers/trusty/trusty-sched-share-api.h
new file mode 100644
index 0000000..58c76d7
--- /dev/null
+++ b/drivers/trusty/trusty-sched-share-api.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022 Google, Inc.
+ *
+ * This header file contains the definitions of APIs, used for the
+ * registration/unregistration of shared-memory used for the
+ * exchange of info between the Linux Trusty-Driver and the Trusty-Kernel.
+ */
+#ifndef _TRUSTY_SCHED_SHARE_API_H_
+#define _TRUSTY_SCHED_SHARE_API_H_
+
+#include <linux/device.h>
+
+/* Opaque driver-side state for the Trusty scheduler-share memory. */
+struct trusty_sched_share_state;
+
+/* Allocate driver-side state plus the shared buffer; 0 or -ENOMEM. */
+int trusty_alloc_sched_share(struct device *device,
+		struct trusty_sched_share_state **state);
+/* Share the buffer with Trusty and register it via SMC (best effort). */
+void trusty_register_sched_share(struct device *device,
+		struct trusty_sched_share_state *sched_share_state);
+/* Unregister previously registered shared memory and reclaim it from Trusty. */
+void trusty_unregister_sched_share(struct trusty_sched_share_state *sched_share_state);
+/* Free driver-side state (and the buffer, unless it is still shared). */
+void trusty_free_sched_share(struct trusty_sched_share_state *sched_share_state);
+
+/* Map Trusty's requested shadow priority for @cpu_num to a Linux nice value. */
+int trusty_get_requested_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state);
+/* Publish the nice value actually applied on @cpu_num back to Trusty. */
+void trusty_set_actual_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state,
+		int nice);
+
+#endif /* _TRUSTY_SCHED_SHARE_API_H_ */
diff --git a/drivers/trusty/trusty-sched-share.c b/drivers/trusty/trusty-sched-share.c
new file mode 100644
index 0000000..9aac0a0
--- /dev/null
+++ b/drivers/trusty/trusty-sched-share.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Google, Inc.
+ *
+ * This trusty-driver module contains the SMC API for the trusty-driver to
+ * communicate with the trusty-kernel for shared memory
+ * registration/unregistration.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/trusty/trusty.h>
+#include "trusty-sched-share.h"
+#include "trusty-sched-share-api.h"
+
+/**
+ * struct trusty_sched_share_state - Trusty share resources state local to Trusty-Driver
+ * @dev: ptr to the trusty-device instance
+ * @sg: ptr to the scatter-gather list used for shared-memory buffers
+ * @sched_shared_mem_id: trusty-priority shared-memory id
+ * @sched_shared_vm: vm ptr to the shared-memory block
+ * @mem_size: size of trusty shared-memory block in bytes
+ * @buf_size: page-aligned size of trusty shared-memory buffer in bytes
+ * @num_pages: number of pages containing the allocated shared-memory buffer
+ * @is_registered: flag set to true when this driver has successfully registered
+ *                 the sched share memory with Trusty (and must be unregistered
+ *                 during cleanup). If Trusty image doesn't support sched
+ *                 share, this will remain false (and the memory can be reclaimed).
+ * @vm_is_shared:  flag set to true when this driver has shared the sched memory
+ *                 with Trusty via FF-A (and must be reclaimed during cleanup
+ *                 after it has been unmapped by Trusty due to an unregister)
+ */
+struct trusty_sched_share_state {
+	struct device *dev;
+	struct scatterlist *sg;
+	trusty_shared_mem_id_t sched_shared_mem_id;
+	char *sched_shared_vm;
+	u32 mem_size;
+	u32 buf_size;
+	u32 num_pages;
+	bool is_registered;
+	bool vm_is_shared;
+};
+
+/*
+ * Return a pointer to the per-cpu entry for @cpu_num inside the shared
+ * memory block: the entries form an array that starts immediately after
+ * the struct trusty_sched_shared header.
+ */
+static inline struct trusty_percpu_data *trusty_get_trusty_percpu_data(
+		struct trusty_sched_shared *tsh, int cpu_num)
+{
+	unsigned char *table = (unsigned char *)tsh +
+			       sizeof(struct trusty_sched_shared);
+
+	return (struct trusty_percpu_data *)table + cpu_num;
+}
+
+/*
+ * Reclaim the sched-share memory previously shared with Trusty.
+ *
+ * On success clears @sched_share_state->vm_is_shared so the buffer may be
+ * vfree()d later.  If Trusty fails to release the memory, the buffer is
+ * intentionally leaked: freeing pages the secure side may still access
+ * would be unsafe.
+ */
+static void trusty_sched_share_reclaim_memory(
+		struct trusty_sched_share_state *sched_share_state)
+{
+	int result;
+
+	if (!sched_share_state->vm_is_shared) {
+		dev_warn(sched_share_state->dev,
+				"%s called unexpectedly when vm not shared\n", __func__);
+		return;
+	}
+
+	result = trusty_reclaim_memory(sched_share_state->dev,
+				       sched_share_state->sched_shared_mem_id,
+				       sched_share_state->sg,
+				       sched_share_state->num_pages);
+	if (result != 0) {
+		dev_err(sched_share_state->dev,
+			"trusty_reclaim_memory() failed: ret=%d mem_id=0x%llx\n",
+			result, sched_share_state->sched_shared_mem_id);
+		/*
+		 * It is not safe to free this memory if trusty_reclaim_memory()
+		 * failed. Leak it in that case.
+		 */
+		dev_err(sched_share_state->dev,
+			"WARNING: leaking some allocated resources!!\n");
+	} else {
+		sched_share_state->vm_is_shared = false;
+	}
+}
+
+/*
+ * Allocate the driver-side sched-share state and the page-aligned,
+ * vmalloc-backed shared buffer (header plus one trusty_percpu_data entry
+ * per possible CPU id), and initialize the header fields Trusty reads.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+int trusty_alloc_sched_share(struct device *device,
+		struct trusty_sched_share_state **state)
+{
+	struct trusty_sched_share_state *sched_share_state = NULL;
+	struct trusty_sched_shared *shared;
+	uint sched_share_state_size;
+	unsigned int cpu;
+
+	sched_share_state_size = sizeof(*sched_share_state);
+
+	sched_share_state = kzalloc(sched_share_state_size, GFP_KERNEL);
+	if (!sched_share_state)
+		goto err_sched_state_alloc;
+	sched_share_state->dev = device;
+	sched_share_state->is_registered = false;
+	sched_share_state->vm_is_shared = false;
+
+	/* header followed by a per-cpu entry for every possible cpu id */
+	sched_share_state->mem_size = sizeof(struct trusty_sched_shared) +
+				nr_cpu_ids * sizeof(struct trusty_percpu_data);
+	sched_share_state->num_pages =
+		round_up(sched_share_state->mem_size, PAGE_SIZE) / PAGE_SIZE;
+	sched_share_state->buf_size = sched_share_state->num_pages * PAGE_SIZE;
+
+	dev_dbg(sched_share_state->dev,
+		"%s: mem_size=%d,  num_pages=%d,  buf_size=%d", __func__,
+		sched_share_state->mem_size, sched_share_state->num_pages,
+		sched_share_state->buf_size);
+
+	/* vzalloc: page-backed, so each page can later be shared via an sg list */
+	sched_share_state->sched_shared_vm = vzalloc(sched_share_state->buf_size);
+	if (!sched_share_state->sched_shared_vm)
+		goto err_resources_alloc;
+	dev_dbg(sched_share_state->dev, "%s: sched_shared_vm=%p  size=%d\n",
+		__func__, sched_share_state->sched_shared_vm, sched_share_state->buf_size);
+
+	shared = (struct trusty_sched_shared *)sched_share_state->sched_shared_vm;
+	shared->cpu_count = nr_cpu_ids;
+	shared->hdr_size = sizeof(struct trusty_sched_shared);
+	shared->percpu_data_size = sizeof(struct trusty_percpu_data);
+
+	/* default every cpu to NORMAL until Trusty asks for something else */
+	for_each_possible_cpu(cpu) {
+		trusty_get_trusty_percpu_data(shared, cpu)->ask_shadow_priority
+				= TRUSTY_SHADOW_PRIORITY_NORMAL;
+	}
+
+	*state = sched_share_state;
+	return 0;
+
+err_resources_alloc:
+	kfree(sched_share_state);
+err_sched_state_alloc:
+	return -ENOMEM;
+}
+
+/*
+ * Share the sched-share buffer with Trusty and register it with the
+ * secure-side scheduler via SMC_SC_SCHED_SHARE_REGISTER.
+ *
+ * Best-effort: on any failure the memory is reclaimed as needed, the sg
+ * list is freed and is_registered stays false (e.g. when the Trusty image
+ * does not implement the sched-share SMC).
+ */
+void trusty_register_sched_share(struct device *device,
+		struct trusty_sched_share_state *sched_share_state)
+{
+	int result = 0;
+	struct scatterlist *sg;
+	unsigned char *mem = sched_share_state->sched_shared_vm;
+	trusty_shared_mem_id_t mem_id;
+	int i;
+
+	/* allocate and initialize scatterlist */
+	sched_share_state->sg = kcalloc(sched_share_state->num_pages,
+				  sizeof(*sched_share_state->sg), GFP_KERNEL);
+	if (!sched_share_state->sg) {
+		result = -ENOMEM;
+		goto err_rsrc_alloc_sg;
+	}
+
+	/* buffer came from vzalloc(), so look up each backing page */
+	sg_init_table(sched_share_state->sg, sched_share_state->num_pages);
+	for_each_sg(sched_share_state->sg, sg, sched_share_state->num_pages, i) {
+		struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE));
+
+		if (!pg) {
+			result = -ENOMEM;
+			dev_err(sched_share_state->dev, "%s: failed to map page i= %d\n",
+					__func__, i);
+			goto err_rsrc_sg_lookup;
+		}
+		sg_set_page(sg, pg, PAGE_SIZE, 0);
+	}
+
+	/* share memory with Trusty */
+	result = trusty_share_memory(sched_share_state->dev, &mem_id, sched_share_state->sg,
+				     sched_share_state->num_pages, PAGE_KERNEL);
+	if (result != 0) {
+		dev_err(sched_share_state->dev, "trusty_share_memory failed: %d\n",
+			result);
+		goto err_rsrc_share_mem;
+	}
+	dev_dbg(sched_share_state->dev, "%s: sched_shared_mem_id=0x%llx", __func__,
+		mem_id);
+	sched_share_state->sched_shared_mem_id = mem_id;
+	sched_share_state->vm_is_shared = true;
+
+	dev_dbg(device, "%s: calling api SMC_SC_SCHED_SHARE_REGISTER...\n",
+		__func__);
+
+	/* tell sched share code on Trusty side to share priorities */
+	result = trusty_std_call32(
+		sched_share_state->dev, SMC_SC_SCHED_SHARE_REGISTER,
+		(u32)sched_share_state->sched_shared_mem_id,
+		(u32)(sched_share_state->sched_shared_mem_id >> 32),
+		sched_share_state->buf_size);
+	if (result == SM_ERR_UNDEFINED_SMC) {
+		/* older Trusty without sched-share support: warn, not fatal */
+		dev_warn(
+			sched_share_state->dev,
+			"trusty-share not supported on secure side, error=%d\n",
+			result);
+		goto err_smc_std_call32;
+	} else if (result < 0) {
+		dev_err(device,
+			"trusty std call32 (SMC_SC_SCHED_SHARE_REGISTER) failed: %d\n",
+			result);
+		goto err_smc_std_call32;
+	}
+	dev_dbg(device, "%s: sched_share_state=%llx\n", __func__,
+		(u64)sched_share_state);
+
+	sched_share_state->is_registered = true;
+
+	return;
+
+err_smc_std_call32:
+	/* unshare the buffer so trusty_free_sched_share() can vfree() it */
+	trusty_sched_share_reclaim_memory(sched_share_state);
+err_rsrc_share_mem:
+err_rsrc_sg_lookup:
+	kfree(sched_share_state->sg);
+	sched_share_state->sg = NULL;
+err_rsrc_alloc_sg:
+	return;
+
+}
+
+/*
+ * Tell Trusty to stop using the sched-share buffer, then reclaim it.
+ * No-op when registration never succeeded.
+ */
+void trusty_unregister_sched_share(struct trusty_sched_share_state *sched_share_state)
+{
+	int result;
+
+	if (!sched_share_state->is_registered)
+		return;
+
+	/* ask Trusty to release the Trusty-side resources */
+	result = trusty_std_call32(
+		sched_share_state->dev, SMC_SC_SCHED_SHARE_UNREGISTER,
+		(u32)sched_share_state->sched_shared_mem_id,
+		(u32)(sched_share_state->sched_shared_mem_id >> 32), 0);
+	if (result) {
+		dev_err(sched_share_state->dev,
+			"call SMC_SC_SCHED_SHARE_UNREGISTER failed, error=%d\n",
+			result);
+	}
+
+
+	/* reclaim even after an unregister failure; reclaim leaks safely */
+	trusty_sched_share_reclaim_memory(sched_share_state);
+
+	/* NOTE(review): sg is freed but not NULLed; state must not be reused */
+	kfree(sched_share_state->sg);
+}
+
+/*
+ * Free the driver-side sched-share state.  The vmalloc'ed buffer is freed
+ * only when it is no longer shared with Trusty; otherwise it is leaked on
+ * purpose (see trusty_sched_share_reclaim_memory()).
+ */
+void trusty_free_sched_share(struct trusty_sched_share_state *sched_share_state)
+{
+	if (!sched_share_state->vm_is_shared)
+		vfree(sched_share_state->sched_shared_vm);
+
+	kfree(sched_share_state);
+}
+
+/*
+ * Translate a Trusty shadow-priority value into the Linux nice level that
+ * represents it.  Unrecognized values are treated as NORMAL.
+ */
+static inline int map_trusty_prio_to_linux_nice(int trusty_prio)
+{
+	if (trusty_prio == TRUSTY_SHADOW_PRIORITY_HIGH)
+		return LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH;
+
+	if (trusty_prio == TRUSTY_SHADOW_PRIORITY_LOW)
+		return LINUX_NICE_FOR_TRUSTY_PRIORITY_LOW;
+
+	/* TRUSTY_SHADOW_PRIORITY_NORMAL and anything unknown */
+	return LINUX_NICE_FOR_TRUSTY_PRIORITY_NORMAL;
+}
+
+/*
+ * Read the shadow priority Trusty requested for @cpu_num from the shared
+ * memory and convert it to a Linux nice value.
+ */
+int trusty_get_requested_nice(unsigned int cpu_num, struct trusty_sched_share_state *tcpu_state)
+{
+	struct trusty_sched_shared *tsh = (struct trusty_sched_shared *)tcpu_state->sched_shared_vm;
+
+	return map_trusty_prio_to_linux_nice(
+			trusty_get_trusty_percpu_data(tsh, cpu_num)->ask_shadow_priority);
+}
+
+/*
+ * Publish, via the shared memory, the shadow priority corresponding to the
+ * nice value actually applied on @cpu_num.  A nice at/above the LOW mapping
+ * reports LOW, at/below the HIGH mapping reports HIGH, otherwise NORMAL.
+ */
+void trusty_set_actual_nice(unsigned int cpu_num,
+		struct trusty_sched_share_state *tcpu_state, int act_nice)
+{
+	struct trusty_sched_shared *tsh = (struct trusty_sched_shared *)tcpu_state->sched_shared_vm;
+	int new_prio;
+
+	if (act_nice >= map_trusty_prio_to_linux_nice(TRUSTY_SHADOW_PRIORITY_LOW))
+		new_prio = TRUSTY_SHADOW_PRIORITY_LOW;
+	else if (act_nice <= map_trusty_prio_to_linux_nice(TRUSTY_SHADOW_PRIORITY_HIGH))
+		new_prio = TRUSTY_SHADOW_PRIORITY_HIGH;
+	else
+		new_prio = TRUSTY_SHADOW_PRIORITY_NORMAL;
+
+	trusty_get_trusty_percpu_data(tsh, cpu_num)->cur_shadow_priority = new_prio;
+}
diff --git a/drivers/trusty/trusty-sched-share.h b/drivers/trusty/trusty-sched-share.h
new file mode 100644
index 0000000..7e59eec
--- /dev/null
+++ b/drivers/trusty/trusty-sched-share.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022 Google, Inc.
+ *
+ * This header file defines the SMC API and the shared data info between
+ * Linux and Trusty.
+ *
+ * Important: Copy of this header file is used in Trusty.
+ * Trusty header file:
+ *   trusty/trusty/kernel/lib/trusty/include/lib/trusty/trusty_share.h
+ * Please keep the copies in sync.
+ */
+#ifndef _TRUSTY_SCHED_SHARE_H_
+#define _TRUSTY_SCHED_SHARE_H_
+
+#include <linux/trusty/smcall.h>
+
+/*
+ * trusty-shadow-priority valid values
+ */
+#define TRUSTY_SHADOW_PRIORITY_LOW 1
+#define TRUSTY_SHADOW_PRIORITY_NORMAL 2
+#define TRUSTY_SHADOW_PRIORITY_HIGH 3
+
+/**
+ * struct trusty_percpu_data - per-cpu trusty shared data
+ * @cur_shadow_priority: set by Trusty-Driver/Linux
+ * @ask_shadow_priority: set by Trusty Kernel
+ */
+struct trusty_percpu_data {
+	u32 cur_shadow_priority;
+	u32 ask_shadow_priority;
+};
+
+/**
+ * struct trusty_sched_shared - information in the shared memory.
+ * @hdr_size: size of the trusty_shared data-structure.
+ *            An instance of this data-structure is embedded at
+ *            the very beginning of the shared-memory block.
+ * @cpu_count: max number of available CPUs in the system.
+ * @percpu_data_size: size of the per-cpu data structure.
+ *                    The shared-memory block contains an array
+ *                    of per-cpu instances of a data-structure that
+ *                    can be indexed by cpu_id.
+ *
+ * NOTE: At the end of this data-structure, additional space is
+ * allocated to accommodate a variable length array as follows:
+ * 'struct trusty_percpu_data percpu_data_table[]',
+ * with 'cpu_count' as its number of elements.
+ */
+struct trusty_sched_shared {
+	u32 hdr_size;
+	u32 cpu_count;
+	u32 percpu_data_size;
+	/* Additional space is allocated here as noted above */
+};
+
+#endif /* _TRUSTY_SCHED_SHARE_H_ */
diff --git a/drivers/trusty/trusty-smc-arm.S b/drivers/trusty/trusty-smc-arm.S
new file mode 100644
index 0000000..8ff8354
--- /dev/null
+++ b/drivers/trusty/trusty-smc-arm.S
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/linkage.h>
+
+.arch_extension sec
+
+/*
+ * struct smc_ret8 trusty_smc8(ulong r0..r7)
+ *
+ * Issue an SMC with 8 register arguments and return all 8 result
+ * registers through the hidden struct-return pointer (AAPCS indirect
+ * result: the result pointer arrives in r0, shifting the real
+ * arguments by one).
+ */
+ENTRY(trusty_smc8)
+    /* Save stack location where r3-r7 smc arguments are stored */
+    mov     r12, sp
+
+    /* Save original r4-r7 values as caller expects these to be preserved */
+    push    {r4-r7}
+
+    /* Save return value pointer and return address */
+    push    {r0, lr}
+
+    /* arm abi shifts arguments when returning a struct, shift them back */
+    mov     r0, r1
+    mov     r1, r2
+    mov     r2, r3
+
+    /* Load stack based arguments */
+    ldmia   r12, {r3-r7}
+
+    smc     #0
+
+    /* Restore return address and get return value pointer */
+    pop     {r12, lr}
+
+    /* Copy 8-register smc return value to struct smc_ret8 return value */
+    stmia   r12, {r0-r7}
+
+    /* Restore original r4-r7 values */
+    pop     {r4-r7}
+
+    /* Return */
+    bx      lr
+ENDPROC(trusty_smc8)
diff --git a/drivers/trusty/trusty-smc-arm64.S b/drivers/trusty/trusty-smc-arm64.S
new file mode 100644
index 0000000..14c8fed
--- /dev/null
+++ b/drivers/trusty/trusty-smc-arm64.S
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/linkage.h>
+
+.macro push ra, rb
+stp \ra, \rb, [sp,#-16]!
+.endm
+
+.macro pop ra, rb
+ldp \ra, \rb, [sp], #16
+.endm
+
+lr .req x30
+
+/*
+ * struct smc_ret8 trusty_smc8(ulong x0..x7)
+ *
+ * Issue an SMC with 8 register arguments and copy the 8 result registers
+ * into the struct smc_ret8 addressed by x8 (AAPCS64 indirect result
+ * register).
+ */
+SYM_FUNC_START(trusty_smc8)
+    /*
+     * Save x8 (return value ptr) and lr. The SMC calling convention says el3
+     * does not need to preserve x8. The normal ABI does not require either x8
+     * or lr to be preserved.
+     */
+    push    x8, lr
+    smc     #0
+    pop     x8, lr
+
+    /* Copy 8-register smc return value to struct smc_ret8 return value */
+    stp     x0, x1, [x8], #16
+    stp     x2, x3, [x8], #16
+    stp     x4, x5, [x8], #16
+    stp     x6, x7, [x8], #16
+
+    ret
+SYM_FUNC_END(trusty_smc8)
diff --git a/drivers/trusty/trusty-smc.h b/drivers/trusty/trusty-smc.h
new file mode 100644
index 0000000..b53e5ab
--- /dev/null
+++ b/drivers/trusty/trusty-smc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+#ifndef _TRUSTY_SMC_H
+#define _TRUSTY_SMC_H
+
+#include <linux/types.h>
+
+/* Raw 8-register SMC result (r0-r7 on arm, x0-x7 on arm64). */
+struct smc_ret8 {
+	unsigned long r0;
+	unsigned long r1;
+	unsigned long r2;
+	unsigned long r3;
+	unsigned long r4;
+	unsigned long r5;
+	unsigned long r6;
+	unsigned long r7;
+};
+
+/*
+ * Issue an SMC with 8 register arguments and return all 8 result
+ * registers.  Implemented in trusty-smc-arm.S / trusty-smc-arm64.S.
+ */
+struct smc_ret8 trusty_smc8(unsigned long r0, unsigned long r1,
+			    unsigned long r2, unsigned long r3,
+			    unsigned long r4, unsigned long r5,
+			    unsigned long r6, unsigned long r7);
+
+#endif /* _TRUSTY_SMC_H */
diff --git a/drivers/trusty/trusty-test.c b/drivers/trusty/trusty-test.c
new file mode 100644
index 0000000..8448689
--- /dev/null
+++ b/drivers/trusty/trusty-test.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "trusty-test.h"
+
+/* Driver state: the test platform device and its Trusty parent device. */
+struct trusty_test_state {
+	struct device *dev;
+	struct device *trusty_dev;
+};
+
+/* One shareable test buffer: pages, mapping, sg table and Trusty mem id. */
+struct trusty_test_shmem_obj {
+	struct list_head node;
+	size_t page_count;
+	struct page **pages;
+	void *buf;
+	struct sg_table sgt;
+	trusty_shared_mem_id_t mem_id;
+};
+
+/*
+ * Allocate a test object with @page_count number of pages, map it and add it to
+ * @list.
+ * For multi-page allocations, order the pages so they are not contiguous.
+ *
+ * Return: 0 on success, -ENOMEM on any allocation failure (everything
+ * allocated so far is released).
+ */
+static int trusty_test_alloc_obj(struct trusty_test_state *s,
+				 size_t page_count,
+				 struct list_head *list)
+{
+	size_t i;
+	int ret = -ENOMEM;
+	struct trusty_test_shmem_obj *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		goto err_alloc_obj;
+	obj->page_count = page_count;
+
+	obj->pages = kmalloc_array(page_count, sizeof(*obj->pages), GFP_KERNEL);
+	if (!obj->pages) {
+		ret = -ENOMEM;
+		dev_err(s->dev, "failed to allocate page array, count %zd\n",
+			page_count);
+		goto err_alloc_pages;
+	}
+
+	for (i = 0; i < page_count; i++) {
+		obj->pages[i] = alloc_page(GFP_KERNEL);
+		if (!obj->pages[i]) {
+			ret = -ENOMEM;
+			dev_err(s->dev, "failed to allocate page %zd/%zd\n",
+				i, page_count);
+			goto err_alloc_page;
+		}
+		if (i > 0 && obj->pages[i - 1] + 1 == obj->pages[i]) {
+			/* swap adjacent pages to increase fragmentation */
+			swap(obj->pages[i - 1], obj->pages[i]);
+		}
+	}
+
+	obj->buf = vmap(obj->pages, page_count, VM_MAP, PAGE_KERNEL);
+	if (!obj->buf) {
+		ret = -ENOMEM;
+		dev_err(s->dev, "failed to map test buffer page count %zd\n",
+			page_count);
+		goto err_map_pages;
+	}
+
+	ret = sg_alloc_table_from_pages(&obj->sgt, obj->pages, page_count,
+					0, page_count * PAGE_SIZE, GFP_KERNEL);
+	if (ret) {
+		dev_err(s->dev, "sg_alloc_table_from_pages failed: %d\n", ret);
+		goto err_alloc_sgt;
+	}
+	list_add_tail(&obj->node, list);
+	dev_dbg(s->dev, "buffer has %d page runs\n", obj->sgt.nents);
+	return 0;
+
+err_alloc_sgt:
+	vunmap(obj->buf);
+err_map_pages:
+	i = page_count;
+err_alloc_page:
+	/*
+	 * Free exactly the pages [0, i).  When entered from err_alloc_page,
+	 * i is the index that failed to allocate, so only the successfully
+	 * allocated pages are freed.  The original goto-into-a-for-loop
+	 * form ran the loop's i-- update before the first __free_page() and
+	 * therefore leaked pages[i - 1] on that path.
+	 */
+	while (i > 0)
+		__free_page(obj->pages[--i]);
+	kfree(obj->pages);
+err_alloc_pages:
+	kfree(obj);
+err_alloc_obj:
+	return ret;
+}
+
+/* Unlink @obj from its list and release its sg table, mapping and pages. */
+static void trusty_test_free_obj(struct trusty_test_state *s,
+				 struct trusty_test_shmem_obj *obj)
+{
+	size_t remaining;
+
+	list_del(&obj->node);
+	sg_free_table(&obj->sgt);
+	vunmap(obj->buf);
+
+	/* release the backing pages in reverse allocation order */
+	remaining = obj->page_count;
+	while (remaining)
+		__free_page(obj->pages[--remaining]);
+
+	kfree(obj->pages);
+	kfree(obj);
+}
+
+/*
+ * Share all the pages of all the test objects in @obj_list.
+ * If sharing a test object fails, free it so that every test object that
+ * remains in @obj_list has been shared when this function returns.
+ * Return an error if any test object failed to be shared.
+ */
+static int trusty_test_share_objs(struct trusty_test_state *s,
+				  struct list_head *obj_list, size_t size)
+{
+	int ret = 0;
+	int tmpret;
+	struct trusty_test_shmem_obj *obj;
+	struct trusty_test_shmem_obj *next_obj;
+	ktime_t t1;
+	ktime_t t2;
+
+	list_for_each_entry_safe(obj, next_obj, obj_list, node) {
+		t1 = ktime_get();
+		tmpret = trusty_share_memory(s->trusty_dev, &obj->mem_id,
+					     obj->sgt.sgl, obj->sgt.nents,
+					     PAGE_KERNEL);
+		t2 = ktime_get();
+		if (tmpret) {
+			ret = tmpret;
+			dev_err(s->dev,
+				"trusty_share_memory failed: %d, size=%zd\n",
+				ret, size);
+
+			/*
+			 * Free obj and continue, so we can revoke the
+			 * whole list in trusty_test_reclaim_objs.
+			 */
+			trusty_test_free_obj(s, obj);
+			/*
+			 * obj was just freed; the original code fell
+			 * through to the dev_dbg below and read
+			 * obj->mem_id — a use-after-free.
+			 */
+			continue;
+		}
+		dev_dbg(s->dev, "share id=0x%llx, size=%zu took %lld ns\n",
+			obj->mem_id, size,
+			ktime_to_ns(ktime_sub(t2, t1)));
+	}
+
+	return ret;
+}
+
+/* Reclaim memory shared with trusty for all test objects in @obj_list. */
+static int trusty_test_reclaim_objs(struct trusty_test_state *s,
+				    struct list_head *obj_list, size_t size)
+{
+	int ret = 0;
+	int tmpret;
+	struct trusty_test_shmem_obj *obj;
+	struct trusty_test_shmem_obj *next_obj;
+	ktime_t t1;
+	ktime_t t2;
+
+	list_for_each_entry_safe(obj, next_obj, obj_list, node) {
+		t1 = ktime_get();
+		tmpret = trusty_reclaim_memory(s->trusty_dev, obj->mem_id,
+					       obj->sgt.sgl, obj->sgt.nents);
+		t2 = ktime_get();
+		if (tmpret) {
+			ret = tmpret;
+			dev_err(s->dev,
+				"trusty_reclaim_memory failed: %d, id=0x%llx\n",
+				ret, obj->mem_id);
+
+			/*
+			 * It is not safe to free this memory if
+			 * trusty_reclaim_memory fails. Leak it in that
+			 * case.
+			 */
+			list_del(&obj->node);
+		}
+		/* obj is only unlinked above, never freed, so this is safe */
+		dev_dbg(s->dev, "revoke id=0x%llx, size=%zu took %lld ns\n",
+			obj->mem_id, size,
+			ktime_to_ns(ktime_sub(t2, t1)));
+	}
+
+	return ret;
+}
+
+/*
+ * Test a test object. First, initialize the memory, then make a std call into
+ * trusty which will read it and return an error if the initialized value does
+ * not match what it expects. If trusty reads the correct values, it will modify
+ * the memory and return 0. This function then checks that it can read the
+ * correct modified value.
+ */
+static int trusty_test_rw(struct trusty_test_state *s,
+			  struct trusty_test_shmem_obj *obj)
+{
+	size_t size = obj->page_count * PAGE_SIZE;
+	int ret;
+	size_t i;
+	u64 *buf = obj->buf;
+	ktime_t t1;
+	ktime_t t2;
+
+	/* fill the buffer with the pattern trusty expects: buf[i] == i */
+	for (i = 0; i < size / sizeof(*buf); i++)
+		buf[i] = i;
+
+	t1 = ktime_get();
+	ret = trusty_std_call32(s->trusty_dev, SMC_SC_TEST_SHARED_MEM_RW,
+				(u32)(obj->mem_id), (u32)(obj->mem_id >> 32),
+				size);
+	t2 = ktime_get();
+	if (ret < 0) {
+		dev_err(s->dev,
+			"trusty std call (SMC_SC_TEST_SHARED_MEM_RW) failed: %d 0x%llx\n",
+			ret, obj->mem_id);
+		return ret;
+	}
+
+	/* trusty is expected to have rewritten each word to (size - i) */
+	for (i = 0; i < size / sizeof(*buf); i++) {
+		if (buf[i] != size - i) {
+			dev_err(s->dev,
+				"input mismatch at %zd, got 0x%llx instead of 0x%zx\n",
+				i, buf[i], size - i);
+			return -EIO;
+		}
+	}
+
+	dev_dbg(s->dev, "rw id=0x%llx, size=%zu took %lld ns\n", obj->mem_id,
+		size, ktime_to_ns(ktime_sub(t2, t1)));
+
+	return 0;
+}
+
+/*
+ * Run the read/write test on every object in @obj_list, repeating the
+ * whole pass @repeat_access times.  Stops at and returns the first error.
+ */
+static int trusty_test_rw_objs(struct trusty_test_state *s,
+			       struct list_head *obj_list,
+			       size_t repeat_access)
+{
+	size_t pass;
+	struct trusty_test_shmem_obj *obj;
+
+	/*
+	 * Repeat the test in case the memory attributes don't match and
+	 * either side sees stale data.
+	 */
+	for (pass = 0; pass < repeat_access; pass++) {
+		list_for_each_entry(obj, obj_list, node) {
+			int err = trusty_test_rw(s, obj);
+
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate @obj_count test object that each have @page_count pages. Share each
+ * object @repeat_share times, each time running tests on every object
+ * @repeat_access times.
+ */
+static int trusty_test_run(struct trusty_test_state *s, size_t page_count,
+			   size_t obj_count, size_t repeat_share,
+			   size_t repeat_access)
+{
+	int ret = 0;
+	int tmpret;
+	size_t i;
+	size_t size = page_count * PAGE_SIZE;
+	LIST_HEAD(obj_list);
+	struct trusty_test_shmem_obj *obj;
+	struct trusty_test_shmem_obj *next_obj;
+
+	/* stop allocating on the first failure; partial list is still run */
+	for (i = 0; i < obj_count && !ret; i++)
+		ret = trusty_test_alloc_obj(s, page_count, &obj_list);
+
+	for (i = 0; i < repeat_share && !ret; i++) {
+		ret = trusty_test_share_objs(s, &obj_list, size);
+		if (ret) {
+			dev_err(s->dev,
+				"trusty_share_memory failed: %d, i=%zd/%zd, size=%zd\n",
+				ret, i, repeat_share, size);
+		} else {
+			ret = trusty_test_rw_objs(s, &obj_list, repeat_access);
+			if (ret)
+				dev_err(s->dev,
+					"test failed: %d, i=%zd/%zd, size=%zd\n",
+					ret, i, repeat_share, size);
+		}
+		/* always reclaim, even after a failed pass */
+		tmpret = trusty_test_reclaim_objs(s, &obj_list, size);
+		if (tmpret) {
+			ret = tmpret;
+			dev_err(s->dev,
+				"trusty_reclaim_memory failed: %d, i=%zd/%zd\n",
+				ret, i, repeat_share);
+		}
+	}
+
+	list_for_each_entry_safe(obj, next_obj, &obj_list, node)
+		trusty_test_free_obj(s, obj);
+
+	dev_info(s->dev, "[ %s ] size %zd, obj_count %zd, repeat_share %zd, repeat_access %zd\n",
+		 ret ? "FAILED" : "PASSED", size, obj_count, repeat_share,
+		 repeat_access);
+
+	return ret;
+}
+
+/*
+ * Parse an optional ",<number>" argument at *@buf.
+ * On success, advance *@buf past the number and return its value.
+ * If *@buf does not start with ',', return @default_val and leave *@buf
+ * untouched; if the ',' is not followed by a number, return @default_val
+ * with *@buf advanced past the ','.
+ */
+static size_t trusty_test_get_arg(const char **buf, size_t default_val)
+{
+	const char *pos = *buf;
+	char *num_end;
+	size_t val;
+
+	if (*pos != ',')
+		return default_val;
+
+	pos++;
+	*buf = pos;
+	val = simple_strtoul(pos, &num_end, 0);
+	if (num_end == pos)
+		return default_val;
+
+	*buf = num_end;
+	return val;
+}
+
+/*
+ * Run tests described by a string in this format:
+ * <obj_size>,<obj_count=1>,<repeat_share=1>,<repeat_access=3>
+ */
+static ssize_t trusty_test_run_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct trusty_test_state *s = platform_get_drvdata(pdev);
+	size_t size;
+	size_t obj_count;
+	size_t repeat_share;
+	size_t repeat_access;
+	int ret;
+	char *buf_next;
+
+	/*
+	 * Loop over whitespace-separated test specs; exits with @count
+	 * (success) once no further number can be parsed, or with the
+	 * first failing test's error code.
+	 */
+	while (true) {
+		while (isspace(*buf))
+			buf++;
+		size = simple_strtoul(buf, &buf_next, 0);
+		if (buf_next == buf)
+			return count;
+		buf = buf_next;
+		obj_count = trusty_test_get_arg(&buf, 1);
+		repeat_share = trusty_test_get_arg(&buf, 1);
+		repeat_access = trusty_test_get_arg(&buf, 3);
+
+		ret = trusty_test_run(s, DIV_ROUND_UP(size, PAGE_SIZE),
+				      obj_count, repeat_share, repeat_access);
+		if (ret)
+			return ret;
+	}
+}
+
+static DEVICE_ATTR_WO(trusty_test_run);
+
+static struct attribute *trusty_test_attrs[] = {
+	&dev_attr_trusty_test_run.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(trusty_test);
+
+/*
+ * Probe: verify the Trusty side implements the expected test API version
+ * (via SMC_SC_TEST_VERSION), then allocate the driver state.  The state
+ * is freed in trusty_test_remove().
+ */
+static int trusty_test_probe(struct platform_device *pdev)
+{
+	struct trusty_test_state *s;
+	int ret;
+
+	ret = trusty_std_call32(pdev->dev.parent, SMC_SC_TEST_VERSION,
+				TRUSTY_STDCALLTEST_API_VERSION, 0, 0);
+	if (ret != TRUSTY_STDCALLTEST_API_VERSION)
+		return -ENOENT;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	s->dev = &pdev->dev;
+	/* the parent platform device is the Trusty core device for SMCs */
+	s->trusty_dev = s->dev->parent;
+
+	platform_set_drvdata(pdev, s);
+
+	return 0;
+}
+
+/*
+ * Remove: free the state allocated in trusty_test_probe().
+ *
+ * Fix: drvdata was stored as a struct trusty_test_state in probe, but
+ * the original declared it as struct trusty_log_state (copy/paste from
+ * the log driver).  Use the correct type.
+ */
+static int trusty_test_remove(struct platform_device *pdev)
+{
+	struct trusty_test_state *s = platform_get_drvdata(pdev);
+
+	kfree(s);
+	return 0;
+}
+
+static const struct of_device_id trusty_test_of_match[] = {
+	{ .compatible = "android,trusty-test-v1", },
+	{},
+};
+
+/*
+ * Fix: the device-table type for a struct of_device_id table is "of".
+ * The original used "trusty", which is not a type recognized by
+ * file2alias and would not generate the proper module alias for
+ * autoloading.
+ */
+MODULE_DEVICE_TABLE(of, trusty_test_of_match);
+
+static struct platform_driver trusty_test_driver = {
+	.probe = trusty_test_probe,
+	.remove = trusty_test_remove,
+	.driver = {
+		.name = "trusty-test",
+		.of_match_table = trusty_test_of_match,
+		.dev_groups = trusty_test_groups,
+	},
+};
+
+module_platform_driver(trusty_test_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty test driver");
diff --git a/drivers/trusty/trusty-test.h b/drivers/trusty/trusty-test.h
new file mode 100644
index 0000000..eea7beb
--- /dev/null
+++ b/drivers/trusty/trusty-test.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Google, Inc.
+ */
+#ifndef _TRUSTY_TEST_H
+#define _TRUSTY_TEST_H
+
+/* Std-call numbers for the Trusty shared-memory test service. */
+#define SMC_SC_TEST_VERSION SMC_STDCALL_NR(SMC_ENTITY_TEST, 0)
+#define SMC_SC_TEST_SHARED_MEM_RW SMC_STDCALL_NR(SMC_ENTITY_TEST, 1)
+
+/* Test API version this driver expects the Trusty side to report. */
+#define TRUSTY_STDCALLTEST_API_VERSION 1
+
+#endif /* _TRUSTY_TEST_H */
diff --git a/drivers/trusty/trusty-trace.h b/drivers/trusty/trusty-trace.h
new file mode 100644
index 0000000..e587ae1
--- /dev/null
+++ b/drivers/trusty/trusty-trace.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM trusty
+
+#if !defined(_TRUSTY_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRUSTY_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/trusty/smcall.h>
+
+/* One of the requirements of the checkpatch.pl script is to have "parentheses
+ * to shield macro arguments from the effects of operator precedence."
+ * Otherwise the checkpatch.pl script reports the error
+ * "ERROR: Macros with complex values should be enclosed in parentheses"
+ *
+ * Hence extra parentheses are used to avoid the above checkpatch error,
+ * and those extra parentheses are then removed using DELETE_PAREN to avoid
+ * compilation errors.
+ */
+#define _DELETE_PAREN(args...)	args
+#define DELETE_PAREN(arg)	_DELETE_PAREN(_DELETE_PAREN arg)
+
+/*
+ * SMC fast call and std call numbers from linux/trusty/smcall.h
+ */
+#define SMC_NAME_LIST (			\
+	smc_name(SC_RESTART_LAST)	\
+	smc_name(SC_LOCKED_NOP)		\
+	smc_name(SC_RESTART_FIQ)	\
+	smc_name(SC_NOP)		\
+	smc_name(FC_RESERVED)		\
+	smc_name(FC_FIQ_EXIT)		\
+	smc_name(FC_REQUEST_FIQ)	\
+	smc_name(FC_GET_NEXT_IRQ)	\
+	smc_name(FC_CPU_SUSPEND)	\
+	smc_name(FC_CPU_RESUME)		\
+	smc_name(FC_AARCH_SWITCH)	\
+	smc_name(FC_GET_VERSION_STR)	\
+	smc_name(FC_API_VERSION)	\
+	smc_name(SC_VIRTIO_GET_DESCR)	\
+	smc_name(SC_VIRTIO_START)	\
+	smc_name(SC_VIRTIO_STOP)	\
+	smc_name(SC_VDEV_RESET)		\
+	smc_name(SC_VDEV_KICK_VQ)	\
+	smc_name_end(NC_VDEV_KICK_VQ)	\
+	)
+
+#undef smc_name
+#undef smc_name_end
+
+#define smc_name_define_enum(x)	(TRACE_DEFINE_ENUM(SMC_##x);)
+#define smc_name(x)		DELETE_PAREN(smc_name_define_enum(x))
+#define smc_name_end(x)		DELETE_PAREN(smc_name_define_enum(x))
+
+DELETE_PAREN(SMC_NAME_LIST)
+
+#undef smc_name
+#undef smc_name_end
+
+#define smc_name(x)	{ SMC_##x, #x },
+#define smc_name_end(x)	{ SMC_##x, #x }
+
+#define smc_show_name(x)	\
+	__print_symbolic(x, DELETE_PAREN(SMC_NAME_LIST))
+
+DECLARE_EVENT_CLASS(trusty_smc4_class,
+	TP_PROTO(unsigned long r0, unsigned long r1, unsigned long r2,
+		 unsigned long r3),
+	TP_ARGS(r0, r1, r2, r3),
+	TP_STRUCT__entry(
+		__field(unsigned long, r0)
+		__field(unsigned long, r1)
+		__field(unsigned long, r2)
+		__field(unsigned long, r3)
+	),
+	TP_fast_assign(
+		__entry->r0 = r0;
+		__entry->r1 = r1;
+		__entry->r2 = r2;
+		__entry->r3 = r3;
+	),
+	TP_printk("smcnr=%s r0=0x%lx r1=0x%lx r2=0x%lx r3=0x%lx", smc_show_name(__entry->r0),
+		__entry->r0, __entry->r1, __entry->r2, __entry->r3)
+);
+
+#define DEFINE_TRUSTY_SMC4_EVENT(name)	\
+DEFINE_EVENT(trusty_smc4_class, name,	\
+	TP_PROTO(unsigned long r0, unsigned long r1, unsigned long r2, \
+		 unsigned long r3), \
+	TP_ARGS(r0, r1, r2, r3))
+
+DEFINE_TRUSTY_SMC4_EVENT(trusty_std_call32);
+DEFINE_TRUSTY_SMC4_EVENT(trusty_smc);
+
+DECLARE_EVENT_CLASS(trusty_smc_return_class,
+	TP_PROTO(unsigned long ret),
+	TP_ARGS(ret),
+	TP_STRUCT__entry(
+		__field(unsigned long, ret)
+	),
+	TP_fast_assign(
+		__entry->ret = ret;
+	),
+	TP_printk("ret:ulong=%lu (0x%lx) (ret:s32=%d)", __entry->ret,
+		__entry->ret, (s32)__entry->ret)
+);
+
+#define DEFINE_TRUSTY_SMC_RETURN_EVENT(name)	\
+DEFINE_EVENT(trusty_smc_return_class, name,	\
+	TP_PROTO(unsigned long ret), \
+	TP_ARGS(ret))
+
+DEFINE_TRUSTY_SMC_RETURN_EVENT(trusty_std_call32_done);
+DEFINE_TRUSTY_SMC_RETURN_EVENT(trusty_smc_done);
+
+TRACE_EVENT(trusty_share_memory,
+	TP_PROTO(size_t len, unsigned int nents, bool lend),
+	TP_ARGS(len, nents, lend),
+	TP_STRUCT__entry(
+		__field(size_t, len)
+		__field(unsigned int, nents)
+		__field(bool, lend)
+	),
+	TP_fast_assign(
+		__entry->len = len;
+		__entry->nents = nents;
+		__entry->lend = lend;
+	),
+	TP_printk("len=%zu, nents=%u, lend=%u", __entry->len, __entry->nents, __entry->lend)
+);
+
+TRACE_EVENT(trusty_share_memory_done,
+	TP_PROTO(size_t len, unsigned int nents, bool lend, u64 handle, int ret),
+	TP_ARGS(len, nents, lend, handle, ret),
+	TP_STRUCT__entry(
+		__field(size_t, len)
+		__field(unsigned int, nents)
+		__field(bool, lend)
+		__field(u64, handle)
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		__entry->len = len;
+		__entry->nents = nents;
+		__entry->lend = lend;
+		__entry->handle = handle;
+		__entry->ret = ret;
+	),
+	TP_printk("len=%zu, nents=%u, lend=%u, ffa_handle=0x%llx, ret=%d", __entry->len,
+		__entry->nents, __entry->lend, __entry->handle, __entry->ret)
+);
+
+TRACE_EVENT(trusty_enqueue_nop,
+	TP_PROTO(struct trusty_nop *nop),
+	TP_ARGS(nop),
+	TP_STRUCT__entry(
+		__field(u32, arg1)
+		__field(u32, arg2)
+		__field(u32, arg3)
+	),
+	TP_fast_assign(
+		__entry->arg1 = nop ? nop->args[0] : 0U;
+		__entry->arg2 = nop ? nop->args[1] : 0U;
+		__entry->arg3 = nop ? nop->args[2] : 0U;
+	),
+	TP_printk("arg1=0x%x, arg2=0x%x, arg3=0x%x", __entry->arg1, __entry->arg2, __entry->arg3)
+);
+
+TRACE_EVENT(trusty_dequeue_nop,
+	TP_PROTO(bool signaled, bool nop_dequeued, bool queue_emptied),
+	TP_ARGS(signaled, nop_dequeued, queue_emptied),
+	TP_STRUCT__entry(
+		__field(bool, signaled)
+		__field(bool, nop_dequeued)
+		__field(bool, queue_emptied)
+	),
+	TP_fast_assign(
+		__entry->signaled = signaled;
+		__entry->nop_dequeued = nop_dequeued;
+		__entry->queue_emptied = queue_emptied;
+	),
+	TP_printk("%c%c%c",
+		__entry->signaled ? 'S' : ' ',
+		__entry->nop_dequeued ? 'D' : ' ',
+		__entry->queue_emptied ? 'E' : ' '
+	)
+);
+
+#define CPUNICE_CAUSE_LIST (			\
+	cpu_nice(CAUSE_DEFAULT)	\
+	cpu_nice(CAUSE_USE_HIGH_WQ)		\
+	cpu_nice(CAUSE_TRUSTY_REQ)	\
+	cpu_nice(CAUSE_NOP_ESCALATE)	\
+	cpu_nice_end(CAUSE_ENQUEUE_BOOST)	\
+	)
+
+#undef cpu_nice
+#undef cpu_nice_end
+
+#define cpu_nice_define_enum(x)	(TRACE_DEFINE_ENUM(CPUNICE_##x);)
+#define cpu_nice(x)		DELETE_PAREN(cpu_nice_define_enum(x))
+#define cpu_nice_end(x)		DELETE_PAREN(cpu_nice_define_enum(x))
+
+DELETE_PAREN(CPUNICE_CAUSE_LIST)
+
+#undef cpu_nice
+#undef cpu_nice_end
+
+#define cpu_nice(x)	{ CPUNICE_##x, #x },
+#define cpu_nice_end(x)	{ CPUNICE_##x, #x }
+
+#define cpunice_show_cause(x)	\
+	__print_symbolic(x, DELETE_PAREN(CPUNICE_CAUSE_LIST))
+
+TRACE_EVENT(trusty_change_cpu_nice,
+	TP_PROTO(s32 cur_nice, s32 req_nice, u32 cause_id),
+	TP_ARGS(cur_nice, req_nice, cause_id),
+	TP_STRUCT__entry(
+		__field(s32, cur_nice)
+		__field(s32, req_nice)
+		__field(u32, cause_id)
+	),
+	TP_fast_assign(
+		__entry->cur_nice = cur_nice;
+		__entry->req_nice = req_nice;
+		__entry->cause_id = cause_id;
+	),
+	TP_printk("%d->%d (%s)", __entry->cur_nice, __entry->req_nice,
+			cpunice_show_cause(__entry->cause_id))
+);
+
+TRACE_EVENT(trusty_reclaim_memory,
+	TP_PROTO(u64 id),
+	TP_ARGS(id),
+	TP_STRUCT__entry(
+		__field(u64, id)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+	),
+	TP_printk("id=%llu", __entry->id)
+);
+
+TRACE_EVENT(trusty_reclaim_memory_done,
+	TP_PROTO(u64 id, int ret),
+	TP_ARGS(id, ret),
+	TP_STRUCT__entry(
+		__field(u64, id)
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->ret = ret;
+	),
+	TP_printk("id=%llu ret=%d (0x%x)", __entry->id, __entry->ret, __entry->ret)
+);
+
+#endif /* _TRUSTY_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trusty-trace
+#include <trace/define_trace.h>
diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c
new file mode 100644
index 0000000..2df10d1
--- /dev/null
+++ b/drivers/trusty/trusty-virtio.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trusty Virtio driver
+ *
+ * Copyright (C) 2015 Google, Inc.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include <linux/dma-map-ops.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/trusty_ipc.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+
+#include <linux/atomic.h>
+
+#define  RSC_DESCR_VER  1
+
+struct trusty_vdev;
+/*
+ * NOTE(review): use_high_wq is exposed as a module parameter but is not
+ * referenced anywhere else in this file — possibly dead. Verify against
+ * out-of-tree users before removing; it is visible via sysfs (0660).
+ */
+static bool use_high_wq;
+module_param(use_high_wq, bool, 0660);
+
+/* Per-device state for the trusty-virtio platform device. */
+struct trusty_ctx {
+	struct device		*dev;
+	void			*shared_va;	/* descriptor table shared with Trusty */
+	struct scatterlist	shared_sg;	/* sg entry covering shared_va */
+	trusty_shared_mem_id_t	shared_id;	/* id from trusty_share_memory() */
+	size_t			shared_sz;	/* size of shared_va buffer */
+	struct work_struct	check_vqs;	/* runs check_all_vqs() */
+	struct work_struct	kick_vqs;	/* runs kick_vqs() */
+	struct notifier_block	call_notifier;	/* TRUSTY_CALL_RETURNED hook */
+	struct list_head	vdev_list;	/* registered trusty_vdevs */
+	struct mutex		mlock; /* protects vdev_list */
+	struct workqueue_struct	*kick_wq;
+	struct workqueue_struct	*check_wq;
+};
+
+/* Per-virtqueue state: one vring shared with Trusty. */
+struct trusty_vring {
+	void			*vaddr;		/* vring memory (alloc_pages_exact) */
+	struct scatterlist	sg;		/* sg entry covering vaddr */
+	trusty_shared_mem_id_t	shared_mem_id;	/* id from trusty_share_memory_compat() */
+	size_t			size;		/* page-aligned vring size in bytes */
+	unsigned int		align;		/* vring alignment from descriptor */
+	unsigned int		elem_num;	/* queue depth from descriptor */
+	u32			notifyid;	/* id passed to SMC_SC_VDEV_KICK_VQ */
+	atomic_t		needs_kick;	/* set by notify, consumed by kick_vqs() */
+	struct fw_rsc_vdev_vring *vr_descr;	/* shared vring descriptor */
+	struct virtqueue	*vq;
+	struct trusty_vdev	*tvdev;		/* owning device */
+	struct trusty_nop	kick_nop;	/* pre-built NOP for fast kicks */
+};
+
+/* One virtio device parsed from the Trusty resource table. */
+struct trusty_vdev {
+	struct list_head	node;		/* entry in trusty_ctx.vdev_list */
+	struct virtio_device	vdev;
+	struct trusty_ctx	*tctx;
+	u32			notifyid;	/* id passed in vdev-level SMCs */
+	unsigned int		config_len;	/* length of config area */
+	void			*config;	/* config area inside shared descr */
+	struct fw_rsc_vdev	*vdev_descr;	/* shared vdev descriptor */
+	unsigned int		vring_num;	/* number of entries in vrings[] */
+	struct trusty_vring	vrings[];
+};
+
+/* Map the embedded virtio_device back to its trusty_vdev container. */
+#define vdev_to_tvdev(vd)  container_of((vd), struct trusty_vdev, vdev)
+
+/*
+ * Work handler: run vring_interrupt() on every virtqueue of every
+ * registered vdev so used buffers get processed. Scheduled from the
+ * TRUSTY_CALL_RETURNED notifier (trusty_call_notify()).
+ */
+static void check_all_vqs(struct work_struct *work)
+{
+	struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
+					       check_vqs);
+	struct trusty_vdev *tvdev;
+	unsigned int vq_idx;
+
+	list_for_each_entry(tvdev, &tctx->vdev_list, node) {
+		for (vq_idx = 0; vq_idx < tvdev->vring_num; vq_idx++) {
+			struct virtqueue *vq = tvdev->vrings[vq_idx].vq;
+
+			if (vq)
+				vring_interrupt(0, vq);
+		}
+	}
+}
+
+/*
+ * Notifier callback: after a call into Trusty has returned, queue a
+ * virtqueue check on the dedicated workqueue.
+ */
+static int trusty_call_notify(struct notifier_block *nb,
+			      unsigned long action, void *data)
+{
+	struct trusty_ctx *tctx = container_of(nb, struct trusty_ctx,
+					       call_notifier);
+
+	if (action != TRUSTY_CALL_RETURNED)
+		return NOTIFY_DONE;
+
+	queue_work(tctx->check_wq, &tctx->check_vqs);
+	return NOTIFY_OK;
+}
+
+/*
+ * Synchronously notify Trusty (SMC_SC_VDEV_KICK_VQ) that the given
+ * virtqueue has new buffers. Failures are logged but not propagated.
+ */
+static void kick_vq(struct trusty_ctx *tctx,
+		    struct trusty_vdev *tvdev,
+		    struct trusty_vring *tvr)
+{
+	int rc;
+
+	dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n",
+		__func__, tvdev->notifyid, tvr->notifyid);
+
+	rc = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ,
+			       tvdev->notifyid, tvr->notifyid, 0);
+	if (rc)
+		dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n",
+			tvdev->notifyid, tvr->notifyid, rc);
+}
+
+/*
+ * Work handler: under tctx->mlock, kick every vring whose needs_kick flag
+ * was set by trusty_virtio_notify(). atomic_xchg() clears the flag so a
+ * notify racing with this handler re-arms it instead of being lost.
+ */
+static void kick_vqs(struct work_struct *work)
+{
+	unsigned int i;
+	struct trusty_vdev *tvdev;
+	struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
+					       kick_vqs);
+	mutex_lock(&tctx->mlock);
+	list_for_each_entry(tvdev, &tctx->vdev_list, node) {
+		for (i = 0; i < tvdev->vring_num; i++) {
+			struct trusty_vring *tvr = &tvdev->vrings[i];
+
+			if (atomic_xchg(&tvr->needs_kick, 0))
+				kick_vq(tctx, tvdev, tvr);
+		}
+	}
+	mutex_unlock(&tctx->mlock);
+}
+
+/*
+ * virtqueue notify hook. On Trusty API versions before SMP NOP support
+ * the kick is deferred to kick_wq (flag consumed by kick_vqs()); on newer
+ * versions the vring's pre-initialized kick_nop is enqueued directly via
+ * trusty_enqueue_nop().
+ */
+static bool trusty_virtio_notify(struct virtqueue *vq)
+{
+	struct trusty_vring *tvr = vq->priv;
+	struct trusty_vdev *tvdev = tvr->tvdev;
+	struct trusty_ctx *tctx = tvdev->tctx;
+	u32 api_ver = trusty_get_api_version(tctx->dev->parent);
+
+	if (api_ver < TRUSTY_API_VERSION_SMP_NOP) {
+		atomic_set(&tvr->needs_kick, 1);
+		queue_work(tctx->kick_wq, &tctx->kick_vqs);
+	} else {
+		trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop);
+	}
+
+	return true;
+}
+
+/*
+ * Ask Trusty to fill the shared buffer identified by @id (of size @sz)
+ * with its device descriptor table. Returns the descriptor size on
+ * success, -ENODEV on any SMC error.
+ */
+static int trusty_load_device_descr(struct trusty_ctx *tctx,
+				    trusty_shared_mem_id_t id, size_t sz)
+{
+	int rc;
+
+	dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
+
+	rc = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_GET_DESCR,
+			       (u32)id, id >> 32, sz);
+	if (rc >= 0)
+		return rc;
+
+	dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n",
+		__func__, rc);
+	return -ENODEV;
+}
+
+/*
+ * Tell Trusty to stop using the shared descriptor buffer @id. Failure is
+ * only logged; there is nothing the caller can do about it.
+ */
+static void trusty_virtio_stop(struct trusty_ctx *tctx,
+			       trusty_shared_mem_id_t id, size_t sz)
+{
+	int rc;
+
+	dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
+
+	rc = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_STOP,
+			       (u32)id, id >> 32, sz);
+	if (rc)
+		dev_err(tctx->dev, "%s: virtio done returned (%d)\n",
+			__func__, rc);
+}
+
+/*
+ * Tell Trusty to start the virtio devices described by shared buffer
+ * @id. Returns 0 on success, -ENODEV on any SMC error.
+ */
+static int trusty_virtio_start(struct trusty_ctx *tctx,
+			       trusty_shared_mem_id_t id, size_t sz)
+{
+	int rc;
+
+	dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
+
+	rc = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_START,
+			       (u32)id, id >> 32, sz);
+	if (!rc)
+		return 0;
+
+	dev_err(tctx->dev, "%s: virtio start returned (%d)\n",
+		__func__, rc);
+	return -ENODEV;
+}
+
+/*
+ * virtio config op: reset the device on the Trusty side.
+ * NOTE(review): the trusty_std_call32() return value is discarded, so a
+ * failed reset is silent — confirm this is intentional.
+ */
+static void trusty_virtio_reset(struct virtio_device *vdev)
+{
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+	struct trusty_ctx *tctx = tvdev->tctx;
+
+	dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid);
+	trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET,
+			  tvdev->notifyid, 0, 0);
+}
+
+/*
+ * virtio config op: advertise the device features from the shared
+ * descriptor, plus VIRTIO_F_ACCESS_PLATFORM so the virtio core goes
+ * through the DMA API (see trusty_virtio_dma_map_ops and the note in
+ * trusty_virtio_finalize_features()).
+ */
+static u64 trusty_virtio_get_features(struct virtio_device *vdev)
+{
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+
+	return tvdev->vdev_descr->dfeatures |
+		(1ULL << VIRTIO_F_ACCESS_PLATFORM);
+}
+
+/*
+ * virtio config op: record the driver-accepted features in the shared
+ * descriptor. Features above bit 31 (other than the locally-injected
+ * VIRTIO_F_ACCESS_PLATFORM) are rejected because gfeatures only holds
+ * 32 bits.
+ */
+static int trusty_virtio_finalize_features(struct virtio_device *vdev)
+{
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+	u64 features = vdev->features;
+
+	/*
+	 * We set VIRTIO_F_ACCESS_PLATFORM to enable the dma mapping hooks.
+	 * The other side does not need to know.
+	 */
+	features &= ~(1ULL << VIRTIO_F_ACCESS_PLATFORM);
+
+	/* Make sure we don't have any features > 32 bits! */
+	if (WARN_ON((u32)vdev->features != features))
+		return -EINVAL;
+
+	tvdev->vdev_descr->gfeatures = vdev->features;
+	return 0;
+}
+
+/*
+ * virtio config op: copy @len bytes at @offset from the device config
+ * area (part of the shared descriptor) into @buf. Out-of-range requests
+ * are silently ignored, as before.
+ *
+ * The bounds check is written as two comparisons so that an unsigned
+ * wrap-around of offset + len cannot defeat it.
+ */
+static void trusty_virtio_get_config(struct virtio_device *vdev,
+				     unsigned int offset, void *buf,
+				     unsigned int len)
+{
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+
+	dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n",
+		__func__, len, offset);
+
+	if (tvdev->config && len <= tvdev->config_len &&
+	    offset <= tvdev->config_len - len)
+		memcpy(buf, tvdev->config + offset, len);
+}
+
+/*
+ * virtio config op: config-space writes are intentionally a no-op for
+ * this transport.
+ */
+static void trusty_virtio_set_config(struct virtio_device *vdev,
+				     unsigned int offset, const void *buf,
+				     unsigned int len)
+{
+}
+
+/* virtio config op: read the device status byte from the shared descriptor. */
+static u8 trusty_virtio_get_status(struct virtio_device *vdev)
+{
+	return vdev_to_tvdev(vdev)->vdev_descr->status;
+}
+
+/* virtio config op: write the device status byte into the shared descriptor. */
+static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status)
+{
+	vdev_to_tvdev(vdev)->vdev_descr->status = status;
+}
+
+/*
+ * Tear down every vring of @vdev: dequeue any pending kick NOP, delete
+ * the virtqueue, then reclaim the shared vring memory from Trusty before
+ * freeing it. If reclaim fails the pages are deliberately leaked — see
+ * the inline comment.
+ */
+static void _del_vqs(struct virtio_device *vdev)
+{
+	unsigned int i;
+	int ret;
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+	struct trusty_vring *tvr = &tvdev->vrings[0];
+
+	for (i = 0; i < tvdev->vring_num; i++, tvr++) {
+		/* dequeue kick_nop */
+		trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop);
+
+		/* delete vq */
+		if (tvr->vq) {
+			vring_del_virtqueue(tvr->vq);
+			tvr->vq = NULL;
+		}
+		/* delete vring */
+		if (tvr->vaddr) {
+			ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
+						    tvr->shared_mem_id,
+						    &tvr->sg, 1);
+			if (WARN_ON(ret)) {
+				dev_err(&vdev->dev,
+					"trusty_revoke_memory failed: %d 0x%llx\n",
+					ret, tvr->shared_mem_id);
+				/*
+				 * It is not safe to free this memory if
+				 * trusty_revoke_memory fails. Leak it in that
+				 * case.
+				 */
+			} else {
+				free_pages_exact(tvr->vaddr, tvr->size);
+			}
+			tvr->vaddr = NULL;
+		}
+	}
+}
+
+/* virtio config op: thin wrapper around _del_vqs(). */
+static void trusty_virtio_del_vqs(struct virtio_device *vdev)
+{
+	_del_vqs(vdev);
+}
+
+
+/*
+ * Allocate and register virtqueue @id of @vdev:
+ *  - allocate page-aligned, zeroed vring memory,
+ *  - share it with Trusty; the 64-bit shared_mem_id is stored split
+ *    across the 32-bit da (low) and pa (high) fields of the shared vring
+ *    descriptor (see inline comment),
+ *  - wrap it in a virtqueue via vring_new_virtqueue().
+ *
+ * On failure the shared memory is reclaimed, or deliberately leaked if
+ * reclaim fails. Note the err_share_memory label inside the else branch:
+ * it is only reached when sharing itself failed, so the pages are still
+ * safe to free directly.
+ */
+static struct virtqueue *_find_vq(struct virtio_device *vdev,
+				  unsigned int id,
+				  void (*callback)(struct virtqueue *vq),
+				  const char *name,
+				  bool ctx)
+{
+	struct trusty_vring *tvr;
+	struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
+	phys_addr_t pa;
+	int ret;
+
+	if (!name)
+		return ERR_PTR(-EINVAL);
+
+	if (id >= tvdev->vring_num)
+		return ERR_PTR(-EINVAL);
+
+	tvr = &tvdev->vrings[id];
+
+	/* actual size of vring (in bytes) */
+	tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align));
+
+	/* allocate memory for the vring. */
+	tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO);
+	if (!tvr->vaddr) {
+		dev_err(&vdev->dev, "vring alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg_init_one(&tvr->sg, tvr->vaddr, tvr->size);
+	ret = trusty_share_memory_compat(tvdev->tctx->dev->parent,
+					 &tvr->shared_mem_id, &tvr->sg, 1,
+					 PAGE_KERNEL);
+	if (ret) {
+		pa = virt_to_phys(tvr->vaddr);
+		dev_err(&vdev->dev, "trusty_share_memory failed: %d %pa\n",
+			ret, &pa);
+		goto err_share_memory;
+	}
+
+	/* save vring address to shared structure */
+	tvr->vr_descr->da = (u32)tvr->shared_mem_id;
+
+	/* da field is only 32 bit wide. Use previously unused 'reserved' field
+	 * to store top 32 bits of 64-bit shared_mem_id
+	 */
+	tvr->vr_descr->pa = (u32)(tvr->shared_mem_id >> 32);
+
+	dev_info(&vdev->dev, "vring%d: va(id)  %p(%llx) qsz %d notifyid %d\n",
+		 id, tvr->vaddr, (u64)tvr->shared_mem_id, tvr->elem_num,
+		 tvr->notifyid);
+
+	tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align,
+				      vdev, true, ctx, tvr->vaddr,
+				      trusty_virtio_notify, callback, name);
+	if (!tvr->vq) {
+		dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n",
+			name);
+		goto err_new_virtqueue;
+	}
+
+	tvr->vq->priv = tvr;
+
+	return tvr->vq;
+
+err_new_virtqueue:
+	ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
+				    tvr->shared_mem_id, &tvr->sg, 1);
+	if (WARN_ON(ret)) {
+		dev_err(&vdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
+			ret, tvr->shared_mem_id);
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+err_share_memory:
+		free_pages_exact(tvr->vaddr, tvr->size);
+	}
+	tvr->vaddr = NULL;
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * virtio config op: create @nvqs virtqueues via _find_vq(). On the first
+ * failure every vq created so far is torn down and the error returned.
+ * @ctxs may be NULL, in which case no vq requests a per-buffer context.
+ */
+static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+				  struct virtqueue *vqs[],
+				  vq_callback_t *callbacks[],
+				  const char * const names[],
+				  const bool *ctxs,
+				  struct irq_affinity *desc)
+{
+	unsigned int i;
+
+	for (i = 0; i < nvqs; i++) {
+		vqs[i] = _find_vq(vdev, i, callbacks[i], names[i],
+				  ctxs ? ctxs[i] : false);
+		if (IS_ERR(vqs[i])) {
+			int rc = PTR_ERR(vqs[i]);
+
+			_del_vqs(vdev);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/* virtio config op: fixed bus name for this transport. */
+static const char *trusty_virtio_bus_name(struct virtio_device *vdev)
+{
+	return "trusty-virtio";
+}
+
+/*
+ * virtio_config_ops implementation backed by the descriptor table shared
+ * with Trusty; hooks everything above together.
+ */
+static const struct virtio_config_ops trusty_virtio_config_ops = {
+	.get_features = trusty_virtio_get_features,
+	.finalize_features = trusty_virtio_finalize_features,
+	.get = trusty_virtio_get_config,
+	.set = trusty_virtio_set_config,
+	.get_status = trusty_virtio_get_status,
+	.set_status = trusty_virtio_set_status,
+	.reset    = trusty_virtio_reset,
+	.find_vqs = trusty_virtio_find_vqs,
+	.del_vqs  = trusty_virtio_del_vqs,
+	.bus_name = trusty_virtio_bus_name,
+};
+
+/*
+ * Create and register one virtio device from its firmware resource
+ * descriptor. @vr_descr points at the first of num_of_vrings vring
+ * descriptors and @config at the device config area, both inside the
+ * shared descriptor table. On success the new tvdev is appended to
+ * tctx->vdev_list.
+ *
+ * NOTE(review): the list is modified without taking tctx->mlock here;
+ * the only caller, trusty_parse_device_descr(), runs under that lock.
+ */
+static int trusty_virtio_add_device(struct trusty_ctx *tctx,
+				    struct fw_rsc_vdev *vdev_descr,
+				    struct fw_rsc_vdev_vring *vr_descr,
+				    void *config)
+{
+	int i, ret;
+	struct trusty_vdev *tvdev;
+
+	tvdev = kzalloc(struct_size(tvdev, vrings, vdev_descr->num_of_vrings),
+			GFP_KERNEL);
+	if (!tvdev)
+		return -ENOMEM;
+
+	/* setup vdev */
+	tvdev->tctx = tctx;
+	tvdev->vdev.dev.parent = tctx->dev;
+	tvdev->vdev.id.device  = vdev_descr->id;
+	tvdev->vdev.config = &trusty_virtio_config_ops;
+	tvdev->vdev_descr = vdev_descr;
+	tvdev->notifyid = vdev_descr->notifyid;
+
+	/* setup config */
+	tvdev->config = config;
+	tvdev->config_len = vdev_descr->config_len;
+
+	/* setup vrings and vdev resource */
+	tvdev->vring_num = vdev_descr->num_of_vrings;
+
+	for (i = 0; i < tvdev->vring_num; i++, vr_descr++) {
+		struct trusty_vring *tvr = &tvdev->vrings[i];
+
+		tvr->tvdev    = tvdev;
+		tvr->vr_descr = vr_descr;
+		tvr->align    = vr_descr->align;
+		tvr->elem_num = vr_descr->num;
+		tvr->notifyid = vr_descr->notifyid;
+		trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ,
+				tvdev->notifyid, tvr->notifyid);
+	}
+
+	/* register device */
+	ret = register_virtio_device(&tvdev->vdev);
+	if (ret) {
+		dev_err(tctx->dev,
+			"Failed (%d) to register device dev type %u\n",
+			ret, vdev_descr->id);
+		goto err_register;
+	}
+
+	/* add it to tracking list */
+	list_add_tail(&tvdev->node, &tctx->vdev_list);
+
+	return 0;
+
+err_register:
+	kfree(tvdev);
+	return ret;
+}
+
+/*
+ * Walk the resource table Trusty wrote into @descr_va (@descr_sz bytes)
+ * and register a virtio device for each RSC_VDEV entry. Every offset and
+ * size read from the (Trusty-controlled) table is bounds-checked against
+ * descr_sz before being dereferenced. Returns 0 or -ENODEV on a
+ * malformed table. Caller holds tctx->mlock.
+ *
+ * Fixes vs. original: the second "too small" error printed descr->ver
+ * instead of descr_sz (copy-paste), and the offset-array size check is
+ * now written division-style so descr->num cannot overflow the
+ * multiplication.
+ */
+static int trusty_parse_device_descr(struct trusty_ctx *tctx,
+				     void *descr_va, size_t descr_sz)
+{
+	u32 i;
+	struct resource_table *descr = descr_va;
+
+	if (descr_sz < sizeof(*descr)) {
+		dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+			(int)descr_sz);
+		return -ENODEV;
+	}
+
+	if (descr->ver != RSC_DESCR_VER) {
+		dev_err(tctx->dev, "unexpected descr ver (0x%x)\n",
+			(int)descr->ver);
+		return -ENODEV;
+	}
+
+	/* overflow-safe form of: descr_sz < sizeof(*descr) + num * 4 */
+	if (descr->num > (descr_sz - sizeof(*descr)) / sizeof(u32)) {
+		dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+			(int)descr_sz);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < descr->num; i++) {
+		struct fw_rsc_hdr *hdr;
+		struct fw_rsc_vdev *vd;
+		struct fw_rsc_vdev_vring *vr;
+		void *cfg;
+		size_t vd_sz;
+
+		u32 offset = descr->offset[i];
+
+		if (offset >= descr_sz) {
+			dev_err(tctx->dev, "offset is out of bounds (%u)\n",
+				offset);
+			return -ENODEV;
+		}
+
+		/* check space for rsc header */
+		if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) {
+			dev_err(tctx->dev, "no space for rsc header (%u)\n",
+				offset);
+			return -ENODEV;
+		}
+		hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset);
+		offset += sizeof(struct fw_rsc_hdr);
+
+		/* check type */
+		if (hdr->type != RSC_VDEV) {
+			dev_err(tctx->dev, "unsupported rsc type (%u)\n",
+				hdr->type);
+			continue;
+		}
+
+		/* got vdev: check space for vdev */
+		if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) {
+			dev_err(tctx->dev, "no space for vdev descr (%u)\n",
+				offset);
+			return -ENODEV;
+		}
+		vd = (struct fw_rsc_vdev *)((u8 *)descr + offset);
+
+		/* check space for vrings and config area */
+		vd_sz = sizeof(struct fw_rsc_vdev) +
+			vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) +
+			vd->config_len;
+
+		if ((descr_sz - offset) < vd_sz) {
+			dev_err(tctx->dev, "no space for vdev (%u)\n", offset);
+			return -ENODEV;
+		}
+		vr = (struct fw_rsc_vdev_vring *)vd->vring;
+		cfg = (void *)(vr + vd->num_of_vrings);
+
+		/*
+		 * NOTE(review): a failure here is deliberately not fatal;
+		 * remaining entries are still registered.
+		 */
+		trusty_virtio_add_device(tctx, vd, vr, cfg);
+	}
+
+	return 0;
+}
+
+/*
+ * Unregister and free every vdev on the tracking list.
+ * Caller must hold tctx->mlock.
+ */
+static void _remove_devices_locked(struct trusty_ctx *tctx)
+{
+	struct trusty_vdev *tvdev, *tmp;
+
+	list_for_each_entry_safe(tvdev, tmp, &tctx->vdev_list, node) {
+		list_del(&tvdev->node);
+		unregister_virtio_device(&tvdev->vdev);
+		kfree(tvdev);
+	}
+}
+
+/* Locked wrapper around _remove_devices_locked(). */
+static void trusty_virtio_remove_devices(struct trusty_ctx *tctx)
+{
+	mutex_lock(&tctx->mlock);
+	_remove_devices_locked(tctx);
+	mutex_unlock(&tctx->mlock);
+}
+
+/*
+ * Bootstrap sequence: allocate one page for the descriptor table, share
+ * it with Trusty, load and parse the descriptors (registering virtio
+ * devices), register the call notifier, and finally start virtio. The
+ * shared buffer is recorded in tctx only after everything succeeded.
+ *
+ * Error unwinding mirrors the setup order. As elsewhere in this file the
+ * shared page is leaked when reclaim fails; the err_share_memory label
+ * sits inside the else branch because it is only reached when sharing
+ * never succeeded, so the page is then safe to free directly.
+ */
+static int trusty_virtio_add_devices(struct trusty_ctx *tctx)
+{
+	int ret;
+	int ret_tmp;
+	void *descr_va;
+	trusty_shared_mem_id_t descr_id;
+	size_t descr_sz;
+	size_t descr_buf_sz;
+
+	/* allocate buffer to load device descriptor into */
+	descr_buf_sz = PAGE_SIZE;
+	descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO);
+	if (!descr_va) {
+		dev_err(tctx->dev, "Failed to allocate shared area\n");
+		return -ENOMEM;
+	}
+
+	sg_init_one(&tctx->shared_sg, descr_va, descr_buf_sz);
+	ret = trusty_share_memory(tctx->dev->parent, &descr_id,
+				  &tctx->shared_sg, 1, PAGE_KERNEL);
+	if (ret) {
+		dev_err(tctx->dev, "trusty_share_memory failed: %d\n", ret);
+		goto err_share_memory;
+	}
+
+	/* load device descriptors */
+	ret = trusty_load_device_descr(tctx, descr_id, descr_buf_sz);
+	if (ret < 0) {
+		dev_err(tctx->dev, "failed (%d) to load device descr\n", ret);
+		goto err_load_descr;
+	}
+
+	descr_sz = (size_t)ret;
+
+	mutex_lock(&tctx->mlock);
+
+	/* parse device descriptor and add virtio devices */
+	ret = trusty_parse_device_descr(tctx, descr_va, descr_sz);
+	if (ret) {
+		dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret);
+		goto err_parse_descr;
+	}
+
+	/* register call notifier */
+	ret = trusty_call_notifier_register(tctx->dev->parent,
+					    &tctx->call_notifier);
+	if (ret) {
+		dev_err(tctx->dev, "%s: failed (%d) to register notifier\n",
+			__func__, ret);
+		goto err_register_notifier;
+	}
+
+	/* start virtio */
+	ret = trusty_virtio_start(tctx, descr_id, descr_sz);
+	if (ret) {
+		dev_err(tctx->dev, "failed (%d) to start virtio\n", ret);
+		goto err_start_virtio;
+	}
+
+	/* attach shared area */
+	tctx->shared_va = descr_va;
+	tctx->shared_id = descr_id;
+	tctx->shared_sz = descr_buf_sz;
+
+	mutex_unlock(&tctx->mlock);
+
+	return 0;
+
+err_start_virtio:
+	trusty_call_notifier_unregister(tctx->dev->parent,
+					&tctx->call_notifier);
+	cancel_work_sync(&tctx->check_vqs);
+err_register_notifier:
+err_parse_descr:
+	_remove_devices_locked(tctx);
+	mutex_unlock(&tctx->mlock);
+	cancel_work_sync(&tctx->kick_vqs);
+	trusty_virtio_stop(tctx, descr_id, descr_sz);
+err_load_descr:
+	ret_tmp = trusty_reclaim_memory(tctx->dev->parent, descr_id,
+					&tctx->shared_sg, 1);
+	if (WARN_ON(ret_tmp)) {
+		dev_err(tctx->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
+			ret_tmp, tctx->shared_id);
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+err_share_memory:
+		free_pages_exact(descr_va, descr_buf_sz);
+	}
+	return ret;
+}
+
+/*
+ * Minimal dma_map_ops hook: the "DMA address" handed back to the virtio
+ * core is the Trusty buffer id read from the tipc_msg_buf that lives at
+ * the mapped page offset — not a real bus address.
+ * NOTE(review): this assumes every page mapped on this bus holds a
+ * struct tipc_msg_buf at that offset; confirm against the tipc users.
+ */
+static dma_addr_t trusty_virtio_dma_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset, size_t size,
+					     enum dma_data_direction dir,
+					     unsigned long attrs)
+{
+	struct tipc_msg_buf *buf = page_to_virt(page) + offset;
+
+	return buf->buf_id;
+}
+
+/* Only map_page is needed; all other DMA ops are unused on this bus. */
+static const struct dma_map_ops trusty_virtio_dma_map_ops = {
+	.map_page = trusty_virtio_dma_map_page,
+};
+
+/*
+ * Platform probe: allocate driver state, install the buf_id-returning
+ * dma_map_ops, create the check/kick workqueues and enumerate the virtio
+ * devices offered by Trusty. Unwinds in reverse order on failure.
+ */
+static int trusty_virtio_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct trusty_ctx *tctx;
+
+	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
+	if (!tctx)
+		return -ENOMEM;
+
+	tctx->dev = &pdev->dev;
+	tctx->call_notifier.notifier_call = trusty_call_notify;
+	mutex_init(&tctx->mlock);
+	INIT_LIST_HEAD(&tctx->vdev_list);
+	INIT_WORK(&tctx->check_vqs, check_all_vqs);
+	INIT_WORK(&tctx->kick_vqs, kick_vqs);
+	platform_set_drvdata(pdev, tctx);
+
+	set_dma_ops(&pdev->dev, &trusty_virtio_dma_map_ops);
+
+	tctx->check_wq = alloc_workqueue("trusty-check-wq",
+					 WQ_UNBOUND | WQ_HIGHPRI, 0);
+	if (!tctx->check_wq) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "Failed create trusty-check-wq\n");
+		goto err_create_check_wq;
+	}
+
+	tctx->kick_wq = alloc_workqueue("trusty-kick-wq",
+					WQ_UNBOUND | WQ_CPU_INTENSIVE, 0);
+	if (!tctx->kick_wq) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "Failed create trusty-kick-wq\n");
+		goto err_create_kick_wq;
+	}
+
+	ret = trusty_virtio_add_devices(tctx);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to add virtio devices\n");
+		goto err_add_devices;
+	}
+
+	dev_info(&pdev->dev, "initializing done\n");
+	return 0;
+
+err_add_devices:
+	destroy_workqueue(tctx->kick_wq);
+err_create_kick_wq:
+	destroy_workqueue(tctx->check_wq);
+err_create_check_wq:
+	kfree(tctx);
+	return ret;
+}
+
+/*
+ * Platform remove: mirror of probe/add_devices — stop new work, remove
+ * the virtio devices, destroy the workqueues, tell Trusty the shared
+ * descriptor goes away, then reclaim (or deliberately leak) it.
+ */
+static int trusty_virtio_remove(struct platform_device *pdev)
+{
+	struct trusty_ctx *tctx = platform_get_drvdata(pdev);
+	int ret;
+
+	/* unregister call notifier and wait until workqueue is done */
+	trusty_call_notifier_unregister(tctx->dev->parent,
+					&tctx->call_notifier);
+	cancel_work_sync(&tctx->check_vqs);
+
+	/* remove virtio devices */
+	trusty_virtio_remove_devices(tctx);
+	cancel_work_sync(&tctx->kick_vqs);
+
+	/* destroy workqueues */
+	destroy_workqueue(tctx->kick_wq);
+	destroy_workqueue(tctx->check_wq);
+
+	/* notify remote that shared area goes away */
+	trusty_virtio_stop(tctx, tctx->shared_id, tctx->shared_sz);
+
+	/* free shared area */
+	ret = trusty_reclaim_memory(tctx->dev->parent, tctx->shared_id,
+				    &tctx->shared_sg, 1);
+	if (WARN_ON(ret)) {
+		dev_err(tctx->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
+			ret, tctx->shared_id);
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+		free_pages_exact(tctx->shared_va, tctx->shared_sz);
+	}
+
+	/* free context */
+	kfree(tctx);
+	return 0;
+}
+
+/* Bound via the "android,trusty-virtio-v1" devicetree node. */
+static const struct of_device_id trusty_of_match[] = {
+	{
+		.compatible = "android,trusty-virtio-v1",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, trusty_of_match);
+
+static struct platform_driver trusty_virtio_driver = {
+	.probe = trusty_virtio_probe,
+	.remove = trusty_virtio_remove,
+	.driver = {
+		.name = "trusty-virtio",
+		.of_match_table = trusty_of_match,
+	},
+};
+
+module_platform_driver(trusty_virtio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty virtio driver");
+/*
+ * TODO(b/168322325): trusty-virtio and trusty-ipc should be independent.
+ * However, trusty-virtio is not completely generic and is aware of trusty-ipc.
+ * See header includes. Particularly, trusty-virtio.ko can't be loaded before
+ * trusty-ipc.ko.
+ */
+MODULE_SOFTDEP("pre: trusty-ipc");
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
new file mode 100644
index 0000000..79257b0
--- /dev/null
+++ b/drivers/trusty/trusty.c
@@ -0,0 +1,1302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Google, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/trusty/arm_ffa.h>
+#include <linux/trusty/smcall.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/trusty/trusty.h>
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#if CONFIG_ARM64
+#include <asm/daifflags.h>
+#endif
+
+#include "trusty-irq.h"
+#include "trusty-smc.h"
+#include "trusty-trace.h"
+#include "trusty-sched-share-api.h"
+
+
+struct trusty_state;
+static struct platform_driver trusty_driver;
+static int trusty_cpuhp_slot = -1;
+
+static bool use_high_wq;
+module_param(use_high_wq, bool, 0660);
+
+static int nop_nice_value = -20; /* default to highest */
+module_param(nop_nice_value, int, 0660);
+
+/* Per-cpu state for the NOP-dispatch thread. */
+struct trusty_work {
+	struct trusty_state *s;		/* back-pointer to driver state */
+	unsigned int cpu;		/* cpu this worker is bound to */
+	struct task_struct *nop_thread;	/* kthread draining nop_queue */
+	wait_queue_head_t nop_event_wait;
+	int signaled;			/* wake-up condition flag */
+};
+
+/* Driver-wide state for the core trusty platform device. */
+struct trusty_state {
+	struct mutex smc_lock;		/* serializes non-NOP std calls */
+	struct atomic_notifier_head notifier;	/* TRUSTY_CALL_PREPARE/RETURNED */
+	struct completion cpu_idle_completion;	/* signaled by returning NOP calls */
+	char *version_str;
+	u32 api_version;
+	bool trusty_panicked;		/* latched on SM_ERR_PANIC */
+	struct device *dev;
+	struct hlist_node cpuhp_node;
+	struct trusty_work __percpu *nop_works;
+	struct list_head nop_queue;
+	spinlock_t nop_lock; /* protects nop_queue */
+	struct device_dma_parameters dma_parms;
+	struct trusty_sched_share_state *trusty_sched_share_state;
+	void *ffa_tx;			/* FF-A transmit buffer */
+	void *ffa_rx;			/* FF-A receive buffer */
+	u16 ffa_local_id;
+	u16 ffa_remote_id;
+	struct mutex share_memory_msg_lock; /* protects share_memory_msg */
+};
+
+/*
+ * Issue a 4-argument SMC via trusty_smc8() (remaining registers zero),
+ * tracing the call and the returned r0.
+ */
+static inline unsigned long smc(unsigned long r0, unsigned long r1,
+				unsigned long r2, unsigned long r3)
+{
+	unsigned long ret;
+
+	trace_trusty_smc(r0, r1, r2, r3);
+	ret = trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0;
+	trace_trusty_smc_done(ret);
+	return ret;
+}
+
+/*
+ * Make a 32-bit fast SMC into Trusty. @smcnr must be a fast, SMC32 call
+ * number; violations (or a missing drvdata) return
+ * SM_ERR_INVALID_PARAMETERS with a WARN.
+ */
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+	if (WARN_ON(!s))
+		return SM_ERR_INVALID_PARAMETERS;
+	if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+	if (WARN_ON(SMC_IS_SMC64(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+
+	return smc(smcnr, a0, a1, a2);
+}
+EXPORT_SYMBOL(trusty_fast_call32);
+
+#ifdef CONFIG_64BIT
+/*
+ * 64-bit counterpart of trusty_fast_call32(): @smcnr must be a fast,
+ * SMC64 call number. Only built on 64-bit kernels.
+ */
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+	if (WARN_ON(!s))
+		return SM_ERR_INVALID_PARAMETERS;
+	if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+	if (WARN_ON(!SMC_IS_SMC64(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+
+	return smc(smcnr, a0, a1, a2);
+}
+EXPORT_SYMBOL(trusty_fast_call64);
+#endif
+
+/*
+ * Issue one std call: restart immediately after SM_ERR_FIQ_INTERRUPTED
+ * (via SMC_SC_RESTART_FIQ), and retry up to five times while Trusty
+ * reports SM_ERR_BUSY. Any other result is returned to the caller.
+ */
+static unsigned long trusty_std_call_inner(struct device *dev,
+					   unsigned long smcnr,
+					   unsigned long a0, unsigned long a1,
+					   unsigned long a2)
+{
+	unsigned long ret;
+	int retry = 5;
+
+	dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
+		__func__, smcnr, a0, a1, a2);
+	while (true) {
+		ret = smc(smcnr, a0, a1, a2);
+		while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
+			ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
+		if ((int)ret != SM_ERR_BUSY || !retry)
+			break;
+
+		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
+			__func__, smcnr, a0, a1, a2);
+		retry--;
+	}
+
+	return ret;
+}
+
+#if CONFIG_ARM64
+
+static void trusty_local_irq_disable_before_smc(void)
+{
+	local_daif_mask();
+}
+
+static void trusty_local_irq_enable_after_smc(void)
+{
+	local_daif_restore(DAIF_PROCCTX);
+}
+
+#else
+
+static void trusty_local_irq_disable_before_smc(void)
+{
+	local_irq_disable();
+}
+
+static void trusty_local_irq_enable_after_smc(void)
+{
+	local_irq_enable();
+}
+
+#endif
+
+/*
+ * Run one std call with local IRQs masked, with TRUSTY_CALL_PREPARE /
+ * TRUSTY_CALL_RETURNED notifiers around it. Handles:
+ *  - publishing the caller's nice value to the Trusty scheduler,
+ *  - SM_ERR_PANIC: latch trusty_panicked and panic()/WARN per config,
+ *  - SM_ERR_INTERRUPTED: enqueue a NOP so this cpu re-enters Trusty
+ *    even if the std call resumes on another cpu,
+ *  - SM_ERR_BUSY: exponential msleep backoff (1ms, doubling while
+ *    under 1000ms), warning once the wait grows long.
+ */
+static unsigned long trusty_std_call_helper(struct device *dev,
+					    unsigned long smcnr,
+					    unsigned long a0, unsigned long a1,
+					    unsigned long a2)
+{
+	unsigned long ret;
+	int sleep_time = 1;
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+	while (true) {
+		trusty_local_irq_disable_before_smc();
+
+		/* tell Trusty scheduler what the current priority is */
+		WARN_ON_ONCE(current->policy != SCHED_NORMAL);
+		trusty_set_actual_nice(smp_processor_id(),
+				s->trusty_sched_share_state, task_nice(current));
+
+		atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
+					   NULL);
+
+		ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
+		if (ret == SM_ERR_PANIC) {
+			s->trusty_panicked = true;
+			if (IS_ENABLED(CONFIG_TRUSTY_CRASH_IS_PANIC))
+				panic("trusty crashed");
+			else
+				WARN_ONCE(1, "trusty crashed");
+		}
+
+		atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
+					   NULL);
+		if (ret == SM_ERR_INTERRUPTED) {
+			/*
+			 * Make sure this cpu will eventually re-enter trusty
+			 * even if the std_call resumes on another cpu.
+			 */
+			trusty_enqueue_nop(dev, NULL);
+		}
+		trusty_local_irq_enable_after_smc();
+
+		if ((int)ret != SM_ERR_BUSY)
+			break;
+
+		if (sleep_time == 256)
+			dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
+				 __func__, smcnr, a0, a1, a2);
+		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
+			__func__, smcnr, a0, a1, a2, sleep_time);
+
+		msleep(sleep_time);
+		if (sleep_time < 1000)
+			sleep_time <<= 1;
+
+		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
+			__func__, smcnr, a0, a1, a2);
+	}
+
+	if (sleep_time > 256)
+		dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
+			 __func__, smcnr, a0, a1, a2);
+
+	return ret;
+}
+
+/*
+ * Wait up to 10 seconds for cpu_idle_completion, which a returning NOP
+ * call completes (see trusty_std_call32()). On timeout only warn — the
+ * caller retries anyway.
+ */
+static void trusty_std_call_cpu_idle(struct trusty_state *s)
+{
+	unsigned long left;
+
+	left = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
+	if (!left)
+		dev_warn(s->dev,
+			 "%s: timed out waiting for cpu idle to clear, retry anyway\n",
+			 __func__);
+}
+
+/*
+ * Make a 32-bit standard (interruptible) call into Trusty.
+ *
+ * NOP calls bypass smc_lock and complete cpu_idle_completion on return;
+ * all other std calls are serialized by smc_lock and are restarted with
+ * SMC_SC_RESTART_LAST while Trusty reports SM_ERR_INTERRUPTED or
+ * SM_ERR_CPU_IDLE (the latter after waiting via
+ * trusty_std_call_cpu_idle()). Once Trusty has panicked, returns
+ * SM_ERR_PANIC immediately without invoking any notifiers.
+ */
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
+{
+	int ret;
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+	if (WARN_ON(SMC_IS_FASTCALL(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+
+	if (WARN_ON(SMC_IS_SMC64(smcnr)))
+		return SM_ERR_INVALID_PARAMETERS;
+
+	if (s->trusty_panicked) {
+		/*
+		 * Avoid calling the notifiers if trusty has panicked as they
+		 * can trigger more calls.
+		 */
+		return SM_ERR_PANIC;
+	}
+
+	trace_trusty_std_call32(smcnr, a0, a1, a2);
+
+	if (smcnr != SMC_SC_NOP) {
+		mutex_lock(&s->smc_lock);
+		reinit_completion(&s->cpu_idle_completion);
+	}
+
+	dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
+		__func__, smcnr, a0, a1, a2);
+
+	ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
+	while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
+		dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
+			__func__, smcnr, a0, a1, a2);
+		if (ret == SM_ERR_CPU_IDLE)
+			trusty_std_call_cpu_idle(s);
+		ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
+	}
+	dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
+		__func__, smcnr, a0, a1, a2, ret);
+
+	if (smcnr == SMC_SC_NOP)
+		complete(&s->cpu_idle_completion);
+	else
+		mutex_unlock(&s->smc_lock);
+
+	trace_trusty_std_call32_done(ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(trusty_std_call32);
+
+/*
+ * Share (not lend) @sglist with Trusty: thin wrapper around
+ * trusty_transfer_memory() with tag 0 and lend == false.
+ */
+int trusty_share_memory(struct device *dev, u64 *id,
+			struct scatterlist *sglist, unsigned int nents,
+			pgprot_t pgprot)
+{
+	return trusty_transfer_memory(dev, id, sglist, nents, pgprot, 0,
+				      false);
+}
+EXPORT_SYMBOL(trusty_share_memory);
+
+int trusty_transfer_memory(struct device *dev, u64 *id,
+			   struct scatterlist *sglist, unsigned int nents,
+			   pgprot_t pgprot, u64 tag, bool lend)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	int ret;
+	struct ns_mem_page_info pg_inf;
+	struct scatterlist *sg;
+	size_t count;
+	size_t i;
+	size_t len = 0;
+	u64 ffa_handle = 0;
+	size_t total_len;
+	size_t endpoint_count = 1;
+	struct ffa_mtd *mtd = s->ffa_tx;
+	size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]);
+	struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset;
+	struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array;
+	size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx;
+	struct smc_ret8 smc_ret;
+	u32 cookie_low;
+	u32 cookie_high;
+
+	if (WARN_ON(dev->driver != &trusty_driver.driver))
+		return -EINVAL;
+
+	if (WARN_ON(nents < 1))
+		return -EINVAL;
+
+	if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+		dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n",
+			__func__);
+		return -EOPNOTSUPP;
+	}
+
+	count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+	if (count != nents) {
+		dev_err(s->dev, "failed to dma map sg_table\n");
+		return -EINVAL;
+	}
+
+	sg = sglist;
+	ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)),
+				      pgprot);
+	if (ret) {
+		dev_err(s->dev, "%s: trusty_encode_page_info failed\n",
+			__func__);
+		goto err_encode_page_info;
+	}
+
+	if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+		*id = pg_inf.compat_attr;
+		return 0;
+	}
+
+	len = 0;
+	for_each_sg(sglist, sg, nents, i)
+		len += sg_dma_len(sg);
+
+	trace_trusty_share_memory(len, nents, lend);
+
+	mutex_lock(&s->share_memory_msg_lock);
+
+	mtd->sender_id = s->ffa_local_id;
+	mtd->memory_region_attributes = pg_inf.ffa_mem_attr;
+	mtd->reserved_3 = 0;
+	mtd->flags = 0;
+	mtd->handle = 0;
+	mtd->tag = tag;
+	mtd->reserved_24_27 = 0;
+	mtd->emad_count = endpoint_count;
+	for (i = 0; i < endpoint_count; i++) {
+		struct ffa_emad *emad = &mtd->emad[i];
+		/* TODO: support stream ids */
+		emad->mapd.endpoint_id = s->ffa_remote_id;
+		emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm;
+		emad->mapd.flags = 0;
+		emad->comp_mrd_offset = comp_mrd_offset;
+		emad->reserved_8_15 = 0;
+	}
+	comp_mrd->total_page_count = len / FFA_PAGE_SIZE;
+	comp_mrd->address_range_count = nents;
+	comp_mrd->reserved_8_15 = 0;
+
+	total_len = cons_mrd_offset + nents * sizeof(*cons_mrd);
+	sg = sglist;
+	while (count) {
+		size_t lcount =
+			min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) /
+			      sizeof(*cons_mrd));
+		size_t fragment_len = lcount * sizeof(*cons_mrd) +
+				      cons_mrd_offset;
+
+		for (i = 0; i < lcount; i++) {
+			cons_mrd[i].address = sg_dma_address(sg);
+			cons_mrd[i].page_count = sg_dma_len(sg) / FFA_PAGE_SIZE;
+			cons_mrd[i].reserved_12_15 = 0;
+			sg = sg_next(sg);
+		}
+		count -= lcount;
+		if (cons_mrd_offset) {
+			u32 smc = lend ? SMC_FC_FFA_MEM_LEND :
+					 SMC_FC_FFA_MEM_SHARE;
+			/* First fragment */
+			trace_trusty_smc(smc, total_len, fragment_len, 0);
+			smc_ret = trusty_smc8(smc, total_len,
+					      fragment_len, 0, 0, 0, 0, 0);
+			trace_trusty_smc_done(smc_ret.r0);
+		} else {
+			trace_trusty_smc(SMC_FC_FFA_MEM_FRAG_TX, cookie_low,
+					cookie_high, fragment_len);
+			smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX,
+					      cookie_low, cookie_high,
+					      fragment_len, 0, 0, 0, 0);
+			trace_trusty_smc_done(smc_ret.r0);
+		}
+		if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) {
+			cookie_low = smc_ret.r1;
+			cookie_high = smc_ret.r2;
+			dev_dbg(s->dev, "cookie %x %x", cookie_low,
+				cookie_high);
+			if (!count) {
+				/*
+				 * We have sent all our descriptors. Expected
+				 * SMC_FC_FFA_SUCCESS, not a request to send
+				 * another fragment.
+				 */
+				dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n",
+					__func__, fragment_len, total_len);
+				ret = -EIO;
+				break;
+			}
+		} else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) {
+			ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32;
+			dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n",
+				__func__, fragment_len, total_len,
+				ffa_handle);
+			if (count) {
+				/*
+				 * We have not sent all our descriptors.
+				 * Expected SMC_FC_FFA_MEM_FRAG_RX not
+				 * SMC_FC_FFA_SUCCESS.
+				 */
+				dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n",
+					__func__, fragment_len, total_len,
+					count);
+				ret = -EIO;
+				break;
+			}
+		} else {
+			dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx",
+				__func__, fragment_len, total_len,
+				smc_ret.r0, smc_ret.r1, smc_ret.r2);
+			ret = -EIO;
+			break;
+		}
+
+		cons_mrd = s->ffa_tx;
+		cons_mrd_offset = 0;
+	}
+
+	mutex_unlock(&s->share_memory_msg_lock);
+
+	if (!ret) {
+		*id = ffa_handle;
+		dev_dbg(s->dev, "%s: done\n", __func__);
+		goto done;
+	}
+
+	dev_err(s->dev, "%s: failed %d", __func__, ret);
+
+err_encode_page_info:
+	dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+done:
+	trace_trusty_share_memory_done(len, nents, lend, ffa_handle, ret);
+	return ret;
+}
+EXPORT_SYMBOL(trusty_transfer_memory);
+
+/*
+ * trusty_share_memory_compat - trusty_share_memory wrapper for legacy clients
+ *
+ * Shares memory through trusty_share_memory(), then, when talking to a
+ * Trusty that predates TRUSTY_API_VERSION_PHYS_MEM_OBJ, masks the returned
+ * id down to a bare page-aligned physical address. Used by clients that
+ * historically handed Trusty a plain physical address instead of an address
+ * plus encoded memory-attribute bits.
+ */
+int trusty_share_memory_compat(struct device *dev, u64 *id,
+			       struct scatterlist *sglist, unsigned int nents,
+			       pgprot_t pgprot)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	int rc;
+
+	rc = trusty_share_memory(dev, id, sglist, nents, pgprot);
+	if (rc)
+		return rc;
+
+	/* Strip attribute bits: old Trusty expects only the address bits. */
+	if (s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ)
+		*id &= 0x0000FFFFFFFFF000ull;
+
+	return 0;
+}
+EXPORT_SYMBOL(trusty_share_memory_compat);
+
+/*
+ * trusty_reclaim_memory - reclaim memory previously shared or lent to Trusty
+ * @dev:    trusty platform device
+ * @id:     FF-A handle (or, for pre-MEM_OBJ Trusty, the encoded physical
+ *          address) returned when the memory was shared
+ * @sglist: scatterlist originally passed when sharing
+ * @nents:  number of entries in @sglist
+ *
+ * Returns 0 on success, -EBUSY if the secure side reports the memory is
+ * still in use (FFA_ERROR_DENIED), -EOPNOTSUPP for multi-entry lists on
+ * old API versions, -EIO on other SMC failures, or -EINVAL on bad input.
+ * The dma mapping is only torn down once the reclaim has succeeded.
+ */
+int trusty_reclaim_memory(struct device *dev, u64 id,
+			  struct scatterlist *sglist, unsigned int nents)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	int ret = 0;
+	struct smc_ret8 smc_ret;
+
+	/* Only meaningful for devices bound to this driver. */
+	if (WARN_ON(dev->driver != &trusty_driver.driver))
+		return -EINVAL;
+
+	if (WARN_ON(nents < 1))
+		return -EINVAL;
+
+	if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
+		/* Legacy API only ever shared a single contiguous region. */
+		if (nents != 1) {
+			dev_err(s->dev, "%s: not supported\n", __func__);
+			return -EOPNOTSUPP;
+		}
+
+		/*
+		 * Nothing to tell the secure side: only the dma mapping
+		 * created when sharing needs to be undone.
+		 */
+		dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+		dev_dbg(s->dev, "%s: done\n", __func__);
+		return 0;
+	}
+
+	trace_trusty_reclaim_memory(id);
+	/* Serialize against other FF-A memory transactions on this device. */
+	mutex_lock(&s->share_memory_msg_lock);
+
+	/* The 64-bit handle is passed as two 32-bit SMC arguments. */
+	smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0,
+			      0, 0, 0);
+	if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+		dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx",
+			__func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+		/* DENIED means the receiver still holds the memory. */
+		if (smc_ret.r0 == SMC_FC_FFA_ERROR &&
+		    smc_ret.r2 == FFA_ERROR_DENIED)
+			ret = -EBUSY;
+		else
+			ret = -EIO;
+	}
+
+	mutex_unlock(&s->share_memory_msg_lock);
+
+	if (ret != 0)
+		goto err_ffa_mem_reclaim;
+
+	/* Reclaim succeeded: now it is safe to drop the dma mapping. */
+	dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+	dev_dbg(s->dev, "%s: done\n", __func__);
+
+err_ffa_mem_reclaim:
+	trace_trusty_reclaim_memory_done(id, ret);
+	return ret;
+}
+EXPORT_SYMBOL(trusty_reclaim_memory);
+
+/* Register @n on the trusty call notifier chain for this device. */
+int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct trusty_state *s = platform_get_drvdata(pdev);
+
+	return atomic_notifier_chain_register(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_register);
+
+/* Remove @n from the trusty call notifier chain for this device. */
+int trusty_call_notifier_unregister(struct device *dev,
+				    struct notifier_block *n)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct trusty_state *s = platform_get_drvdata(pdev);
+
+	return atomic_notifier_chain_unregister(&s->notifier, n);
+}
+EXPORT_SYMBOL(trusty_call_notifier_unregister);
+
+/* device_for_each_child() callback: tear down one child platform device. */
+static int trusty_remove_child(struct device *dev, void *data)
+{
+	struct platform_device *child = to_platform_device(dev);
+
+	platform_device_unregister(child);
+	return 0;
+}
+
+/* sysfs read handler: report the version string cached at probe time. */
+static ssize_t trusty_version_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	const char *version = s->version_str;
+
+	if (!version)
+		version = "unknown";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", version);
+}
+
+static DEVICE_ATTR(trusty_version, 0400, trusty_version_show, NULL);
+
+/* sysfs attributes exposed by the trusty platform device. */
+static struct attribute *trusty_attrs[] = {
+	&dev_attr_trusty_version.attr,
+	NULL,
+};
+/* Generates trusty_groups, wired into trusty_driver.driver.dev_groups. */
+ATTRIBUTE_GROUPS(trusty);
+
+/*
+ * Return the Trusty version string cached at probe time, or NULL if it
+ * could not be retrieved. The string is owned by the driver state.
+ */
+const char *trusty_version_str_get(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct trusty_state *s = platform_get_drvdata(pdev);
+
+	return s->version_str;
+}
+EXPORT_SYMBOL(trusty_version_str_get);
+
+/*
+ * trusty_init_msg_buf - negotiate FF-A support and map the rx/tx buffers
+ * @s:   trusty state being initialized
+ * @dev: trusty platform device
+ *
+ * No-op (returns 0) for API versions without memory-object support.
+ * Otherwise: verifies the FF-A major version, checks SMC_FC_FFA_MEM_SHARE
+ * is implemented, reads this endpoint's FF-A id, allocates one page each
+ * for tx and rx, and registers them with SMC_FCZ_FFA_RXTX_MAP. Everything
+ * allocated here is freed again on failure. Returns 0 or a negative errno.
+ */
+static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev)
+{
+	phys_addr_t tx_paddr;
+	phys_addr_t rx_paddr;
+	int ret;
+	struct smc_ret8 smc_ret;
+
+	if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ)
+		return 0;
+
+	/* Get supported FF-A version and check if it is compatible */
+	smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0,
+			      0, 0, 0, 0);
+	if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) {
+		dev_err(s->dev,
+			"%s: Unsupported FF-A version 0x%lx, expected 0x%x\n",
+			__func__, smc_ret.r0, FFA_CURRENT_VERSION);
+		ret = -EIO;
+		goto err_version;
+	}
+
+	/* Check that SMC_FC_FFA_MEM_SHARE is implemented */
+	smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0,
+			      0, 0, 0, 0);
+	if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+		dev_err(s->dev,
+			"%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n",
+			__func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+		ret = -EIO;
+		goto err_features;
+	}
+
+	/*
+	 * Set FF-A endpoint IDs.
+	 *
+	 * Hardcode 0x8000 for the secure os.
+	 * TODO: Use FF-A call or device tree to configure this dynamically
+	 */
+	smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
+	if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+		dev_err(s->dev,
+			"%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n",
+			__func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+		ret = -EIO;
+		goto err_id_get;
+	}
+
+	s->ffa_local_id = smc_ret.r2;
+	s->ffa_remote_id = 0x8000;
+
+	/*
+	 * The pKVM hypervisor uses the same page size as the host, including for
+	 * stage-2 mappings. So the rx/tx buffers need to be page-sized multiple,
+	 * and page-aligned.
+	 *
+	 * TODO: This can be made more generic by discovering the required size
+	 * through SMC_FC_FFA_FEATURES later.
+	 */
+	s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!s->ffa_tx) {
+		ret = -ENOMEM;
+		goto err_alloc_tx;
+	}
+	tx_paddr = virt_to_phys(s->ffa_tx);
+	/* A PAGE_SIZE kmalloc should be page aligned; refuse to map if not. */
+	if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) {
+		ret = -EINVAL;
+		goto err_unaligned_tx_buf;
+	}
+
+	s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!s->ffa_rx) {
+		ret = -ENOMEM;
+		goto err_alloc_rx;
+	}
+	rx_paddr = virt_to_phys(s->ffa_rx);
+	if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) {
+		ret = -EINVAL;
+		goto err_unaligned_rx_buf;
+	}
+
+	/* Buffer count is given in 4K FF-A pages, not kernel pages. */
+	smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr,
+				PAGE_SIZE / FFA_PAGE_SIZE, 0, 0, 0, 0);
+	if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+		dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n",
+			__func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+		ret = -EIO;
+		goto err_rxtx_map;
+	}
+
+	return 0;
+
+err_rxtx_map:
+err_unaligned_rx_buf:
+	kfree(s->ffa_rx);
+	s->ffa_rx = NULL;
+err_alloc_rx:
+err_unaligned_tx_buf:
+	kfree(s->ffa_tx);
+	s->ffa_tx = NULL;
+err_alloc_tx:
+err_id_get:
+err_features:
+err_version:
+	return ret;
+}
+
+/*
+ * trusty_free_msg_buf - unmap and free the FF-A rx/tx buffers
+ * @s:   trusty state
+ * @dev: trusty platform device (unused, kept for symmetry with init)
+ *
+ * Counterpart of trusty_init_msg_buf(). The buffers are only allocated and
+ * mapped when the MEM_OBJ API is available, so skip the FF-A unmap call
+ * entirely when there is nothing to release (old API, or probe failed
+ * before the buffers were set up). If the unmap SMC fails, deliberately
+ * leak the buffers rather than free memory the secure side may still map.
+ */
+static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev)
+{
+	struct smc_ret8 smc_ret;
+
+	if (!s->ffa_tx)
+		return;
+
+	smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0);
+	if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
+		dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n",
+			__func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
+	} else {
+		kfree(s->ffa_rx);
+		kfree(s->ffa_tx);
+		/* Clear so a repeated call is a harmless no-op. */
+		s->ffa_rx = NULL;
+		s->ffa_tx = NULL;
+	}
+}
+
+/*
+ * trusty_init_version - fetch and cache the Trusty version string
+ * @s:   trusty state; s->version_str is set on success, left NULL on failure
+ * @dev: trusty platform device
+ *
+ * First asks for the string length (index -1), then fetches the string one
+ * character per fast-call. Failure is non-fatal: the driver continues with
+ * s->version_str == NULL and sysfs reports "unknown".
+ */
+static void trusty_init_version(struct trusty_state *s, struct device *dev)
+{
+	int ret;
+	int i;
+	int version_str_len;
+
+	ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
+	if (ret <= 0)
+		goto err_get_size;
+
+	version_str_len = ret;
+
+	/* +1 for the NUL terminator appended below. */
+	s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
+	if (!s->version_str) {
+		/* Original code dereferenced a NULL buffer here on OOM. */
+		ret = -ENOMEM;
+		goto err_get_size;
+	}
+
+	for (i = 0; i < version_str_len; i++) {
+		ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
+		if (ret < 0)
+			goto err_get_char;
+		s->version_str[i] = ret;
+	}
+	s->version_str[i] = '\0';
+
+	dev_info(dev, "trusty version: %s\n", s->version_str);
+	return;
+
+err_get_char:
+	kfree(s->version_str);
+	s->version_str = NULL;
+err_get_size:
+	dev_err(dev, "failed to get version: %d\n", ret);
+}
+
+/* Return the Trusty API version negotiated at probe time. */
+u32 trusty_get_api_version(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct trusty_state *s = platform_get_drvdata(pdev);
+
+	return s->api_version;
+}
+EXPORT_SYMBOL(trusty_get_api_version);
+
+/* Return true if Trusty has panicked; false for non-trusty devices. */
+bool trusty_get_panic_status(struct device *dev)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
+	/* Only meaningful for devices bound to this driver. */
+	if (WARN_ON(dev->driver != &trusty_driver.driver))
+		return false;
+
+	return s->trusty_panicked;
+}
+EXPORT_SYMBOL(trusty_get_panic_status);
+
+/*
+ * trusty_init_api_version - negotiate the API version with Trusty
+ *
+ * Offers TRUSTY_API_VERSION_CURRENT and records whatever Trusty selects.
+ * Returns 0 on success or -EINVAL if Trusty reports a version newer than
+ * this driver understands.
+ */
+static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
+{
+	u32 version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
+					 TRUSTY_API_VERSION_CURRENT, 0, 0);
+
+	/* Legacy Trusty does not implement SMC_FC_API_VERSION at all. */
+	if (version == SM_ERR_UNDEFINED_SMC)
+		version = 0;
+
+	if (version > TRUSTY_API_VERSION_CURRENT) {
+		dev_err(dev, "unsupported api version %u > %u\n",
+			version, TRUSTY_API_VERSION_CURRENT);
+		return -EINVAL;
+	}
+
+	dev_info(dev, "selected api version: %u (requested %u)\n",
+		 version, TRUSTY_API_VERSION_CURRENT);
+	s->api_version = version;
+
+	return 0;
+}
+
+/*
+ * dequeue_nop - fetch the next pending nop (if any) for this cpu
+ * @s:    trusty state
+ * @args: out: three SMC_SC_NOP arguments; zeroed when the queue is empty
+ *
+ * Returns true when the caller should issue (another) SMC_SC_NOP: either a
+ * queued nop was copied into @args, or this cpu's worker was signaled since
+ * the last call. Clears the per-cpu signaled flag as a side effect. Must be
+ * called from the per-cpu nop thread (uses this_cpu_ptr under the lock).
+ */
+static bool dequeue_nop(struct trusty_state *s, u32 *args)
+{
+	unsigned long flags;
+	struct trusty_nop *nop = NULL;
+	struct trusty_work *tw;
+	bool ret = false;
+	bool signaled;
+	bool nop_dequeued = false;
+	bool queue_emptied = false;
+
+	spin_lock_irqsave(&s->nop_lock, flags);
+	tw = this_cpu_ptr(s->nop_works);
+	/* Snapshot for tracing before the flag is cleared below. */
+	signaled = tw->signaled;
+	if (!list_empty(&s->nop_queue)) {
+		nop = list_first_entry(&s->nop_queue,
+				       struct trusty_nop, node);
+		/* list_del_init so trusty_dequeue_nop sees it off-queue. */
+		list_del_init(&nop->node);
+		args[0] = nop->args[0];
+		args[1] = nop->args[1];
+		args[2] = nop->args[2];
+
+		nop_dequeued = true;
+		if (list_empty(&s->nop_queue))
+			queue_emptied = true;
+
+		ret = true;
+	} else {
+		/* Empty queue: send a default nop if we were signaled. */
+		args[0] = 0;
+		args[1] = 0;
+		args[2] = 0;
+
+		ret = tw->signaled;
+	}
+	/* Consume the signal; trusty_enqueue_nop may set it again later. */
+	tw->signaled = false;
+	spin_unlock_irqrestore(&s->nop_lock, flags);
+
+	/* don't log when false as it is preempt case which can be very noisy */
+	if (ret)
+		trace_trusty_dequeue_nop(signaled, nop_dequeued, queue_emptied);
+
+	return ret;
+}
+
+/*
+ * Work function used for pre-SMP Trusty: a single serialized
+ * SMC_SC_LOCKED_NOP std-call per wakeup.
+ */
+static void locked_nop_work_func(struct trusty_work *tw)
+{
+	struct trusty_state *s = tw->s;
+	int rc;
+
+	rc = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
+	if (rc != 0)
+		dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d",
+			__func__, rc);
+
+	dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
+/* Reason codes recorded by the trusty_change_cpu_nice trace event. */
+enum cpunice_cause {
+	CPUNICE_CAUSE_DEFAULT,		/* no specific cause recorded */
+	CPUNICE_CAUSE_USE_HIGH_WQ,	/* use_high_wq knob forced high priority */
+	CPUNICE_CAUSE_TRUSTY_REQ,	/* nice requested by Trusty via sched share */
+	CPUNICE_CAUSE_NOP_ESCALATE,	/* boosted because more nop work pending */
+	CPUNICE_CAUSE_ENQUEUE_BOOST,	/* boosted from trusty_enqueue_nop() */
+};
+
+/*
+ * trusty_adjust_nice_nopreempt - update the calling nop thread's nice value
+ * @s:      trusty state
+ * @do_nop: true if the caller already knows more nop work is pending
+ *
+ * Called from the per-cpu nop threads; "current" is the thread being
+ * reprioritized. Runs with local interrupts disabled so the thread cannot
+ * migrate between reading this cpu's state and applying the new nice value.
+ */
+static void trusty_adjust_nice_nopreempt(struct trusty_state *s, bool do_nop)
+{
+	int req_nice, cur_nice;
+	int cause_id = CPUNICE_CAUSE_DEFAULT;
+	unsigned long flags;
+	struct trusty_work *tw;
+
+	local_irq_save(flags);
+
+	cur_nice = task_nice(current);
+
+	/* check to see if another signal has come in since dequeue_nop */
+	tw = this_cpu_ptr(s->nop_works);
+	do_nop |= tw->signaled;
+
+	if (use_high_wq) {
+		/* use highest priority (lowest nice) for everything */
+		req_nice = LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH;
+		cause_id = CPUNICE_CAUSE_USE_HIGH_WQ;
+	} else {
+		/* read trusty request for this cpu if available */
+		req_nice = trusty_get_requested_nice(smp_processor_id(),
+				s->trusty_sched_share_state);
+		cause_id = CPUNICE_CAUSE_TRUSTY_REQ;
+	}
+
+	/* ensure priority will not be lower than system request
+	 * when there is more work to do
+	 */
+	if (do_nop && nop_nice_value < req_nice) {
+		req_nice = nop_nice_value;
+		cause_id = CPUNICE_CAUSE_NOP_ESCALATE;
+	}
+
+	/* trace entry only if changing */
+	if (req_nice != cur_nice)
+		trace_trusty_change_cpu_nice(cur_nice, req_nice, cause_id);
+
+	/* tell Linux the desired priority */
+	set_user_nice(current, req_nice);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * nop_work_func - main work loop for a per-cpu nop thread (SMP API)
+ *
+ * Drains the nop queue, issuing SMC_SC_NOP std-calls until dequeue_nop()
+ * reports no more work. Thread priority is re-evaluated around each SMC so
+ * Trusty-requested nice changes take effect promptly. Each SMC runs with
+ * preemption disabled; a warning is logged if the thread is observed on a
+ * cpu other than its own.
+ */
+static void nop_work_func(struct trusty_work *tw)
+{
+	int ret;
+	bool do_nop;
+	u32 args[3];
+	u32 last_arg0;
+	struct trusty_state *s = tw->s;
+
+	do_nop = dequeue_nop(s, args);
+
+	if (do_nop) {
+		/* we have been signaled or there's a nop so
+		 * adjust priority before making SMC call below
+		 */
+		trusty_adjust_nice_nopreempt(s, do_nop);
+	}
+
+	while (do_nop) {
+		dev_dbg(s->dev, "%s: %x %x %x\n",
+			__func__, args[0], args[1], args[2]);
+
+		/* Honor a pending park request before committing to an SMC. */
+		if (kthread_should_park())
+			kthread_parkme();
+
+		preempt_disable();
+
+		if (tw != this_cpu_ptr(s->nop_works)) {
+			dev_warn_ratelimited(s->dev,
+					     "trusty-nop-%d ran on wrong cpu, %u\n",
+					     tw->cpu, smp_processor_id());
+		}
+
+		/* Keep args[0] for the error message; dequeue_nop reuses args. */
+		last_arg0 = args[0];
+		ret = trusty_std_call32(s->dev, SMC_SC_NOP,
+					args[0], args[1], args[2]);
+
+		do_nop = dequeue_nop(s, args);
+
+		/* adjust priority in case Trusty has requested a change */
+		trusty_adjust_nice_nopreempt(s, do_nop);
+
+		if (ret == SM_ERR_NOP_INTERRUPTED) {
+			/* Trusty wants the nop retried. */
+			do_nop = true;
+		} else if (ret != SM_ERR_NOP_DONE) {
+			dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d",
+				__func__, last_arg0, ret);
+			if (last_arg0) {
+				/*
+				 * Don't break out of the loop if a non-default
+				 * nop-handler returns an error.
+				 */
+				do_nop = true;
+			}
+		}
+
+		preempt_enable();
+	}
+	dev_dbg(s->dev, "%s: done\n", __func__);
+}
+
+/*
+ * trusty_enqueue_nop - queue a nop and kick this cpu's nop thread
+ * @dev: trusty platform device
+ * @nop: nop to queue, or NULL to just signal a default SMC_SC_NOP
+ *
+ * Runs with preemption disabled so the signaled flag, priority boost and
+ * wakeup all target the cpu this call executes on.
+ */
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
+{
+	unsigned long flags;
+	struct trusty_work *tw;
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	int cur_nice;
+
+	trace_trusty_enqueue_nop(nop);
+	preempt_disable();
+	tw = this_cpu_ptr(s->nop_works);
+	if (nop) {
+		/* Queued nops with args require the SMP_NOP API. */
+		WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);
+
+		spin_lock_irqsave(&s->nop_lock, flags);
+		/* Only add if not already pending on the queue. */
+		if (list_empty(&nop->node))
+			list_add_tail(&nop->node, &s->nop_queue);
+		spin_unlock_irqrestore(&s->nop_lock, flags);
+	}
+
+	/* boost the priority here so the thread can get to it fast */
+	cur_nice = task_nice(tw->nop_thread);
+	if (nop_nice_value < cur_nice) {
+		trace_trusty_change_cpu_nice(cur_nice, nop_nice_value,
+				CPUNICE_CAUSE_ENQUEUE_BOOST);
+		set_user_nice(tw->nop_thread, nop_nice_value);
+	}
+
+	/* indicate that this cpu was signaled */
+	tw->signaled = true;
+
+	wake_up_interruptible(&tw->nop_event_wait);
+	preempt_enable();
+}
+EXPORT_SYMBOL(trusty_enqueue_nop);
+
+/*
+ * trusty_dequeue_nop - cancel a previously queued nop
+ * @dev: trusty platform device
+ * @nop: nop to remove; must not be NULL
+ *
+ * Safe to call whether or not @nop is still on the pending queue.
+ */
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
+{
+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+	unsigned long irq_flags;
+
+	if (WARN_ON(!nop))
+		return;
+
+	spin_lock_irqsave(&s->nop_lock, irq_flags);
+	if (!list_empty(&nop->node))
+		list_del_init(&nop->node);
+	spin_unlock_irqrestore(&s->nop_lock, irq_flags);
+}
+EXPORT_SYMBOL(trusty_dequeue_nop);
+
+/*
+ * trusty_nop_thread - per-cpu kthread body driving nop std-calls
+ * @context: the cpu's struct trusty_work
+ *
+ * Sleeps on the per-cpu waitqueue until trusty_enqueue_nop() wakes it,
+ * honors park requests from the cpu-hotplug callbacks, and exits when
+ * kthread_stop() is called. The work function depends on the negotiated
+ * API: locked single nops pre-SMP, queue-draining nops otherwise.
+ */
+static int trusty_nop_thread(void *context)
+{
+	struct trusty_work *tw = context;
+	struct trusty_state *s = tw->s;
+	void (*work_func)(struct trusty_work *tw);
+	int ret = 0;
+
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+	if (s->api_version < TRUSTY_API_VERSION_SMP)
+		work_func = locked_nop_work_func;
+	else
+		work_func = nop_work_func;
+
+	/* Stay on the waitqueue for the thread's whole lifetime. */
+	add_wait_queue(&tw->nop_event_wait, &wait);
+	for (;;) {
+		if (kthread_should_stop())
+			break;
+
+		/* Sleeps unless a wakeup arrived since the last wait. */
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+
+		if (kthread_should_park())
+			kthread_parkme();
+
+		/* process work */
+		work_func(tw);
+	};
+	remove_wait_queue(&tw->nop_event_wait, &wait);
+
+	return ret;
+}
+
+/* cpu-hotplug online callback: resume this cpu's nop thread. */
+static int trusty_cpu_up(unsigned int cpu, struct hlist_node *node)
+{
+	struct trusty_state *s = container_of(node, struct trusty_state,
+					      cpuhp_node);
+	struct trusty_work *tw = this_cpu_ptr(s->nop_works);
+
+	kthread_unpark(tw->nop_thread);
+	dev_dbg(s->dev, "cpu %d up\n", cpu);
+
+	return 0;
+}
+
+/* cpu-hotplug offline callback: park this cpu's nop thread. */
+static int trusty_cpu_down(unsigned int cpu, struct hlist_node *node)
+{
+	struct trusty_state *s = container_of(node, struct trusty_state,
+					      cpuhp_node);
+	struct trusty_work *tw = this_cpu_ptr(s->nop_works);
+
+	dev_dbg(s->dev, "cpu %d down\n", cpu);
+	kthread_park(tw->nop_thread);
+
+	return 0;
+}
+
+/*
+ * trusty_probe - initialize trusty core state for a matched device node
+ *
+ * Order matters: the version string and API version are negotiated first,
+ * then the FF-A message buffers, then the per-cpu nop worker threads and
+ * the cpu-hotplug instance that parks/unparks them, then the scheduler
+ * share memory, and finally the child devices from the device tree. The
+ * error path unwinds in reverse order.
+ */
+static int trusty_probe(struct platform_device *pdev)
+{
+	int ret;
+	unsigned int cpu;
+	struct trusty_state *s;
+	struct device_node *node = pdev->dev.of_node;
+
+	if (!node) {
+		dev_err(&pdev->dev, "of_node required\n");
+		return -EINVAL;
+	}
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s) {
+		ret = -ENOMEM;
+		goto err_allocate_state;
+	}
+
+	s->dev = &pdev->dev;
+	spin_lock_init(&s->nop_lock);
+	INIT_LIST_HEAD(&s->nop_queue);
+	mutex_init(&s->smc_lock);
+	mutex_init(&s->share_memory_msg_lock);
+	ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
+	init_completion(&s->cpu_idle_completion);
+
+	s->dev->dma_parms = &s->dma_parms;
+	dma_set_max_seg_size(s->dev, 0xfffff000); /* dma_parms limit */
+	/*
+	 * Set dma mask to 48 bits. This is the current limit of
+	 * trusty_encode_page_info.
+	 */
+	dma_coerce_mask_and_coherent(s->dev, DMA_BIT_MASK(48));
+
+	platform_set_drvdata(pdev, s);
+
+	/* Best effort; failure leaves version_str NULL ("unknown"). */
+	trusty_init_version(s, &pdev->dev);
+
+	ret = trusty_init_api_version(s, &pdev->dev);
+	if (ret < 0)
+		goto err_api_version;
+
+	ret = trusty_init_msg_buf(s, &pdev->dev);
+	if (ret < 0)
+		goto err_init_msg_buf;
+
+	s->nop_works = alloc_percpu(struct trusty_work);
+	if (!s->nop_works) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to allocate works\n");
+		goto err_alloc_works;
+	}
+
+	/* Initialize all per-cpu worker state before any thread can run. */
+	for_each_possible_cpu(cpu) {
+		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+		tw->s = s;
+		tw->cpu = cpu;
+		tw->nop_thread = ERR_PTR(-EINVAL);
+		init_waitqueue_head(&tw->nop_event_wait);
+		tw->signaled = false;
+	}
+
+	/*
+	 * Create the threads parked; the cpu-hotplug callbacks registered
+	 * below unpark them as their cpus come online.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+		tw->nop_thread = kthread_create_on_cpu(trusty_nop_thread, tw,
+						       cpu, "trusty-nop-%d");
+		if (IS_ERR(tw->nop_thread)) {
+			ret = PTR_ERR(tw->nop_thread);
+			dev_err(s->dev, "%s: failed to create thread for cpu= %d (%p)\n",
+					__func__, cpu, tw->nop_thread);
+			goto err_thread_create;
+		}
+		kthread_set_per_cpu(tw->nop_thread, cpu);
+		kthread_park(tw->nop_thread);
+	}
+
+	ret = cpuhp_state_add_instance(trusty_cpuhp_slot, &s->cpuhp_node);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n",
+			ret);
+		goto err_add_cpuhp_instance;
+	}
+
+	ret = trusty_alloc_sched_share(&pdev->dev, &s->trusty_sched_share_state);
+	if (ret) {
+		dev_err(s->dev, "%s: unabled to allocate sched memory (%d)\n",
+				__func__, ret);
+		goto err_alloc_sched_share;
+	}
+
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
+		goto err_add_children;
+	}
+
+	/* attempt to share; it is optional for compatibility with Trusty
+	 * versions that don't support priority sharing
+	 */
+	trusty_register_sched_share(s->dev, s->trusty_sched_share_state);
+
+	return 0;
+
+err_add_children:
+	trusty_free_sched_share(s->trusty_sched_share_state);
+err_alloc_sched_share:
+	cpuhp_state_remove_instance(trusty_cpuhp_slot, &s->cpuhp_node);
+err_add_cpuhp_instance:
+err_thread_create:
+	/* Stop only the threads that were successfully created. */
+	for_each_possible_cpu(cpu) {
+		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+		if (!IS_ERR(tw->nop_thread))
+			kthread_stop(tw->nop_thread);
+	}
+	free_percpu(s->nop_works);
+err_alloc_works:
+	trusty_free_msg_buf(s, &pdev->dev);
+err_init_msg_buf:
+err_api_version:
+	s->dev->dma_parms = NULL;
+	kfree(s->version_str);
+	/* Harmless if of_platform_populate never ran: no children exist. */
+	device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+	mutex_destroy(&s->share_memory_msg_lock);
+	mutex_destroy(&s->smc_lock);
+	kfree(s);
+err_allocate_state:
+	return ret;
+}
+
+/*
+ * trusty_remove - tear down everything trusty_probe set up, in reverse:
+ * sched share registration, children, cpu-hotplug instance, per-cpu
+ * threads, sched share memory, locks, FF-A buffers, and the state itself.
+ */
+static int trusty_remove(struct platform_device *pdev)
+{
+	unsigned int cpu;
+	struct trusty_state *s = platform_get_drvdata(pdev);
+
+	trusty_unregister_sched_share(s->trusty_sched_share_state);
+
+	device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
+
+	/* Remove the hotplug instance before stopping the threads. */
+	cpuhp_state_remove_instance(trusty_cpuhp_slot, &s->cpuhp_node);
+
+	for_each_possible_cpu(cpu) {
+		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
+
+		kthread_stop(tw->nop_thread);
+	}
+	free_percpu(s->nop_works);
+
+	trusty_free_sched_share(s->trusty_sched_share_state);
+
+	mutex_destroy(&s->share_memory_msg_lock);
+	mutex_destroy(&s->smc_lock);
+	trusty_free_msg_buf(s, &pdev->dev);
+	s->dev->dma_parms = NULL;
+	kfree(s->version_str);
+	kfree(s);
+	return 0;
+}
+
+/* Device-tree match table for the trusty core node. */
+static const struct of_device_id trusty_of_match[] = {
+	{ .compatible = "android,trusty-smc-v1", },
+	{},
+};
+
+/*
+ * The first argument is the device table type: it must be "of" for an
+ * of_device_id table so modpost generates the module alias that lets
+ * userspace autoload this module ("trusty" is not a valid table type).
+ */
+MODULE_DEVICE_TABLE(of, trusty_of_match);
+
+/*
+ * Platform driver for the "android,trusty-smc-v1" node. Also referenced by
+ * the WARN_ON(dev->driver != &trusty_driver.driver) sanity checks in the
+ * exported API helpers.
+ */
+static struct platform_driver trusty_driver = {
+	.probe = trusty_probe,
+	.remove = trusty_remove,
+	.driver	= {
+		.name = "trusty",
+		.of_match_table = trusty_of_match,
+		.dev_groups = trusty_groups,
+	},
+};
+
+/*
+ * trusty_driver_init - module init: irq driver, cpuhp slot, platform driver
+ *
+ * Returns 0 on success; on failure everything registered so far is undone
+ * in reverse order.
+ */
+static int __init trusty_driver_init(void)
+{
+	int ret;
+
+	/*
+	 * Initialize trusty_irq_driver first since trusty_probe makes an
+	 * std-call at the end where interrupts might be needed.
+	 */
+	ret = trusty_irq_driver_init();
+	if (ret < 0)
+		return ret;
+
+	/* allocate dynamic cpuhp state slot */
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				      "trusty:online",
+				      trusty_cpu_up,
+				      trusty_cpu_down);
+	if (ret < 0)
+		goto err_cpuhp_setup;
+
+	/* Dynamic slot number; consumed by trusty_probe/trusty_remove. */
+	trusty_cpuhp_slot = ret;
+
+	ret = platform_driver_register(&trusty_driver);
+	if (ret < 0)
+		goto err_driver_register;
+
+	return 0;
+
+err_driver_register:
+	cpuhp_remove_multi_state(trusty_cpuhp_slot);
+	trusty_cpuhp_slot = -1;
+err_cpuhp_setup:
+	trusty_irq_driver_exit();
+	return ret;
+}
+
+/* Module exit: undo trusty_driver_init in reverse order. */
+static void __exit trusty_driver_exit(void)
+{
+	platform_driver_unregister(&trusty_driver);
+	cpuhp_remove_multi_state(trusty_cpuhp_slot);
+	trusty_cpuhp_slot = -1;
+	trusty_irq_driver_exit();
+}
+
+/* Early init so dependent drivers can probe against trusty at module load. */
+subsys_initcall(trusty_driver_init);
+module_exit(trusty_driver_exit);
+
+/*
+ * Instantiate the trace points declared in trusty-trace.h.
+ * CREATE_TRACE_POINTS must be defined in exactly one compilation unit,
+ * immediately before including the trace header.
+ */
+#define CREATE_TRACE_POINTS
+#include "trusty-trace.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty core driver");
diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h
new file mode 100644
index 0000000..95d31b3
--- /dev/null
+++ b/include/linux/trusty/arm_ffa.h
@@ -0,0 +1,597 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_ARM_FFA_H
+#define __LINUX_TRUSTY_ARM_FFA_H
+
+/*
+ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0
+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
+ */
+
+#include "smcall.h"
+
+#ifndef STATIC_ASSERT
+#define STATIC_ASSERT(e) _Static_assert(e, #e)
+#endif
+
+#define FFA_CURRENT_VERSION_MAJOR (1U)
+#define FFA_CURRENT_VERSION_MINOR (0U)
+
+#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16)
+#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff))
+#define FFA_VERSION(major, minor) (((major) << 16) | (minor))
+#define FFA_CURRENT_VERSION \
+	FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR)
+
+#define SMC_ENTITY_SHARED_MEMORY 4
+
+#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \
+	SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \
+	SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr)
+
+/**
+ * The FF-A specification explicitly refers to '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE		SZ_4K
+
+/**
+ * typedef ffa_endpoint_id16_t - Endpoint ID
+ *
+ * Current implementation only supports VMIDs. The FFA spec also supports stream
+ * endpoint ids.
+ */
+typedef uint16_t ffa_endpoint_id16_t;
+
+/**
+ * struct ffa_cons_mrd - Constituent memory region descriptor
+ * @address:
+ *         Start address of contiguous memory region. Must be 4K page aligned.
+ * @page_count:
+ *         Number of 4K pages in region.
+ * @reserved_12_15:
+ *         Reserve bytes 12-15 to pad struct size to 16 bytes.
+ */
+struct ffa_cons_mrd {
+	uint64_t address;
+	uint32_t page_count;
+	uint32_t reserved_12_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16);
+
+/**
+ * struct ffa_comp_mrd - Composite memory region descriptor
+ * @total_page_count:
+ *         Number of 4k pages in memory region. Must match sum of
+ *         @address_range_array[].page_count.
+ * @address_range_count:
+ *         Number of entries in @address_range_array.
+ * @reserved_8_15:
+ *         Reserve bytes 8-15 to pad struct size to 16 byte alignment and
+ *         make @address_range_array 16 byte aligned.
+ * @address_range_array:
+ *         Array of &struct ffa_cons_mrd entries.
+ */
+struct ffa_comp_mrd {
+	uint32_t total_page_count;
+	uint32_t address_range_count;
+	uint64_t reserved_8_15;
+	struct ffa_cons_mrd address_range_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16);
+
+/**
+ * typedef ffa_mem_attr8_t - Memory region attributes
+ *
+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
+ *     Device-nGnRnE.
+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
+ *     Device-nGnRE.
+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
+ *     Device-nGRE.
+ * * @FFA_MEM_ATTR_DEVICE_GRE:
+ *     Device-GRE.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
+ *     Normal memory. Non-cacheable.
+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB:
+ *     Normal memory. Write-back cached.
+ * * @FFA_MEM_ATTR_NON_SHAREABLE:
+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE:
+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ * * @FFA_MEM_ATTR_INNER_SHAREABLE:
+ *     Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
+ */
+typedef uint8_t ffa_mem_attr8_t;
+#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2))
+#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2))
+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2))
+#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0)
+#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0)
+#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0)
+
+/**
+ * typedef ffa_mem_perm8_t - Memory access permissions
+ *
+ * * @FFA_MEM_PERM_RO:
+ *     Request or specify read-only mapping.
+ * * @FFA_MEM_PERM_RW:
+ *     Request or allow read-write mapping.
+ * * @FFA_MEM_PERM_NX:
+ *     Deny executable mapping.
+ * * @FFA_MEM_PERM_X:
+ *     Request executable mapping.
+ */
+typedef uint8_t ffa_mem_perm8_t;
+#define FFA_MEM_PERM_RO (1U << 0)
+#define FFA_MEM_PERM_RW (1U << 1)
+#define FFA_MEM_PERM_NX (1U << 2)
+#define FFA_MEM_PERM_X (1U << 3)
+
+/**
+ * typedef ffa_mem_flag8_t - Endpoint memory flags
+ *
+ * * @FFA_MEM_FLAG_OTHER:
+ *     Other borrower. Memory region must not be or was not retrieved on behalf
+ *     of this endpoint.
+ */
+typedef uint8_t ffa_mem_flag8_t;
+#define FFA_MEM_FLAG_OTHER (1U << 0)
+
+/**
+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
+ *
+ * * @FFA_MTD_FLAG_ZERO_MEMORY:
+ *     Zero memory after unmapping from sender (must be 0 for share).
+ * * @FFA_MTD_FLAG_TIME_SLICING:
+ *     Not supported by this implementation.
+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH:
+ *     Zero memory after unmapping from borrowers (must be 0 for share).
+ * * @FFA_MTD_FLAG_TYPE_MASK:
+ *     Bit-mask to extract memory management transaction type from flags.
+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
+ *     Share memory transaction flag.
+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
+ *     @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that
+ *     it must have come from a share operation.
+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK:
+ *     Not supported by this implementation.
+ */
+typedef uint32_t ffa_mtd_flag32_t;
+#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0)
+#define FFA_MTD_FLAG_TIME_SLICING (1U << 1)
+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2)
+#define FFA_MTD_FLAG_TYPE_MASK (3U << 3)
+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3)
+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5)
+
+/**
+ * struct ffa_mapd - Memory access permissions descriptor
+ * @endpoint_id:
+ *         Endpoint id that @memory_access_permissions and @flags apply to.
+ *         (&typedef ffa_endpoint_id16_t).
+ * @memory_access_permissions:
+ *         FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
+ * @flags:
+ *         FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
+ */
+struct ffa_mapd {
+	ffa_endpoint_id16_t endpoint_id;
+	ffa_mem_perm8_t memory_access_permissions;
+	ffa_mem_flag8_t flags;
+};
+STATIC_ASSERT(sizeof(struct ffa_mapd) == 4);
+
+/**
+ * struct ffa_emad - Endpoint memory access descriptor.
+ * @mapd:  &struct ffa_mapd.
+ * @comp_mrd_offset:
+ *         Offset of &struct ffa_comp_mrd form start of &struct ffa_mtd.
+ * @reserved_8_15:
+ *         Reserved bytes 8-15. Must be 0.
+ */
+struct ffa_emad {
+	struct ffa_mapd mapd;
+	uint32_t comp_mrd_offset;
+	uint64_t reserved_8_15;
+};
+STATIC_ASSERT(sizeof(struct ffa_emad) == 16);
+
+/**
+ * struct ffa_mtd - Memory transaction descriptor.
+ * @sender_id:
+ *         Sender endpoint id.
+ * @memory_region_attributes:
+ *         FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
+ * @reserved_3:
+ *         Reserved bytes 3. Must be 0.
+ * @flags:
+ *         FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
+ * @handle:
+ *         Id of shared memory object. Must be 0 for MEM_SHARE.
+ * @tag:   Client allocated tag. Must match original value.
+ * @reserved_24_27:
+ *         Reserved bytes 24-27. Must be 0.
+ * @emad_count:
+ *         Number of entries in @emad. Must be 1 in current implementation.
+ *         FFA spec allows more entries.
+ * @emad:
+ *         Endpoint memory access descriptor array (see @struct ffa_emad).
+ */
+struct ffa_mtd {
+	ffa_endpoint_id16_t sender_id;
+	ffa_mem_attr8_t memory_region_attributes;
+	uint8_t reserved_3;
+	ffa_mtd_flag32_t flags;
+	uint64_t handle;
+	uint64_t tag;
+	uint32_t reserved_24_27;
+	uint32_t emad_count;
+	struct ffa_emad emad[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mtd) == 32);
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ *         Id of shared memory object to relinquish.
+ * @flags:
+ *         If bit 0 is set, clear memory after unmapping from borrower. Must be 0
+ *         for share. Bit[1]: Time slicing. Not supported, must be 0. All other
+ *         bits are reserved 0.
+ * @endpoint_count:
+ *         Number of entries in @endpoint_array.
+ * @endpoint_array:
+ *         Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+	uint64_t handle;
+	uint32_t flags;
+	uint32_t endpoint_count;
+	ffa_endpoint_id16_t endpoint_array[];
+};
+STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16);
+
+/**
+ * enum ffa_error - FF-A error code
+ * @FFA_ERROR_NOT_SUPPORTED:
+ *         Operation contained possibly valid parameters not supported by the
+ *         current implementation. Does not match FF-A 1.0 EAC 1_0 definition.
+ * @FFA_ERROR_INVALID_PARAMETERS:
+ *         Invalid parameters. Conditions function specific.
+ * @FFA_ERROR_NO_MEMORY:
+ *         Not enough memory.
+ * @FFA_ERROR_DENIED:
+ *         Operation not allowed. Conditions function specific.
+ *
+ * FF-A 1.0 EAC 1_0 defines other error codes as well but the current
+ * implementation does not use them.
+ */
+enum ffa_error {
+	FFA_ERROR_NOT_SUPPORTED = -1,
+	FFA_ERROR_INVALID_PARAMETERS = -2,
+	FFA_ERROR_NO_MEMORY = -3,
+	FFA_ERROR_DENIED = -6,
+};
+
+/**
+ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA
+ */
+#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F)
+
+/**
+ * SMC_FC_FFA_ERROR - SMC error return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     VMID in [31:16], vCPU in [15:0]
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
+
+/**
+ * SMC_FC_FFA_SUCCESS - 32 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     VMID in [31:16], vCPU in [15:0]
+ * * w2-w7:  Function specific
+ */
+#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:             VMID in [31:16], vCPU in [15:0]
+ * * w2/x2-w7/x7:    Function specific
+ */
+#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61)
+
+/**
+ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version
+ *
+ * Register arguments:
+ *
+ * * w1:     Major version bit[30:16] and minor version in bit[15:0] supported
+ *           by caller. Bit[31] must be 0.
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     Major version bit[30:16], minor version in bit[15:0], bit[31] must
+ *           be 0.
+ *
+ * or
+ *
+ * * w0:     SMC_FC_FFA_ERROR
+ * * w2:     FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the
+ *           minimum major version supported.
+ */
+#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63)
+
+/**
+ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support
+ *
+ * Register arguments:
+ *
+ * * w1:     FF-A function ID
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     Bit[0]: Supports custom buffers for memory transactions.
+ *           Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary.
+ *           Other bits must be 0.
+ * * w3:     For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can
+ *           retrieve each memory region before relinquishing it specified as
+ *           ((1U << (value + 1)) - 1) (i.e. value = bits in reference count - 1).
+ *           For all other bits and commands: must be 0.
+ * or
+ *
+ * * w0:     SMC_FC_FFA_ERROR
+ * * w2:     FFA_ERROR_NOT_SUPPORTED if function is not implemented, or
+ *           FFA_ERROR_INVALID_PARAMETERS if function id is not valid.
+ */
+#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64)
+
+/**
+ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * w1:     TX address
+ * * w2:     RX address
+ * * w3:     RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66)
+
+/**
+ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers
+ *
+ * Register arguments:
+ *
+ * * x1:     TX address
+ * * x2:     RX address
+ * * x3:     RX/TX page count in bit[5:0]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66)
+#ifdef CONFIG_64BIT
+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP
+#else
+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP
+#endif
+
+/**
+ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers
+ *
+ * Register arguments:
+ *
+ * * w1:     ID in [31:16]
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67)
+
+/**
+ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2:     ID in bit[15:0], bit[31:16] must be 0.
+ */
+#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69)
+
+/**
+ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory
+ *
+ * Not supported.
+ */
+#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71)
+
+/**
+ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory
+ *
+ * Not currently supported.
+ */
+#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72)
+
+/**
+ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * w3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2/w3:  Handle
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-:    See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0:     SMC_FC_FFA_ERROR
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * x3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ * * w2/w3:  Handle
+ *
+ * or
+ *
+ * * w0:     &SMC_FC_FFA_MEM_FRAG_RX
+ * * w1-:    See &SMC_FC_FFA_MEM_FRAG_RX
+ *
+ * or
+ *
+ * * w0:     SMC_FC_FFA_ERROR
+ * * w2:     Error code (&enum ffa_error)
+ */
+#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * w3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ * * x3:     Address
+ * * w4:     Page count
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_RETRIEVE_RESP
+ */
+#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74)
+
+/**
+ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 bit SMC return opcode
+ *
+ * Register arguments:
+ *
+ * * w1:     Total length
+ * * w2:     Fragment length
+ */
+#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75)
+
+/**
+ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory
+ *
+ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer.
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76)
+
+/**
+ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Handle
+ * * w3:     Flags
+ *
+ * Return:
+ * * w0:     &SMC_FC_FFA_SUCCESS
+ */
+#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment.
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Cookie
+ * * w3:     Fragment offset.
+ * * w4:     Endpoint ID in bits [31:16], if the client is a hypervisor.
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_FRAG_TX
+ * * w1/x1-w5/x5:    See &SMC_FC_FFA_MEM_FRAG_TX
+ */
+#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A)
+
+/**
+ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment
+ *
+ * Register arguments:
+ *
+ * * w1/w2:  Cookie
+ * * w3:     Fragment length.
+ * * w4:     Sender endpoint ID in bits [31:16], if the client is a hypervisor.
+ *
+ * Return:
+ * * w0:             &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS.
+ * * w1/x1-w5/x5:    See opcode in w0.
+ */
+#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B)
+
+#endif /* __LINUX_TRUSTY_ARM_FFA_H */
diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h
new file mode 100644
index 0000000..f650444
--- /dev/null
+++ b/include/linux/trusty/sm_err.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_SM_ERR_H
+#define __LINUX_TRUSTY_SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC		0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0) */
+#define SM_ERR_INVALID_PARAMETERS	-2
+#define SM_ERR_INTERRUPTED		-3	/* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART	-4	/* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY			-5	/* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC		-6	/* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE		-7	/* Unknown error */
+#define SM_ERR_NOT_SUPPORTED		-8
+#define SM_ERR_NOT_ALLOWED		-9	/* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT		-10
+#define SM_ERR_PANIC			-11	/* Secure OS crashed */
+#define SM_ERR_FIQ_INTERRUPTED		-12	/* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */
+#define SM_ERR_CPU_IDLE			-13	/* SMC call waiting for another CPU */
+#define SM_ERR_NOP_INTERRUPTED		-14	/* Got interrupted. Call back with new SMC_SC_NOP */
+#define SM_ERR_NOP_DONE			-15	/* Cpu idle after SMC_SC_NOP (not an error) */
+
+#endif
diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h
new file mode 100644
index 0000000..9ce498d
--- /dev/null
+++ b/include/linux/trusty/smcall.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2013-2014 Google Inc. All rights reserved
+ *
+ * Trusty and TF-A also have a copy of this header.
+ * Please keep the copies in sync.
+ */
+#ifndef __LINUX_TRUSTY_SMCALL_H
+#define __LINUX_TRUSTY_SMCALL_H
+
+#define SMC_NUM_ENTITIES	64
+#define SMC_NUM_ARGS		4
+#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1)
+
+#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000)
+#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000)
+#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000) >> 24)
+#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFF)
+
+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1U) << 31) | \
+					     (((smc64) & 0x1U) << 30) | \
+					     (((entity) & 0x3FU) << 24) | \
+					     ((fn) & 0xFFFFU) \
+					    )
+
+#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1, 0)
+#define SMC_STDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0, 0)
+#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1, 1)
+#define SMC_STDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0, 1)
+
+#define	SMC_ENTITY_ARCH			0	/* ARM Architecture calls */
+#define	SMC_ENTITY_CPU			1	/* CPU Service calls */
+#define	SMC_ENTITY_SIP			2	/* SIP Service calls */
+#define	SMC_ENTITY_OEM			3	/* OEM Service calls */
+#define	SMC_ENTITY_STD			4	/* Standard Service calls */
+#define	SMC_ENTITY_RESERVED		5	/* Reserved for future use */
+#define	SMC_ENTITY_TRUSTED_APP		48	/* Trusted Application calls */
+#define	SMC_ENTITY_TRUSTED_OS		50	/* Trusted OS calls */
+#define	SMC_ENTITY_LOGGING		51	/* Used for secure -> nonsecure logging */
+#define	SMC_ENTITY_TEST			52	/* Used for secure -> nonsecure tests */
+#define	SMC_ENTITY_SECURE_MONITOR	60	/* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, SC = Standard call */
+#define SMC_SC_RESTART_LAST	SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_SC_LOCKED_NOP	SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
+
+/**
+ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq
+ *
+ * No arguments, no return value.
+ *
+ * Re-enter trusty after returning to ns to process an fiq. Must be called iff
+ * trusty returns SM_ERR_FIQ_INTERRUPTED.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later.
+ */
+#define SMC_SC_RESTART_FIQ	SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
+
+/**
+ * SMC_SC_NOP - Enter trusty to run pending work.
+ *
+ * No arguments.
+ *
+ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE.
+ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated.
+ *
+ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later.
+ */
+#define SMC_SC_NOP		SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
+
+/**
+ * SMC API for supporting shared-memory based trusty-linux info exchange
+ *
+ * SMC_SC_SCHED_SHARE_REGISTER - enter trusty to establish a shared-memory region
+ * Arguments:
+ * param[0]: shared-memory client-id
+ * param[1]: shared-memory buffer-id
+ * param[2]: shared-memory block size
+ *
+ * returns:
+ * SM_ERR_INTERNAL_FAILURE on failure.
+ * 0 on success.
+ */
+#define SMC_SC_SCHED_SHARE_REGISTER   SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 4)
+
+/**
+ * SMC API for supporting shared-memory based trusty-linux info exchange
+ *
+ * SMC_SC_SCHED_SHARE_UNREGISTER - enter trusty to release the shared-memory region
+ * Arguments:
+ * param[0]: shared-memory client-id
+ * param[1]: shared-memory buffer-id
+ *
+ * returns:
+ * SM_ERR_INTERNAL_FAILURE on failure.
+ * 0 on success.
+ */
+#define SMC_SC_SCHED_SHARE_UNREGISTER \
+	SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 5)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_SC_NS_RETURN	SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+
+#define SMC_FC_RESERVED		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
+
+#define TRUSTY_IRQ_TYPE_NORMAL		(0)
+#define TRUSTY_IRQ_TYPE_PER_CPU		(1)
+#define TRUSTY_IRQ_TYPE_DOORBELL	(2)
+#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
+
+#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
+#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8)
+
+#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9)
+#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
+
+/**
+ * SMC_FC_API_VERSION - Find and select supported API version.
+ *
+ * @r1: Version supported by client.
+ *
+ * Returns version supported by trusty.
+ *
+ * If multiple versions are supported, the client should start by calling
+ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then
+ * return a version it supports. If the client does not support the version
+ * returned by trusty and the version returned is less than the version
+ * requested, repeat the call with the largest supported version less than the
+ * last returned version.
+ *
+ * This call must be made before any calls that are affected by the api version.
+ */
+#define TRUSTY_API_VERSION_RESTART_FIQ	(1)
+#define TRUSTY_API_VERSION_SMP		(2)
+#define TRUSTY_API_VERSION_SMP_NOP	(3)
+#define TRUSTY_API_VERSION_PHYS_MEM_OBJ	(4)
+#define TRUSTY_API_VERSION_MEM_OBJ	(5)
+#define TRUSTY_API_VERSION_CURRENT	(5)
+#define SMC_FC_API_VERSION	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11)
+
+/* TRUSTED_OS entity calls */
+#define SMC_SC_VIRTIO_GET_DESCR	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
+#define SMC_SC_VIRTIO_START	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
+#define SMC_SC_VIRTIO_STOP	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+
+#define SMC_SC_VDEV_RESET	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
+#define SMC_SC_VDEV_KICK_VQ	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
+#define SMC_NC_VDEV_KICK_VQ	SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25)
+
+#endif /* __LINUX_TRUSTY_SMCALL_H */
diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h
new file mode 100644
index 0000000..41680f9
--- /dev/null
+++ b/include/linux/trusty/trusty.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Google, Inc.
+ */
+#ifndef __LINUX_TRUSTY_TRUSTY_H
+#define __LINUX_TRUSTY_TRUSTY_H
+
+#include <linux/kernel.h>
+#include <linux/trusty/sm_err.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/pagemap.h>
+
+
+/*
+ * map Trusty priorities to Linux nice values (see trusty-sched-share.h)
+ */
+#define LINUX_NICE_FOR_TRUSTY_PRIORITY_LOW  10
+#define LINUX_NICE_FOR_TRUSTY_PRIORITY_NORMAL  0
+#define LINUX_NICE_FOR_TRUSTY_PRIORITY_HIGH  MIN_NICE
+
+#if IS_ENABLED(CONFIG_TRUSTY)
+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
+#ifdef CONFIG_64BIT
+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2);
+#endif
+#else
+static inline s32 trusty_std_call32(struct device *dev, u32 smcnr,
+				    u32 a0, u32 a1, u32 a2)
+{
+	return SM_ERR_UNDEFINED_SMC;
+}
+static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr,
+				     u32 a0, u32 a1, u32 a2)
+{
+	return SM_ERR_UNDEFINED_SMC;
+}
+#ifdef CONFIG_64BIT
+static inline s64 trusty_fast_call64(struct device *dev,
+				     u64 smcnr, u64 a0, u64 a1, u64 a2)
+{
+	return SM_ERR_UNDEFINED_SMC;
+}
+#endif
+#endif
+
+struct notifier_block;
+enum {
+	TRUSTY_CALL_PREPARE,
+	TRUSTY_CALL_RETURNED,
+};
+int trusty_call_notifier_register(struct device *dev,
+				  struct notifier_block *n);
+int trusty_call_notifier_unregister(struct device *dev,
+				    struct notifier_block *n);
+const char *trusty_version_str_get(struct device *dev);
+u32 trusty_get_api_version(struct device *dev);
+bool trusty_get_panic_status(struct device *dev);
+
+struct ns_mem_page_info {
+	u64 paddr;
+	u8 ffa_mem_attr;
+	u8 ffa_mem_perm;
+	u64 compat_attr;
+};
+
+int trusty_encode_page_info(struct ns_mem_page_info *inf,
+			    struct page *page, pgprot_t pgprot);
+
+struct scatterlist;
+typedef u64 trusty_shared_mem_id_t;
+int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id,
+			struct scatterlist *sglist, unsigned int nents,
+			pgprot_t pgprot);
+int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id,
+			       struct scatterlist *sglist, unsigned int nents,
+			       pgprot_t pgprot);
+int trusty_transfer_memory(struct device *dev, u64 *id,
+			   struct scatterlist *sglist, unsigned int nents,
+			   pgprot_t pgprot, u64 tag, bool lend);
+int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id,
+			  struct scatterlist *sglist, unsigned int nents);
+
+struct dma_buf;
+void trusty_register_func_for_dma_buf(
+	u64 (*get_ffa_tag)(struct dma_buf *dma_buf),
+	int (*get_shared_mem_id)(struct dma_buf *dma_buf,
+		trusty_shared_mem_id_t *id));
+
+struct trusty_nop {
+	struct list_head node;
+	u32 args[3];
+};
+
+static inline void trusty_nop_init(struct trusty_nop *nop,
+				   u32 arg0, u32 arg1, u32 arg2) {
+	INIT_LIST_HEAD(&nop->node);
+	nop->args[0] = arg0;
+	nop->args[1] = arg1;
+	nop->args[2] = arg2;
+}
+
+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop);
+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop);
+
+#endif
diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h
new file mode 100644
index 0000000..3b17ddb
--- /dev/null
+++ b/include/linux/trusty/trusty_ipc.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Google, Inc.
+ */
+#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H
+#define __LINUX_TRUSTY_TRUSTY_IPC_H
+
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/trusty/trusty.h>
+#include <linux/types.h>
+#include <uapi/linux/trusty/ipc.h>
+
+struct tipc_chan;
+
+struct tipc_msg_buf {
+	void *buf_va;
+	struct scatterlist sg;
+	trusty_shared_mem_id_t buf_id;
+	size_t buf_sz;
+	size_t wpos;
+	size_t rpos;
+	size_t shm_cnt;
+	struct list_head node;
+};
+
+enum tipc_chan_event {
+	TIPC_CHANNEL_CONNECTED = 1,
+	TIPC_CHANNEL_DISCONNECTED,
+	TIPC_CHANNEL_SHUTDOWN,
+};
+
+struct tipc_chan_ops {
+	void (*handle_event)(void *cb_arg, int event);
+	struct tipc_msg_buf *(*handle_msg)(void *cb_arg,
+					   struct tipc_msg_buf *mb);
+	void (*handle_release)(void *cb_arg);
+};
+
+struct tipc_chan *tipc_create_channel(struct device *dev,
+				      const struct tipc_chan_ops *ops,
+				      void *cb_arg);
+
+int tipc_chan_connect(struct tipc_chan *chan, const char *port);
+
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+int tipc_chan_shutdown(struct tipc_chan *chan);
+
+void tipc_chan_destroy(struct tipc_chan *chan);
+
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan);
+
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+struct tipc_msg_buf *
+tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout);
+
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
+
+static inline size_t mb_avail_space(struct tipc_msg_buf *mb)
+{
+	return mb->buf_sz - mb->wpos;
+}
+
+static inline size_t mb_avail_data(struct tipc_msg_buf *mb)
+{
+	return mb->wpos - mb->rpos;
+}
+
+static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len)
+{
+	void *pos = (u8 *)mb->buf_va + mb->wpos;
+
+	BUG_ON(mb->wpos + len > mb->buf_sz);
+	mb->wpos += len;
+	return pos;
+}
+
+static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len)
+{
+	void *pos = (u8 *)mb->buf_va + mb->rpos;
+
+	BUG_ON(mb->rpos + len > mb->wpos);
+	mb->rpos += len;
+	return pos;
+}
+
+#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */
+
diff --git a/include/uapi/linux/trusty/ipc.h b/include/uapi/linux/trusty/ipc.h
new file mode 100644
index 0000000..af91035
--- /dev/null
+++ b/include/uapi/linux/trusty/ipc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_TRUSTY_IPC_H_
+#define _UAPI_LINUX_TRUSTY_IPC_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+
+/**
+ * enum transfer_kind - How to send an fd to Trusty
+ * @TRUSTY_SHARE:       Memory will be accessible by Linux and Trusty. On ARM it
+ *                      will be mapped as nonsecure. Suitable for shared memory.
+ *                      The paired fd must be a "dma_buf".
+ * @TRUSTY_LEND:        Memory will be accessible only to Trusty. On ARM it will
+ *                      be transitioned to "Secure" memory if Trusty is in
+ *                      TrustZone. This transfer kind is suitable for donating
+ *                      video buffers or other similar resources. The paired fd
+ *                      may need to come from a platform-specific allocator for
+ *                      memory that may be transitioned to "Secure".
+ * @TRUSTY_SEND_SECURE: Send memory that is already "Secure". Memory will be
+ *                      accessible only to Trusty. The paired fd may need to
+ *                      come from a platform-specific allocator that returns
+ *                      "Secure" buffers.
+ *
+ * Describes how the user would like the resource in question to be sent to
+ * Trusty. Options may be valid only for certain kinds of fds.
+ */
+enum transfer_kind {
+	TRUSTY_SHARE = 0,	/* share: mapped nonsecure, visible to both sides (see kernel-doc above) */
+	TRUSTY_LEND = 1,	/* lend: accessible only to Trusty while lent */
+	TRUSTY_SEND_SECURE = 2,	/* send memory that is already "Secure" */
+};
+
+/**
+ * struct trusty_shm - Describes a transfer of memory to Trusty
+ * @fd:       The fd to transfer
+ * @transfer: How to transfer it - see &enum transfer_kind
+ */
+struct trusty_shm {
+	__s32 fd;	/* file descriptor to transfer (see kernel-doc above) */
+	__u32 transfer;	/* an enum transfer_kind value */
+};
+
+/**
+ * struct tipc_send_msg_req - Request struct for @TIPC_IOC_SEND_MSG
+ * @iov:     Pointer to an array of &struct iovec describing data to be sent
+ * @shm:     Pointer to an array of &struct trusty_shm describing any file
+ *           descriptors to be transferred.
+ * @iov_cnt: Number of elements in the @iov array
+ * @shm_cnt: Number of elements in the @shm array
+ */
+struct tipc_send_msg_req {
+	__u64 iov;	/* userspace pointer to an array of struct iovec */
+	__u64 shm;	/* userspace pointer to an array of struct trusty_shm */
+	__u64 iov_cnt;	/* number of entries in @iov */
+	__u64 shm_cnt;	/* number of entries in @shm */
+};
+
+#define TIPC_IOC_MAGIC			'r'	/* ioctl magic for the tipc character device */
+#define TIPC_IOC_CONNECT		_IOW(TIPC_IOC_MAGIC, 0x80, char *)	/* arg: presumably the port name; note size encodes sizeof(char *), which differs under compat */
+#define TIPC_IOC_SEND_MSG		_IOW(TIPC_IOC_MAGIC, 0x81, \
+					     struct tipc_send_msg_req)	/* arg: struct tipc_send_msg_req */
+
+#endif /* _UAPI_LINUX_TRUSTY_IPC_H_ */