Merge android12-gs-pixel-5.10-sc-qpr3 into android13-gs-pixel-5.10

SBMerger: 410055097
Change-Id: I8c3dfd80b76c8b2a8fa813a7c91f9f6e4585a0cf
Signed-off-by: SecurityBot <android-nexus-securitybot@system.gserviceaccount.com>
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..3a6c04c
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,20 @@
+# NOTE: THIS FILE IS EXPERIMENTAL FOR THE BAZEL MIGRATION AND NOT USED FOR
+# YOUR BUILDS CURRENTLY.
+#
+# It is not yet the source of truth for your build. If you're looking to modify
+# the build file, modify the Android.bp file instead. Do *not* modify this file
+# unless you have coordinated with the team managing the Soong to Bazel
+# migration.
+
+load("//build/kleaf:kernel.bzl", "kernel_module")
+
+kernel_module(
+    name = "lwis.cloudripper",
+    outs = [
+        "lwis.ko",
+    ],
+    kernel_build = "//private/gs-google:cloudripper",
+    visibility = [
+        "//private/gs-google:__pkg__",
+    ],
+)
diff --git a/Kbuild b/Kbuild
index 448e2df..876de74 100644
--- a/Kbuild
+++ b/Kbuild
@@ -19,11 +19,19 @@
 lwis-objs += lwis_buffer.o
 lwis-objs += lwis_util.o
 lwis-objs += lwis_debug.o
+lwis-objs += lwis_io_entry.o
+lwis-objs += lwis_allocator.o
 
-# GS101 specific files
+# Anchorage specific files
 ifeq ($(CONFIG_SOC_GS101), y)
-lwis-objs += platform/gs101/lwis_platform_gs101.o
-lwis-objs += platform/gs101/lwis_platform_gs101_dma.o
+lwis-objs += platform/anchorage/lwis_platform_anchorage.o
+lwis-objs += platform/anchorage/lwis_platform_anchorage_dma.o
+endif
+
+# Busan specific files
+ifeq ($(CONFIG_SOC_GS201), y)
+lwis-objs += platform/busan/lwis_platform_busan.o
+lwis-objs += platform/busan/lwis_platform_busan_dma.o
 endif
 
 # Device tree specific file
diff --git a/lwis_allocator.c b/lwis_allocator.c
new file mode 100644
index 0000000..4a624de
--- /dev/null
+++ b/lwis_allocator.c
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Google LWIS Recycling Memory Allocator
+ *
+ * Copyright (c) 2021 Google, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME "-allocator: " fmt
+
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include "lwis_allocator.h"
+#include "lwis_commands.h"
+
+static void allocator_block_pool_free_locked(struct lwis_device *lwis_dev,
+					     struct lwis_allocator_block_pool *block_pool)
+{
+	struct lwis_allocator_block_mgr *block_mgr = lwis_dev->block_mgr;
+
+	if (block_pool == NULL) {
+		dev_err(lwis_dev->dev, "block_pool is NULL\n");
+		return;
+	}
+	if (block_pool->in_use_count != 0 || block_pool->in_use != NULL) {
+		dev_err(lwis_dev->dev, "block_pool %s still has %d block(s) in use\n",
+			block_pool->name, block_pool->in_use_count);
+	}
+
+	while (block_pool->free != NULL) {
+		struct lwis_allocator_block *curr;
+		struct lwis_allocator_block *block;
+		struct hlist_node *n;
+		int i;
+
+		curr = block_pool->free;
+		hash_for_each_safe (block_mgr->allocated_blocks, i, n, block, node) {
+			if (block->ptr == curr->ptr) {
+				hash_del(&block->node);
+			}
+		}
+		block_pool->free = curr->next;
+		block_pool->free_count--;
+		kvfree(curr->ptr);
+		kfree(curr);
+	}
+}
+
+static struct lwis_allocator_block *
+allocator_free_block_get_locked(struct lwis_allocator_block_pool *block_pool)
+{
+	struct lwis_allocator_block *head;
+
+	if (block_pool == NULL) {
+		pr_err("block_pool is NULL\n");
+		return NULL;
+	}
+	if (block_pool->free == NULL) {
+		return NULL;
+	}
+
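+	/* Pop the head of the free list and push it onto the in-use list */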
+	head = block_pool->free;
+	block_pool->free = head->next;
+	if (block_pool->free != NULL) {
+		block_pool->free->prev = NULL;
+	}
+	block_pool->free_count--;
+
+	head->next = block_pool->in_use;
+	if (head->next != NULL) {
+		head->next->prev = head;
+	}
+	block_pool->in_use = head;
+	block_pool->in_use_count++;
+
+	return head;
+}
+
+static void allocator_free_block_put_locked(struct lwis_allocator_block_pool *block_pool,
+					    struct lwis_allocator_block *block)
+{
+	if (block_pool == NULL) {
+		pr_err("block_pool is NULL\n");
+		return;
+	}
+	if (block == NULL) {
+		pr_err("block is NULL\n");
+		return;
+	}
+
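+	/* Unlink the block from the in-use list and return it to the free list */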
+	if (block->next != NULL) {
+		block->next->prev = block->prev;
+	}
+	if (block->prev != NULL) {
+		block->prev->next = block->next;
+	} else {
+		block_pool->in_use = block->next;
+	}
+	block_pool->in_use_count--;
+
+	if (block_pool->free != NULL) {
+		block_pool->free->prev = block;
+	}
+	block->next = block_pool->free;
+	block->prev = NULL;
+	block_pool->free = block;
+	block_pool->free_count++;
+}
+
+static struct lwis_allocator_block_pool *
+allocator_get_block_pool(struct lwis_allocator_block_mgr *block_mgr, int idx)
+{
+	struct lwis_allocator_block_pool *block_pool;
+
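+	/* idx follows kmalloc_index() size classes: 13 = 8K, 14 = 16K, ..., 19 = 512K */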
+	switch (idx) {
+	case 13:
+		block_pool = &block_mgr->pool_8k;
+		break;
+	case 14:
+		block_pool = &block_mgr->pool_16k;
+		break;
+	case 15:
+		block_pool = &block_mgr->pool_32k;
+		break;
+	case 16:
+		block_pool = &block_mgr->pool_64k;
+		break;
+	case 17:
+		block_pool = &block_mgr->pool_128k;
+		break;
+	case 18:
+		block_pool = &block_mgr->pool_256k;
+		break;
+	case 19:
+		block_pool = &block_mgr->pool_512k;
+		break;
+	default:
+		pr_err("size is not supported\n");
+		return NULL;
+	}
+
+	return block_pool;
+}
+
+int lwis_allocator_init(struct lwis_device *lwis_dev)
+{
+	struct lwis_allocator_block_mgr *block_mgr;
+	unsigned long flags;
+
+	if (lwis_dev == NULL) {
+		pr_err("lwis_dev is NULL\n");
+		return -EINVAL;
+	}
+
+	if (lwis_dev->block_mgr != NULL) {
+		block_mgr = lwis_dev->block_mgr;
+		spin_lock_irqsave(&block_mgr->lock, flags);
+		block_mgr->ref_count++;
+		spin_unlock_irqrestore(&block_mgr->lock, flags);
+		return 0;
+	}
+
+	block_mgr = kzalloc(sizeof(struct lwis_allocator_block_mgr), GFP_KERNEL);
+	if (block_mgr == NULL) {
+		dev_err(lwis_dev->dev, "Allocate block_mgr failed\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize spinlock */
+	spin_lock_init(&block_mgr->lock);
+
+	/* Empty hash table for allocated blocks */
+	hash_init(block_mgr->allocated_blocks);
+
+	/* Initialize block pools */
+	strlcpy(block_mgr->pool_8k.name, "lwis-block-8k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_16k.name, "lwis-block-16k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_32k.name, "lwis-block-32k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_64k.name, "lwis-block-64k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_128k.name, "lwis-block-128k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_256k.name, "lwis-block-256k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_512k.name, "lwis-block-512k", LWIS_MAX_NAME_STRING_LEN);
+	strlcpy(block_mgr->pool_large.name, "lwis-block-large", LWIS_MAX_NAME_STRING_LEN);
+
+	/* Initialize reference count */
+	block_mgr->ref_count = 1;
+
+	lwis_dev->block_mgr = block_mgr;
+	return 0;
+}
+
+void lwis_allocator_release(struct lwis_device *lwis_dev)
+{
+	struct lwis_allocator_block_mgr *block_mgr;
+	unsigned long flags;
+
+	if (lwis_dev == NULL) {
+		pr_err("lwis_dev is NULL\n");
+		return;
+	}
+
+	block_mgr = lwis_dev->block_mgr;
+	if (block_mgr == NULL) {
+		dev_err(lwis_dev->dev, "block_mgr is NULL\n");
+		return;
+	}
+
+	spin_lock_irqsave(&block_mgr->lock, flags);
+	block_mgr->ref_count--;
+	if (block_mgr->ref_count > 0) {
+		spin_unlock_irqrestore(&block_mgr->lock, flags);
+		return;
+	}
+
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_8k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_16k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_32k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_64k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_128k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_256k);
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_512k);
+	spin_unlock_irqrestore(&block_mgr->lock, flags);
+
+	kfree(block_mgr);
+	lwis_dev->block_mgr = NULL;
+}
+
+void *lwis_allocator_allocate(struct lwis_device *lwis_dev, size_t size)
+{
+	struct lwis_allocator_block_mgr *block_mgr;
+	struct lwis_allocator_block_pool *block_pool;
+	struct lwis_allocator_block *block;
+	uint32_t idx;
+	size_t block_size;
+	unsigned long flags;
+
+	if (lwis_dev == NULL) {
+		pr_err("lwis_dev is NULL\n");
+		return NULL;
+	}
+	block_mgr = lwis_dev->block_mgr;
+	if (block_mgr == NULL) {
+		dev_err(lwis_dev->dev, "block_mgr is NULL\n");
+		return NULL;
+	}
+
+	/*
+	 * Linux already has a slab allocator that caches allocations within a page.
+	 * The default page size is 4K, so we leverage the slab implementation for
+	 * recycling small allocations.
+	 */
+	if (size <= 4 * 1024) {
+		return kmalloc(size, GFP_KERNEL);
+	}
+
+	/*
+	 * fls() has a better performance profile, so it is used here to mimic the
+	 * behavior of kmalloc_index().
+	 *
+	 * kmalloc_index() returns the following values:
+	 *   if (size <=          8) return 3;
+	 *   if (size <=         16) return 4;
+	 *   if (size <=         32) return 5;
+	 *   if (size <=         64) return 6;
+	 *   if (size <=        128) return 7;
+	 *   if (size <=        256) return 8;
+	 *   if (size <=        512) return 9;
+	 *   if (size <=       1024) return 10;
+	 *   if (size <=   2 * 1024) return 11;
+	 *   if (size <=   4 * 1024) return 12;
+	 *   if (size <=   8 * 1024) return 13;
+	 *   if (size <=  16 * 1024) return 14;
+	 *   if (size <=  32 * 1024) return 15;
+	 *   if (size <=  64 * 1024) return 16;
+	 *   if (size <= 128 * 1024) return 17;
+	 *   if (size <= 256 * 1024) return 18;
+	 *   if (size <= 512 * 1024) return 19;
+	 *   if (size <= 1024 * 1024) return 20;
+	 *   if (size <=  2 * 1024 * 1024) return 21;
+	 *   if (size <=  4 * 1024 * 1024) return 22;
+	 *   if (size <=  8 * 1024 * 1024) return 23;
+	 *   if (size <=  16 * 1024 * 1024) return 24;
+	 *   if (size <=  32 * 1024 * 1024) return 25;
+	 */
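+	/* For example, size = 12000 gives fls(11999) = 14, which maps to pool_16k. */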
+	idx = fls(size - 1);
+
+	/*
+	 * For large allocations we usually use kvmalloc(), but kvmalloc() does not
+	 * take advantage of the slab caches. For this case we define several memory
+	 * pools and recycle their blocks. There is no use case for sizes larger than
+	 * 512K yet, so the current implementation does not cache them, to avoid
+	 * keeping too much unused memory on hand.
+	 */
+	if (idx > 19) {
+		block = kmalloc(sizeof(struct lwis_allocator_block), GFP_KERNEL);
+		if (block == NULL) {
+			dev_err(lwis_dev->dev, "Allocate failed\n");
+			return NULL;
+		}
+		block->type = idx;
+		block->next = NULL;
+		block->prev = NULL;
+		block->ptr = kvmalloc(size, GFP_KERNEL);
+		if (block->ptr == NULL) {
+			dev_err(lwis_dev->dev, "Allocate failed\n");
+			kfree(block);
+			return NULL;
+		}
+		spin_lock_irqsave(&block_mgr->lock, flags);
+		block_mgr->pool_large.in_use_count++;
+		hash_add(block_mgr->allocated_blocks, &block->node, (unsigned long long)block->ptr);
+		spin_unlock_irqrestore(&block_mgr->lock, flags);
+		return block->ptr;
+	}
+
+	block_pool = allocator_get_block_pool(block_mgr, idx);
+	if (block_pool == NULL) {
+		return NULL;
+	}
+
+	/* Try to get free block from recycling block pool */
+	spin_lock_irqsave(&block_mgr->lock, flags);
+	block = allocator_free_block_get_locked(block_pool);
+	spin_unlock_irqrestore(&block_mgr->lock, flags);
+	if (block != NULL) {
+		return block->ptr;
+	}
+
+	/* Allocate new block */
+	block = kmalloc(sizeof(struct lwis_allocator_block), GFP_KERNEL);
+	if (block == NULL) {
+		dev_err(lwis_dev->dev, "Allocate failed\n");
+		return NULL;
+	}
+	block->type = idx;
+	block->next = NULL;
+	block->prev = NULL;
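+	/* Allocate the full 2^idx bytes so the block can be recycled for any request in this size class */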
+	block_size = 1 << idx;
+	block->ptr = kvmalloc(block_size, GFP_KERNEL);
+	if (block->ptr == NULL) {
+		dev_err(lwis_dev->dev, "Allocate failed\n");
+		kfree(block);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&block_mgr->lock, flags);
+	block->next = block_pool->in_use;
+	if (block->next != NULL) {
+		block->next->prev = block;
+	}
+	block_pool->in_use = block;
+	block_pool->in_use_count++;
+	hash_add(block_mgr->allocated_blocks, &block->node, (unsigned long long)block->ptr);
+	spin_unlock_irqrestore(&block_mgr->lock, flags);
+
+	return block->ptr;
+}
+
+void lwis_allocator_free(struct lwis_device *lwis_dev, void *ptr)
+{
+	struct lwis_allocator_block_mgr *block_mgr;
+	struct lwis_allocator_block_pool *block_pool;
+	struct lwis_allocator_block *block = NULL;
+	struct lwis_allocator_block *blk;
+	unsigned long flags;
+
+	if (lwis_dev == NULL || ptr == NULL) {
+		pr_err("input is NULL\n");
+		return;
+	}
+	block_mgr = lwis_dev->block_mgr;
+	if (block_mgr == NULL) {
+		dev_err(lwis_dev->dev, "block_mgr is NULL\n");
+		return;
+	}
+	hash_for_each_possible (block_mgr->allocated_blocks, blk, node, (unsigned long long)ptr) {
+		if (blk->ptr == ptr) {
+			block = blk;
+			break;
+		}
+	}
+
+	if (block == NULL) {
+		kfree(ptr);
+		return;
+	}
+
+	if (block->type > 19) {
+		struct lwis_allocator_block *b;
+		struct hlist_node *n;
+		spin_lock_irqsave(&block_mgr->lock, flags);
+		hash_for_each_possible_safe (block_mgr->allocated_blocks, b, n, node,
+					     (unsigned long long)ptr) {
+			if (b->ptr == block->ptr) {
+				hash_del(&b->node);
+				break;
+			}
+		}
+		kvfree(block->ptr);
+		kfree(block);
+		block_mgr->pool_large.in_use_count--;
+		spin_unlock_irqrestore(&block_mgr->lock, flags);
+		return;
+	}
+
+	block_pool = allocator_get_block_pool(block_mgr, block->type);
+	if (block_pool == NULL) {
+		dev_err(lwis_dev->dev, "block type is invalid\n");
+		return;
+	}
+
+	spin_lock_irqsave(&block_mgr->lock, flags);
+	allocator_free_block_put_locked(block_pool, block);
+	spin_unlock_irqrestore(&block_mgr->lock, flags);
+
+	return;
+}
diff --git a/lwis_allocator.h b/lwis_allocator.h
new file mode 100644
index 0000000..c85f88b
--- /dev/null
+++ b/lwis_allocator.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LWIS Recycling Memory Allocator
+ *
+ * Copyright (c) 2021 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef LWIS_ALLOCATOR_H_
+#define LWIS_ALLOCATOR_H_
+
+#include <linux/mutex.h>
+#include "lwis_commands.h"
+#include "lwis_device.h"
+
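+/* A single allocation tracked by the recycling allocator */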
+struct lwis_allocator_block {
+	int type;
+	void *ptr;
+	struct lwis_allocator_block *next;
+	struct lwis_allocator_block *prev;
+	struct hlist_node node;
+};
+
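+/* Pool of free and in-use blocks for one size class */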
+struct lwis_allocator_block_pool {
+	char name[LWIS_MAX_NAME_STRING_LEN];
+	struct lwis_allocator_block *free;
+	uint32_t free_count;
+	struct lwis_allocator_block *in_use;
+	uint32_t in_use_count;
+};
+
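+/* Per-device allocator state, reference counted across the device's clients */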
+struct lwis_allocator_block_mgr {
+	spinlock_t lock;
+	struct lwis_allocator_block_pool pool_8k;
+	struct lwis_allocator_block_pool pool_16k;
+	struct lwis_allocator_block_pool pool_32k;
+	struct lwis_allocator_block_pool pool_64k;
+	struct lwis_allocator_block_pool pool_128k;
+	struct lwis_allocator_block_pool pool_256k;
+	struct lwis_allocator_block_pool pool_512k;
+	struct lwis_allocator_block_pool pool_large;
+	/* Hash table of allocated buffers keyed by allocated addresses */
+	DECLARE_HASHTABLE(allocated_blocks, BUFFER_HASH_BITS);
+	int ref_count;
+};
+
+/*
+ *  lwis_allocator_init: Initialize the recycling memory allocator
+ */
+int lwis_allocator_init(struct lwis_device *lwis_dev);
+
+/*
+ *  lwis_allocator_release: Release the recycling memory allocator
+ *  and its resources
+ */
+void lwis_allocator_release(struct lwis_device *lwis_dev);
+
+/*
+ *  lwis_allocator_allocate: Allocate a block from the recycling memory allocator
+ */
+void *lwis_allocator_allocate(struct lwis_device *lwis_dev, size_t size);
+
+/*
+ *  lwis_allocator_free: Free a block to the recycling memory allocator
+ */
+void lwis_allocator_free(struct lwis_device *lwis_dev, void *ptr);
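+
+/*
+ *  Typical usage (sketch):
+ *    void *buf = lwis_allocator_allocate(lwis_dev, 16 * 1024);
+ *    ...
+ *    lwis_allocator_free(lwis_dev, buf);
+ */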
+
+#endif /* LWIS_ALLOCATOR_H_ */
diff --git a/lwis_commands.h b/lwis_commands.h
index 8f4c6e7..940c00b 100644
--- a/lwis_commands.h
+++ b/lwis_commands.h
@@ -25,6 +25,9 @@
 extern "C" {
 #endif /* __cplusplus */
 
+#pragma pack(push)
+#pragma pack(4)
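+// 4-byte packing keeps these shared IOCTL struct layouts independent of the userspace client ABI.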
+
 /*
  *  IOCTL Types and Data Structures.
  */
@@ -71,7 +74,7 @@
 };
 
 struct lwis_device_info {
-	int id;
+	int32_t id;
 	int32_t type;
 	char name[LWIS_MAX_NAME_STRING_LEN];
 	struct lwis_clk_setting clks[LWIS_MAX_CLOCK_NUM];
@@ -97,13 +100,13 @@
 	size_t size;
 	uint32_t flags; // lwis_dma_alloc_flags
 	// IOCTL output for BUFFER_ALLOC
-	int dma_fd;
-	int partition_id;
+	int32_t dma_fd;
+	int32_t partition_id;
 };
 
 struct lwis_buffer_info {
 	// IOCTL input for BUFFER_ENROLL
-	int fd;
+	int32_t fd;
 	bool dma_read;
 	bool dma_write;
 	// IOCTL output for BUFFER_ENROLL
@@ -111,7 +114,7 @@
 };
 
 struct lwis_enrolled_buffer_info {
-	int fd;
+	int32_t fd;
 	uint64_t dma_vaddr;
 };
 
@@ -127,21 +130,22 @@
 
 // For io_entry read and write types.
 struct lwis_io_entry_rw {
-	int bid;
+	int32_t bid;
 	uint64_t offset;
 	uint64_t val;
 };
 
 struct lwis_io_entry_rw_batch {
-	int bid;
+	int32_t bid;
 	uint64_t offset;
 	size_t size_in_bytes;
 	uint8_t *buf;
+	bool is_offset_fixed;
 };
 
 // For io_entry modify types.
 struct lwis_io_entry_modify {
-	int bid;
+	int32_t bid;
 	uint64_t offset;
 	uint64_t val;
 	uint64_t val_mask;
@@ -149,7 +153,7 @@
 
 // For io_entry read assert type.
 struct lwis_io_entry_read_assert {
-	int bid;
+	int32_t bid;
 	uint64_t offset;
 	uint64_t val;
 	uint64_t mask;
@@ -157,7 +161,7 @@
 };
 
 struct lwis_io_entry {
-	int type;
+	int32_t type;
 	union {
 		struct lwis_io_entry_rw rw;
 		struct lwis_io_entry_rw_batch rw_batch;
@@ -208,6 +212,8 @@
 // Event flags used for transaction events.
 #define LWIS_TRANSACTION_EVENT_FLAG (1ULL << 63)
 #define LWIS_TRANSACTION_FAILURE_EVENT_FLAG (1ULL << 62)
+#define LWIS_HW_IRQ_EVENT_FLAG (1ULL << 61)
+#define LWIS_PERIODIC_IO_EVENT_FLAG (1ULL << 60)
 
 struct lwis_event_info {
 	// IOCTL Inputs
@@ -222,6 +228,7 @@
 
 #define LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE (1ULL << 0)
 #define LWIS_EVENT_CONTROL_FLAG_QUEUE_ENABLE (1ULL << 1)
+#define LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE (1ULL << 2)
 
 struct lwis_event_control {
 	// IOCTL Inputs
@@ -261,14 +268,14 @@
 // Actual size of this struct depends on num_entries
 struct lwis_transaction_response_header {
 	int64_t id;
-	int error_code;
-	int completion_index;
+	int32_t error_code;
+	int32_t completion_index;
 	size_t num_entries;
 	size_t results_size_bytes;
 };
 
 struct lwis_io_result {
-	int bid;
+	int32_t bid;
 	uint64_t offset;
 	size_t num_value_bytes;
 	uint8_t values[];
@@ -276,7 +283,7 @@
 
 struct lwis_periodic_io_info {
 	// Input
-	int batch_size;
+	int32_t batch_size;
 	int64_t period_ns;
 	size_t num_io_entries;
 	struct lwis_io_entry *io_entries;
@@ -290,8 +297,8 @@
 // Actual size of this struct depends on batch_size and num_entries_per_period
 struct lwis_periodic_io_response_header {
 	int64_t id;
-	int error_code;
-	int batch_size;
+	int32_t error_code;
+	int32_t batch_size;
 	size_t num_entries_per_period;
 	size_t results_size_bytes;
 };
@@ -342,7 +349,7 @@
 #define LWIS_DEVICE_ENABLE _IO(LWIS_IOC_TYPE, 6)
 #define LWIS_DEVICE_DISABLE _IO(LWIS_IOC_TYPE, 7)
 #define LWIS_BUFFER_ALLOC _IOWR(LWIS_IOC_TYPE, 8, struct lwis_alloc_buffer_info)
-#define LWIS_BUFFER_FREE _IOWR(LWIS_IOC_TYPE, 9, int)
+#define LWIS_BUFFER_FREE _IOWR(LWIS_IOC_TYPE, 9, int32_t)
 #define LWIS_TIME_QUERY _IOWR(LWIS_IOC_TYPE, 10, int64_t)
 #define LWIS_REG_IO _IOWR(LWIS_IOC_TYPE, 11, struct lwis_io_entries)
 #define LWIS_ECHO _IOWR(LWIS_IOC_TYPE, 12, struct lwis_echo)
@@ -373,6 +380,8 @@
 	uint64_t fault_flags;
 };
 
+#pragma pack(pop)
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif /* __cplusplus */
diff --git a/lwis_device.c b/lwis_device.c
index 3d28894..2d113ba 100644
--- a/lwis_device.c
+++ b/lwis_device.c
@@ -114,6 +114,9 @@
 	/* Empty hash table for client enrolled buffers */
 	hash_init(lwis_client->enrolled_buffers);
 
+	/* Initialize the allocator */
+	lwis_allocator_init(lwis_dev);
+
 	/* Start transaction processor task */
 	lwis_transaction_init(lwis_client);
 
@@ -145,8 +148,9 @@
 	/* Clear event states for this client */
 	lwis_client_event_states_clear(lwis_client);
 
-	/* Clear the event queue */
+	/* Clear the event queues */
 	lwis_client_event_queue_clear(lwis_client);
+	lwis_client_error_event_queue_clear(lwis_client);
 
 	/* Clean up all periodic io state for the client */
 	lwis_periodic_io_client_cleanup(lwis_client);
@@ -219,6 +223,10 @@
 	dev_info(lwis_dev->dev, "Closing instance %d\n", iminor(node));
 
 	rc = lwis_release_client(lwis_client);
+
+	/* Release the allocator and its cache */
+	lwis_allocator_release(lwis_dev);
+
 	mutex_lock(&lwis_dev->client_lock);
 	/* Release power if client closed without power down called */
 	if (is_client_enabled && lwis_dev->enabled > 0) {
@@ -258,7 +266,6 @@
  */
 static long lwis_ioctl(struct file *fp, unsigned int type, unsigned long param)
 {
-	int ret = 0;
 	struct lwis_client *lwis_client;
 	struct lwis_device *lwis_dev;
 
@@ -274,13 +281,7 @@
 		return -ENODEV;
 	}
 
-	ret = lwis_ioctl_handler(lwis_client, type, param);
-
-	if (ret && ret != -ENOENT && ret != -ETIMEDOUT && ret != -EAGAIN) {
-		lwis_ioctl_pr_err(lwis_dev, type, ret);
-	}
-
-	return ret;
+	return lwis_ioctl_handler(lwis_client, type, param);
 }
 /*
  *  lwis_poll: Event queue status function of LWIS
@@ -1053,10 +1054,7 @@
 	}
 }
 
-/*
- *  lwis_find_top_dev: Find LWIS top device.
- */
-struct lwis_device *lwis_find_top_dev()
+static struct lwis_device *find_top_dev(void)
 {
 	struct lwis_device *lwis_dev;
 
@@ -1176,7 +1174,7 @@
 		/* Assign top device to the devices probed before */
 		lwis_assign_top_to_other(lwis_dev);
 	} else {
-		lwis_dev->top_dev = lwis_find_top_dev();
+		lwis_dev->top_dev = find_top_dev();
 		if (lwis_dev->top_dev == NULL)
 			pr_warn("Top device not probed yet");
 	}
@@ -1270,6 +1268,16 @@
 				lwis_gpios_list_free(lwis_dev->gpios_list);
 				lwis_dev->gpios_list = NULL;
 			}
+			/* Release device gpio info irq list */
+			if (lwis_dev->irq_gpios_info.irq_list) {
+				lwis_interrupt_list_free(lwis_dev->irq_gpios_info.irq_list);
+				lwis_dev->irq_gpios_info.irq_list = NULL;
+			}
+			if (lwis_dev->irq_gpios_info.gpios) {
+				lwis_gpio_list_put(lwis_dev->irq_gpios_info.gpios,
+						   &lwis_dev->plat_dev->dev);
+				lwis_dev->irq_gpios_info.gpios = NULL;
+			}
 			/* Destroy device */
 			if (!IS_ERR(lwis_dev->dev)) {
 				device_destroy(core.dev_class,
@@ -1481,34 +1489,55 @@
 		}
 		pm_runtime_disable(&lwis_dev->plat_dev->dev);
 		/* Release device clock list */
-		if (lwis_dev->clocks)
+		if (lwis_dev->clocks) {
 			lwis_clock_list_free(lwis_dev->clocks);
+		}
 		/* Release device interrupt list */
-		if (lwis_dev->irqs)
+		if (lwis_dev->irqs) {
 			lwis_interrupt_list_free(lwis_dev->irqs);
+		}
 		/* Release device regulator list */
 		if (lwis_dev->regulators) {
 			lwis_regulator_put_all(lwis_dev->regulators);
 			lwis_regulator_list_free(lwis_dev->regulators);
 		}
 		/* Release device phy list */
-		if (lwis_dev->phys)
+		if (lwis_dev->phys) {
 			lwis_phy_list_free(lwis_dev->phys);
+		}
 		/* Release device power sequence list */
-		if (lwis_dev->power_up_sequence)
+		if (lwis_dev->power_up_sequence) {
 			lwis_dev_power_seq_list_free(lwis_dev->power_up_sequence);
-		if (lwis_dev->power_down_sequence)
+		}
+		if (lwis_dev->power_down_sequence) {
 			lwis_dev_power_seq_list_free(lwis_dev->power_down_sequence);
+		}
 		/* Release device gpio list */
-		if (lwis_dev->gpios_list)
+		if (lwis_dev->gpios_list) {
 			lwis_gpios_list_free(lwis_dev->gpios_list);
-		if (lwis_dev->reset_gpios)
+		}
+		/* Release device gpio info irq list */
+		if (lwis_dev->irq_gpios_info.irq_list) {
+			lwis_interrupt_list_free(lwis_dev->irq_gpios_info.irq_list);
+		}
+		if (lwis_dev->irq_gpios_info.gpios) {
+			lwis_gpio_list_put(lwis_dev->irq_gpios_info.gpios,
+					   &lwis_dev->plat_dev->dev);
+		}
+		if (lwis_dev->reset_gpios) {
 			lwis_gpio_list_put(lwis_dev->reset_gpios, &lwis_dev->plat_dev->dev);
-		if (lwis_dev->enable_gpios)
+		}
+		if (lwis_dev->enable_gpios) {
 			lwis_gpio_list_put(lwis_dev->enable_gpios, &lwis_dev->plat_dev->dev);
+		}
+		if (lwis_dev->shared_enable_gpios) {
+			lwis_gpio_list_put(lwis_dev->shared_enable_gpios, &lwis_dev->plat_dev->dev);
+		}
 		/* Release event subscription components */
-		if (lwis_dev->type == DEVICE_TYPE_TOP)
+		if (lwis_dev->type == DEVICE_TYPE_TOP) {
 			lwis_dev->top_dev->subscribe_ops.release(lwis_dev);
+		}
+
 		/* Destroy device */
 		device_destroy(core.dev_class, MKDEV(core.device_major, lwis_dev->id));
 		list_del(&lwis_dev->dev_list);
diff --git a/lwis_device.h b/lwis_device.h
index e97f257..18b9f0a 100644
--- a/lwis_device.h
+++ b/lwis_device.h
@@ -17,6 +17,7 @@
 #include <linux/hashtable.h>
 #include <linux/idr.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
@@ -51,6 +52,11 @@
 /* Forward declaration of a platform specific struct used by platform funcs */
 struct lwis_platform;
 
+/* Forward declaration of lwis allocator block manager */
+struct lwis_allocator_block_mgr;
+int lwis_allocator_init(struct lwis_device *lwis_dev);
+void lwis_allocator_release(struct lwis_device *lwis_dev);
+
 /*
  *  struct lwis_core
  *  This struct applies to all LWIS devices that are defined in the
@@ -105,12 +111,12 @@
  * Top device should be the only device to implement it.
  */
 struct lwis_event_subscribe_operations {
-	/* Subscribe an event for receiver device */
+	/* Subscribe an event for subscriber device */
 	int (*subscribe_event)(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-			       int trigger_device_id, int receiver_device_id);
-	/* Unsubscribe an event for receiver device */
+			       int trigger_device_id, int subscriber_device_id);
+	/* Unsubscribe an event for subscriber device */
 	int (*unsubscribe_event)(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-				 int receiver_device_id);
+				 int subscriber_device_id);
 	/* Notify subscriber when an event is happening */
 	void (*notify_event_subscriber)(struct lwis_device *lwis_dev, int64_t trigger_event_id,
 					int64_t trigger_event_count,
@@ -200,8 +206,6 @@
 	DECLARE_HASHTABLE(event_states, EVENT_HASH_BITS);
 	/* Virtual function table for sub classes */
 	struct lwis_device_subclass_operations vops;
-	/* Does the device have IOMMU. TODO: Move to platform */
-	bool has_iommu;
 	/* Mutex used to synchronize register access between clients */
 	struct mutex reg_rw_lock;
 	/* Heartbeat timer structure */
@@ -242,13 +246,26 @@
 	struct lwis_device_power_sequence_list *power_down_sequence;
 	/* GPIOs list */
 	struct lwis_gpios_list *gpios_list;
+	/* GPIO interrupts list */
+	struct lwis_gpios_info irq_gpios_info;
 
 	/* Power management hibernation state of the device */
 	int pm_hibernation;
+
 	/* Is device read only */
 	bool is_read_only;
 	/* Adjust thread priority */
-	int adjust_thread_priority;
+	u32 transaction_thread_priority;
+	u32 periodic_io_thread_priority;
+
+	/* LWIS allocator block manager */
+	struct lwis_allocator_block_mgr *block_mgr;
+
+	/* Transaction and periodic io worker threads */
+	struct kthread_worker transaction_worker;
+	struct task_struct *transaction_worker_thread;
+	struct kthread_worker periodic_io_worker;
+	struct task_struct *periodic_io_worker_thread;
 };
 
 /*
@@ -278,8 +295,6 @@
 	DECLARE_HASHTABLE(transaction_list, TRANSACTION_HASH_BITS);
 	/* Transaction task-related variables */
 	struct tasklet_struct transaction_tasklet;
-	struct workqueue_struct *transaction_wq;
-	struct work_struct transaction_work;
 	/* Spinlock used to synchronize access to transaction data structs */
 	spinlock_t transaction_lock;
 	/* List of transaction triggers */
@@ -289,9 +304,9 @@
 	int64_t transaction_counter;
 	/* Hash table of hrtimer keyed by time out duration */
 	DECLARE_HASHTABLE(timer_list, PERIODIC_IO_HASH_BITS);
-	/* Workqueue variables for periodic io */
-	struct workqueue_struct *periodic_io_wq;
-	struct work_struct periodic_io_work;
+	/* Work items for the transaction and periodic io workers */
+	struct kthread_work transaction_work;
+	struct kthread_work periodic_io_work;
 	/* Spinlock used to synchronize access to periodic io data structs */
 	spinlock_t periodic_io_lock;
 	/* Queue of all periodic_io pending processing */
@@ -318,11 +333,6 @@
 void lwis_base_unprobe(struct lwis_device *unprobe_lwis_dev);
 
 /*
- * Find LWIS top device
- */
-struct lwis_device *lwis_find_top_dev(void);
-
-/*
  * Find LWIS device by id
  */
 struct lwis_device *lwis_find_dev_by_id(int dev_id);
diff --git a/lwis_device_dpm.c b/lwis_device_dpm.c
index 139b8b7..78d264d 100644
--- a/lwis_device_dpm.c
+++ b/lwis_device_dpm.c
@@ -65,18 +65,7 @@
 	switch (qos_setting->clock_family) {
 	case CLOCK_FAMILY_MIF:
 	case CLOCK_FAMILY_INT:
-		read_bw = qos_setting->read_bw;
-		write_bw = qos_setting->write_bw;
-		peak_bw = (qos_setting->peak_bw > 0) ?
-				  qos_setting->peak_bw :
-					((read_bw > write_bw) ? read_bw : write_bw) / 4;
-		rt_bw = (qos_setting->rt_bw > 0) ? qos_setting->rt_bw : 0;
-		ret = lwis_platform_update_bts(target_dev, peak_bw, read_bw, write_bw, rt_bw);
-		if (ret < 0) {
-			dev_err(lwis_dev->dev, "Failed to update bandwidth to bts, ret: %d\n", ret);
-			break;
-		}
-		if (qos_setting->frequency_hz >= 0 && lwis_dev->id == target_dev->id) {
+		if (qos_setting->frequency_hz >= 0 && target_dev->type == DEVICE_TYPE_DPM) {
 			/* vote to qos if frequency is specified. The vote only available for dpm
 			 * device
 			 */
@@ -88,6 +77,17 @@
 					"Failed to vote to qos for clock family %d\n",
 					qos_setting->clock_family);
 			}
+		} else {
+			read_bw = qos_setting->read_bw;
+			write_bw = qos_setting->write_bw;
+			peak_bw = (qos_setting->peak_bw > 0) ?
+						qos_setting->peak_bw :
+						((read_bw > write_bw) ? read_bw : write_bw) / 4;
+			rt_bw = (qos_setting->rt_bw > 0) ? qos_setting->rt_bw : 0;
+			ret = lwis_platform_update_bts(target_dev, peak_bw, read_bw, write_bw, rt_bw);
+			if (ret < 0) {
+				dev_err(lwis_dev->dev, "Failed to update bandwidth to bts, ret: %d\n", ret);
+			}
 		}
 		break;
 	case CLOCK_FAMILY_TNR:
@@ -127,7 +127,7 @@
 
 	for (i = 0; i < num_settings; ++i) {
 		clk_index = clk_settings[i].clk_index;
-		if (clk_index >= lwis_dev->clocks->count) {
+		if (clk_index < 0 || clk_index >= lwis_dev->clocks->count) {
 			dev_err(lwis_dev->dev, "%s clk index %d is invalid\n", lwis_dev->name,
 				clk_index);
 			ret = -EINVAL;
@@ -156,7 +156,6 @@
 			 clk_get_rate(lwis_dev->clocks->clk[clk_index].clk));
 	}
 out:
-	kfree(clk_settings);
 	return ret;
 }
 
diff --git a/lwis_device_i2c.c b/lwis_device_i2c.c
index 3568856..28e6d81 100644
--- a/lwis_device_i2c.c
+++ b/lwis_device_i2c.c
@@ -15,15 +15,20 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/sched/types.h>
 #include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
 
 #include "lwis_i2c.h"
 #include "lwis_init.h"
 #include "lwis_periodic_io.h"
+#include "lwis_util.h"
 
 #ifdef CONFIG_OF
 #include "lwis_dt.h"
@@ -236,7 +241,7 @@
 	i2c_dev->base_dev.subscribe_ops = i2c_subscribe_ops;
 
 	/* Call the base device probe function */
-	ret = lwis_base_probe((struct lwis_device *)i2c_dev, plat_dev);
+	ret = lwis_base_probe(&i2c_dev->base_dev, plat_dev);
 	if (ret) {
 		pr_err("Error in lwis base probe\n");
 		goto error_probe;
@@ -246,10 +251,44 @@
 	ret = lwis_i2c_device_setup(i2c_dev);
 	if (ret) {
 		dev_err(i2c_dev->base_dev.dev, "Error in i2c device initialization\n");
-		lwis_base_unprobe((struct lwis_device *)i2c_dev);
+		lwis_base_unprobe(&i2c_dev->base_dev);
 		goto error_probe;
 	}
 
+	/* Create associated kworker threads */
+	ret = lwis_create_kthread_workers(&i2c_dev->base_dev, "lwis_i2c_trans_kthread",
+					 "lwis_i2c_prd_io_kthread");
+	if (ret) {
+		dev_err(i2c_dev->base_dev.dev, "Failed to create lwis_i2c_kthread");
+		lwis_base_unprobe(&i2c_dev->base_dev);
+		goto error_probe;
+	}
+
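+	/* Apply kthread priorities from the device tree, if specified */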
+	if (i2c_dev->base_dev.transaction_thread_priority != 0) {
+		ret = lwis_set_kthread_priority(&i2c_dev->base_dev,
+			i2c_dev->base_dev.transaction_worker_thread,
+			i2c_dev->base_dev.transaction_thread_priority);
+		if (ret) {
+			dev_err(i2c_dev->base_dev.dev,
+				"Failed to set LWIS I2C transaction kthread priority (%d)",
+				ret);
+			lwis_base_unprobe(&i2c_dev->base_dev);
+			goto error_probe;
+		}
+	}
+	if (i2c_dev->base_dev.periodic_io_thread_priority != 0) {
+		ret = lwis_set_kthread_priority(&i2c_dev->base_dev,
+			i2c_dev->base_dev.periodic_io_worker_thread,
+			i2c_dev->base_dev.periodic_io_thread_priority);
+		if (ret) {
+			dev_err(i2c_dev->base_dev.dev,
+				"Failed to set LWIS I2C periodic io kthread priority (%d)",
+				ret);
+			lwis_base_unprobe(&i2c_dev->base_dev);
+			goto error_probe;
+		}
+	}
+
 	dev_info(i2c_dev->base_dev.dev, "I2C Device Probe: Success\n");
 
 	return 0;
diff --git a/lwis_device_ioreg.c b/lwis_device_ioreg.c
index d1a99bb..ec3b6b9 100644
--- a/lwis_device_ioreg.c
+++ b/lwis_device_ioreg.c
@@ -18,11 +18,13 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
 
 #include "lwis_init.h"
 #include "lwis_interrupt.h"
 #include "lwis_ioreg.h"
 #include "lwis_periodic_io.h"
+#include "lwis_util.h"
 
 #ifdef CONFIG_OF
 #include "lwis_dt.h"
@@ -111,7 +113,7 @@
 	ioreg_dev->base_dev.subscribe_ops = ioreg_subscribe_ops;
 
 	/* Call the base device probe function */
-	ret = lwis_base_probe((struct lwis_device *)ioreg_dev, plat_dev);
+	ret = lwis_base_probe(&ioreg_dev->base_dev, plat_dev);
 	if (ret) {
 		pr_err("Error in lwis base probe\n");
 		goto error_probe;
@@ -121,10 +123,44 @@
 	ret = lwis_ioreg_device_setup(ioreg_dev);
 	if (ret) {
 		dev_err(ioreg_dev->base_dev.dev, "Error in IOREG device initialization\n");
-		lwis_base_unprobe((struct lwis_device *)ioreg_dev);
+		lwis_base_unprobe(&ioreg_dev->base_dev);
 		goto error_probe;
 	}
 
+	/* Create associated kworker threads */
+	ret = lwis_create_kthread_workers(&ioreg_dev->base_dev, "lwis_ioreg_trans_kthread",
+					 "lwis_ioreg_prd_io_kthread");
+	if (ret) {
+		dev_err(ioreg_dev->base_dev.dev, "Failed to create lwis_ioreg_kthread");
+		lwis_base_unprobe(&ioreg_dev->base_dev);
+		goto error_probe;
+	}
+
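+	/* Apply kthread priorities from the device tree, if specified */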
+	if (ioreg_dev->base_dev.transaction_thread_priority != 0) {
+		ret = lwis_set_kthread_priority(&ioreg_dev->base_dev,
+			ioreg_dev->base_dev.transaction_worker_thread,
+			ioreg_dev->base_dev.transaction_thread_priority);
+		if (ret) {
+			dev_err(ioreg_dev->base_dev.dev,
+				"Failed to set LWIS IOREG transaction kthread priority (%d)",
+				ret);
+			lwis_base_unprobe(&ioreg_dev->base_dev);
+			goto error_probe;
+		}
+	}
+	if (ioreg_dev->base_dev.periodic_io_thread_priority != 0) {
+		ret = lwis_set_kthread_priority(&ioreg_dev->base_dev,
+			ioreg_dev->base_dev.periodic_io_worker_thread,
+			ioreg_dev->base_dev.periodic_io_thread_priority);
+		if (ret) {
+			dev_err(ioreg_dev->base_dev.dev,
+				"Failed to set LWIS IOREG periodic io kthread priority (%d)",
+				ret);
+			lwis_base_unprobe(&ioreg_dev->base_dev);
+			goto error_probe;
+		}
+	}
+
 	dev_info(ioreg_dev->base_dev.dev, "IOREG Device Probe: Success\n");
 
 	return 0;
diff --git a/lwis_device_top.c b/lwis_device_top.c
index c899964..e562d7f 100644
--- a/lwis_device_top.c
+++ b/lwis_device_top.c
@@ -13,6 +13,7 @@
 #include "lwis_device_top.h"
 #include "lwis_event.h"
 #include "lwis_init.h"
+#include "lwis_util.h"
 
 #include <linux/device.h>
 #include <linux/init.h>
@@ -41,9 +42,9 @@
 };
 
 static int lwis_top_event_subscribe(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-				    int trigger_device_id, int receiver_device_id);
+				    int trigger_device_id, int subscriber_device_id);
 static int lwis_top_event_unsubscribe(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-				      int receiver_device_id);
+				      int subscriber_device_id);
 static void lwis_top_event_notify(struct lwis_device *lwis_dev, int64_t trigger_event_id,
 				  int64_t trigger_event_count, int64_t trigger_event_timestamp,
 				  bool in_irq);
@@ -59,10 +60,18 @@
 	/* Subscribed Event ID */
 	int64_t event_id;
 	/* LWIS device who subscribed an event */
-	struct lwis_device *receiver_dev;
+	struct lwis_device *subscriber_dev;
 	/* LWIS device who will trigger an event */
 	struct lwis_device *trigger_dev;
-	/* Node in the lwis_top_device->event_subscribe hash table */
+	/* Node in the lwis_top_device->event_subscribers list */
+	struct list_head list_node;
+	/* List of event subscriber info */
+	struct lwis_event_subscriber_list *event_subscriber_list;
+};
+
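+/* List of subscribers for a single trigger event id, stored in the top device's hash table */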
+struct lwis_event_subscriber_list {
+	int64_t trigger_event_id;
+	struct list_head list;
 	struct hlist_node node;
 };
 
@@ -77,27 +86,77 @@
 	struct list_head node;
 };
 
+static struct lwis_event_subscriber_list *event_subscriber_list_find(struct lwis_device *lwis_dev,
+								     int64_t trigger_event_id)
+{
+	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
+	struct lwis_event_subscriber_list *list;
+	hash_for_each_possible (lwis_top_dev->event_subscribers, list, node, trigger_event_id) {
+		if (list->trigger_event_id == trigger_event_id) {
+			return list;
+		}
+	}
+	return NULL;
+}
+
+static struct lwis_event_subscriber_list *event_subscriber_list_create(struct lwis_device *lwis_dev,
+								       int64_t trigger_event_id)
+{
+	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
+	struct lwis_event_subscriber_list *event_subscriber_list =
+		kmalloc(sizeof(struct lwis_event_subscriber_list), GFP_KERNEL);
+	if (!event_subscriber_list) {
+		dev_err(lwis_dev->dev, "Can't allocate event subscriber list\n");
+		return NULL;
+	}
+	event_subscriber_list->trigger_event_id = trigger_event_id;
+	INIT_LIST_HEAD(&event_subscriber_list->list);
+	hash_add(lwis_top_dev->event_subscribers, &event_subscriber_list->node, trigger_event_id);
+	return event_subscriber_list;
+}
+
+static struct lwis_event_subscriber_list *
+event_subscriber_list_find_or_create(struct lwis_device *lwis_dev, int64_t trigger_event_id)
+{
+	struct lwis_event_subscriber_list *list =
+		event_subscriber_list_find(lwis_dev, trigger_event_id);
+	return (list == NULL) ? event_subscriber_list_create(lwis_dev, trigger_event_id) : list;
+}
+
 static void subscribe_tasklet_func(unsigned long data)
 {
 	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)data;
 	struct lwis_trigger_event_info *trigger_event;
-	struct lwis_event_subscribe_info *p;
+	struct lwis_event_subscribe_info *subscribe_info;
 	struct list_head *it_sub, *it_sub_tmp;
+	struct lwis_event_subscriber_list *event_subscriber_list;
+	struct list_head *it_event_subscriber, *it_event_subscriber_tmp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
 	list_for_each_safe (it_sub, it_sub_tmp, &lwis_top_dev->emitted_event_list_tasklet) {
 		trigger_event = list_entry(it_sub, struct lwis_trigger_event_info, node);
 		list_del(&trigger_event->node);
-		hash_for_each_possible (lwis_top_dev->event_subscriber, p, node,
-					trigger_event->trigger_event_id) {
-			if (p->event_id == trigger_event->trigger_event_id) {
-				/* Notify subscriber an event is happening */
-				lwis_device_external_event_emit(
-					p->receiver_dev, trigger_event->trigger_event_id,
-					trigger_event->trigger_event_count,
-					trigger_event->trigger_event_timestamp, false);
-			}
+		event_subscriber_list = event_subscriber_list_find((struct lwis_device *)data,
+								   trigger_event->trigger_event_id);
+		if (!event_subscriber_list || list_empty(&event_subscriber_list->list)) {
+			dev_err(lwis_top_dev->base_dev.dev,
+				"Failed to find event subscriber list for %llx\n",
+				trigger_event->trigger_event_id);
+			kfree(trigger_event);
+			continue;
+		}
+		list_for_each_safe (it_event_subscriber, it_event_subscriber_tmp,
+				    &event_subscriber_list->list) {
+			subscribe_info = list_entry(it_event_subscriber,
+						    struct lwis_event_subscribe_info, list_node);
+			/* Notify subscriber an event is happening */
+			lwis_device_external_event_emit(subscribe_info->subscriber_dev,
+							trigger_event->trigger_event_id,
+							trigger_event->trigger_event_count,
+							trigger_event->trigger_event_timestamp,
+							false);
 		}
 		kfree(trigger_event);
 	}
@@ -118,6 +177,7 @@
 		return;
 	}
 
+	INIT_LIST_HEAD(&trigger_event->node);
 	trigger_event->trigger_event_id = trigger_event_id;
 	trigger_event->trigger_event_count = trigger_event_count;
 	trigger_event->trigger_event_timestamp = trigger_event_timestamp;
@@ -128,79 +188,44 @@
 	tasklet_schedule(&lwis_top_dev->subscribe_tasklet);
 }
 
-static int lwis_top_event_unsubscribe(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-				      int receiver_device_id)
-{
-	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
-	struct lwis_device *trigger_dev = NULL;
-	struct lwis_event_subscribe_info *p = NULL;
-	struct hlist_node *tmp = NULL;
-	struct lwis_trigger_event_info *pending_event, *n;
-	unsigned long flags;
-	bool has_subscriber = false;
-
-	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
-
-	/* Remove event from hash table */
-	hash_for_each_possible_safe (lwis_top_dev->event_subscriber, p, tmp, node,
-				     trigger_event_id) {
-		/* Clear pending events */
-		list_for_each_entry_safe (pending_event, n,
-					  &lwis_top_dev->emitted_event_list_tasklet, node) {
-			/* The condition indicates that clear pending events by subscriber
-			 * because there are possibly other subscribers have the same pending event.
-			 */
-			if (p->receiver_dev->id == receiver_device_id &&
-			    pending_event->trigger_event_id == trigger_event_id) {
-				list_del(&pending_event->node);
-				kfree(pending_event);
-			}
-		}
-
-		if (p->event_id == trigger_event_id && p->receiver_dev->id == receiver_device_id) {
-			dev_info(lwis_dev->dev,
-				 "unsubscribe event: %llx, trigger device: %s, target device: %s\n",
-				 trigger_event_id, p->trigger_dev->name, p->receiver_dev->name);
-			trigger_dev = p->trigger_dev;
-			hash_del(&p->node);
-			kfree(p);
-		} else if (p->event_id == trigger_event_id &&
-			   p->receiver_dev->id != receiver_device_id) {
-			/* The condition indicate there are other client still subscribe the event */
-			trigger_dev = p->trigger_dev;
-			has_subscriber = true;
-		}
-	}
-	spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
-	lwis_device_event_update_subscriber(trigger_dev, trigger_event_id, has_subscriber);
-	return 0;
-}
-
 static int lwis_top_event_subscribe(struct lwis_device *lwis_dev, int64_t trigger_event_id,
-				    int trigger_device_id, int receiver_device_id)
+				    int trigger_device_id, int subscriber_device_id)
 {
 	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
 	struct lwis_device *lwis_trigger_dev = lwis_find_dev_by_id(trigger_device_id);
-	struct lwis_device *lwis_receiver_dev = lwis_find_dev_by_id(receiver_device_id);
-	struct lwis_event_subscribe_info *p, *new_subscription;
+	struct lwis_device *lwis_subscriber_dev = lwis_find_dev_by_id(subscriber_device_id);
+	struct lwis_event_subscribe_info *old_subscription;
+	struct lwis_event_subscribe_info *new_subscription;
+	struct lwis_event_subscriber_list *event_subscriber_list;
+	struct list_head *it_event_subscriber;
 	unsigned long flags;
 	int ret = 0;
 	bool has_subscriber = true;
 
-	if (lwis_trigger_dev == NULL || lwis_receiver_dev == NULL) {
-		dev_err(lwis_top_dev->base_dev.dev, "LWIS trigger/receiver device not found");
+	if (lwis_trigger_dev == NULL || lwis_subscriber_dev == NULL) {
+		dev_err(lwis_top_dev->base_dev.dev, "LWIS trigger/subscriber device not found");
 		return -EINVAL;
 	}
+
+	event_subscriber_list = event_subscriber_list_find_or_create(lwis_dev, trigger_event_id);
+	if (!event_subscriber_list) {
+		dev_err(lwis_dev->dev, "Can't find/create event subscriber list\n");
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
-	hash_for_each_possible (lwis_top_dev->event_subscriber, p, node, trigger_event_id) {
-		/* event already registered for this device */
-		if (p->event_id == trigger_event_id && p->receiver_dev->id == receiver_device_id) {
+	list_for_each (it_event_subscriber, &event_subscriber_list->list) {
+		old_subscription = list_entry(it_event_subscriber, struct lwis_event_subscribe_info,
+					      list_node);
+		/* Event already registered for this device */
+		if (old_subscription->subscriber_dev->id == subscriber_device_id) {
 			spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
 			dev_info(
 				lwis_dev->dev,
-				"already registered event: %llx, trigger device: %s, target device: %s\n",
-				trigger_event_id, lwis_trigger_dev->name, lwis_receiver_dev->name);
-			goto out;
+				"Already subscribed event: %llx, trigger device: %s, subscriber device: %s\n",
+				trigger_event_id, old_subscription->trigger_dev->name,
+				old_subscription->subscriber_dev->name);
+			return 0;
 		}
 	}
 	spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
@@ -210,20 +235,19 @@
 	if (!new_subscription) {
 		dev_err(lwis_top_dev->base_dev.dev,
 			"Failed to allocate memory for new subscription\n");
-		has_subscriber = false;
-		ret = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
+	INIT_LIST_HEAD(&new_subscription->list_node);
 	new_subscription->event_id = trigger_event_id;
-	new_subscription->receiver_dev = lwis_receiver_dev;
+	new_subscription->subscriber_dev = lwis_subscriber_dev;
 	new_subscription->trigger_dev = lwis_trigger_dev;
 	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
-	hash_add(lwis_top_dev->event_subscriber, &new_subscription->node, trigger_event_id);
+	list_add_tail(&new_subscription->list_node, &event_subscriber_list->list);
+	new_subscription->event_subscriber_list = event_subscriber_list;
 	spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
-	dev_info(lwis_dev->dev, "subscribe event: %llx, trigger device: %s, target device: %s",
-		 trigger_event_id, lwis_trigger_dev->name, lwis_receiver_dev->name);
+	dev_info(lwis_dev->dev, "Subscribe event: %llx, trigger device: %s, subscriber device: %s",
+		 trigger_event_id, lwis_trigger_dev->name, lwis_subscriber_dev->name);
 
-out:
 	ret = lwis_device_event_update_subscriber(lwis_trigger_dev, trigger_event_id,
 						  has_subscriber);
 	if (ret < 0) {
@@ -233,9 +257,67 @@
 	return ret;
 }
 
+static int lwis_top_event_unsubscribe(struct lwis_device *lwis_dev, int64_t trigger_event_id,
+				      int subscriber_device_id)
+{
+	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
+	struct lwis_device *trigger_dev = NULL;
+	struct lwis_event_subscribe_info *subscribe_info = NULL;
+	struct lwis_event_subscriber_list *event_subscriber_list;
+	struct list_head *it_event_subscriber, *it_event_subscriber_tmp;
+	struct lwis_trigger_event_info *pending_event, *n;
+	unsigned long flags;
+	bool has_subscriber = false;
+
+	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
+	event_subscriber_list = event_subscriber_list_find(lwis_dev, trigger_event_id);
+	if (!event_subscriber_list || list_empty(&event_subscriber_list->list)) {
+		spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
+		dev_err(lwis_top_dev->base_dev.dev,
+			"Failed to find event subscriber list for %llx\n", trigger_event_id);
+		return -EINVAL;
+	}
+
+	list_for_each_safe (it_event_subscriber, it_event_subscriber_tmp,
+			    &event_subscriber_list->list) {
+		subscribe_info = list_entry(it_event_subscriber, struct lwis_event_subscribe_info,
+					    list_node);
+		if (subscribe_info->subscriber_dev->id == subscriber_device_id) {
+			dev_info(
+				lwis_dev->dev,
+				"Unsubscribe event: %llx, trigger device: %s, subscriber device: %s\n",
+				trigger_event_id, subscribe_info->trigger_dev->name,
+				subscribe_info->subscriber_dev->name);
+			trigger_dev = subscribe_info->trigger_dev;
+			list_del(&subscribe_info->list_node);
+			if (list_empty(&subscribe_info->event_subscriber_list->list)) {
+				/* Clear pending events */
+				list_for_each_entry_safe (pending_event, n,
+							  &lwis_top_dev->emitted_event_list_tasklet,
+							  node) {
+					if (pending_event->trigger_event_id == trigger_event_id) {
+						list_del(&pending_event->node);
+						kfree(pending_event);
+					}
+				}
+				hash_del(&subscribe_info->event_subscriber_list->node);
+				kfree(subscribe_info->event_subscriber_list);
+				kfree(subscribe_info);
+				spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
+				lwis_device_event_update_subscriber(trigger_dev, trigger_event_id,
+								    has_subscriber);
+				return 0;
+			}
+			kfree(subscribe_info);
+		}
+	}
+	spin_unlock_irqrestore(&lwis_top_dev->base_dev.lock, flags);
+	return 0;
+}
+
 static void lwis_top_event_subscribe_init(struct lwis_top_device *lwis_top_dev)
 {
-	hash_init(lwis_top_dev->event_subscriber);
+	hash_init(lwis_top_dev->event_subscribers);
 	INIT_LIST_HEAD(&lwis_top_dev->emitted_event_list_tasklet);
 	tasklet_init(&lwis_top_dev->subscribe_tasklet, subscribe_tasklet_func,
 		     (unsigned long)lwis_top_dev);
@@ -244,6 +326,8 @@
 static void lwis_top_event_subscribe_clear(struct lwis_device *lwis_dev)
 {
 	struct lwis_top_device *lwis_top_dev = (struct lwis_top_device *)lwis_dev;
+	struct lwis_event_subscriber_list *event_subscriber_list;
+	struct list_head *it_event_subscriber, *it_event_subscriber_tmp;
 	struct lwis_event_subscribe_info *subscribe_info;
 	struct hlist_node *tmp;
 	struct lwis_trigger_event_info *pending_event, *n;
@@ -252,9 +336,19 @@
 
 	spin_lock_irqsave(&lwis_top_dev->base_dev.lock, flags);
 	/* Clean up subscription table */
-	hash_for_each_safe (lwis_top_dev->event_subscriber, i, tmp, subscribe_info, node) {
-		hash_del(&subscribe_info->node);
-		kfree(subscribe_info);
+	hash_for_each_safe (lwis_top_dev->event_subscribers, i, tmp, event_subscriber_list, node) {
+		list_for_each_safe (it_event_subscriber, it_event_subscriber_tmp,
+				    &event_subscriber_list->list) {
+			subscribe_info = list_entry(it_event_subscriber,
+						    struct lwis_event_subscribe_info, list_node);
+			/* Remove the subscriber from the list */
+			list_del(&subscribe_info->list_node);
+			if (list_empty(&subscribe_info->event_subscriber_list->list)) {
+				hash_del(&subscribe_info->event_subscriber_list->node);
+				kfree(subscribe_info->event_subscriber_list);
+			}
+			kfree(subscribe_info);
+		}
 	}
 
 	/* Clean up emitted event list */
@@ -282,7 +376,10 @@
 	int i;
 	uint64_t reg_value;
 
-	BUG_ON(!entry);
+	if (!entry) {
+		dev_err(top_dev->base_dev.dev, "IO entry is NULL.\n");
+		return -EINVAL;
+	}
 
 	if (entry->type == LWIS_IO_ENTRY_READ) {
 		if (entry->rw.offset >= SCRATCH_MEMORY_SIZE) {
@@ -381,7 +478,7 @@
 	top_dev->base_dev.subscribe_ops = top_subscribe_ops;
 
 	/* Call the base device probe function */
-	ret = lwis_base_probe((struct lwis_device *)top_dev, plat_dev);
+	ret = lwis_base_probe(&top_dev->base_dev, plat_dev);
 	if (ret) {
 		pr_err("Error in lwis base probe\n");
 		goto error_probe;
@@ -391,12 +488,21 @@
 	ret = lwis_top_device_setup(top_dev);
 	if (ret) {
 		dev_err(top_dev->base_dev.dev, "Error in top device initialization\n");
-		lwis_base_unprobe((struct lwis_device *)top_dev);
+		lwis_base_unprobe(&top_dev->base_dev);
 		goto error_probe;
 	}
 
 	lwis_top_event_subscribe_init(top_dev);
 
+	/* Create associated kworker threads */
+	ret = lwis_create_kthread_workers(&top_dev->base_dev, "lwis_top_trans_kthread",
+					 "lwis_top_prd_io_kthread");
+	if (ret) {
+		dev_err(top_dev->base_dev.dev, "Failed to create lwis_top_kthread");
+		lwis_base_unprobe(&top_dev->base_dev);
+		goto error_probe;
+	}
+
 	return 0;
 
 error_probe:
diff --git a/lwis_device_top.h b/lwis_device_top.h
index 354f376..3800f95 100644
--- a/lwis_device_top.h
+++ b/lwis_device_top.h
@@ -25,8 +25,8 @@
 	 * top device.
 	 */
 	uint8_t scratch_mem[SCRATCH_MEMORY_SIZE];
-	/* Hash table for event subscriber */
-	DECLARE_HASHTABLE(event_subscriber, EVENT_HASH_BITS);
+	/* Hash table of event subscribers keyed by trigger event id */
+	DECLARE_HASHTABLE(event_subscribers, EVENT_HASH_BITS);
 
 	/* Subscription tasklet */
 	struct tasklet_struct subscribe_tasklet;
diff --git a/lwis_dt.c b/lwis_dt.c
index 76fac6e..ba6074c 100644
--- a/lwis_dt.c
+++ b/lwis_dt.c
@@ -36,13 +36,11 @@
 {
 	int count;
 	struct device *dev;
-	struct device_node *dev_node;
 	struct gpio_descs *list;
 
 	*is_present = false;
 
 	dev = &lwis_dev->plat_dev->dev;
-	dev_node = dev->of_node;
 
 	count = gpiod_count(dev, name);
 
@@ -64,6 +62,114 @@
 	return 0;
 }
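+/*
+ * Parse the "irq" GPIO list along with the matching "irq-gpios-names" and
+ * "irq-gpios-events" device tree properties, and register the GPIOs as
+ * interrupt sources for this device.
+ */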
 
+static int parse_irq_gpios(struct lwis_device *lwis_dev)
+{
+	int count;
+	int name_count;
+	int event_count;
+	int ret;
+	struct device *dev;
+	struct device_node *dev_node;
+	struct gpio_descs *gpios;
+	const char *name;
+	char *irq_gpios_names = NULL;
+	u64 *irq_gpios_events = NULL;
+	int i;
+
+	/* Initialize the data structure */
+	strlcpy(lwis_dev->irq_gpios_info.name, "irq", LWIS_MAX_NAME_STRING_LEN);
+	lwis_dev->irq_gpios_info.gpios = NULL;
+	lwis_dev->irq_gpios_info.irq_list = NULL;
+	lwis_dev->irq_gpios_info.is_shared = false;
+	lwis_dev->irq_gpios_info.is_pulse = false;
+
+	dev = &lwis_dev->plat_dev->dev;
+	count = gpiod_count(dev, "irq");
+	/* No irq GPIO pins found, just return */
+	if (count <= 0) {
+		return 0;
+	}
+
+	dev_node = dev->of_node;
+	name_count = of_property_count_strings(dev_node, "irq-gpios-names");
+	event_count = of_property_count_elems_of_size(dev_node, "irq-gpios-events", sizeof(u64));
+	if (count != event_count || count != name_count) {
+		pr_err("Counts of irq-gpios-* properties do not match\n");
+		return -EINVAL;
+	}
+
+	gpios = lwis_gpio_list_get(dev, "irq");
+	if (IS_ERR(gpios)) {
+		pr_err("Error parsing irq GPIO list (%ld)\n", PTR_ERR(gpios));
+		return PTR_ERR(gpios);
+	}
+	lwis_dev->irq_gpios_info.gpios = gpios;
+
+	irq_gpios_names = kmalloc(LWIS_MAX_NAME_STRING_LEN * name_count, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(irq_gpios_names)) {
+		pr_err("Allocating name list failed\n");
+		ret = -ENOMEM;
+		goto error_parse_irq_gpios;
+	}
+
+	for (i = 0; i < name_count; ++i) {
+		ret = of_property_read_string_index(dev_node, "irq-gpios-names", i, &name);
+		if (ret < 0) {
+			pr_err("Error getting GPIO irq name list (%d)\n", ret);
+			goto error_parse_irq_gpios;
+		}
+		strlcpy(irq_gpios_names + i * LWIS_MAX_NAME_STRING_LEN, name,
+			LWIS_MAX_NAME_STRING_LEN);
+	}
+
+	ret = lwis_gpio_list_to_irqs(lwis_dev, &lwis_dev->irq_gpios_info, irq_gpios_names);
+	if (ret) {
+		pr_err("Error getting GPIO irq list (%d)\n", ret);
+		goto error_parse_irq_gpios;
+	}
+
+	irq_gpios_events = kmalloc(sizeof(u64) * event_count, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(irq_gpios_events)) {
+		pr_err("Allocating event list failed\n");
+		ret = -ENOMEM;
+		goto error_parse_irq_gpios;
+	}
+
+	event_count = of_property_read_variable_u64_array(
+		dev_node, "irq-gpios-events", irq_gpios_events, event_count, event_count);
+	if (event_count != count) {
+		pr_err("Error getting irq-gpios-events: %d\n", event_count);
+		ret = event_count;
+		goto error_parse_irq_gpios;
+	}
+
+	for (i = 0; i < event_count; ++i) {
+		ret = lwis_interrupt_set_gpios_event_info(lwis_dev->irq_gpios_info.irq_list, i,
+							  irq_gpios_events[i]);
+		if (ret) {
+			pr_err("Error setting event info for gpios interrupt %d %d\n", i, ret);
+			goto error_parse_irq_gpios;
+		}
+	}
+
+	kfree(irq_gpios_names);
+	kfree(irq_gpios_events);
+	return 0;
+
+error_parse_irq_gpios:
+	if (lwis_dev->irq_gpios_info.gpios) {
+		lwis_gpio_list_put(lwis_dev->irq_gpios_info.gpios, dev);
+		lwis_dev->irq_gpios_info.gpios = NULL;
+	}
+	if (irq_gpios_names) {
+		kfree(irq_gpios_names);
+	}
+	if (irq_gpios_events) {
+		kfree(irq_gpios_events);
+	}
+	return ret;
+}
+
 static int parse_settle_time(struct lwis_device *lwis_dev)
 {
 	struct device_node *dev_node;
@@ -284,6 +390,7 @@
 
 	return critical_irq_events_num;
 }
+
 static int parse_interrupts(struct lwis_device *lwis_dev)
 {
 	int i;
@@ -553,7 +660,6 @@
 
 static void parse_bitwidths(struct lwis_device *lwis_dev)
 {
-	int ret;
 	struct device *dev;
 	struct device_node *dev_node;
 	u32 addr_bitwidth = 32;
@@ -562,12 +668,12 @@
 	dev = &(lwis_dev->plat_dev->dev);
 	dev_node = dev->of_node;
 
-	ret = of_property_read_u32(dev_node, "reg-addr-bitwidth", &addr_bitwidth);
+	of_property_read_u32(dev_node, "reg-addr-bitwidth", &addr_bitwidth);
 #ifdef LWIS_DT_DEBUG
 	pr_info("Addr bitwidth set to%s: %d\n", ret ? " default" : "", addr_bitwidth);
 #endif
 
-	ret = of_property_read_u32(dev_node, "reg-value-bitwidth", &value_bitwidth);
+	of_property_read_u32(dev_node, "reg-value-bitwidth", &value_bitwidth);
 #ifdef LWIS_DT_DEBUG
 	pr_info("Value bitwidth set to%s: %d\n", ret ? " default" : "", value_bitwidth);
 #endif
@@ -675,6 +781,7 @@
 			seq_item_name = lwis_dev->power_up_sequence->seq_info[i].name;
 
 			gpios_info->gpios = NULL;
+			gpios_info->irq_list = NULL;
 			strlcpy(gpios_info->name, seq_item_name, LWIS_MAX_NAME_STRING_LEN);
 
 			if (strncmp(SHARED_STRING, seq_item_name, strlen(SHARED_STRING)) == 0) {
@@ -841,9 +948,13 @@
 	struct device_node *dev_node;
 
 	dev_node = lwis_dev->plat_dev->dev.of_node;
-	lwis_dev->adjust_thread_priority = 0;
+	lwis_dev->transaction_thread_priority = 0;
+	lwis_dev->periodic_io_thread_priority = 0;
 
-	of_property_read_u32(dev_node, "thread-priority", &lwis_dev->adjust_thread_priority);
+	of_property_read_u32(dev_node, "transaction-thread-priority",
+			     &lwis_dev->transaction_thread_priority);
+	of_property_read_u32(dev_node, "periodic-io-thread-priority",
+			     &lwis_dev->periodic_io_thread_priority);
 
 	return 0;
 }
@@ -852,8 +963,6 @@
 {
 	struct device *dev;
 	struct device_node *dev_node;
-	struct property *iommus;
-	int iommus_len = 0;
 	const char *name_str;
 	int ret = 0;
 
@@ -892,6 +1001,12 @@
 		return ret;
 	}
 
+	ret = parse_irq_gpios(lwis_dev);
+	if (ret) {
+		pr_err("Error parsing irq-gpios\n");
+		return ret;
+	}
+
 	ret = parse_power_up_seqs(lwis_dev);
 	if (ret) {
 		pr_err("Error parsing power-up-seqs\n");
@@ -954,9 +1069,6 @@
 
 	parse_bitwidths(lwis_dev);
 
-	iommus = of_find_property(dev_node, "iommus", &iommus_len);
-	lwis_dev->has_iommu = iommus && iommus_len;
-
 	lwis_dev->bts_scenario_name = NULL;
 	of_property_read_string(dev_node, "bts-scenario", &lwis_dev->bts_scenario_name);
 
diff --git a/lwis_event.c b/lwis_event.c
index 001d65b..5702e17 100644
--- a/lwis_event.c
+++ b/lwis_event.c
@@ -21,6 +21,18 @@
 /* Maximum number of pending events in the event queues */
 #define MAX_NUM_PENDING_EVENTS 2048
 
+/* Exposes the device id embedded in the event id */
+#define EVENT_OWNER_DEVICE_ID(x) (((x) >> LWIS_EVENT_ID_EVENT_CODE_LEN) & 0xFFFF)
+
+#define lwis_dev_err_ratelimited(dev, fmt, ...)					\
+	do {									\
+		static int64_t timestamp = 0;					\
+		if (ktime_to_ns(lwis_get_time()) - timestamp > 200000000LL) {	\
+			dev_err(dev, fmt, ##__VA_ARGS__);			\
+			timestamp = ktime_to_ns(lwis_get_time());		\
+		}								\
+	} while (0)
+
 /*
  * lwis_client_event_state_find_locked: Looks through the provided client's
  * event state list and tries to find a lwis_client_event_state object with the
@@ -262,17 +274,12 @@
 	return state;
 }
 
-static int lwis_client_event_get_trigger_device_id(int64_t event_id)
-{
-	return (event_id >> LWIS_EVENT_ID_EVENT_CODE_LEN) & 0xFFFF;
-}
-
 static int lwis_client_event_subscribe(struct lwis_client *lwis_client, int64_t trigger_event_id)
 {
 	int ret = 0;
 	struct lwis_device *lwis_dev = lwis_client->lwis_dev;
 	struct lwis_device *trigger_device;
-	int trigger_device_id = lwis_client_event_get_trigger_device_id(trigger_event_id);
+	int trigger_device_id = EVENT_OWNER_DEVICE_ID(trigger_event_id);
 
 	/* Check if top device probe failed */
 	if (lwis_dev->top_dev == NULL) {
@@ -280,12 +287,6 @@
 		return -EINVAL;
 	}
 
-	if ((trigger_event_id & LWIS_TRANSACTION_EVENT_FLAG) ||
-	    (trigger_event_id & LWIS_TRANSACTION_FAILURE_EVENT_FLAG)) {
-		dev_err(lwis_dev->dev, "Not support SW event subscription\n");
-		return -EINVAL;
-	}
-
 	trigger_device = lwis_find_dev_by_id(trigger_device_id);
 	if (!trigger_device) {
 		dev_err(lwis_dev->dev, "Device id : %d doesn't match to any device\n",
@@ -293,16 +294,17 @@
 		return -EINVAL;
 	}
 
-	/* Create event state to trigger/receiver device
+	/* Create event state for the trigger/subscriber devices.
 	 * Because of driver initialize in user space is sequential, it's
-	 * possible that receiver device subscribe an event before trigger
+	 * possible that a subscriber device subscribes to an event before the trigger
 	 * device set it up
 	 */
 	if (IS_ERR_OR_NULL(lwis_device_event_state_find_or_create(lwis_dev, trigger_event_id)) ||
 	    IS_ERR_OR_NULL(lwis_client_event_state_find_or_create(lwis_client, trigger_event_id)) ||
 	    IS_ERR_OR_NULL(
 		    lwis_device_event_state_find_or_create(trigger_device, trigger_event_id))) {
-		dev_err(lwis_dev->dev, "Failed to add event id 0x%llx to trigger/receiver device\n",
+		dev_err(lwis_dev->dev,
+			"Failed to add event id 0x%llx to trigger/subscriber device\n",
 			trigger_event_id);
 
 		return -EINVAL;
@@ -345,6 +347,30 @@
 	return ret;
 }
 
+static int check_event_control_flags(struct lwis_client *lwis_client, int64_t event_id,
+				     uint64_t old_flags, uint64_t new_flags)
+{
+	if (EVENT_OWNER_DEVICE_ID(event_id) == lwis_client->lwis_dev->id) {
+		if (event_id & LWIS_HW_IRQ_EVENT_FLAG &&
+		    (new_flags & LWIS_EVENT_CONTROL_FLAG_QUEUE_ENABLE) &&
+		    !(new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) &&
+		    !(new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE)) {
+			dev_err(lwis_client->lwis_dev->dev,
+				"QUEUE_ENABLE without IRQ_ENABLE is not allowed for HW event: 0x%llx\n",
+				event_id);
+			return -EINVAL;
+		}
+	} else {
+		/* b/187758268 for fixing the hard LOCKUP when running LWIS cross-device tests. */
+		if (lwis_client->lwis_dev->type == DEVICE_TYPE_TOP) {
+			dev_err(lwis_client->lwis_dev->dev,
+				"Disallow top device being the subscriber device\n");
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
 int lwis_client_event_control_set(struct lwis_client *lwis_client,
 				  const struct lwis_event_control *control)
 {
@@ -362,6 +388,12 @@
 	old_flags = state->event_control.flags;
 	new_flags = control->flags;
 	if (old_flags != new_flags) {
+		ret = check_event_control_flags(lwis_client, control->event_id, old_flags,
+						new_flags);
+		if (ret) {
+			return ret;
+		}
+
 		state->event_control.flags = new_flags;
 		ret = lwis_device_event_flags_updated(lwis_client->lwis_dev, control->event_id,
 						      old_flags, new_flags);
@@ -371,17 +403,7 @@
 			return ret;
 		}
 
-		if (lwis_client_event_get_trigger_device_id(control->event_id) !=
-		    lwis_client->lwis_dev->id) {
-			/* b/187758268 for fixing the hard LOCKUP
-			 * when running LWIS cross-device tests.
-			 */
-			if (lwis_client->lwis_dev->type == DEVICE_TYPE_TOP) {
-				dev_err(lwis_client->lwis_dev->dev,
-					"Disallow top device being the receiver device\n");
-				return -EPERM;
-			}
-
+		if (EVENT_OWNER_DEVICE_ID(control->event_id) != lwis_client->lwis_dev->id) {
 			if (new_flags != 0) {
 				ret = lwis_client_event_subscribe(lwis_client, control->event_id);
 				if (ret) {
@@ -661,14 +683,18 @@
 	/* Disable IRQs and lock the lock */
 	spin_lock_irqsave(&lwis_dev->lock, flags);
 	/* Are we turning on the event? */
-	if (!(old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) &&
-	    (new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE)) {
+	if ((!(old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) ||
+	     !(old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE)) &&
+	    ((new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) ||
+	     (new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE))) {
 		state->enable_counter++;
 		event_enabled = true;
 		call_enable_cb = (state->enable_counter == 1);
 
-	} else if ((old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) &&
-		   !(new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE)) {
+	} else if (((old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) ||
+		    (old_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE)) &&
+		   (!(new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE) ||
+		    !(new_flags & LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE))) {
 		state->enable_counter--;
 		event_enabled = false;
 		call_enable_cb = (state->enable_counter == 0);
@@ -869,8 +895,7 @@
 			}
 			ret = lwis_client_event_push_back(lwis_client, event);
 			if (ret) {
-				dev_err_ratelimited(
-					lwis_dev->dev,
+				lwis_dev_err_ratelimited(lwis_dev->dev,
 					"Failed to push event to queue: ID 0x%llx Counter %lld\n",
 					event_id, event_counter);
 				kfree(event);
@@ -905,9 +930,9 @@
 	ret = lwis_device_event_emit_impl(lwis_dev, event_id, payload, payload_size,
 					  &pending_events, in_irq);
 	if (ret) {
-		dev_err_ratelimited(lwis_dev->dev,
-				    "lwis_device_event_emit_impl failed: event ID 0x%llx\n",
-				    event_id);
+		lwis_dev_err_ratelimited(lwis_dev->dev,
+			"lwis_device_event_emit_impl failed: event ID 0x%llx\n",
+			event_id);
 		return ret;
 	}
 
@@ -1049,8 +1074,7 @@
 			event->event_info.payload_size = 0;
 			event->event_info.payload_buffer = NULL;
 			if (lwis_client_event_push_back(lwis_client, event)) {
-				dev_err_ratelimited(
-					lwis_dev->dev,
+				lwis_dev_err_ratelimited(lwis_dev->dev,
 					"Failed to push event to queue: ID 0x%llx Counter %lld\n",
 					event_id, event_counter);
 				kfree(event);
@@ -1108,9 +1132,9 @@
 			event->event_info.payload_buffer = NULL;
 		}
 		if (lwis_client_error_event_push_back(lwis_client, event)) {
-			dev_err_ratelimited(lwis_dev->dev,
-					    "Failed to push error event to queue: ID 0x%llx\n",
-					    event_id);
+			lwis_dev_err_ratelimited(lwis_dev->dev,
+				"Failed to push error event to queue: ID 0x%llx\n",
+				event_id);
 			kfree(event);
 			return;
 		}
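
The subscription and flag-check paths above key off the device id packed into the upper bits of each 64-bit event id. Below is a standalone sketch of that packing, assuming LWIS_EVENT_ID_EVENT_CODE_LEN is 32 (the value implied by the bit[47..32] comment and shift in lwis_interrupt.c); the helper names and the sample device id are illustrative, not part of the driver.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: event code in the low 32 bits, owning device id in
     * bits [47..32]; LWIS_EVENT_ID_EVENT_CODE_LEN is taken to be 32. */
    #define EVENT_CODE_LEN 32

    static int64_t make_event_id(int device_id, int64_t event_code)
    {
        return ((int64_t)(device_id & 0xFFFF) << EVENT_CODE_LEN) | event_code;
    }

    static int owner_device_id(int64_t event_id)
    {
        return (event_id >> EVENT_CODE_LEN) & 0xFFFF;
    }

    int main(void)
    {
        int64_t id = make_event_id(5, 0x1234);

        /* Prints: owner=5 code=0x1234 */
        printf("owner=%d code=0x%llx\n", owner_device_id(id),
               (unsigned long long)(id & 0xFFFFFFFFLL));
        return 0;
    }
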
diff --git a/lwis_gpio.c b/lwis_gpio.c
index 100fef1..0d68658 100644
--- a/lwis_gpio.c
+++ b/lwis_gpio.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 
 #include "lwis_gpio.h"
+#include "lwis_interrupt.h"
 
 /* debug function */
 void lwis_gpio_list_print(char *name, struct gpio_descs *gpios)
@@ -101,6 +102,44 @@
 	return 0;
 }
 
+int lwis_gpio_list_to_irqs(struct lwis_device *lwis_dev, struct lwis_gpios_info *gpios_info,
+			   char *irq_gpios_names)
+{
+	struct gpio_descs *gpios;
+	struct lwis_interrupt_list *irq_list;
+	int i;
+	int irq;
+
+	if (!lwis_dev || !gpios_info) {
+		return -EINVAL;
+	}
+	gpios = gpios_info->gpios;
+	if (!gpios) {
+		return 0;
+	}
+
+	irq_list = lwis_interrupt_list_alloc(lwis_dev, gpios->ndescs);
+	if (IS_ERR(irq_list)) {
+		pr_err("Failed to allocate irq list\n");
+		return PTR_ERR(irq_list);
+	}
+
+	for (i = 0; i < gpios->ndescs; ++i) {
+		char *name;
+		irq = gpiod_to_irq(gpios->desc[i]);
+		if (irq < 0) {
+			pr_err("gpio to irq failed (%d)\n", irq);
+			lwis_interrupt_list_free(irq_list);
+			return irq;
+		}
+		name = irq_gpios_names + i * LWIS_MAX_NAME_STRING_LEN;
+		lwis_interrupt_get_gpio_irq(irq_list, i, name, irq);
+	}
+
+	gpios_info->irq_list = irq_list;
+	return 0;
+}
+
 struct lwis_gpios_list *lwis_gpios_list_alloc(int count)
 {
 	struct lwis_gpios_list *list;
@@ -134,6 +173,9 @@
 		return;
 	}
 
+	if (list->gpios_info && list->gpios_info->irq_list) {
+		lwis_interrupt_list_free(list->gpios_info->irq_list);
+	}
 	if (list->gpios_info) {
 		kfree(list->gpios_info);
 	}
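
lwis_gpio_list_to_irqs() takes the GPIO names as one flat character buffer with a fixed-size slot per descriptor, which is how parse_irq_gpios() packs them before calling it. A user-space sketch of that layout follows; MAX_NAME_LEN and the sample names are stand-ins for LWIS_MAX_NAME_STRING_LEN and real irq-gpios-names values.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_NAME_LEN 32 /* stand-in for LWIS_MAX_NAME_STRING_LEN */

    int main(void)
    {
        const char *src[] = { "fsync", "strobe", "vsync" }; /* illustrative names */
        int count = 3, i;
        /* One contiguous buffer, one fixed-size slot per GPIO name,
         * matching irq_gpios_names in parse_irq_gpios(). */
        char *names = calloc(count, MAX_NAME_LEN);

        if (!names)
            return 1;
        for (i = 0; i < count; i++)
            strncpy(names + i * MAX_NAME_LEN, src[i], MAX_NAME_LEN - 1);

        /* Consumers index the buffer the same way lwis_gpio_list_to_irqs() does. */
        for (i = 0; i < count; i++)
            printf("gpio %d -> %s\n", i, names + i * MAX_NAME_LEN);

        free(names);
        return 0;
    }
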
diff --git a/lwis_gpio.h b/lwis_gpio.h
index 1d6b139..cfccd27 100644
--- a/lwis_gpio.h
+++ b/lwis_gpio.h
@@ -13,6 +13,7 @@
 
 #include <linux/gpio/consumer.h>
 #include "lwis_commands.h"
+#include "lwis_interrupt.h"
 
 /*
  * struct lwis_gpios_info
@@ -23,6 +24,7 @@
 	bool is_shared;
 	bool is_pulse;
 	struct gpio_descs *gpios;
+	struct lwis_interrupt_list *irq_list;
 };
 
 /*
@@ -71,6 +73,12 @@
 int lwis_gpio_list_set_input(struct gpio_descs *gpios);
 
 /*
+ * Get the IRQ list corresponding to the GPIO list
+ */
+int lwis_gpio_list_to_irqs(struct lwis_device *lwis_dev, struct lwis_gpios_info *gpios_info,
+			   char *irq_gpios_names);
+
+/*
  *  Allocate an instance of the lwis_gpios_info and initialize
  *  the data structures according to the number of lwis_gpios_info
  *  specified.
diff --git a/lwis_i2c.c b/lwis_i2c.c
index 83d584d..cb30e97 100644
--- a/lwis_i2c.c
+++ b/lwis_i2c.c
@@ -377,7 +377,11 @@
 {
 	int ret;
 	uint64_t reg_value;
-	BUG_ON(!entry);
+
+	if (!entry) {
+		dev_err(i2c->base_dev.dev, "IO entry is NULL.\n");
+		return -EINVAL;
+	}
 
 	if (entry->type == LWIS_IO_ENTRY_READ) {
 		return i2c_read(i2c, entry->rw.offset, &entry->rw.val);
diff --git a/lwis_interrupt.c b/lwis_interrupt.c
index 07df492..0acf8ba 100644
--- a/lwis_interrupt.c
+++ b/lwis_interrupt.c
@@ -37,6 +37,7 @@
 };
 
 static irqreturn_t lwis_interrupt_event_isr(int irq_number, void *data);
+static irqreturn_t lwis_interrupt_gpios_event_isr(int irq_number, void *data);
 
 struct lwis_interrupt_list *lwis_interrupt_list_alloc(struct lwis_device *lwis_dev, int count)
 {
@@ -88,6 +89,7 @@
 		       struct platform_device *plat_dev)
 {
 	int irq;
+	int ret = 0;
 
 	if (!list || index < 0 || index >= list->count) {
 		return -EINVAL;
@@ -102,14 +104,51 @@
 	/* Initialize the spinlock */
 	spin_lock_init(&list->irq[index].lock);
 	list->irq[index].irq = irq;
-	list->irq[index].name = name;
+	strlcpy(list->irq[index].name, name, IRQ_FULL_NAME_LENGTH);
 	snprintf(list->irq[index].full_name, IRQ_FULL_NAME_LENGTH, "lwis-%s:%s",
 		 list->lwis_dev->name, name);
 	list->irq[index].has_events = false;
 	list->irq[index].lwis_dev = list->lwis_dev;
 
-	request_irq(irq, lwis_interrupt_event_isr, IRQF_SHARED, list->irq[index].full_name,
-		    &list->irq[index]);
+	ret = request_irq(irq, lwis_interrupt_event_isr, IRQF_SHARED, list->irq[index].full_name,
+			  &list->irq[index]);
+	if (ret) {
+		dev_err(list->lwis_dev->dev, "Failed to request IRQ %d\n", irq);
+		return ret;
+	}
+
+	if (lwis_plaform_set_default_irq_affinity(list->irq[index].irq) != 0) {
+		dev_warn(list->lwis_dev->dev, "Interrupt %s cannot set affinity.\n",
+			 list->irq[index].full_name);
+	}
+
+	return 0;
+}
+
+int lwis_interrupt_get_gpio_irq(struct lwis_interrupt_list *list, int index, char *name,
+				int gpio_irq)
+{
+	int ret = 0;
+
+	if (!list || index < 0 || index >= list->count || gpio_irq <= 0) {
+		return -EINVAL;
+	}
+
+	/* Initialize the spinlock */
+	spin_lock_init(&list->irq[index].lock);
+	list->irq[index].irq = gpio_irq;
+	strlcpy(list->irq[index].name, name, IRQ_FULL_NAME_LENGTH);
+	snprintf(list->irq[index].full_name, IRQ_FULL_NAME_LENGTH, "lwis-%s:%s",
+		 list->lwis_dev->name, name);
+	list->irq[index].has_events = false;
+	list->irq[index].lwis_dev = list->lwis_dev;
+
+	ret = request_irq(gpio_irq, lwis_interrupt_gpios_event_isr, IRQF_SHARED,
+			  list->irq[index].full_name, &list->irq[index]);
+	if (ret) {
+		dev_err(list->lwis_dev->dev, "Failed to request GPIO IRQ\n");
+		return ret;
+	}
 
 	if (lwis_plaform_set_default_irq_affinity(list->irq[index].irq) != 0) {
 		dev_warn(list->lwis_dev->dev, "Interrupt %s cannot set affinity.\n",
@@ -125,7 +164,10 @@
 	/* Our hash iterator */
 	struct lwis_single_event_info *p;
 
-	BUG_ON(!irq);
+	if (!irq) {
+		pr_err("irq is NULL.\n");
+		return NULL;
+	}
 
 	/* Iterate through the hash bucket for this event_id */
 	hash_for_each_possible (irq->event_infos, p, node, event_id) {
@@ -137,13 +179,53 @@
 	return NULL;
 }
 
+static int lwis_interrupt_set_mask(struct lwis_interrupt *irq, int int_reg_bit, bool is_set)
+{
+	int ret = 0;
+	uint64_t mask_value = 0;
+
+	if (!irq) {
+		pr_err("irq is NULL.\n");
+		return -EINVAL;
+	}
+
+	/* Read the mask register */
+	ret = lwis_device_single_register_read(irq->lwis_dev, irq->irq_reg_bid, irq->irq_mask_reg,
+					       &mask_value, irq->irq_reg_access_size);
+	if (ret) {
+		pr_err("Failed to read IRQ mask register: %d\n", ret);
+		mutex_unlock(&irq->lwis_dev->reg_rw_lock);
+		return ret;
+	}
+
+	/* Unmask the interrupt */
+	if (is_set) {
+		mask_value |= (1ULL << int_reg_bit);
+	} else {
+		mask_value &= ~(1ULL << int_reg_bit);
+	}
+
+	/* Write the mask register */
+	ret = lwis_device_single_register_write(irq->lwis_dev, irq->irq_reg_bid, irq->irq_mask_reg,
+						mask_value, irq->irq_reg_access_size);
+	if (ret) {
+		pr_err("Failed to write IRQ mask register: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
 static irqreturn_t lwis_interrupt_event_isr(int irq_number, void *data)
 {
 	int ret;
 	struct lwis_interrupt *irq = (struct lwis_interrupt *)data;
+	struct lwis_client_event_state *event_state;
 	struct lwis_single_event_info *event;
 	struct list_head *p;
 	uint64_t source_value, reset_value = 0;
+	struct lwis_client *lwis_client;
+	struct list_head *t, *n;
 #ifdef LWIS_INTERRUPT_DEBUG
 	uint64_t mask_value;
 #endif
@@ -190,6 +272,23 @@
 						    "Caught critical IRQ(%s) event(0x%llx)\n",
 						    irq->name, event->event_id);
 			}
+			/* If enabled once, set interrupt mask to false */
+			list_for_each_safe (t, n, &irq->lwis_dev->clients) {
+				lwis_client = list_entry(t, struct lwis_client, node);
+				hash_for_each_possible (lwis_client->event_states, event_state,
+							node, event->event_id) {
+					if (event_state->event_control.event_id ==
+						    event->event_id &&
+					    event_state->event_control.flags &
+						    LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE) {
+						dev_err(irq->lwis_dev->dev,
+							"IRQ(%s) event(0x%llx) enabled once\n",
+							irq->name, event->event_id);
+						lwis_interrupt_set_mask(irq, event->int_reg_bit,
+									false);
+					}
+				}
+			}
 		}
 
 		/* All enabled and triggered interrupts are handled */
@@ -225,6 +324,24 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t lwis_interrupt_gpios_event_isr(int irq_number, void *data)
+{
+	unsigned long flags;
+	struct lwis_interrupt *irq = (struct lwis_interrupt *)data;
+	struct lwis_single_event_info *event;
+	struct list_head *p;
+
+	spin_lock_irqsave(&irq->lock, flags);
+	list_for_each (p, &irq->enabled_event_infos) {
+		event = list_entry(p, struct lwis_single_event_info, node_enabled);
+		/* Emit the event */
+		lwis_device_event_emit(irq->lwis_dev, event->event_id, NULL, 0, /*in_irq=*/true);
+	}
+	spin_unlock_irqrestore(&irq->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
 int lwis_interrupt_set_event_info(struct lwis_interrupt_list *list, int index,
 				  const char *irq_reg_space, int irq_reg_bid, int64_t *irq_events,
 				  size_t irq_events_num, uint32_t *int_reg_bits,
@@ -236,7 +353,11 @@
 	int i, j;
 	unsigned long flags;
 	bool is_critical = false;
-	BUG_ON(int_reg_bits_num != irq_events_num);
+
+	if (int_reg_bits_num != irq_events_num) {
+		pr_err("reg bits num != irq event num.\n");
+		return -EINVAL;
+	}
 
 	/* Protect the structure */
 	spin_lock_irqsave(&list->irq[index].lock, flags);
@@ -284,8 +405,9 @@
 		if (lwis_interrupt_get_single_event_info_locked(&list->irq[index],
 								new_event->event_id) != NULL) {
 			spin_unlock_irqrestore(&list->irq[index].lock, flags);
-			pr_err("Duplicate event_id: %lld for IRQ: %s\n", new_event->event_id,
-			       list->irq[index].name);
+			dev_err(list->lwis_dev->dev, "Duplicate event_id: %llx for IRQ: %s\n",
+				new_event->event_id, list->irq[index].name);
+			kfree(new_event);
 			return -EINVAL;
 		}
 		/* Let's add the new state object */
@@ -304,46 +426,75 @@
 	return 0;
 }
 
-static int lwis_interrupt_set_mask(struct lwis_interrupt *irq, int int_reg_bit, bool is_set)
+int lwis_interrupt_set_gpios_event_info(struct lwis_interrupt_list *list, int index,
+					int64_t irq_event)
 {
-	int ret = 0;
-	uint64_t mask_value = 0;
-	BUG_ON(!irq);
+	unsigned long flags;
+	struct lwis_single_event_info *new_event;
 
-	/* Read the mask register */
-	ret = lwis_device_single_register_read(irq->lwis_dev, irq->irq_reg_bid, irq->irq_mask_reg,
-					       &mask_value, irq->irq_reg_access_size);
-	if (ret) {
-		pr_err("Failed to read IRQ mask register: %d\n", ret);
-		mutex_unlock(&irq->lwis_dev->reg_rw_lock);
-		return ret;
+	/* Protect the structure */
+	spin_lock_irqsave(&list->irq[index].lock, flags);
+	/* Empty hash table for event infos */
+	hash_init(list->irq[index].event_infos);
+	/* Initialize an empty list for enabled events */
+	INIT_LIST_HEAD(&list->irq[index].enabled_event_infos);
+	spin_unlock_irqrestore(&list->irq[index].lock, flags);
+
+	/* Build the hash table of events we can emit */
+
+	new_event = kzalloc(sizeof(struct lwis_single_event_info), GFP_KERNEL);
+	if (!new_event) {
+		dev_err(list->lwis_dev->dev, "Allocate event info failed\n");
+		return -ENOMEM;
 	}
 
-	/* Unmask the interrupt */
-	if (is_set) {
-		mask_value |= (1ULL << int_reg_bit);
-	} else {
-		mask_value &= ~(1ULL << int_reg_bit);
-	}
+	/* Fill the device id info in event id bit[47..32] */
+	irq_event |= (int64_t)(list->lwis_dev->id & 0xFFFF) << 32;
+	/* Grab the device state outside of the spinlock */
+	new_event->state = lwis_device_event_state_find_or_create(list->lwis_dev, irq_event);
+	new_event->event_id = irq_event;
 
-	/* Write the mask register */
-	ret = lwis_device_single_register_write(irq->lwis_dev, irq->irq_reg_bid, irq->irq_mask_reg,
-						mask_value, irq->irq_reg_access_size);
-	if (ret) {
-		pr_err("Failed to write IRQ mask register: %d\n", ret);
-		return ret;
+	spin_lock_irqsave(&list->irq[index].lock, flags);
+	/* Check for duplicate events */
+	if (lwis_interrupt_get_single_event_info_locked(&list->irq[index], new_event->event_id) !=
+	    NULL) {
+		spin_unlock_irqrestore(&list->irq[index].lock, flags);
+		dev_err(list->lwis_dev->dev, "Duplicate event_id: %llx for IRQ: %s\n",
+			new_event->event_id, list->irq[index].name);
+		kfree(new_event);
+		return -EINVAL;
 	}
+	/* Let's add the new state object */
+	hash_add(list->irq[index].event_infos, &new_event->node, new_event->event_id);
+	spin_unlock_irqrestore(&list->irq[index].lock, flags);
 
-	return ret;
+	/* It might make more sense to make has_events atomic_t instead of
+	 * locking a spinlock to write a boolean, but then we might have to deal
+	 * with barriers, etc. */
+	spin_lock_irqsave(&list->irq[index].lock, flags);
+	/* Set flag that we have events */
+	list->irq[index].has_events = true;
+	spin_unlock_irqrestore(&list->irq[index].lock, flags);
+
+	return 0;
 }
+
 static int lwis_interrupt_single_event_enable_locked(struct lwis_interrupt *irq,
 						     struct lwis_single_event_info *event,
 						     bool enabled)
 {
 	int ret = 0;
 	bool is_set;
-	BUG_ON(!irq);
-	BUG_ON(!event);
+
+	if (!irq) {
+		pr_err("irq is NULL.\n");
+		return -EINVAL;
+	}
+
+	if (!event) {
+		pr_err("event is NULL.\n");
+		return -EINVAL;
+	}
 
 	if (enabled) {
 		list_add_tail(&event->node_enabled, &irq->enabled_event_infos);
@@ -363,7 +514,11 @@
 	int index, ret = -EINVAL;
 	unsigned long flags;
 	struct lwis_single_event_info *event;
-	BUG_ON(!list);
+
+	if (!list) {
+		pr_err("Interrupt list is NULL.\n");
+		return -EINVAL;
+	}
 
 	for (index = 0; index < list->count; index++) {
 		spin_lock_irqsave(&list->irq[index].lock, flags);
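
For GPIO-backed interrupts the registration flow is parse_irq_gpios() -> lwis_gpio_list_to_irqs() -> lwis_interrupt_get_gpio_irq() -> lwis_interrupt_set_gpios_event_info(), and the ISR above clears an event's mask bit after the first hit when the client enabled it with the ONCE flag. The sketch below shows only that mask bookkeeping; the flag value and the mask variable are stand-ins for LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE and the real mask register.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag value; the real LWIS_EVENT_CONTROL_FLAG_IRQ_ENABLE_ONCE
     * is defined in lwis_commands.h and may differ. */
    #define FLAG_IRQ_ENABLE_ONCE (1ULL << 1)

    static uint64_t irq_mask; /* stand-in for the hardware IRQ mask register */

    /* Same |= / &= ~ logic as lwis_interrupt_set_mask(). */
    static void set_mask_bit(int bit, bool is_set)
    {
        if (is_set)
            irq_mask |= (1ULL << bit);
        else
            irq_mask &= ~(1ULL << bit);
    }

    /* Mirrors the ISR behavior for a one-shot event: emit it, then mask it. */
    static void handle_event(int bit, uint64_t client_flags)
    {
        printf("event on bit %d emitted\n", bit);
        if (client_flags & FLAG_IRQ_ENABLE_ONCE)
            set_mask_bit(bit, false);
    }

    int main(void)
    {
        set_mask_bit(3, true);                 /* client enables the event */
        handle_event(3, FLAG_IRQ_ENABLE_ONCE); /* first interrupt fires */
        printf("mask after first hit: 0x%llx\n", (unsigned long long)irq_mask);
        return 0;
    }
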
diff --git a/lwis_interrupt.h b/lwis_interrupt.h
index e705221..3824e8b 100644
--- a/lwis_interrupt.h
+++ b/lwis_interrupt.h
@@ -22,7 +22,7 @@
 struct lwis_interrupt {
 	int irq;
 	/* IRQ name */
-	char *name;
+	char name[IRQ_FULL_NAME_LENGTH];
 	/* Full name consists of both device and irq name */
 	char full_name[IRQ_FULL_NAME_LENGTH];
 	/* Device that owns this interrupt */
@@ -77,13 +77,20 @@
 void lwis_interrupt_list_free(struct lwis_interrupt_list *list);
 
 /*
- *  lwis_interrupt_get: Register the interrupt by name.
- *  Returns: index number (>= 0) if success, -ve if error
+ *  lwis_interrupt_get: Register the interrupt by index.
+ *  Returns: 0 if success, -ve if error
  */
 int lwis_interrupt_get(struct lwis_interrupt_list *list, int index, char *name,
 		       struct platform_device *plat_dev);
 
 /*
+ *  lwis_interrupt_get_gpio_irq: Register the GPIO interrupt by index
+ *  Returns: 0 if success, -ve if error
+ */
+int lwis_interrupt_get_gpio_irq(struct lwis_interrupt_list *list, int index, char *name,
+				int gpio_irq);
+
+/*
  * lwis_interrupt_set_event_info: Provides event-info structure for a given
  * interrupt based on index
  *
@@ -100,6 +107,15 @@
 				  size_t critical_events_num);
 
 /*
+ * lwis_interrupt_set_gpios_event_info: Provides event-info structure for a given
+ * interrupt based on index
+ *
+ * Returns: 0 on success
+ */
+int lwis_interrupt_set_gpios_event_info(struct lwis_interrupt_list *list, int index,
+					int64_t irq_event);
+
+/*
  * lwis_interrupt_event_enable: Handles masking and unmasking interrupts when
  * an event is enabled or disabled
  *
diff --git a/lwis_io_entry.c b/lwis_io_entry.c
new file mode 100644
index 0000000..ab7d5bb
--- /dev/null
+++ b/lwis_io_entry.c
@@ -0,0 +1,65 @@
+/*
+ * Google LWIS I/O Entry Implementation
+ *
+ * Copyright (c) 2021 Google, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME "-ioentry: " fmt
+
+#include <linux/delay.h>
+
+#include "lwis_io_entry.h"
+#include "lwis_util.h"
+
+int lwis_io_entry_poll(struct lwis_device *lwis_dev, struct lwis_io_entry *entry, bool non_blocking)
+{
+	uint64_t val, start;
+	uint64_t timeout_ms = entry->read_assert.timeout_ms;
+	int ret = 0;
+
+	/* Only read and check once if non_blocking */
+	if (non_blocking) {
+		timeout_ms = 0;
+	}
+
+	/* Read until getting the expected value or timeout */
+	val = ~entry->read_assert.val;
+	start = ktime_to_ms(lwis_get_time());
+	while (val != entry->read_assert.val) {
+		ret = lwis_io_entry_read_assert(lwis_dev, entry);
+		if (ret == 0) {
+			break;
+		}
+		if (ktime_to_ms(lwis_get_time()) - start > timeout_ms) {
+			dev_err(lwis_dev->dev, "Polling timed out: block %d offset 0x%llx\n",
+				entry->read_assert.bid, entry->read_assert.offset);
+			return -ETIMEDOUT;
+		}
+		/* Sleep for 1ms */
+		usleep_range(1000, 1000);
+	}
+	return ret;
+}
+
+int lwis_io_entry_read_assert(struct lwis_device *lwis_dev, struct lwis_io_entry *entry)
+{
+	uint64_t val;
+	int ret = 0;
+
+	ret = lwis_device_single_register_read(lwis_dev, entry->read_assert.bid,
+					       entry->read_assert.offset, &val,
+					       lwis_dev->native_value_bitwidth);
+	if (ret) {
+		dev_err(lwis_dev->dev, "Failed to read registers: block %d offset 0x%llx\n",
+			entry->read_assert.bid, entry->read_assert.offset);
+		return ret;
+	}
+	if ((val & entry->read_assert.mask) == (entry->read_assert.val & entry->read_assert.mask)) {
+		return 0;
+	}
+	return -EINVAL;
+}
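
lwis_io_entry_poll() is a read-compare-sleep loop: read the register, compare under read_assert.mask, give up once read_assert.timeout_ms has elapsed, otherwise sleep about 1 ms and retry. A user-space sketch of the same pattern, with the register read replaced by a stub (illustrative only, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Stub for the register read; flips to the expected value on the third call. */
    static uint64_t read_reg(void)
    {
        static int calls;

        return (++calls >= 3) ? 0x1 : 0x0;
    }

    static int64_t now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
    }

    /* Same shape as lwis_io_entry_poll(): compare under a mask, time out after
     * timeout_ms, sleep roughly 1 ms between attempts. */
    static int poll_reg(uint64_t expected, uint64_t mask, int64_t timeout_ms)
    {
        int64_t start = now_ms();

        while ((read_reg() & mask) != (expected & mask)) {
            if (now_ms() - start > timeout_ms)
                return -1; /* the driver returns -ETIMEDOUT here */
            usleep(1000);
        }
        return 0;
    }

    int main(void)
    {
        printf("poll result: %d\n", poll_reg(0x1, 0x1, 100));
        return 0;
    }
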
diff --git a/lwis_io_entry.h b/lwis_io_entry.h
new file mode 100644
index 0000000..a2679da
--- /dev/null
+++ b/lwis_io_entry.h
@@ -0,0 +1,30 @@
+/*
+ * Google LWIS I/O Entry Implementation
+ *
+ * Copyright (c) 2021 Google, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef LWIS_IO_ENTRY_H_
+#define LWIS_IO_ENTRY_H_
+
+#include "lwis_commands.h"
+#include "lwis_device.h"
+
+/*
+ * lwis_io_entry_poll:
+ * Polls a register for a specified time or until it reaches the expected value.
+ */
+int lwis_io_entry_poll(struct lwis_device *lwis_dev, struct lwis_io_entry *entry,
+		       bool non_blocking);
+
+/*
+ * lwis_io_entry_read_assert:
+ * Returns error if a register's value is not as expected.
+ */
+int lwis_io_entry_read_assert(struct lwis_device *lwis_dev, struct lwis_io_entry *entry);
+
+#endif /* LWIS_IO_ENTRY_H_ */
\ No newline at end of file
diff --git a/lwis_ioctl.c b/lwis_ioctl.c
index 198c9eb..68a75c1 100644
--- a/lwis_ioctl.c
+++ b/lwis_ioctl.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
+#include "lwis_allocator.h"
 #include "lwis_buffer.h"
 #include "lwis_commands.h"
 #include "lwis_device.h"
@@ -25,6 +26,7 @@
 #include "lwis_device_ioreg.h"
 #include "lwis_event.h"
 #include "lwis_i2c.h"
+#include "lwis_io_entry.h"
 #include "lwis_ioreg.h"
 #include "lwis_periodic_io.h"
 #include "lwis_platform.h"
@@ -36,7 +38,7 @@
 #define IOCTL_ARG_SIZE(x) _IOC_SIZE(x)
 #define STRINGIFY(x) #x
 
-void lwis_ioctl_pr_err(struct lwis_device *lwis_dev, unsigned int ioctl_type, int errno)
+static void lwis_ioctl_pr_err(struct lwis_device *lwis_dev, unsigned int ioctl_type, int errno)
 {
 	unsigned int type = IOCTL_TO_ENUM(ioctl_type);
 	static char type_name[32];
@@ -71,7 +73,6 @@
 		strlcpy(type_name, STRINGIFY(LWIS_DEVICE_ENABLE), sizeof(type_name));
 		exp_size = IOCTL_ARG_SIZE(LWIS_DEVICE_ENABLE);
 		break;
-
 	case IOCTL_TO_ENUM(LWIS_DEVICE_DISABLE):
 		strlcpy(type_name, STRINGIFY(LWIS_DEVICE_DISABLE), sizeof(type_name));
 		exp_size = IOCTL_ARG_SIZE(LWIS_DEVICE_DISABLE);
@@ -124,6 +125,14 @@
 		strlcpy(type_name, STRINGIFY(LWIS_DPM_GET_CLOCK), sizeof(type_name));
 		exp_size = IOCTL_ARG_SIZE(LWIS_DPM_GET_CLOCK);
 		break;
+	case IOCTL_TO_ENUM(LWIS_PERIODIC_IO_SUBMIT):
+		strlcpy(type_name, STRINGIFY(LWIS_PERIODIC_IO_SUBMIT), sizeof(type_name));
+		exp_size = IOCTL_ARG_SIZE(LWIS_PERIODIC_IO_SUBMIT);
+		break;
+	case IOCTL_TO_ENUM(LWIS_PERIODIC_IO_CANCEL):
+		strlcpy(type_name, STRINGIFY(LWIS_PERIODIC_IO_CANCEL), sizeof(type_name));
+		exp_size = IOCTL_ARG_SIZE(LWIS_PERIODIC_IO_CANCEL);
+		break;
 	default:
 		strlcpy(type_name, "UNDEFINED", sizeof(type_name));
 		exp_size = 0;
@@ -179,7 +188,8 @@
 		/* Save the userspace buffer address */
 		user_buf = read_entry->rw_batch.buf;
 		/* Allocate read buffer */
-		read_entry->rw_batch.buf = kvmalloc(read_entry->rw_batch.size_in_bytes, GFP_KERNEL);
+		read_entry->rw_batch.buf =
+			lwis_allocator_allocate(lwis_dev, read_entry->rw_batch.size_in_bytes);
 		if (!read_entry->rw_batch.buf) {
 			dev_err_ratelimited(lwis_dev->dev,
 					    "Failed to allocate register read buffer\n");
@@ -217,7 +227,8 @@
 
 reg_read_exit:
 	if (batch_mode) {
-		kvfree(read_entry->rw_batch.buf);
+		lwis_allocator_free(lwis_dev, read_entry->rw_batch.buf);
+		read_entry->rw_batch.buf = NULL;
 	}
 	return ret;
 }
@@ -234,7 +245,7 @@
 		user_buf = write_entry->rw_batch.buf;
 		/* Allocate write buffer and copy contents from userspace */
 		write_entry->rw_batch.buf =
-			kvmalloc(write_entry->rw_batch.size_in_bytes, GFP_KERNEL);
+			lwis_allocator_allocate(lwis_dev, write_entry->rw_batch.size_in_bytes);
 		if (!write_entry->rw_batch.buf) {
 			dev_err_ratelimited(lwis_dev->dev,
 					    "Failed to allocate register write buffer\n");
@@ -261,7 +272,8 @@
 
 reg_write_exit:
 	if (batch_mode) {
-		kvfree(write_entry->rw_batch.buf);
+		lwis_allocator_free(lwis_dev, write_entry->rw_batch.buf);
+		write_entry->rw_batch.buf = NULL;
 	}
 	return ret;
 }
@@ -301,14 +313,14 @@
 		dev_err(lwis_dev->dev, "Failed to copy io_entries due to integer overflow.\n");
 		return -EOVERFLOW;
 	}
-	io_entries = kvmalloc(buf_size, GFP_KERNEL);
+	io_entries = lwis_allocator_allocate(lwis_dev, buf_size);
 	if (!io_entries) {
 		dev_err(lwis_dev->dev, "Failed to allocate io_entries buffer\n");
 		return -ENOMEM;
 	}
 	if (copy_from_user(io_entries, (void __user *)k_msg->io_entries, buf_size)) {
 		ret = -EFAULT;
-		kvfree(io_entries);
+		lwis_allocator_free(lwis_dev, io_entries);
 		dev_err(lwis_dev->dev, "Failed to copy io_entries from userspace.\n");
 		return ret;
 	}
@@ -345,10 +357,10 @@
 			ret = register_write(lwis_dev, &io_entries[i]);
 			break;
 		case LWIS_IO_ENTRY_POLL:
-			ret = lwis_entry_poll(lwis_dev, &io_entries[i], /*non_blocking=*/false);
+			ret = lwis_io_entry_poll(lwis_dev, &io_entries[i], /*non_blocking=*/false);
 			break;
 		case LWIS_IO_ENTRY_READ_ASSERT:
-			ret = lwis_entry_read_assert(lwis_dev, &io_entries[i]);
+			ret = lwis_io_entry_read_assert(lwis_dev, &io_entries[i]);
 			break;
 		default:
 			dev_err(lwis_dev->dev, "Unknown io_entry operation\n");
@@ -388,7 +400,7 @@
 
 reg_io_exit:
 	if (k_entries) {
-		kvfree(k_entries);
+		lwis_allocator_free(lwis_dev, k_entries);
 	}
 	return ret;
 }
@@ -554,9 +566,10 @@
 		goto error_locked;
 	}
 
-	/* Clear event queue to make sure there is no stale event from
+	/* Clear event queues to make sure there is no stale event from
 	 * previous session */
 	lwis_client_event_queue_clear(lwis_client);
+	lwis_client_error_event_queue_clear(lwis_client);
 
 	ret = lwis_dev_power_up_locked(lwis_dev);
 	if (ret < 0) {
@@ -650,6 +663,7 @@
 	if (copy_from_user(buffer, (void __user *)echo_msg.msg, echo_msg.size)) {
 		dev_err(lwis_dev->dev, "Failed to copy %zu bytes echo message from user\n",
 			echo_msg.size);
+		kfree(buffer);
 		return -EFAULT;
 	}
 	buffer[echo_msg.size] = '\0';
@@ -657,6 +671,7 @@
 	if (echo_msg.kernel_log) {
 		dev_info(lwis_dev->dev, "LWIS_ECHO: %s\n", buffer);
 	}
+	kfree(buffer);
 	return 0;
 }
 
@@ -674,10 +689,11 @@
 		goto soft_reset_exit;
 	}
 
-	/* Clear event states, event queue and transactions for this client */
+	/* Clear event states, event queues and transactions for this client */
 	mutex_lock(&lwis_dev->client_lock);
 	lwis_client_event_states_clear(lwis_client);
 	lwis_client_event_queue_clear(lwis_client);
+	lwis_client_error_event_queue_clear(lwis_client);
 	device_enabled = lwis_dev->enabled;
 	mutex_unlock(&lwis_dev->client_lock);
 
@@ -707,7 +723,7 @@
 	spin_unlock_irqrestore(&lwis_dev->lock, flags);
 soft_reset_exit:
 	if (k_entries) {
-		kvfree(k_entries);
+		lwis_allocator_free(lwis_dev, k_entries);
 	}
 	return ret;
 }
@@ -888,22 +904,83 @@
 	return ret;
 }
 
-static int construct_transaction(struct lwis_client *client,
-				 struct lwis_transaction_info __user *msg,
-				 struct lwis_transaction **transaction)
+static int construct_io_entry(struct lwis_client *client, struct lwis_io_entry *user_entries,
+			      size_t num_io_entries, struct lwis_io_entry **io_entries)
 {
 	int i;
 	int ret = 0;
 	int last_buf_alloc_idx = -1;
 	size_t entry_size;
-	struct lwis_transaction *k_transaction;
-	struct lwis_transaction_info *user_transaction;
 	struct lwis_io_entry *k_entries;
-	struct lwis_io_entry *user_entries;
 	uint8_t *user_buf;
 	uint8_t *k_buf;
 	struct lwis_device *lwis_dev = client->lwis_dev;
 
+	entry_size = num_io_entries * sizeof(struct lwis_io_entry);
+	if (entry_size / sizeof(struct lwis_io_entry) != num_io_entries) {
+		dev_err(lwis_dev->dev, "Failed to prepare io entries due to integer overflow\n");
+		return -EOVERFLOW;
+	}
+	k_entries = lwis_allocator_allocate(lwis_dev, entry_size);
+	if (!k_entries) {
+		dev_err(lwis_dev->dev, "Failed to allocate io entries\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user((void *)k_entries, (void __user *)user_entries, entry_size)) {
+		ret = -EFAULT;
+		dev_err(lwis_dev->dev, "Failed to copy io entries from user\n");
+		goto error_free_entries;
+	}
+
+	/* For batch writes, need to allocate kernel buffers to deep copy the
+	 * write values. Don't need to do this for batch reads because memory
+	 * will be allocated in the form of lwis_io_result in io processing.
+	 */
+	for (i = 0; i < num_io_entries; ++i) {
+		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
+			user_buf = k_entries[i].rw_batch.buf;
+			k_buf = lwis_allocator_allocate(lwis_dev,
+							k_entries[i].rw_batch.size_in_bytes);
+			if (!k_buf) {
+				dev_err_ratelimited(lwis_dev->dev,
+					"Failed to allocate io write buffer\n");
+				ret = -ENOMEM;
+				goto error_free_buf;
+			}
+			last_buf_alloc_idx = i;
+			k_entries[i].rw_batch.buf = k_buf;
+			if (copy_from_user(k_buf, (void __user *)user_buf,
+					   k_entries[i].rw_batch.size_in_bytes)) {
+				ret = -EFAULT;
+				dev_err_ratelimited(lwis_dev->dev,
+					"Failed to copy io write buffer from userspace\n");
+				goto error_free_buf;
+			}
+		}
+	}
+
+	*io_entries = k_entries;
+	return 0;
+
+error_free_buf:
+	for (i = 0; i <= last_buf_alloc_idx; ++i) {
+		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
+			lwis_allocator_free(lwis_dev, k_entries[i].rw_batch.buf);
+			k_entries[i].rw_batch.buf = NULL;
+		}
+	}
+error_free_entries:
+	lwis_allocator_free(lwis_dev, k_entries);
+	*io_entries = NULL;
+	return ret;
+}
+
+static int construct_transaction(struct lwis_client *client,
+				 struct lwis_transaction_info __user *msg,
+				 struct lwis_transaction **transaction)
+{
+	int ret;
+	struct lwis_transaction *k_transaction;
+	struct lwis_transaction_info *user_transaction;
+	struct lwis_device *lwis_dev = client->lwis_dev;
+
 	k_transaction = kmalloc(sizeof(struct lwis_transaction), GFP_KERNEL);
 	if (!k_transaction) {
 		dev_err(lwis_dev->dev, "Failed to allocate transaction info\n");
@@ -918,78 +995,26 @@
 		goto error_free_transaction;
 	}
 
-	user_entries = k_transaction->info.io_entries;
-	entry_size = k_transaction->info.num_io_entries * sizeof(struct lwis_io_entry);
-	k_entries = kvmalloc(entry_size, GFP_KERNEL);
-	if (!k_entries) {
-		dev_err(lwis_dev->dev, "Failed to allocate transaction entries\n");
-		ret = -ENOMEM;
+	ret = construct_io_entry(client, k_transaction->info.io_entries,
+				 k_transaction->info.num_io_entries,
+				 &k_transaction->info.io_entries);
+	if (ret) {
+		dev_err(lwis_dev->dev, "Failed to prepare lwis io entries for transaction\n");
 		goto error_free_transaction;
 	}
-	k_transaction->info.io_entries = k_entries;
 
-	if (copy_from_user((void *)k_entries, (void __user *)user_entries, entry_size)) {
-		ret = -EFAULT;
-		dev_err(lwis_dev->dev, "Failed to copy transaction entries from user\n");
-		goto error_free_entries;
-	}
+	k_transaction->resp = NULL;
+	INIT_LIST_HEAD(&k_transaction->event_list_node);
+	INIT_LIST_HEAD(&k_transaction->process_queue_node);
 
-	/* For batch writes, need to allocate kernel buffers to deep copy the
-	 * write values. Don't need to do this for batch reads because memory
-	 * will be allocated in the form of lwis_io_result in transaction
-	 * processing.
-	 */
-	for (i = 0; i < k_transaction->info.num_io_entries; ++i) {
-		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			user_buf = k_entries[i].rw_batch.buf;
-			k_buf = kvmalloc(k_entries[i].rw_batch.size_in_bytes, GFP_KERNEL);
-			if (!k_buf) {
-				dev_err_ratelimited(lwis_dev->dev,
-						    "Failed to allocate tx write buffer\n");
-				ret = -ENOMEM;
-				goto error_free_buf;
-			}
-			last_buf_alloc_idx = i;
-			k_entries[i].rw_batch.buf = k_buf;
-			if (copy_from_user(k_buf, (void __user *)user_buf,
-					   k_entries[i].rw_batch.size_in_bytes)) {
-				ret = -EFAULT;
-				dev_err_ratelimited(
-					lwis_dev->dev,
-					"Failed to copy tx write buffer from userspace\n");
-				goto error_free_buf;
-			}
-		}
-	}
 	*transaction = k_transaction;
 	return 0;
 
-error_free_buf:
-	for (i = 0; i <= last_buf_alloc_idx; ++i) {
-		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			kvfree(k_entries[i].rw_batch.buf);
-		}
-	}
-error_free_entries:
-	kvfree(k_entries);
 error_free_transaction:
 	kfree(k_transaction);
 	return ret;
 }
 
-static void free_transaction(struct lwis_transaction *transaction)
-{
-	int i;
-
-	for (i = 0; i < transaction->info.num_io_entries; ++i) {
-		if (transaction->info.io_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			kvfree(transaction->info.io_entries[i].rw_batch.buf);
-		}
-	}
-	kvfree(transaction->info.io_entries);
-	kfree(transaction);
-}
-
 static int ioctl_transaction_submit(struct lwis_client *client,
 				    struct lwis_transaction_info __user *msg)
 {
@@ -1000,24 +1025,20 @@
 	struct lwis_device *lwis_dev = client->lwis_dev;
 
 	ret = construct_transaction(client, msg, &k_transaction);
-	if (ret)
+	if (ret) {
 		return ret;
+	}
 
 	spin_lock_irqsave(&client->transaction_lock, flags);
 	ret = lwis_transaction_submit_locked(client, k_transaction);
-	if (ret) {
-		k_transaction->info.id = LWIS_ID_INVALID;
-		spin_unlock_irqrestore(&client->transaction_lock, flags);
-		if (copy_to_user((void __user *)msg, &k_transaction->info,
-				 sizeof(struct lwis_transaction_info))) {
-			dev_err_ratelimited(lwis_dev->dev, "Failed to return info to userspace\n");
-		}
-		free_transaction(k_transaction);
-		return ret;
-	}
 	k_transaction_info = k_transaction->info;
 	spin_unlock_irqrestore(&client->transaction_lock, flags);
 
+	if (ret) {
+		k_transaction_info.id = LWIS_ID_INVALID;
+		lwis_transaction_free(lwis_dev, k_transaction);
+	}
+
 	if (copy_to_user((void __user *)msg, &k_transaction_info,
 			 sizeof(struct lwis_transaction_info))) {
 		ret = -EFAULT;
@@ -1033,7 +1054,7 @@
 {
 	int ret = 0;
 	unsigned long flags;
-	struct lwis_transaction *k_transaction;
+	struct lwis_transaction *k_transaction = NULL;
 	struct lwis_transaction_info k_transaction_info;
 	struct lwis_device *lwis_dev = client->lwis_dev;
 
@@ -1044,19 +1065,14 @@
 
 	spin_lock_irqsave(&client->transaction_lock, flags);
 	ret = lwis_transaction_replace_locked(client, k_transaction);
-	if (ret) {
-		k_transaction->info.id = LWIS_ID_INVALID;
-		spin_unlock_irqrestore(&client->transaction_lock, flags);
-		if (copy_to_user((void __user *)msg, &k_transaction->info,
-				 sizeof(struct lwis_transaction_info))) {
-			dev_err_ratelimited(lwis_dev->dev, "Failed to return info to userspace\n");
-		}
-		free_transaction(k_transaction);
-		return ret;
-	}
 	k_transaction_info = k_transaction->info;
 	spin_unlock_irqrestore(&client->transaction_lock, flags);
 
+	if (ret) {
+		k_transaction_info.id = LWIS_ID_INVALID;
+		lwis_transaction_free(lwis_dev, k_transaction);
+	}
+
 	if (copy_to_user((void __user *)msg, &k_transaction_info,
 			 sizeof(struct lwis_transaction_info))) {
 		ret = -EFAULT;
@@ -1088,79 +1104,9 @@
 	return 0;
 }
 
-static int prepare_io_entry(struct lwis_client *client, struct lwis_io_entry *user_entries,
-			    size_t num_io_entries, struct lwis_io_entry **io_entries)
-{
-	int i;
-	int ret = 0;
-	int last_buf_alloc_idx = 0;
-	size_t entry_size;
-	struct lwis_io_entry *k_entries;
-	uint8_t *user_buf;
-	uint8_t *k_buf;
-	struct lwis_device *lwis_dev = client->lwis_dev;
-
-	entry_size = num_io_entries * sizeof(struct lwis_io_entry);
-	if (entry_size / sizeof(struct lwis_io_entry) != num_io_entries) {
-		dev_err(lwis_dev->dev, "Failed to prepare io entry due to integer overflow\n");
-		return -EINVAL;
-	}
-
-	k_entries = kvmalloc(entry_size, GFP_KERNEL);
-	if (!k_entries) {
-		dev_err(lwis_dev->dev, "Failed to allocate periodic io entries\n");
-		return -ENOMEM;
-	}
-	*io_entries = k_entries;
-
-	if (copy_from_user((void *)k_entries, (void __user *)user_entries, entry_size)) {
-		ret = -EFAULT;
-		dev_err(lwis_dev->dev, "Failed to copy periodic io entries from user\n");
-		goto error_free_entries;
-	}
-
-	/* For batch writes, ened to allocate kernel buffers to deep copy the
-	 * write values. Don't need to do this for batch reads because memory
-	 * will be allocated in the form of lwis_io_result in io processing.
-	 */
-	for (i = 0; i < num_io_entries; ++i) {
-		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			user_buf = k_entries[i].rw_batch.buf;
-			k_buf = kvmalloc(k_entries[i].rw_batch.size_in_bytes, GFP_KERNEL);
-			if (!k_buf) {
-				dev_err_ratelimited(
-					lwis_dev->dev,
-					"Failed to allocate periodic io write buffer\n");
-				ret = -ENOMEM;
-				goto error_free_buf;
-			}
-			last_buf_alloc_idx = i;
-			k_entries[i].rw_batch.buf = k_buf;
-			if (copy_from_user(k_buf, (void __user *)user_buf,
-					   k_entries[i].rw_batch.size_in_bytes)) {
-				ret = -EFAULT;
-				dev_err_ratelimited(
-					lwis_dev->dev,
-					"Failed to copy periodic io write buffer from userspace\n");
-				goto error_free_buf;
-			}
-		}
-	}
-	return 0;
-
-error_free_buf:
-	for (i = 0; i <= last_buf_alloc_idx; ++i) {
-		if (k_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			kvfree(k_entries[i].rw_batch.buf);
-		}
-	}
-error_free_entries:
-	kvfree(k_entries);
-	return ret;
-}
-
-static int prepare_periodic_io(struct lwis_client *client, struct lwis_periodic_io_info __user *msg,
-			       struct lwis_periodic_io **periodic_io)
+static int construct_periodic_io(struct lwis_client *client,
+				 struct lwis_periodic_io_info __user *msg,
+				 struct lwis_periodic_io **periodic_io)
 {
 	int ret = 0;
 	struct lwis_periodic_io *k_periodic_io;
@@ -1181,12 +1127,17 @@
 		goto error_free_periodic_io;
 	}
 
-	ret = prepare_io_entry(client, k_periodic_io->info.io_entries,
-			       k_periodic_io->info.num_io_entries, &k_periodic_io->info.io_entries);
+	ret = construct_io_entry(client, k_periodic_io->info.io_entries,
+				 k_periodic_io->info.num_io_entries,
+				 &k_periodic_io->info.io_entries);
 	if (ret) {
 		dev_err(lwis_dev->dev, "Failed to prepare lwis io entries for periodic io\n");
 		goto error_free_periodic_io;
 	}
+
+	k_periodic_io->resp = NULL;
+	k_periodic_io->periodic_io_list = NULL;
+
 	*periodic_io = k_periodic_io;
 	return 0;
 
@@ -1202,9 +1153,10 @@
 	struct lwis_periodic_io *k_periodic_io = NULL;
 	struct lwis_device *lwis_dev = client->lwis_dev;
 
-	ret = prepare_periodic_io(client, msg, &k_periodic_io);
-	if (ret)
+	ret = construct_periodic_io(client, msg, &k_periodic_io);
+	if (ret) {
 		return ret;
+	}
 
 	ret = lwis_periodic_io_submit(client, k_periodic_io);
 	if (ret) {
@@ -1213,7 +1165,7 @@
 				 sizeof(struct lwis_periodic_io_info))) {
 			dev_err_ratelimited(lwis_dev->dev, "Failed to return info to userspace\n");
 		}
-		lwis_periodic_io_clean(k_periodic_io);
+		lwis_periodic_io_free(lwis_dev, k_periodic_io);
 		return ret;
 	}
 
@@ -1224,7 +1176,7 @@
 		return -EFAULT;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int ioctl_periodic_io_cancel(struct lwis_client *client, int64_t __user *msg)
@@ -1246,9 +1198,11 @@
 
 	return 0;
 }
+
 static int ioctl_dpm_clk_update(struct lwis_device *lwis_dev,
 				struct lwis_dpm_clk_settings __user *msg)
 {
+	int ret;
 	struct lwis_dpm_clk_settings k_msg;
 	struct lwis_clk_setting *clk_settings;
 	size_t buf_size;
@@ -1276,7 +1230,9 @@
 		return -EFAULT;
 	}
 
-	return lwis_dpm_update_clock(lwis_dev, clk_settings, k_msg.num_settings);
+	ret = lwis_dpm_update_clock(lwis_dev, clk_settings, k_msg.num_settings);
+	kfree(clk_settings);
+	return ret;
 }
 
 static int ioctl_dpm_qos_update(struct lwis_device *lwis_dev,
@@ -1470,5 +1426,10 @@
 	if (type != LWIS_EVENT_DEQUEUE) {
 		mutex_unlock(&lwis_client->lock);
 	}
+
+	if (ret && ret != -ENOENT && ret != -ETIMEDOUT && ret != -EAGAIN) {
+		lwis_ioctl_pr_err(lwis_dev, type, ret);
+	}
+
 	return ret;
 }
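
construct_io_entry() deep-copies the userspace entries through the recycling allocator and, on failure, unwinds only the batch buffers allocated so far (indices 0 through last_buf_alloc_idx) before freeing the entry array. A minimal user-space sketch of that unwind pattern, with malloc/free standing in for lwis_allocator_allocate()/lwis_allocator_free():

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        int is_batch;
        void *buf;
    };

    static int deep_copy(struct entry *entries, int count)
    {
        int i, last_buf_alloc_idx = -1;

        for (i = 0; i < count; i++) {
            if (!entries[i].is_batch)
                continue;
            entries[i].buf = malloc(64);
            if (!entries[i].buf)
                goto error_free_buf;
            last_buf_alloc_idx = i;
        }
        return 0;

    error_free_buf:
        /* Free only what was allocated before the failure, as construct_io_entry() does. */
        for (i = 0; i <= last_buf_alloc_idx; i++) {
            if (entries[i].is_batch) {
                free(entries[i].buf);
                entries[i].buf = NULL;
            }
        }
        return -1;
    }

    int main(void)
    {
        struct entry e[3] = { { 1, NULL }, { 0, NULL }, { 1, NULL } };

        printf("deep_copy: %d\n", deep_copy(e, 3));
        free(e[0].buf);
        free(e[2].buf);
        return 0;
    }
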
diff --git a/lwis_ioctl.h b/lwis_ioctl.h
index 16c2661..8fba49f 100644
--- a/lwis_ioctl.h
+++ b/lwis_ioctl.h
@@ -18,6 +18,4 @@
  */
 int lwis_ioctl_handler(struct lwis_client *lwis_client, unsigned int type, unsigned long param);
 
-void lwis_ioctl_pr_err(struct lwis_device *lwis_device, unsigned int ioctl_type, int errno);
-
 #endif /* LWIS_IOCTL_H_ */
diff --git a/lwis_ioreg.c b/lwis_ioreg.c
index 46e1cd8..fd74336 100644
--- a/lwis_ioreg.c
+++ b/lwis_ioreg.c
@@ -89,7 +89,10 @@
 {
 	struct lwis_ioreg_list *list;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	/* No need to allocate if num_blocks is invalid */
 	if (num_blocks <= 0) {
@@ -111,7 +114,10 @@
 {
 	struct lwis_ioreg_list *list;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return;
+	}
 
 	list = &ioreg_dev->reg_list;
 	if (list->block) {
@@ -128,7 +134,10 @@
 	struct lwis_ioreg_list *list;
 	struct platform_device *plat_dev;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	plat_dev = ioreg_dev->base_dev.plat_dev;
 	list = &ioreg_dev->reg_list;
@@ -160,7 +169,10 @@
 	struct lwis_ioreg_list *list;
 	struct device *dev;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	dev = &ioreg_dev->base_dev.plat_dev->dev;
 	list = &ioreg_dev->reg_list;
@@ -183,7 +195,10 @@
 	struct lwis_ioreg_list *list;
 	struct device *dev;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	dev = &ioreg_dev->base_dev.plat_dev->dev;
 	list = &ioreg_dev->reg_list;
@@ -240,7 +255,7 @@
 }
 
 static int ioreg_write_batch_internal(void __iomem *base, uint64_t offset, int value_bits,
-				      size_t size_in_bytes, uint8_t *buf)
+				      size_t size_in_bytes, uint8_t *buf, bool is_offset_fixed)
 {
 	int i;
 	uint8_t *addr = (uint8_t *)base + offset;
@@ -254,22 +269,29 @@
 	switch (value_bits) {
 	case 8:
 		for (i = 0; i < size_in_bytes; ++i) {
-			writeb_relaxed(*(buf + i), (void __iomem *)(addr + i));
+			writeb_relaxed(*(buf + i), is_offset_fixed ?
+				(void __iomem *)addr : (void __iomem *)(addr + i));
 		}
 		break;
 	case 16:
 		for (i = 0; i < size_in_bytes; i += 2) {
-			writew_relaxed(*(uint16_t *)(buf + i), (void __iomem *)(addr + i));
+			writew_relaxed(*(uint16_t *)(buf + i), is_offset_fixed ?
+				(void __iomem *)addr : (void __iomem *)(addr + i));
 		}
 		break;
 	case 32:
 		for (i = 0; i < size_in_bytes; i += 4) {
-			writel_relaxed(*(uint32_t *)(buf + i), (void __iomem *)(addr + i));
+			writel_relaxed(*(uint32_t *)(buf + i), is_offset_fixed ?
+				(void __iomem *)addr : (void __iomem *)(addr + i));
 		}
 		break;
 	case 64:
 		for (i = 0; i < size_in_bytes; i += 8) {
-			writeq_relaxed(*(uint64_t *)(buf + i), (void __iomem *)(addr + i));
+			writeq_relaxed(*(uint64_t *)(buf + i), is_offset_fixed ?
+				(void __iomem *)addr : (void __iomem *)(addr + i));
 		}
 		break;
 	default:
@@ -334,8 +356,15 @@
 	struct lwis_ioreg *block;
 	uint64_t reg_value;
 
-	BUG_ON(!ioreg_dev);
-	BUG_ON(!entry);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!entry) {
+		dev_err(ioreg_dev->base_dev.dev, "IO entry is NULL.\n");
+		return -EINVAL;
+	}
 
 	/* Non-blocking because we already locked here */
 	if (entry->type == LWIS_IO_ENTRY_READ) {
@@ -402,8 +431,8 @@
 		}
 		ret = ioreg_write_batch_internal(block->base, entry->rw_batch.offset,
 						 ioreg_dev->base_dev.native_value_bitwidth,
-						 entry->rw_batch.size_in_bytes,
-						 entry->rw_batch.buf);
+						 entry->rw_batch.size_in_bytes, entry->rw_batch.buf,
+						 entry->rw_batch.is_offset_fixed);
 		if (ret) {
 			dev_err(ioreg_dev->base_dev.dev, "Invalid ioreg batch write at:\n");
 			dev_err(ioreg_dev->base_dev.dev, "Offset: 0x%08llx, Base: %pK\n",
@@ -444,7 +473,10 @@
 	unsigned int native_value_bitwidth;
 	uint64_t offset_mask;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	block = get_block_by_idx(ioreg_dev, index);
 	if (IS_ERR_OR_NULL(block)) {
@@ -493,7 +525,10 @@
 	uint64_t offset_mask;
 	uint64_t value_mask;
 
-	BUG_ON(!ioreg_dev);
+	if (!ioreg_dev) {
+		pr_err("LWIS IOREG device is NULL\n");
+		return -ENODEV;
+	}
 
 	if (ioreg_dev->base_dev.is_read_only) {
 		dev_err(ioreg_dev->base_dev.dev, "Device is read only\n");
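
The new is_offset_fixed flag makes ioreg_write_batch_internal() write every element of a batch to the same register offset, the usual shape of a FIFO or data port, instead of advancing the offset per element. A small user-space simulation of the two modes for the 8-bit case:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t regs[16]; /* simulated 8-bit register space (MMIO in the driver) */

    /* Mirrors the 8-bit case of ioreg_write_batch_internal(): with is_offset_fixed
     * every byte lands on the same register, otherwise the target advances with
     * the buffer index. */
    static void write_batch(uint64_t offset, const uint8_t *buf, size_t len,
                            int is_offset_fixed)
    {
        size_t i;

        for (i = 0; i < len; i++)
            regs[is_offset_fixed ? offset : offset + i] = buf[i];
    }

    int main(void)
    {
        const uint8_t data[4] = { 1, 2, 3, 4 };

        write_batch(0, data, 4, 0);
        printf("incrementing: %d %d %d %d\n", regs[0], regs[1], regs[2], regs[3]);

        memset(regs, 0, sizeof(regs));
        write_batch(0, data, 4, 1);
        printf("fixed offset: regs[0]=%d (last value written)\n", regs[0]);
        return 0;
    }
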
diff --git a/lwis_periodic_io.c b/lwis_periodic_io.c
index 6c7fed1..d3b56cb 100644
--- a/lwis_periodic_io.c
+++ b/lwis_periodic_io.c
@@ -13,10 +13,13 @@
 #include "lwis_periodic_io.h"
 
 #include <linux/completion.h>
+#include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
+#include "lwis_allocator.h"
 #include "lwis_event.h"
+#include "lwis_io_entry.h"
 #include "lwis_ioreg.h"
 #include "lwis_transaction.h"
 #include "lwis_util.h"
@@ -54,7 +57,8 @@
 		}
 	}
 	if (active_periodic_io_present) {
-		queue_work(client->periodic_io_wq, &client->periodic_io_work);
+		kthread_queue_work(&client->lwis_dev->periodic_io_worker,
+				   &client->periodic_io_work);
 	}
 	spin_unlock_irqrestore(&client->periodic_io_lock, flags);
 	if (!active_periodic_io_present) {
@@ -231,13 +235,13 @@
 			read_buf += sizeof(struct lwis_periodic_io_result) +
 				    io_result->io_result.num_value_bytes;
 		} else if (entry->type == LWIS_IO_ENTRY_POLL) {
-			ret = lwis_entry_poll(lwis_dev, entry, /*non_blocking=*/false);
+			ret = lwis_io_entry_poll(lwis_dev, entry, /*non_blocking=*/false);
 			if (ret) {
 				resp->error_code = ret;
 				goto event_push;
 			}
 		} else if (entry->type == LWIS_IO_ENTRY_READ_ASSERT) {
-			ret = lwis_entry_read_assert(lwis_dev, entry);
+			ret = lwis_io_entry_read_assert(lwis_dev, entry);
 			if (ret) {
 				resp->error_code = ret;
 				goto event_push;
@@ -296,7 +300,7 @@
 	return ret;
 }
 
-static void periodic_io_work_func(struct work_struct *work)
+static void periodic_io_work_func(struct kthread_work *work)
 {
 	int error_code;
 	unsigned long flags;
@@ -378,26 +382,6 @@
 		}
 	}
 
-	/* Check integer overflow.*/
-	if (info->batch_size != 0 && read_entries != 0 && read_buf_size != 0) {
-		if (SIZE_MAX / (sizeof(struct lwis_periodic_io_result) * info->batch_size) <
-			    read_entries ||
-		    SIZE_MAX / (read_entries * sizeof(struct lwis_periodic_io_result)) <
-			    info->batch_size ||
-		    SIZE_MAX / read_buf_size < info->batch_size ||
-		    SIZE_MAX - (read_entries * sizeof(struct lwis_periodic_io_result) *
-					info->batch_size +
-				read_buf_size * info->batch_size) <
-			    sizeof(struct lwis_periodic_io_response_header) ||
-		    SIZE_MAX - (read_entries * sizeof(struct lwis_periodic_io_result) *
-					info->batch_size +
-				sizeof(struct lwis_periodic_io_response_header)) <
-			    (read_buf_size * info->batch_size)) {
-			pr_err_ratelimited("Failed to prepare response due to integer overflow\n");
-			return -EINVAL;
-		}
-	}
-
 	/* Periodic io response payload consists of one response header and
 	 * batch_size of batches, each of which contains num_entries_per_period
 	 * pairs of lwis_periodic_io_result and its read_buf. */
@@ -442,18 +426,19 @@
 	return 0;
 }
 
-void lwis_periodic_io_clean(struct lwis_periodic_io *periodic_io)
+void lwis_periodic_io_free(struct lwis_device *lwis_dev, struct lwis_periodic_io *periodic_io)
 {
 	int i;
+
 	for (i = 0; i < periodic_io->info.num_io_entries; ++i) {
 		if (periodic_io->info.io_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			kvfree(periodic_io->info.io_entries[i].rw_batch.buf);
+			lwis_allocator_free(lwis_dev, periodic_io->info.io_entries[i].rw_batch.buf);
+			periodic_io->info.io_entries[i].rw_batch.buf = NULL;
 		}
 	}
-	kvfree(periodic_io->info.io_entries);
+	lwis_allocator_free(lwis_dev, periodic_io->info.io_entries);
 
-	/* resp may not be allocated before the periodic_io is successfully
-	 * submitted */
+	/* resp may not be allocated before the periodic_io is successfully submitted */
 	if (periodic_io->resp) {
 		kfree(periodic_io->resp);
 	}
@@ -463,8 +448,7 @@
 int lwis_periodic_io_init(struct lwis_client *client)
 {
 	INIT_LIST_HEAD(&client->periodic_io_process_queue);
-	client->periodic_io_wq = create_workqueue("lwisperiod");
-	INIT_WORK(&client->periodic_io_work, periodic_io_work_func);
+	kthread_init_work(&client->periodic_io_work, periodic_io_work_func);
 	client->periodic_io_counter = 0;
 	hash_init(client->timer_list);
 	return 0;
@@ -532,8 +516,8 @@
 	}
 
 	/* Wait until all workload in process queue are processed */
-	if (client->periodic_io_wq) {
-		drain_workqueue(client->periodic_io_wq);
+	if (client->lwis_dev->periodic_io_worker_thread) {
+		kthread_flush_worker(&client->lwis_dev->periodic_io_worker);
 	}
 	spin_lock_irqsave(&client->periodic_io_lock, flags);
 
@@ -543,7 +527,7 @@
 			periodic_io =
 				list_entry(it_period, struct lwis_periodic_io, timer_list_node);
 			list_del(it_period);
-			lwis_periodic_io_clean(periodic_io);
+			lwis_periodic_io_free(client->lwis_dev, periodic_io);
 		}
 	}
 	spin_unlock_irqrestore(&client->periodic_io_lock, flags);
@@ -563,10 +547,6 @@
 		return ret;
 	}
 
-	if (client->periodic_io_wq) {
-		destroy_workqueue(client->periodic_io_wq);
-	}
-
 	spin_lock_irqsave(&client->periodic_io_lock, flags);
 	hash_for_each_safe (client->timer_list, i, tmp, it_periodic_io_list, node) {
 		hash_del(&it_periodic_io_list->node);
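
Periodic io work now runs on a per-device kthread worker instead of a dedicated workqueue, so queueing goes through kthread_queue_work() and the old drain_workqueue() call becomes kthread_flush_worker(). A minimal module-style sketch of that pattern follows; the worker and thread fields on lwis_device (periodic_io_worker, periodic_io_worker_thread) are assumed to be set up the same way elsewhere in the driver, which this patch does not show.

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kthread.h>
    #include <linux/module.h>
    #include <linux/sched.h>

    static struct kthread_worker demo_worker;
    static struct task_struct *demo_thread;
    static struct kthread_work demo_work;

    static void demo_work_func(struct kthread_work *work)
    {
        pr_info("demo work ran on %s\n", current->comm);
    }

    static int __init demo_init(void)
    {
        /* The LWIS device is assumed to do the same for periodic_io_worker. */
        kthread_init_worker(&demo_worker);
        demo_thread = kthread_run(kthread_worker_fn, &demo_worker, "demo_worker");
        if (IS_ERR(demo_thread))
            return PTR_ERR(demo_thread);

        kthread_init_work(&demo_work, demo_work_func);
        kthread_queue_work(&demo_worker, &demo_work);
        /* Equivalent of the drain_workqueue() call this patch removes. */
        kthread_flush_worker(&demo_worker);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        kthread_flush_worker(&demo_worker);
        kthread_stop(demo_thread);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
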
diff --git a/lwis_periodic_io.h b/lwis_periodic_io.h
index 392722a..dbec999 100644
--- a/lwis_periodic_io.h
+++ b/lwis_periodic_io.h
@@ -81,6 +81,6 @@
 int lwis_periodic_io_client_cleanup(struct lwis_client *client);
 int lwis_periodic_io_submit(struct lwis_client *client, struct lwis_periodic_io *periodic_io);
 int lwis_periodic_io_cancel(struct lwis_client *client, int64_t id);
-void lwis_periodic_io_clean(struct lwis_periodic_io *periodic_io);
+void lwis_periodic_io_free(struct lwis_device *lwis_dev, struct lwis_periodic_io *periodic_io);
 
 #endif /* LWIS_PERIODIC_IO_H_ */
\ No newline at end of file
diff --git a/lwis_trace.h b/lwis_trace.h
new file mode 100644
index 0000000..2f263e2
--- /dev/null
+++ b/lwis_trace.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LWIS Trace Event
+ *
+ * Copyright (c) 2021 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lwis
+
+#if !defined(_TRACE_LWIS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LWIS_H_
+
+#include <linux/tracepoint.h>
+#include "lwis_commands.h"
+#include "lwis_device.h"
+
+#define LWIS_DEVICE_NAME_ENTRY \
+	__array(char, lwis_name, LWIS_MAX_NAME_STRING_LEN)
+#define LWIS_DEVICE_NAME_ASSIGN \
+	strlcpy(__entry->lwis_name, lwis_dev->name, LWIS_MAX_NAME_STRING_LEN)
+#define LWIS_DEVICE_NAME __entry->lwis_name
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(struct lwis_device *lwis_dev, char type,
+		int pid, const char *func_name, int64_t value),
+	TP_ARGS(lwis_dev, type, pid, func_name, value),
+	TP_STRUCT__entry(
+		LWIS_DEVICE_NAME_ENTRY
+		__field(char, type)
+		__field(int, pid)
+		__string(func_name, func_name)
+		__field(int64_t, value)
+	),
+	TP_fast_assign(
+		LWIS_DEVICE_NAME_ASSIGN;
+		__entry->type = type;
+		__entry->pid = pid;
+		__assign_str(func_name, func_name);
+		__entry->value = value;
+	),
+	TP_printk("%c|%d|lwis-%s:%s|%lld",
+		__entry->type, __entry->pid, LWIS_DEVICE_NAME, __get_str(func_name), __entry->value)
+);
+
+#define LWIS_ATRACE_BEGIN(lwis_dev, func_name) \
+	trace_tracing_mark_write(lwis_dev, 'B', current->tgid, func_name, 0)
+#define LWIS_ATRACE_FUNC_BEGIN(lwis_dev) LWIS_ATRACE_BEGIN(lwis_dev, __func__)
+
+#define LWIS_ATRACE_END(lwis_dev, func_name) \
+	trace_tracing_mark_write(lwis_dev, 'E', current->tgid, func_name, 0)
+#define LWIS_ATRACE_FUNC_END(lwis_dev) LWIS_ATRACE_END(lwis_dev, __func__)
+
+#define LWIS_ATRACE_INT_PID(lwis_dev, func_name, value, pid) \
+	trace_tracing_mark_write(lwis_dev, 'C', pid, func_name, value)
+#define LWIS_ATRACE_INT(lwis_dev, func_name, value) \
+	LWIS_ATRACE_INT_PID(lwis_dev, func_name, value, current->tgid)
+
+#endif /* _TRACE_LWIS_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE lwis_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
\ No newline at end of file
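Editor's note: the header above defines systrace-style begin/end/counter macros on top of the tracing_mark_write tracepoint; exactly one compilation unit defines CREATE_TRACE_POINTS before including it (lwis_transaction.c below). A short usage sketch follows; demo_process() is a hypothetical function, not part of LWIS.

/* Hypothetical instrumentation example. */
#include <linux/sched.h>
#include "lwis_trace.h"

static void demo_process(struct lwis_device *lwis_dev, int queue_depth)
{
	LWIS_ATRACE_FUNC_BEGIN(lwis_dev);	/* emits "B|<pid>|lwis-<dev>:demo_process|0" */
	LWIS_ATRACE_INT(lwis_dev, "queue_depth", queue_depth);	/* counter event "C|<pid>|..." */
	/* ... work being traced ... */
	LWIS_ATRACE_FUNC_END(lwis_dev);		/* emits the matching "E" event */
}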
diff --git a/lwis_transaction.c b/lwis_transaction.c
index 0895123..e467be3 100644
--- a/lwis_transaction.c
+++ b/lwis_transaction.c
@@ -14,16 +14,20 @@
 
 #include <linux/delay.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
+#include "lwis_allocator.h"
 #include "lwis_device.h"
 #include "lwis_event.h"
+#include "lwis_io_entry.h"
 #include "lwis_ioreg.h"
 #include "lwis_util.h"
 
+#define CREATE_TRACE_POINTS
+#include "lwis_trace.h"
+
 #define EXPLICIT_EVENT_COUNTER(x)                                                                  \
 	((x) != LWIS_EVENT_COUNTER_ON_NEXT_OCCURRENCE && (x) != LWIS_EVENT_COUNTER_EVERY_TIME)
 
@@ -61,55 +65,6 @@
 	return (list == NULL) ? event_list_create(client, event_id) : list;
 }
 
-int lwis_entry_poll(struct lwis_device *lwis_dev, struct lwis_io_entry *entry, bool non_blocking)
-{
-	uint64_t val, start;
-	uint64_t timeout_ms = entry->read_assert.timeout_ms;
-	int ret = 0;
-
-	/* Only read and check once if non_blocking */
-	if (non_blocking) {
-		timeout_ms = 0;
-	}
-
-	/* Read until getting the expected value or timeout */
-	val = ~entry->read_assert.val;
-	start = ktime_to_ms(lwis_get_time());
-	while (val != entry->read_assert.val) {
-		ret = lwis_entry_read_assert(lwis_dev, entry);
-		if (ret == 0) {
-			break;
-		}
-		if (ktime_to_ms(lwis_get_time()) - start > timeout_ms) {
-			dev_err(lwis_dev->dev, "Polling timed out: block %d offset 0x%llx\n",
-				entry->read_assert.bid, entry->read_assert.offset);
-			return -ETIMEDOUT;
-		}
-		/* Sleep for 1ms */
-		usleep_range(1000, 1000);
-	}
-	return ret;
-}
-
-int lwis_entry_read_assert(struct lwis_device *lwis_dev, struct lwis_io_entry *entry)
-{
-	uint64_t val;
-	int ret = 0;
-
-	ret = lwis_device_single_register_read(lwis_dev, entry->read_assert.bid,
-					       entry->read_assert.offset, &val,
-					       lwis_dev->native_value_bitwidth);
-	if (ret) {
-		dev_err(lwis_dev->dev, "Failed to read registers: block %d offset 0x%llx\n",
-			entry->read_assert.bid, entry->read_assert.offset);
-		return ret;
-	}
-	if ((val & entry->read_assert.mask) == (entry->read_assert.val & entry->read_assert.mask)) {
-		return 0;
-	}
-	return -EINVAL;
-}
-
 static void save_transaction_to_history(struct lwis_client *client,
 					struct lwis_transaction_info *trans_info,
 					int64_t process_timestamp, int64_t process_duration_ns)
@@ -126,17 +81,20 @@
 	}
 }
 
-static void free_transaction(struct lwis_transaction *transaction)
+void lwis_transaction_free(struct lwis_device *lwis_dev, struct lwis_transaction *transaction)
 {
-	int i = 0;
+	int i;
 
-	kfree(transaction->resp);
 	for (i = 0; i < transaction->info.num_io_entries; ++i) {
 		if (transaction->info.io_entries[i].type == LWIS_IO_ENTRY_WRITE_BATCH) {
-			kvfree(transaction->info.io_entries[i].rw_batch.buf);
+			lwis_allocator_free(lwis_dev, transaction->info.io_entries[i].rw_batch.buf);
+			transaction->info.io_entries[i].rw_batch.buf = NULL;
 		}
 	}
-	kvfree(transaction->info.io_entries);
+	lwis_allocator_free(lwis_dev, transaction->info.io_entries);
+	if (transaction->resp) {
+		kfree(transaction->resp);
+	}
 	kfree(transaction);
 }
 
@@ -156,6 +114,7 @@
 	int64_t process_duration_ns = 0;
 	int64_t process_timestamp = ktime_to_ns(lwis_get_time());
 
+	LWIS_ATRACE_FUNC_BEGIN(lwis_dev);
 	resp_size = sizeof(struct lwis_transaction_response_header) + resp->results_size_bytes;
 	read_buf = (uint8_t *)resp + sizeof(struct lwis_transaction_response_header);
 	resp->completion_index = -1;
@@ -228,7 +187,7 @@
 			}
 			read_buf += sizeof(struct lwis_io_result) + io_result->num_value_bytes;
 		} else if (entry->type == LWIS_IO_ENTRY_POLL) {
-			ret = lwis_entry_poll(lwis_dev, entry, in_irq);
+			ret = lwis_io_entry_poll(lwis_dev, entry, in_irq);
 			if (ret) {
 				resp->error_code = ret;
 				if (skip_err) {
@@ -240,7 +199,7 @@
 				break;
 			}
 		} else if (entry->type == LWIS_IO_ENTRY_READ_ASSERT) {
-			ret = lwis_entry_read_assert(lwis_dev, entry);
+			ret = lwis_io_entry_read_assert(lwis_dev, entry);
 			if (ret) {
 				resp->error_code = ret;
 				if (skip_err) {
@@ -297,13 +256,14 @@
 		kfree(transaction->resp);
 		kfree(transaction);
 	} else {
-		free_transaction(transaction);
+		lwis_transaction_free(lwis_dev, transaction);
 	}
+	LWIS_ATRACE_FUNC_END(lwis_dev);
 	return ret;
 }
 
-static void cancel_transaction(struct lwis_transaction *transaction, int error_code,
-			       struct list_head *pending_events)
+static void cancel_transaction(struct lwis_device *lwis_dev, struct lwis_transaction *transaction,
+			       int error_code, struct list_head *pending_events)
 {
 	struct lwis_transaction_info *info = &transaction->info;
 	struct lwis_transaction_response_header resp;
@@ -317,7 +277,7 @@
 		lwis_pending_event_push(pending_events, info->emit_error_event_id, &resp,
 					sizeof(resp));
 	}
-	free_transaction(transaction);
+	lwis_transaction_free(lwis_dev, transaction);
 }
 
 static void process_transactions_in_queue(struct lwis_client *client,
@@ -335,8 +295,8 @@
 		transaction = list_entry(it_tran, struct lwis_transaction, process_queue_node);
 		list_del(&transaction->process_queue_node);
 		if (transaction->resp->error_code) {
-			cancel_transaction(transaction, transaction->resp->error_code,
-					   &pending_events);
+			cancel_transaction(client->lwis_dev, transaction,
+					   transaction->resp->error_code, &pending_events);
 		} else {
 			spin_unlock_irqrestore(&client->transaction_lock, flags);
 			process_transaction(client, transaction, &pending_events, in_irq,
@@ -357,10 +317,9 @@
 				      /*in_irq=*/true);
 }
 
-static void transaction_work_func(struct work_struct *work)
+static void transaction_work_func(struct kthread_work *work)
 {
 	struct lwis_client *client = container_of(work, struct lwis_client, transaction_work);
-
 	process_transactions_in_queue(client, &client->transaction_process_queue, /*in_irq=*/false);
 }
 
@@ -370,15 +329,7 @@
 	INIT_LIST_HEAD(&client->transaction_process_queue_tasklet);
 	tasklet_init(&client->transaction_tasklet, transaction_tasklet_func, (unsigned long)client);
 	INIT_LIST_HEAD(&client->transaction_process_queue);
-	if (client->lwis_dev->adjust_thread_priority != 0) {
-		/* Since I2C transactions can only be executed in workqueues, putting them in high
-		 * priority to avoid scheduling delays. */
-		client->transaction_wq = alloc_ordered_workqueue(
-			"lwistran-i2c", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_HIGHPRI);
-	} else {
-		client->transaction_wq = create_workqueue("lwistran");
-	}
-	INIT_WORK(&client->transaction_work, transaction_work_func);
+	kthread_init_work(&client->transaction_work, transaction_work_func);
 	client->transaction_counter = 0;
 	hash_init(client->transaction_list);
 	return 0;
@@ -395,9 +346,6 @@
 		return ret;
 	}
 	tasklet_kill(&client->transaction_tasklet);
-	if (client->transaction_wq) {
-		destroy_workqueue(client->transaction_wq);
-	}
 	return 0;
 }
 
@@ -424,16 +372,15 @@
 		list_for_each_safe (it_tran, it_tran_tmp, &it_evt_list->list) {
 			transaction = list_entry(it_tran, struct lwis_transaction, event_list_node);
 			list_del(&transaction->event_list_node);
-			cancel_transaction(transaction, -ECANCELED, NULL);
+			cancel_transaction(client->lwis_dev, transaction, -ECANCELED, NULL);
 		}
 		hash_del(&it_evt_list->node);
 		kfree(it_evt_list);
 	}
 	spin_unlock_irqrestore(&client->transaction_lock, flags);
 
-	if (client->transaction_wq) {
-		drain_workqueue(client->transaction_wq);
-	}
+	if (client->lwis_dev->transaction_worker_thread)
+		kthread_flush_worker(&client->lwis_dev->transaction_worker);
 
 	spin_lock_irqsave(&client->transaction_lock, flags);
 	/* This shouldn't happen after drain_workqueue, but check anyway. */
@@ -443,7 +390,7 @@
 			transaction =
 				list_entry(it_tran, struct lwis_transaction, process_queue_node);
 			list_del(&transaction->process_queue_node);
-			cancel_transaction(transaction, -ECANCELED, NULL);
+			cancel_transaction(client->lwis_dev, transaction, -ECANCELED, NULL);
 		}
 	}
 	spin_unlock_irqrestore(&client->transaction_lock, flags);
@@ -495,7 +442,7 @@
 		transaction = list_entry(it_tran, struct lwis_transaction, event_list_node);
 		list_del(&transaction->event_list_node);
 		if (transaction->resp->error_code || client->lwis_dev->enabled == 0) {
-			cancel_transaction(transaction, -ECANCELED, NULL);
+			cancel_transaction(client->lwis_dev, transaction, -ECANCELED, NULL);
 		} else {
 			spin_unlock_irqrestore(&client->transaction_lock, flags);
 			process_transaction(client, transaction, &pending_events, in_irq,
@@ -555,8 +502,15 @@
 	struct lwis_transaction_info *info = &transaction->info;
 	struct lwis_device *lwis_dev = client->lwis_dev;
 
-	BUG_ON(!client);
-	BUG_ON(!transaction);
+	if (!client) {
+		pr_err("Client is NULL while checking transaction parameter.\n");
+		return -ENODEV;
+	}
+
+	if (!transaction) {
+		dev_err(lwis_dev->dev, "Transaction is NULL.\n");
+		return -ENODEV;
+	}
 
 	/* Initialize event counter return value  */
 	info->current_trigger_event_counter = -1LL;
@@ -660,11 +614,14 @@
 	if (info->trigger_event_id == LWIS_EVENT_ID_NONE) {
 		/* Immediate trigger. */
 		if (info->run_at_real_time) {
-			list_add_tail(&transaction->process_queue_node, &client->transaction_process_queue_tasklet);
+			list_add_tail(&transaction->process_queue_node,
+				      &client->transaction_process_queue_tasklet);
 			tasklet_schedule(&client->transaction_tasklet);
 		} else {
-			list_add_tail(&transaction->process_queue_node, &client->transaction_process_queue);
-			queue_work(client->transaction_wq, &client->transaction_work);
+			list_add_tail(&transaction->process_queue_node,
+				      &client->transaction_process_queue);
+			kthread_queue_work(&client->lwis_dev->transaction_worker,
+					   &client->transaction_work);
 		}
 	} else {
 		/* Trigger by event. */
@@ -731,6 +688,9 @@
 	memcpy(resp_buf, transaction->resp, sizeof(struct lwis_transaction_response_header));
 	new_instance->resp = (struct lwis_transaction_response_header *)resp_buf;
 
+	INIT_LIST_HEAD(&new_instance->event_list_node);
+	INIT_LIST_HEAD(&new_instance->process_queue_node);
+
 	return new_instance;
 }
 
@@ -819,7 +779,8 @@
 		tasklet_schedule(&client->transaction_tasklet);
 	}
 	if (!list_empty(&client->transaction_process_queue)) {
-		queue_work(client->transaction_wq, &client->transaction_work);
+		kthread_queue_work(&client->lwis_dev->transaction_worker,
+				   &client->transaction_work);
 	}
 
 	spin_unlock_irqrestore(&client->transaction_lock, flags);
diff --git a/lwis_transaction.h b/lwis_transaction.h
index 2527def..2d6157e 100644
--- a/lwis_transaction.h
+++ b/lwis_transaction.h
@@ -45,9 +45,6 @@
 	struct hlist_node node;
 };
 
-int lwis_entry_poll(struct lwis_device *lwis_dev, struct lwis_io_entry *entry, bool non_blocking);
-int lwis_entry_read_assert(struct lwis_device *lwis_dev, struct lwis_io_entry *entry);
-
 int lwis_transaction_init(struct lwis_client *client);
 int lwis_transaction_clear(struct lwis_client *client);
 int lwis_transaction_client_flush(struct lwis_client *client);
@@ -58,6 +55,8 @@
 				   bool in_irq);
 int lwis_transaction_cancel(struct lwis_client *client, int64_t id);
 
+void lwis_transaction_free(struct lwis_device *lwis_dev, struct lwis_transaction *transaction);
+
 /* Expects lwis_client->transaction_lock to be acquired before calling
  * the following functions. */
 int lwis_transaction_submit_locked(struct lwis_client *client,
diff --git a/lwis_util.c b/lwis_util.c
index c1ea59b..d23a85b 100644
--- a/lwis_util.c
+++ b/lwis_util.c
@@ -10,6 +10,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME "-util: " fmt
 
+#include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
 #include "lwis_util.h"
 #include "lwis_device.h"
 
@@ -93,3 +95,61 @@
 		return "UNKNOWN";
 	}
 }
+
+int lwis_create_kthread_workers(struct lwis_device *lwis_dev, const char *transaction_worker_name,
+				const char *periodic_io_worker_name)
+{
+	if (!lwis_dev) {
+		pr_err("lwis_create_kthread_workers: lwis_dev is NULL\n");
+		return -ENODEV;
+	}
+
+	kthread_init_worker(&lwis_dev->transaction_worker);
+	lwis_dev->transaction_worker_thread = kthread_run(kthread_worker_fn,
+			&lwis_dev->transaction_worker, transaction_worker_name);
+	if (IS_ERR(lwis_dev->transaction_worker_thread)) {
+		dev_err(lwis_dev->dev, "transaction kthread_run failed\n");
+		return -EINVAL;
+	}
+
+	kthread_init_worker(&lwis_dev->periodic_io_worker);
+	lwis_dev->periodic_io_worker_thread = kthread_run(kthread_worker_fn,
+			&lwis_dev->periodic_io_worker, periodic_io_worker_name);
+	if (IS_ERR(lwis_dev->periodic_io_worker_thread)) {
+		dev_err(lwis_dev->dev, "periodic_io kthread_run failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int lwis_set_kthread_priority(struct lwis_device *lwis_dev, struct task_struct *task,
+			      u32 priority)
+{
+	int policy;
+	struct sched_param param;
+	int ret;
+
+	if (priority >= MAX_PRIO) {
+		dev_err(lwis_dev->dev, "transaction_thread_priority(%u) >= Max(%d)",
+			priority, MAX_PRIO);
+		return -EINVAL;
+	}
+	if (priority < MAX_RT_PRIO) {
+		policy = SCHED_FIFO;
+		param.sched_priority = priority;
+	} else {
+		policy = SCHED_NORMAL;
+		param.sched_priority = 0;
+		task->prio = priority;
+		task->static_prio = priority;
+		task->normal_prio = priority;
+	}
+	ret = sched_setscheduler(task, policy, &param);
+	if (ret) {
+		dev_err(lwis_dev->dev, "Failed to set kthread priority (%d)", ret);
+		return ret;
+	}
+
+	return 0;
+}
\ No newline at end of file
diff --git a/lwis_util.h b/lwis_util.h
index 116e6bd..577e78a 100644
--- a/lwis_util.h
+++ b/lwis_util.h
@@ -64,4 +64,16 @@
 	return ktime_get_boottime();
 }
 
+/*
+ * lwis_create_kthread_workers: Creates kthread workers associated with this lwis device.
+ */
+int lwis_create_kthread_workers(struct lwis_device *lwis_dev, const char *transaction_worker_name,
+				const char *periodic_io_worker_name);
+
+/*
+ * lwis_set_kthread_priority: Set kthread priority.
+ */
+int lwis_set_kthread_priority(struct lwis_device *lwis_dev, struct task_struct *task,
+			      u32 priority);
+
 #endif // LWIS_UTIL_H_
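Editor's note: these helpers are intended to be called once per device, typically at probe time. A hedged sketch of such a call site follows; demo_device_probe() and the thread names are illustrative, while transaction_worker_thread and adjust_thread_priority are device fields referenced elsewhere in this change.

/* Illustrative call site only. */
static int demo_device_probe(struct lwis_device *lwis_dev)
{
	int ret;

	ret = lwis_create_kthread_workers(lwis_dev, "lwis_transaction_kthread",
					  "lwis_periodic_io_kthread");
	if (ret) {
		return ret;
	}

	/* A priority below MAX_RT_PRIO selects SCHED_FIFO in
	 * lwis_set_kthread_priority(); higher values fall back to SCHED_NORMAL. */
	if (lwis_dev->adjust_thread_priority != 0) {
		ret = lwis_set_kthread_priority(lwis_dev, lwis_dev->transaction_worker_thread,
						lwis_dev->adjust_thread_priority);
	}
	return ret;
}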
diff --git a/platform/gs101/lwis_platform_gs101.c b/platform/anchorage/lwis_platform_anchorage.c
similarity index 89%
rename from platform/gs101/lwis_platform_gs101.c
rename to platform/anchorage/lwis_platform_anchorage.c
index 8fde2b9..2eb8d9b 100644
--- a/platform/gs101/lwis_platform_gs101.c
+++ b/platform/anchorage/lwis_platform_anchorage.c
@@ -1,5 +1,5 @@
 /*
- * Google LWIS GS101 Platform-Specific Functions
+ * Google LWIS Anchorage Platform-Specific Functions
  *
  * Copyright (c) 2020 Google, LLC
  *
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 
-#include "lwis_platform_gs101.h"
+#include "lwis_platform_anchorage.h"
 
 #include <linux/iommu.h>
 #include <linux/of.h>
@@ -58,12 +58,24 @@
 
 static int lwis_iommu_fault_handler(struct iommu_fault *fault, void *param)
 {
+	int ret;
+	struct of_phandle_iterator it;
 	struct lwis_device *lwis_dev = (struct lwis_device *)param;
 	struct lwis_mem_page_fault_event_payload event_payload;
 
 	pr_err("############ LWIS IOMMU PAGE FAULT ############\n");
 	pr_err("\n");
-	pr_err("Device: %s IOMMU Page Fault at Address: 0x%px Flag: 0x%08x\n", lwis_dev->name,
+	of_for_each_phandle (&it, ret, lwis_dev->plat_dev->dev.of_node, "iommus", 0, 0) {
+		u64 iommus_reg;
+		const char *port_name = NULL;
+		struct device_node *iommus_info = of_node_get(it.node);
+		of_property_read_u64(iommus_info, "reg", &iommus_reg);
+		of_property_read_string(iommus_info, "port-name", &port_name);
+		pr_info("Device [%s] registered IOMMUs: [%s] %#010llx.sysmmu\n", lwis_dev->name,
+			port_name, iommus_reg);
+		pr_err("\n");
+	}
+	pr_err("IOMMU Page Fault at Address: 0x%px Flag: 0x%08x. Check dmesg for sysmmu errors\n",
 	       (void *)fault->event.addr, fault->event.flags);
 	pr_err("\n");
 	lwis_debug_print_transaction_info(lwis_dev);
@@ -89,6 +101,7 @@
 int lwis_platform_device_enable(struct lwis_device *lwis_dev)
 {
 	int ret;
+	int iommus_len = 0;
 	struct lwis_platform *platform;
 
 	const int core_clock_qos = 67000;
@@ -110,7 +123,8 @@
 		return ret;
 	}
 
-	if (lwis_dev->has_iommu) {
+	if (of_find_property(lwis_dev->plat_dev->dev.of_node, "iommus", &iommus_len) &&
+	    iommus_len) {
 		/* Activate IOMMU for the platform device */
 		ret = iommu_register_device_fault_handler(&lwis_dev->plat_dev->dev,
 							  lwis_iommu_fault_handler, lwis_dev);
@@ -162,6 +176,7 @@
 
 int lwis_platform_device_disable(struct lwis_device *lwis_dev)
 {
+	int iommus_len = 0;
 	struct lwis_platform *platform;
 
 	if (!lwis_dev) {
@@ -182,7 +197,8 @@
 
 	lwis_platform_remove_qos(lwis_dev);
 
-	if (lwis_dev->has_iommu) {
+	if (of_find_property(lwis_dev->plat_dev->dev.of_node, "iommus", &iommus_len) &&
+	    iommus_len) {
 		/* Deactivate IOMMU */
 		iommu_unregister_device_fault_handler(&lwis_dev->plat_dev->dev);
 	}
@@ -217,10 +233,8 @@
 		qos_class = PM_QOS_CAM_THROUGHPUT;
 		break;
 	case CLOCK_FAMILY_TNR:
-#if defined(CONFIG_SOC_GS101)
 		qos_req = &platform->pm_qos_tnr;
 		qos_class = PM_QOS_TNR_THROUGHPUT;
-#endif
 		break;
 	case CLOCK_FAMILY_MIF:
 		qos_req = &platform->pm_qos_mem;
@@ -283,11 +297,9 @@
 	if (exynos_pm_qos_request_active(&platform->pm_qos_cam)) {
 		exynos_pm_qos_remove_request(&platform->pm_qos_cam);
 	}
-#if defined(CONFIG_SOC_GS101)
 	if (exynos_pm_qos_request_active(&platform->pm_qos_tnr)) {
 		exynos_pm_qos_remove_request(&platform->pm_qos_tnr);
 	}
-#endif
 	return 0;
 }
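Editor's note: the Anchorage port above (and the Busan copy below) gate IOMMU fault-handler registration on a non-empty "iommus" device-tree property instead of the has_iommu flag. A stand-alone sketch of that check follows; the helper name is illustrative.

#include <linux/of.h>

/* Returns true if the node carries a non-empty "iommus" property; mirrors the
 * of_find_property() check used in the enable/disable paths above. */
static bool demo_node_has_iommus(struct device_node *node)
{
	int iommus_len = 0;

	return of_find_property(node, "iommus", &iommus_len) && iommus_len != 0;
}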
 
diff --git a/platform/gs101/lwis_platform_gs101.h b/platform/anchorage/lwis_platform_anchorage.h
similarity index 77%
rename from platform/gs101/lwis_platform_gs101.h
rename to platform/anchorage/lwis_platform_anchorage.h
index c14c732..4a3a985 100644
--- a/platform/gs101/lwis_platform_gs101.h
+++ b/platform/anchorage/lwis_platform_anchorage.h
@@ -1,5 +1,5 @@
 /*
- * Google LWIS GS101 Platform-Specific Functions
+ * Google LWIS Anchorage Platform-Specific Functions
  *
  * Copyright (c) 2020 Google, LLC
  *
@@ -8,8 +8,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef LWIS_PLATFORM_GS101_H_
-#define LWIS_PLATFORM_GS101_H_
+#ifndef LWIS_PLATFORM_ANCHORAGE_H_
+#define LWIS_PLATFORM_ANCHORAGE_H_
 
 #include <soc/google/exynos_pm_qos.h>
 
@@ -22,4 +22,4 @@
 	/* struct exynos_pm_qos_request pm_qos_hpg; */
 };
 
-#endif /* LWIS_PLATFORM_GS101_H_ */
\ No newline at end of file
+#endif /* LWIS_PLATFORM_ANCHORAGE_H_ */
\ No newline at end of file
diff --git a/platform/gs101/lwis_platform_gs101_dma.c b/platform/anchorage/lwis_platform_anchorage_dma.c
similarity index 96%
rename from platform/gs101/lwis_platform_gs101_dma.c
rename to platform/anchorage/lwis_platform_anchorage_dma.c
index be4828c..401275c 100644
--- a/platform/gs101/lwis_platform_gs101_dma.c
+++ b/platform/anchorage/lwis_platform_anchorage_dma.c
@@ -1,5 +1,5 @@
 /*
- * Google LWIS GS101 Platform-Specific DMA Functions
+ * Google LWIS Anchorage Platform-Specific DMA Functions
  *
  * Copyright (c) 2020 Google, LLC
  *
diff --git a/platform/gs101/lwis_platform_gs101.c b/platform/busan/lwis_platform_busan.c
similarity index 86%
copy from platform/gs101/lwis_platform_gs101.c
copy to platform/busan/lwis_platform_busan.c
index 8fde2b9..b9fe7c0 100644
--- a/platform/gs101/lwis_platform_gs101.c
+++ b/platform/busan/lwis_platform_busan.c
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Google LWIS GS101 Platform-Specific Functions
+ * Google LWIS Busan Platform-Specific Functions
  *
- * Copyright (c) 2020 Google, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (c) 2021 Google, LLC
  */
 
-#include "lwis_platform_gs101.h"
+#include "lwis_platform_busan.h"
 
 #include <linux/iommu.h>
 #include <linux/of.h>
@@ -58,12 +55,24 @@
 
 static int lwis_iommu_fault_handler(struct iommu_fault *fault, void *param)
 {
+	int ret;
+	struct of_phandle_iterator it;
 	struct lwis_device *lwis_dev = (struct lwis_device *)param;
 	struct lwis_mem_page_fault_event_payload event_payload;
 
 	pr_err("############ LWIS IOMMU PAGE FAULT ############\n");
 	pr_err("\n");
-	pr_err("Device: %s IOMMU Page Fault at Address: 0x%px Flag: 0x%08x\n", lwis_dev->name,
+	of_for_each_phandle (&it, ret, lwis_dev->plat_dev->dev.of_node, "iommus", 0, 0) {
+		u64 iommus_reg;
+		const char *port_name = NULL;
+		struct device_node *iommus_info = of_node_get(it.node);
+		of_property_read_u64(iommus_info, "reg", &iommus_reg);
+		of_property_read_string(iommus_info, "port-name", &port_name);
+		pr_info("Device [%s] registered IOMMUs: [%s] %#010llx.sysmmu\n", lwis_dev->name,
+			port_name, iommus_reg);
+		pr_err("\n");
+	}
+	pr_err("IOMMU Page Fault at Address: 0x%px Flag: 0x%08x. Check dmesg for sysmmu errors\n",
 	       (void *)fault->event.addr, fault->event.flags);
 	pr_err("\n");
 	lwis_debug_print_transaction_info(lwis_dev);
@@ -89,6 +98,7 @@
 int lwis_platform_device_enable(struct lwis_device *lwis_dev)
 {
 	int ret;
+	int iommus_len = 0;
 	struct lwis_platform *platform;
 
 	const int core_clock_qos = 67000;
@@ -110,7 +120,8 @@
 		return ret;
 	}
 
-	if (lwis_dev->has_iommu) {
+	if (of_find_property(lwis_dev->plat_dev->dev.of_node, "iommus", &iommus_len) &&
+	    iommus_len) {
 		/* Activate IOMMU for the platform device */
 		ret = iommu_register_device_fault_handler(&lwis_dev->plat_dev->dev,
 							  lwis_iommu_fault_handler, lwis_dev);
@@ -149,7 +160,7 @@
 		}
 	}
 
-	if (lwis_dev->bts_scenario_name) {
+	if (lwis_dev->bts_index != BTS_UNSUPPORTED && lwis_dev->bts_scenario_name) {
 		lwis_dev->bts_scenario = bts_get_scenindex(lwis_dev->bts_scenario_name);
 		if (!lwis_dev->bts_scenario) {
 			dev_err(lwis_dev->dev, "Failed to get default camera BTS scenario.\n");
@@ -162,6 +173,7 @@
 
 int lwis_platform_device_disable(struct lwis_device *lwis_dev)
 {
+	int iommus_len = 0;
 	struct lwis_platform *platform;
 
 	if (!lwis_dev) {
@@ -173,7 +185,7 @@
 		return -ENODEV;
 	}
 
-	if (lwis_dev->bts_scenario_name) {
+	if (lwis_dev->bts_index != BTS_UNSUPPORTED && lwis_dev->bts_scenario_name) {
 		bts_del_scenario(lwis_dev->bts_scenario);
 	}
 
@@ -182,7 +194,8 @@
 
 	lwis_platform_remove_qos(lwis_dev);
 
-	if (lwis_dev->has_iommu) {
+	if (of_find_property(lwis_dev->plat_dev->dev.of_node, "iommus", &iommus_len) &&
+	    iommus_len) {
 		/* Deactivate IOMMU */
 		iommu_unregister_device_fault_handler(&lwis_dev->plat_dev->dev);
 	}
@@ -217,10 +230,8 @@
 		qos_class = PM_QOS_CAM_THROUGHPUT;
 		break;
 	case CLOCK_FAMILY_TNR:
-#if defined(CONFIG_SOC_GS101)
 		qos_req = &platform->pm_qos_tnr;
 		qos_class = PM_QOS_TNR_THROUGHPUT;
-#endif
 		break;
 	case CLOCK_FAMILY_MIF:
 		qos_req = &platform->pm_qos_mem;
@@ -283,11 +294,9 @@
 	if (exynos_pm_qos_request_active(&platform->pm_qos_cam)) {
 		exynos_pm_qos_remove_request(&platform->pm_qos_cam);
 	}
-#if defined(CONFIG_SOC_GS101)
 	if (exynos_pm_qos_request_active(&platform->pm_qos_tnr)) {
 		exynos_pm_qos_remove_request(&platform->pm_qos_tnr);
 	}
-#endif
 	return 0;
 }
 
diff --git a/platform/busan/lwis_platform_busan.h b/platform/busan/lwis_platform_busan.h
new file mode 100644
index 0000000..fca0596
--- /dev/null
+++ b/platform/busan/lwis_platform_busan.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Google LWIS Busan Platform-Specific Functions
+ *
+ * Copyright (c) 2021 Google, LLC
+ */
+
+#ifndef LWIS_PLATFORM_BUSAN_H_
+#define LWIS_PLATFORM_BUSAN_H_
+
+#include <soc/google/exynos_pm_qos.h>
+
+struct lwis_platform {
+	struct exynos_pm_qos_request pm_qos_int_cam;
+	struct exynos_pm_qos_request pm_qos_int;
+	struct exynos_pm_qos_request pm_qos_cam;
+	struct exynos_pm_qos_request pm_qos_mem;
+	struct exynos_pm_qos_request pm_qos_tnr;
+	/* struct exynos_pm_qos_request pm_qos_hpg; */
+};
+
+#endif /* LWIS_PLATFORM_BUSAN_H_ */
diff --git a/platform/gs101/lwis_platform_gs101_dma.c b/platform/busan/lwis_platform_busan_dma.c
similarity index 81%
copy from platform/gs101/lwis_platform_gs101_dma.c
copy to platform/busan/lwis_platform_busan_dma.c
index be4828c..1fc5dc8 100644
--- a/platform/gs101/lwis_platform_gs101_dma.c
+++ b/platform/busan/lwis_platform_busan_dma.c
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Google LWIS GS101 Platform-Specific DMA Functions
+ * Google LWIS Busan Platform-Specific DMA Functions
  *
- * Copyright (c) 2020 Google, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (c) 2021 Google, LLC
  */
 
 #include <linux/slab.h>
@@ -57,7 +54,7 @@
 /*
  * We don't ever do dma_buf_vmap before. Instead, use the upstream dma-buf
  * interface to map ION buffers, so we don't need to do dma_buf_vunmap.
- * Keep this function by defult return 0
+ * Keep this function returning 0 by default.
  */
 int lwis_platform_dma_buffer_unmap(struct lwis_device *lwis_dev,
 				   struct dma_buf_attachment *attachment, dma_addr_t address)
diff --git a/platform/exynos/lwis_platform_exynos.c b/platform/exynos/lwis_platform_exynos.c
deleted file mode 100644
index 7628def..0000000
--- a/platform/exynos/lwis_platform_exynos.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Google LWIS Lwis Exynos-specific platform functions
- *
- * Copyright (c) 2018 Google, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "lwis_platform_exynos.h"
-
-#include <linux/exynos_iovmm.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-#include <soc/samsung/bts.h>
-
-#include "lwis_device_dpm.h"
-#include "lwis_debug.h"
-#include "lwis_platform.h"
-
-/* Uncomment to let kernel panic when IOMMU hits a page fault. */
-/* TODO: Add error handling to propagate SysMMU errors back to userspace,
- * so we don't need to panic here. */
-#define ENABLE_PAGE_FAULT_PANIC
-
-int lwis_platform_probe(struct lwis_device *lwis_dev)
-{
-	struct lwis_platform *platform;
-	BUG_ON(!lwis_dev);
-	platform = kzalloc(sizeof(struct lwis_platform), GFP_KERNEL);
-	if (IS_ERR_OR_NULL(platform)) {
-		return -ENOMEM;
-	}
-	lwis_dev->platform = platform;
-
-	/* Enable runtime power management for the platform device */
-	pm_runtime_enable(&lwis_dev->plat_dev->dev);
-
-	lwis_dev->bts_index = BTS_UNSUPPORTED;
-	/* Only IOREG devices will access DMA resources */
-	if (lwis_dev->type != DEVICE_TYPE_IOREG) {
-		return 0;
-	}
-	/* Register to bts */
-	lwis_dev->bts_index = bts_get_bwindex(lwis_dev->name);
-	if (lwis_dev->bts_index < 0) {
-		dev_err(lwis_dev->dev, "Failed to register to BTS, ret: %d\n", lwis_dev->bts_index);
-		lwis_dev->bts_index = BTS_UNSUPPORTED;
-	}
-
-	return 0;
-}
-
-static int __attribute__((unused))
-iovmm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long fault_addr,
-		    int fault_flag, void *token)
-{
-	struct lwis_device *lwis_dev = (struct lwis_device *)token;
-
-	pr_err("############ LWIS IOVMM PAGE FAULT ############\n");
-	pr_err("\n");
-	pr_err("Device: %s IOVMM Page Fault at Address: 0x%016lx Flag: 0x%08x\n", lwis_dev->name,
-	       fault_addr, fault_flag);
-	pr_err("\n");
-	lwis_debug_print_transaction_info(lwis_dev);
-	pr_err("\n");
-	lwis_debug_print_event_states_info(lwis_dev);
-	pr_err("\n");
-	lwis_debug_print_buffer_info(lwis_dev);
-	pr_err("\n");
-	pr_err("###############################################\n");
-
-#ifdef ENABLE_PAGE_FAULT_PANIC
-	return NOTIFY_BAD;
-#else
-	return NOTIFY_OK;
-#endif
-}
-
-int lwis_platform_device_enable(struct lwis_device *lwis_dev)
-{
-	int ret;
-	struct lwis_platform *platform;
-	const int core_clock_qos = 67000;
-	const int hpg_qos = 1;
-
-	BUG_ON(!lwis_dev);
-	platform = lwis_dev->platform;
-	if (!platform) {
-		return -ENODEV;
-	}
-
-	/* Upref the runtime power management controls for the platform dev */
-	ret = pm_runtime_get_sync(&lwis_dev->plat_dev->dev);
-	if (ret < 0) {
-		pr_err("Unable to enable platform device\n");
-		return ret;
-	}
-	if (lwis_dev->has_iommu) {
-		/* Activate IOMMU/SYSMMU for the platform device */
-		ret = iovmm_activate(&lwis_dev->plat_dev->dev);
-		if (ret < 0) {
-			pr_err("Failed to enable IOMMU for the device: %d\n", ret);
-			return ret;
-		}
-		/* Set SYSMMU fault handler */
-		iovmm_set_fault_handler(&lwis_dev->plat_dev->dev, iovmm_fault_handler, lwis_dev);
-	}
-
-	/* Set hardcoded DVFS levels */
-	if (!pm_qos_request_active(&platform->pm_qos_hpg))
-		pm_qos_add_request(&platform->pm_qos_hpg, PM_QOS_CPU_ONLINE_MIN, hpg_qos);
-
-	if (lwis_dev->clock_family != CLOCK_FAMILY_INVALID &&
-	    lwis_dev->clock_family < NUM_CLOCK_FAMILY) {
-		ret = lwis_platform_update_qos(lwis_dev, core_clock_qos, lwis_dev->clock_family);
-		if (ret < 0) {
-			dev_err(lwis_dev->dev, "Failed to enable core clock\n");
-			return ret;
-		}
-	}
-
-	if (lwis_dev->bts_scenario_name) {
-		lwis_dev->bts_scenario = bts_get_scenindex(lwis_dev->bts_scenario_name);
-		if (!lwis_dev->bts_scenario) {
-			dev_err(lwis_dev->dev, "Failed to get default camera BTS scenario.\n");
-			return -EINVAL;
-		}
-		bts_add_scenario(lwis_dev->bts_scenario);
-	}
-	return 0;
-}
-
-int lwis_platform_device_disable(struct lwis_device *lwis_dev)
-{
-	int ret;
-	struct lwis_platform *platform;
-	BUG_ON(!lwis_dev);
-	platform = lwis_dev->platform;
-	if (!platform) {
-		return -ENODEV;
-	}
-
-	if (lwis_dev->bts_scenario_name) {
-		bts_del_scenario(lwis_dev->bts_scenario);
-	}
-
-	/* We can't remove fault handlers, so there's no call corresponding
-	 * to the iovmm_set_fault_handler above */
-
-	lwis_platform_remove_qos(lwis_dev);
-
-	if (lwis_dev->has_iommu) {
-		/* Deactivate IOMMU/SYSMMU */
-		iovmm_deactivate(&lwis_dev->plat_dev->dev);
-	}
-
-	/* Disable platform device */
-	ret = pm_runtime_put_sync(&lwis_dev->plat_dev->dev);
-
-	return ret;
-}
-
-int lwis_platform_update_qos(struct lwis_device *lwis_dev, int value,
-			     int32_t clock_family)
-{
-	struct lwis_platform *platform;
-	struct pm_qos_request *qos_req;
-	int qos_class;
-
-	BUG_ON(!lwis_dev);
-	platform = lwis_dev->platform;
-	if (!platform) {
-		return -ENODEV;
-	}
-
-	switch (clock_family) {
-	case CLOCK_FAMILY_INTCAM:
-		qos_req = &platform->pm_qos_int_cam;
-		qos_class = PM_QOS_INTCAM_THROUGHPUT;
-		break;
-	case CLOCK_FAMILY_CAM:
-		qos_req = &platform->pm_qos_cam;
-		qos_class = PM_QOS_CAM_THROUGHPUT;
-		break;
-	case CLOCK_FAMILY_TNR:
-#if defined(CONFIG_SOC_GS101)
-		qos_req = &platform->pm_qos_tnr;
-		qos_class = PM_QOS_TNR_THROUGHPUT;
-#endif
-		break;
-	case CLOCK_FAMILY_MIF:
-		qos_req = &platform->pm_qos_mem;
-		qos_class = PM_QOS_BUS_THROUGHPUT;
-		break;
-	case CLOCK_FAMILY_INT:
-		qos_req = &platform->pm_qos_int;
-		qos_class = PM_QOS_DEVICE_THROUGHPUT;
-		break;
-	default:
-		dev_err(lwis_dev->dev, "%s clk family %d is invalid\n", lwis_dev->name,
-			lwis_dev->clock_family);
-		return -EINVAL;
-	}
-
-	if (!pm_qos_request_active(qos_req))
-		pm_qos_add_request(qos_req, qos_class, value);
-	else
-		pm_qos_update_request(qos_req, value);
-
-	dev_info(lwis_dev->dev, "Updating clock for clock_family %d, freq to %u\n", clock_family,
-		 value);
-
-	return 0;
-}
-
-int lwis_platform_remove_qos(struct lwis_device *lwis_dev)
-{
-	struct lwis_platform *platform;
-	BUG_ON(!lwis_dev);
-	platform = lwis_dev->platform;
-	if (!platform) {
-		return -ENODEV;
-	}
-
-	if (pm_qos_request_active(&platform->pm_qos_int))
-		pm_qos_remove_request(&platform->pm_qos_int);
-	if (pm_qos_request_active(&platform->pm_qos_mem))
-		pm_qos_remove_request(&platform->pm_qos_mem);
-	if (pm_qos_request_active(&platform->pm_qos_hpg))
-		pm_qos_remove_request(&platform->pm_qos_hpg);
-
-	switch (lwis_dev->clock_family) {
-	case CLOCK_FAMILY_INTCAM:
-		if (pm_qos_request_active(&platform->pm_qos_int_cam)) {
-			pm_qos_remove_request(&platform->pm_qos_int_cam);
-		}
-		break;
-	case CLOCK_FAMILY_CAM:
-		if (pm_qos_request_active(&platform->pm_qos_cam)) {
-			pm_qos_remove_request(&platform->pm_qos_cam);
-		}
-		break;
-	case CLOCK_FAMILY_TNR:
-#if defined(CONFIG_SOC_GS101)
-		if (pm_qos_request_active(&platform->pm_qos_tnr)) {
-			pm_qos_remove_request(&platform->pm_qos_tnr);
-		}
-#endif
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-int lwis_platform_update_bts(struct lwis_device *lwis_dev, unsigned int bw_kb_peak,
-			     unsigned int bw_kb_read, unsigned int bw_kb_write)
-{
-	int ret = 0;
-	struct bts_bw bts_request;
-
-	if (lwis_dev->bts_index == BTS_UNSUPPORTED) {
-		dev_info(lwis_dev->dev, "%s doesn't support bts\n", lwis_dev->name);
-		return ret;
-	}
-
-	bts_request.peak = bw_kb_peak;
-	bts_request.read = bw_kb_read;
-	bts_request.write = bw_kb_write;
-	ret = bts_update_bw(lwis_dev->bts_index, bts_request);
-	if (ret < 0) {
-		dev_err(lwis_dev->dev, "Failed to update bandwidth to bts, ret: %d\n", ret);
-	} else {
-		dev_info(lwis_dev->dev, "Updated bandwidth to bts, peak: %u, read: %u, write: %u\n",
-			 bw_kb_peak, bw_kb_read, bw_kb_write);
-	}
-
-	return ret;
-}
diff --git a/platform/exynos/lwis_platform_exynos.h b/platform/exynos/lwis_platform_exynos.h
deleted file mode 100644
index 4b46772..0000000
--- a/platform/exynos/lwis_platform_exynos.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Google LWIS Exynos Platform-specific Functions
- *
- * Copyright (c) 2018 Google, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef LWIS_PLATFORM_EXYNOS_H_
-#define LWIS_PLATFORM_EXYNOS_H_
-
-#include <linux/pm_qos.h>
-
-struct lwis_platform {
-	struct pm_qos_request pm_qos_int_cam;
-	struct pm_qos_request pm_qos_int;
-	struct pm_qos_request pm_qos_cam;
-	struct pm_qos_request pm_qos_mem;
-	struct pm_qos_request pm_qos_hpg;
-	struct pm_qos_request pm_qos_tnr;
-};
-
-#endif /* LWIS_PLATFORM_H_ */
diff --git a/platform/exynos/lwis_platform_exynos_dma.c b/platform/exynos/lwis_platform_exynos_dma.c
deleted file mode 100644
index 0834da5..0000000
--- a/platform/exynos/lwis_platform_exynos_dma.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Google LWIS Exynos Platform-specific DMA Functions
- *
- * Copyright (c) 2018 Google, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/ion_exynos.h>
-#include <linux/exynos_iovmm.h>
-
-#include "lwis_commands.h"
-#include "lwis_init.h"
-#include "lwis_platform.h"
-#include "lwis_platform_dma.h"
-#include "lwis_platform_exynos.h"
-
-#define ION_SYSTEM_HEAP_NAME "ion_system_heap"
-#define ION_SYSTEM_CONTIG_HEAP_NAME "ion_system_contig_heap"
-
-// ION allocation flags, imported from "/drivers/staging/android/uapi/ion.h".
-#define ION_FLAG_CACHED 1
-#define ION_FLAG_NOZEROED 8
-
-struct dma_buf *lwis_platform_dma_buffer_alloc(size_t len, unsigned int flags)
-{
-	unsigned int ion_flags = 0;
-
-	// "system_contig_heap" does not seemed to be used in the exynos driver,
-	// that's why system heap is used by default.
-	const char *ion_heap_name = ION_SYSTEM_HEAP_NAME;
-
-	if (flags & LWIS_DMA_BUFFER_CACHED) {
-		ion_flags |= ION_FLAG_CACHED;
-	}
-	if (flags & LWIS_DMA_BUFFER_UNINITIALIZED) {
-		ion_flags |= ION_FLAG_NOZEROED;
-	}
-
-	return ion_alloc_dmabuf(ion_heap_name, len, ion_flags);
-}
-
-dma_addr_t lwis_platform_dma_buffer_map(struct lwis_device *lwis_dev,
-					struct dma_buf_attachment *attachment, off_t offset,
-					size_t size, enum dma_data_direction direction, int flags)
-{
-	return ion_iovmm_map(attachment, offset, size, direction, flags);
-}
-
-int lwis_platform_dma_buffer_unmap(struct lwis_device *lwis_dev,
-				   struct dma_buf_attachment *attachment, dma_addr_t address)
-{
-	ion_iovmm_unmap(attachment, address);
-	return 0;
-}