LWIS: Add lock to protect register access

There is no lock held while we access the registers. Replace the register
mutex with spinlock protection so that concurrent register accesses from
different contexts (including IRQ context) are serialized.
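
For reference, the pattern applied in lwis_ioreg.c looks roughly like the
sketch below: take base_dev.lock with spin_lock_irqsave() around the
register access and drop it on every exit path, including error returns.
The wrapper name example_locked_modify() and the "lwis_ioreg.h" include are
illustrative assumptions only; lwis_ioreg_read() and the struct fields are
the ones this change actually touches.

  #include <linux/spinlock.h>
  #include "lwis_ioreg.h"

  static int example_locked_modify(struct lwis_ioreg_device *ioreg_dev, int bid,
                                   uint64_t offset, uint64_t set_mask,
                                   int access_size)
  {
          unsigned long flags;
          uint64_t val;
          int ret;

          /* Serialize register access against other users of this device. */
          spin_lock_irqsave(&ioreg_dev->base_dev.lock, flags);
          ret = lwis_ioreg_read(ioreg_dev, bid, offset, &val, access_size);
          if (ret) {
                  /* Every early return must release the lock first. */
                  spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
                  return ret;
          }
          val |= set_mask;
          /* ... write val back with the matching ioreg write helper ... */
          spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);

          return 0;
  }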

Bug: 237180008
Test: GCA, CTS
Signed-off-by: Holmes Chou <holmeschou@google.com>
Change-Id: I1dd9d850d37897cbd8ba8c983a01a21e295ee8c8
diff --git a/lwis_device.c b/lwis_device.c
index 1b3ec55..db10571 100644
--- a/lwis_device.c
+++ b/lwis_device.c
@@ -1192,9 +1192,6 @@
 	/* Initialize client mutex */
 	mutex_init(&lwis_dev->client_lock);
 
-	/* Initialize register access mutex */
-	mutex_init(&lwis_dev->reg_rw_lock);
-
 	/* Initialize an empty list of clients */
 	INIT_LIST_HEAD(&lwis_dev->clients);
 
diff --git a/lwis_device.h b/lwis_device.h
index 7f5f34a..f74296d 100644
--- a/lwis_device.h
+++ b/lwis_device.h
@@ -203,8 +203,6 @@
 	DECLARE_HASHTABLE(event_states, EVENT_HASH_BITS);
 	/* Virtual function table for sub classes */
 	struct lwis_device_subclass_operations vops;
-	/* Mutex used to synchronize register access between clients */
-	struct mutex reg_rw_lock;
 	/* Heartbeat timer structure */
 	struct timer_list heartbeat_timer;
 	/* Register-related properties */
diff --git a/lwis_ioctl.c b/lwis_ioctl.c
index 3abce74..003010a 100644
--- a/lwis_ioctl.c
+++ b/lwis_ioctl.c
@@ -382,7 +382,6 @@
 						   /*use_read_barrier=*/false,
 						   /*use_write_barrier=*/true);
 	}
-	mutex_lock(&lwis_dev->reg_rw_lock);
 	for (i = 0; i < num_io_entries; i++) {
 		switch (io_entries[i].type) {
 		case LWIS_IO_ENTRY_MODIFY:
@@ -412,7 +411,6 @@
 		}
 	}
 exit:
-	mutex_unlock(&lwis_dev->reg_rw_lock);
 	/* Use read memory barrier at the end of I/O entries if the access protocol
 	 * allows it */
 	if (lwis_dev->vops.register_io_barrier != NULL) {
diff --git a/lwis_ioreg.c b/lwis_ioreg.c
index fd74336..0a8478c 100644
--- a/lwis_ioreg.c
+++ b/lwis_ioreg.c
@@ -355,6 +355,7 @@
 	int index;
 	struct lwis_ioreg *block;
 	uint64_t reg_value;
+	unsigned long flags;
 
 	if (!ioreg_dev) {
 		pr_err("LWIS IOREG device is NULL\n");
@@ -366,7 +367,7 @@
 		return -EINVAL;
 	}
 
-	/* Non-blocking because we already locked here */
+	spin_lock_irqsave(&ioreg_dev->base_dev.lock, flags);
 	if (entry->type == LWIS_IO_ENTRY_READ) {
 		ret = lwis_ioreg_read(ioreg_dev, entry->rw.bid, entry->rw.offset, &entry->rw.val,
 				      access_size);
@@ -379,6 +380,7 @@
 		index = entry->rw_batch.bid;
 		block = get_block_by_idx(ioreg_dev, index);
 		if (IS_ERR_OR_NULL(block)) {
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return PTR_ERR(block);
 		}
 
@@ -389,6 +391,7 @@
 			dev_err(ioreg_dev->base_dev.dev,
 				"ioreg validate_offset failed at: Offset: 0x%llx\n",
 				entry->rw_batch.offset);
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return ret;
 		}
 
@@ -411,12 +414,14 @@
 	} else if (entry->type == LWIS_IO_ENTRY_WRITE_BATCH) {
 		if (ioreg_dev->base_dev.is_read_only) {
 			dev_err(ioreg_dev->base_dev.dev, "Device is read only\n");
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return -EPERM;
 		}
 
 		index = entry->rw_batch.bid;
 		block = get_block_by_idx(ioreg_dev, index);
 		if (IS_ERR_OR_NULL(block)) {
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return PTR_ERR(block);
 		}
 
@@ -427,6 +432,7 @@
 			dev_err(ioreg_dev->base_dev.dev,
 				"ioreg validate_offset failed at: Offset: 0x%llx\n",
 				entry->rw_batch.offset);
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return ret;
 		}
 		ret = ioreg_write_batch_internal(block->base, entry->rw_batch.offset,
@@ -445,6 +451,7 @@
 			dev_err(ioreg_dev->base_dev.dev,
 				"ioreg modify read failed at: Bid: %d, Offset: 0x%llx\n",
 				entry->mod.bid, entry->mod.offset);
+			spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 			return ret;
 		}
 		reg_value &= ~entry->mod.val_mask;
@@ -458,8 +465,10 @@
 		}
 	} else {
 		dev_err(ioreg_dev->base_dev.dev, "Invalid IO entry type: %d\n", entry->type);
+		spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 		return -EINVAL;
 	}
+	spin_unlock_irqrestore(&ioreg_dev->base_dev.lock, flags);
 
 	return ret;
 }
diff --git a/lwis_periodic_io.c b/lwis_periodic_io.c
index d3b56cb..5751745 100644
--- a/lwis_periodic_io.c
+++ b/lwis_periodic_io.c
@@ -183,7 +183,6 @@
 						   /*use_write_barrier=*/true);
 	}
 
-	mutex_lock(&lwis_dev->reg_rw_lock);
 	reinit_completion(&periodic_io->io_done);
 	for (i = 0; i < info->num_io_entries; ++i) {
 		/* Abort if periodic io is deactivated during processing.
@@ -257,7 +256,6 @@
 
 event_push:
 	complete(&periodic_io->io_done);
-	mutex_unlock(&lwis_dev->reg_rw_lock);
 	/* Use read memory barrier at the beginning of I/O entries if the access protocol
 	 * allows it */
 	if (lwis_dev->vops.register_io_barrier != NULL) {
diff --git a/lwis_transaction.c b/lwis_transaction.c
index 7783885..0ed5ecb 100644
--- a/lwis_transaction.c
+++ b/lwis_transaction.c
@@ -127,10 +127,6 @@
 						   /*use_write_barrier=*/true);
 	}
 
-	if (!in_irq) {
-		mutex_lock(&lwis_dev->reg_rw_lock);
-	}
-
 	for (i = 0; i < info->num_io_entries; ++i) {
 		entry = &info->io_entries[i];
 		if (entry->type == LWIS_IO_ENTRY_WRITE ||
@@ -224,10 +220,6 @@
 		resp->completion_index = i;
 	}
 
-	if (!in_irq) {
-		mutex_unlock(&lwis_dev->reg_rw_lock);
-	}
-
 	process_duration_ns = ktime_to_ns(lwis_get_time() - process_timestamp);
 
 	/* Use read memory barrier at the end of I/O entries if the access protocol