ANDROID: uid_sys_stats: Use llist for deferred work

A use-after-free bug was found in the previous custom lock-free list
implementation for the deferred work, so switch functionality to llist
implementation.

While the previous approach atomically handled the list head, it did not
ensure the new node's next pointer was assigned before the head was
pointed to the node, allowing the consumer to traverse to an invalid
next pointer.

Additionally, in switching to llists, this patch pulls the entire list
off the list head once and processes it separately, reducing the number
of atomic operations compared with the custom list's implementation,
which pulled one node at a time atomically from the list head.

BUG: KASAN: use-after-free in process_notifier+0x270/0x2dc
Write of size 8 at addr d4ffff89545c3c58 by task Blocking Thread/3431
Pointer tag: [d4], memory tag: [fe]

call trace:
 dump_backtrace+0xf8/0x118
 show_stack+0x18/0x24
 dump_stack_lvl+0x60/0x78
 print_report+0x178/0x470
 kasan_report+0x8c/0xbc
 kasan_tag_mismatch+0x28/0x3c
 __hwasan_tag_mismatch+0x30/0x60
 process_notifier+0x270/0x2dc
 notifier_call_chain+0xb4/0x108
 blocking_notifier_call_chain+0x54/0x80
 profile_task_exit+0x20/0x2c
 do_exit+0xec/0x1114
 __arm64_sys_exit_group+0x0/0x24
 get_signal+0x93c/0xa78
 do_notify_resume+0x158/0x3fc
 el0_svc+0x54/0x78
 el0t_64_sync_handler+0x44/0xe4
 el0t_64_sync+0x190/0x194

Bug: 294468796
Bug: 295787403
Bug: 299197985
Fixes: 8e86825eecfa ("ANDROID: uid_sys_stats: Use a single work for deferred updates")
Change-Id: Id377348c239ec720a5237726bc3632544d737e3b
Signed-off-by: John Stultz <jstultz@google.com>
[nkapron: Squashed with other changes and rewrote the commit message]
Signed-off-by: Neill Kapron <nkapron@google.com>
(cherry picked from commit 87647c0c54bbfe865691d8b58988a3ce941b905e)
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index f5eaa63..842597b 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/profile.h>
@@ -636,22 +637,22 @@
 	struct task_io_accounting ioac;
 	u64 utime;
 	u64 stime;
-	struct update_stats_work *next;
+	struct llist_node node;
 };
 
-static atomic_long_t work_usw;
+static LLIST_HEAD(work_usw);
 
 static void update_stats_workfn(struct work_struct *work)
 {
-	struct update_stats_work *usw;
+	struct update_stats_work *usw, *t;
 	struct uid_entry *uid_entry;
 	struct task_entry *task_entry __maybe_unused;
+	struct llist_node *node;
 
 	rt_mutex_lock(&uid_lock);
-	while ((usw = (struct update_stats_work *)atomic_long_read(&work_usw))) {
-		if (atomic_long_cmpxchg(&work_usw, (long)usw, (long)(usw->next)) != (long)usw)
-			continue;
 
+	node = llist_del_all(&work_usw);
+	llist_for_each_entry_safe(usw, t, node, node) {
 		uid_entry = find_uid_entry(usw->uid);
 		if (!uid_entry)
 			goto next;
@@ -664,7 +665,7 @@
 		if (!task_entry)
 			goto next;
 		add_uid_tasks_io_stats(task_entry, &usw->ioac,
-				UID_STATE_DEAD_TASKS);
+				       UID_STATE_DEAD_TASKS);
 #endif
 		__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
 next:
@@ -704,8 +705,7 @@
 			 */
 			usw->ioac = task->ioac;
 			task_cputime_adjusted(task, &usw->utime, &usw->stime);
-			usw->next = (struct update_stats_work *)atomic_long_xchg(&work_usw,
-										 (long)usw);
+			llist_add(&usw->node, &work_usw);
 			schedule_work(&update_stats_work);
 		}
 		return NOTIFY_OK;