[kernel][thread] Remove direct thread_lock access.

Add thread_lock_ints_disabled and thread_unlock_ints_disabled for
callers that run with interrupts already disabled (or that manage
interrupt state themselves) and therefore can't use THREAD_LOCK and
THREAD_UNLOCK. Also use the existing thread_lock_held helper instead of
accessing the spin-lock directly in debug checks.

Bug: 165823021
Change-Id: I8da18b8289222d644d36cbfe9e8b933adff94129
diff --git a/arch/arm/arm-m/thread.c b/arch/arm/arm-m/thread.c
index 1a33b50..91a4a11 100644
--- a/arch/arm/arm-m/thread.c
+++ b/arch/arm/arm-m/thread.c
@@ -121,7 +121,7 @@
 #endif
 
     /* release the thread lock that was implicitly held across the reschedule */
-    spin_unlock(&thread_lock);
+    thread_unlock_ints_disabled();
     arch_enable_ints();
 
     ret = _current_thread->entry(_current_thread->arg);
diff --git a/arch/arm/arm/thread.c b/arch/arm/arm/thread.c
index 6ad833b..a14528c 100644
--- a/arch/arm/arm/thread.c
+++ b/arch/arm/arm/thread.c
@@ -53,7 +53,7 @@
 //  dump_thread(current_thread);
 
     /* release the thread lock that was implicitly held across the reschedule */
-    spin_unlock(&thread_lock);
+    thread_unlock_ints_disabled();
     arch_enable_ints();
 
     thread_t *ct = get_current_thread();
diff --git a/arch/arm64/thread.c b/arch/arm64/thread.c
index 3736ebb..cc3e8bd 100644
--- a/arch/arm64/thread.c
+++ b/arch/arm64/thread.c
@@ -61,7 +61,7 @@
     LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
 
     /* release the thread lock that was implicitly held across the reschedule */
-    spin_unlock(&thread_lock);
+    thread_unlock_ints_disabled();
     arch_enable_ints();
 
     ret = current_thread->entry(current_thread->arg);
diff --git a/arch/x86/thread.c b/arch/x86/thread.c
index da98fef..aac0db6 100644
--- a/arch/x86/thread.c
+++ b/arch/x86/thread.c
@@ -39,7 +39,7 @@
     thread_t *current_thread = get_current_thread();
 
     /* release the thread lock that was implicitly held across the reschedule */
-    spin_unlock(&thread_lock);
+    thread_unlock_ints_disabled();
     arch_enable_ints();
 
     ret = current_thread->entry(current_thread->arg);
diff --git a/include/kernel/thread.h b/include/kernel/thread.h
index 985e3b6..3f5a5ed 100644
--- a/include/kernel/thread.h
+++ b/include/kernel/thread.h
@@ -23,6 +23,7 @@
 #ifndef __KERNEL_THREAD_H
 #define __KERNEL_THREAD_H
 
+#include <assert.h>
 #include <sys/types.h>
 #include <list.h>
 #include <compiler.h>
@@ -203,6 +204,15 @@
 #define THREAD_LOCK(state) spin_lock_saved_state_t state; spin_lock_irqsave(&thread_lock, state)
 #define THREAD_UNLOCK(state) spin_unlock_irqrestore(&thread_lock, state)
 
+static inline void thread_lock_ints_disabled(void) {
+    DEBUG_ASSERT(arch_ints_disabled());
+    spin_lock(&thread_lock);
+}
+
+static inline void thread_unlock_ints_disabled(void) {
+    spin_unlock(&thread_lock);
+}
+
 static inline bool thread_lock_held(void)
 {
     return spin_lock_held(&thread_lock);
diff --git a/kernel/thread.c b/kernel/thread.c
index df3bf99..256d83b 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -104,7 +104,7 @@
     DEBUG_ASSERT(t->state == THREAD_READY);
     DEBUG_ASSERT(!list_in_list(&t->queue_node));
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     list_add_head(&run_queue[t->priority], &t->queue_node);
     run_queue_bitmap |= (1U<<t->priority);
@@ -116,7 +116,7 @@
     DEBUG_ASSERT(t->state == THREAD_READY);
     DEBUG_ASSERT(!list_in_list(&t->queue_node));
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     list_add_tail(&run_queue[t->priority], &t->queue_node);
     run_queue_bitmap |= (1<<t->priority);
@@ -554,7 +554,7 @@
     thread_t *t = get_top_thread(-1, false);
 
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     for (i = 0; i < SMP_MAX_CPUS; i++) {
         if (!mp_is_cpu_active(i))
@@ -599,7 +599,7 @@
     uint cpu = arch_curr_cpu_num();
 
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
     DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);
 
     THREAD_STATS_INC(reschedules);
@@ -825,7 +825,7 @@
 
     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
     DEBUG_ASSERT(current_thread->state == THREAD_BLOCKED);
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
     DEBUG_ASSERT(!thread_is_idle(current_thread));
 
     /* we are blocking on something. the blocking code should have already stuck us on a queue */
@@ -836,7 +836,7 @@
 {
     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
     DEBUG_ASSERT(t->state == THREAD_BLOCKED);
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
     DEBUG_ASSERT(!thread_is_idle(t));
 
     t->state = THREAD_READY;
@@ -1223,14 +1223,14 @@
 
     DEBUG_ASSERT(thread->magic == THREAD_MAGIC);
 
-    spin_lock(&thread_lock);
+    thread_lock_ints_disabled();
 
     enum handler_return ret = INT_NO_RESCHEDULE;
     if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
         ret = INT_RESCHEDULE;
     }
 
-    spin_unlock(&thread_lock);
+    thread_unlock_ints_disabled();
 
     return ret;
 }
@@ -1262,7 +1262,7 @@
     DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     if (timeout == 0)
         return ERR_TIMED_OUT;
@@ -1289,11 +1289,11 @@
          * The timer could be running on another CPU. Drop the thread-lock then
          * cancel and wait for the stack allocated timer.
          */
-        spin_unlock(&thread_lock);
+        thread_unlock_ints_disabled();
         arch_enable_ints();
         timer_cancel_sync(&timer);
         arch_disable_ints();
-        spin_lock(&thread_lock);
+        thread_lock_ints_disabled();
     }
 
     return current_thread->wait_queue_block_ret;
@@ -1322,7 +1322,7 @@
 
     DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     t = list_remove_head_type(&wait->list, thread_t, queue_node);
     if (t) {
@@ -1377,7 +1377,7 @@
 
     DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     if (reschedule && wait->count > 0) {
         /* if we're instructed to reschedule, stick the current thread on the head
@@ -1422,7 +1422,7 @@
 {
     DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
     wait->magic = 0;
@@ -1444,7 +1444,7 @@
 {
     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
     DEBUG_ASSERT(arch_ints_disabled());
-    DEBUG_ASSERT(spin_lock_held(&thread_lock));
+    DEBUG_ASSERT(thread_lock_held());
 
     if (t->state != THREAD_BLOCKED)
         return ERR_NOT_BLOCKED;