Fix performance issue incurred by removing the monitor thread.

When the monitor thread is used, most threads in the team go directly to
sleep if the copy of bt_intervals/bt_set is not available in the cache, and
this happens at least once per thread in the wait function, which makes
overall performance slightly better.
This change mimics that behavior by using the bt_intervals cache, which
simply keeps the blocktime interval in platform-dependent ticks or
nanoseconds.
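
The idea, roughly: convert the blocktime once into units comparable with a
cheap clock read, cache it per thread, and have the wait loop compare only
against that cached value. Below is a minimal, self-contained sketch of that
scheme; the names (cache_blocktime, th_bt_interval, spin_wait, now_ns) are
invented for illustration and are not the runtime's identifiers.

// Standalone sketch of caching the blocktime interval per thread.
#include <chrono>
#include <cstdint>
#include <cstdio>

static std::uint64_t now_ns() {                 // stand-in for KMP_NOW()
    using namespace std::chrono;
    return (std::uint64_t)duration_cast<nanoseconds>(
        steady_clock::now().time_since_epoch()).count();
}

struct thread_info {
    std::uint64_t th_bt_interval;  // cached blocktime, in now_ns() units
};

// Done once per barrier/fork, not on every wait iteration: convert the
// blocktime setting into clock units and stash it in the per-thread struct.
static void cache_blocktime(thread_info *th, int blocktime_ms) {
    th->th_bt_interval = (std::uint64_t)blocktime_ms * 1000000u;  // ms -> ns
}

// The wait loop only reads the cached value; it never re-reads the ICVs.
static void spin_wait(const thread_info *th, volatile int *flag) {
    std::uint64_t goal = now_ns() + th->th_bt_interval;
    std::uint64_t count = 0;
    while (!*flag) {
        // Check the clock only sporadically to keep the hot loop cheap.
        if (count++ % 1000 == 0 && now_ns() > goal)
            break;  // blocktime expired: the real runtime would go to sleep
    }
}

int main() {
    thread_info th;
    volatile int flag = 1;      // pretend the release already happened
    cache_blocktime(&th, 200);  // e.g. a 200 ms blocktime
    spin_wait(&th, &flag);
    std::printf("cached interval: %llu ns\n",
                (unsigned long long)th.th_bt_interval);
    return 0;
}

In the patch itself, the cached value lands in th_team_bt_intervals via
KMP_BLOCKTIME_INTERVAL() at each barrier, and the wait loop compares it
against KMP_NOW() through KMP_BLOCKING().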

Patch by Hansang Bae

Differential Revision: https://reviews.llvm.org/D28906


git-svn-id: https://llvm.org/svn/llvm-project/openmp/trunk@293312 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/runtime/src/kmp.h b/runtime/src/kmp.h
index 07fc864..5859c51 100644
--- a/runtime/src/kmp.h
+++ b/runtime/src/kmp.h
@@ -889,6 +889,20 @@
 #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups)  \
                                  ( ( (blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1 ) /  \
                                    (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) )
+#else
+# if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
+   // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
+   extern double __kmp_ticks_per_nsec;
+#  define KMP_NOW() __kmp_hardware_timestamp()
+#  define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC * __kmp_ticks_per_nsec)
+#  define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
+# else
+   // System time is retrieved sporadically while blocking.
+   extern kmp_uint64 __kmp_now_nsec();
+#  define KMP_NOW() __kmp_now_nsec()
+#  define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC)
+#  define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
+# endif
 #endif // KMP_USE_MONITOR
 
 #define KMP_MIN_STATSCOLS       40
@@ -2220,8 +2234,10 @@
     /* to exist (from the POV of worker threads).                            */
 #if KMP_USE_MONITOR
     int               th_team_bt_intervals;
-#endif
     int               th_team_bt_set;
+#else
+    kmp_uint64        th_team_bt_intervals;
+#endif
 
 
 #if KMP_AFFINITY_SUPPORTED
diff --git a/runtime/src/kmp_barrier.cpp b/runtime/src/kmp_barrier.cpp
index 5e77614..4106245 100644
--- a/runtime/src/kmp_barrier.cpp
+++ b/runtime/src/kmp_barrier.cpp
@@ -1130,8 +1130,10 @@
         if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
 #if KMP_USE_MONITOR
             this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
             this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+            this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
         }
 
 #if USE_ITT_BUILD
@@ -1453,8 +1455,10 @@
     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
 #if KMP_USE_MONITOR
         this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
         this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+        this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
     }
 
 #if USE_ITT_BUILD
@@ -1644,8 +1648,10 @@
         if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
 #if KMP_USE_MONITOR
             this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
-#endif
             this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
+#else
+            this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL();
+#endif
         }
     } // master
 
diff --git a/runtime/src/kmp_wait_release.h b/runtime/src/kmp_wait_release.h
index 22ff8e8..4d17789 100644
--- a/runtime/src/kmp_wait_release.h
+++ b/runtime/src/kmp_wait_release.h
@@ -84,22 +84,6 @@
     */
 };
 
-#if ! KMP_USE_MONITOR
-# if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
-   // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
-   extern double __kmp_ticks_per_nsec;
-#  define KMP_NOW() __kmp_hardware_timestamp()
-#  define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC * __kmp_ticks_per_nsec)
-#  define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
-# else
-   // System time is retrieved sporadically while blocking.
-   extern kmp_uint64 __kmp_now_nsec();
-#  define KMP_NOW() __kmp_now_nsec()
-#  define KMP_BLOCKTIME_INTERVAL() (__kmp_dflt_blocktime * KMP_USEC_PER_SEC)
-#  define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
-# endif
-#endif
-
 /* Spin wait loop that first does pause, then yield, then sleep. A thread that calls __kmp_wait_*
    must make certain that another thread calls __kmp_release to wake it back up to prevent deadlocks!  */
 template <class C>
@@ -187,7 +171,7 @@
                       th_gtid, __kmp_global.g.g_time.dt.t_value, hibernate,
                       hibernate - __kmp_global.g.g_time.dt.t_value));
 #else
-        hibernate_goal = KMP_NOW() + KMP_BLOCKTIME_INTERVAL();
+        hibernate_goal = KMP_NOW() + this_thr->th.th_team_bt_intervals;
         poll_count = 0;
 #endif // KMP_USE_MONITOR
     }