Move all helper functions to a class called Atomic.
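
The free helpers (AtomicLoad, AtomicStore, AtomicFetchAdd, ...) become member
functions (Load, Store, FetchAdd, ...) on a grpc_core::Atomic<T> wrapper around
std::atomic<T>, so call sites change mechanically. A minimal before/after
sketch (illustrative only; `done` is a hypothetical flag, not part of this
patch):

    #include "src/core/lib/gprpp/atomic.h"

    // Before: grpc_core::Atomic<T> was an alias for std::atomic<T>, driven by
    // free helper functions:
    //   grpc_core::Atomic<bool> done{false};
    //   if (grpc_core::AtomicLoad(&done, grpc_core::MemoryOrder::ACQUIRE)) return;
    //   grpc_core::AtomicStore(&done, true, grpc_core::MemoryOrder::RELEASE);

    // After: the same operations are member functions on the class.
    grpc_core::Atomic<bool> done{false};
    if (!done.Load(grpc_core::MemoryOrder::ACQUIRE)) {
      done.Store(true, grpc_core::MemoryOrder::RELEASE);
    }
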
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index b0420a2..d7fd73f 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -343,7 +343,7 @@
struct ServerLoadReportingFilterStaticRegistrar {
ServerLoadReportingFilterStaticRegistrar() {
static grpc_core::Atomic<bool> registered{false};
- if (registered) return;
+ if (registered.Load(grpc_core::MemoryOrder::ACQUIRE)) return;
RegisterChannelFilter<ServerLoadReportingChannelData,
ServerLoadReportingCallData>(
"server_load_reporting", GRPC_SERVER_CHANNEL, INT_MAX,
@@ -356,7 +356,7 @@
::grpc::load_reporter::MeasureEndBytesReceived();
::grpc::load_reporter::MeasureEndLatencyMs();
::grpc::load_reporter::MeasureOtherCallMetric();
- registered = true;
+ registered.Store(true, grpc_core::MemoryOrder::RELEASE);
}
} server_load_reporting_filter_static_registrar;
diff --git a/src/core/lib/gprpp/atomic.h b/src/core/lib/gprpp/atomic.h
index e7c10f6..622df1b 100644
--- a/src/core/lib/gprpp/atomic.h
+++ b/src/core/lib/gprpp/atomic.h
@@ -25,9 +25,6 @@
namespace grpc_core {
-template <typename T>
-using Atomic = std::atomic<T>;
-
enum class MemoryOrder {
RELAXED = std::memory_order_relaxed,
CONSUME = std::memory_order_consume,
@@ -37,66 +34,64 @@
SEQ_CST = std::memory_order_seq_cst
};
-// Prefer the helper methods below over the same functions provided by
-// std::atomic, because they maintain stats over atomic opertions which are
-// useful for comparing benchmarks.
-
template <typename T>
-T AtomicLoad(const Atomic<T>* storage, MemoryOrder order) {
- return storage->load(static_cast<std::memory_order>(order));
-}
+class Atomic {
+ public:
+ explicit Atomic(T val = T()) : storage_(val) {}
-template <typename T>
-T AtomicStore(Atomic<T>* storage, T val, MemoryOrder order) {
- return storage->store(val, static_cast<std::memory_order>(order));
-}
-template <typename T>
-bool AtomicCompareExchangeWeak(Atomic<T>* storage, T* expected, T desired,
- MemoryOrder success, MemoryOrder failure) {
- return GPR_ATM_INC_CAS_THEN(
- storage->compare_exchange_weak(*expected, desired, success, failure));
-}
+ T Load(MemoryOrder order) const {
+ return storage_.load(static_cast<std::memory_order>(order));
+ }
-template <typename T>
-bool AtomicCompareExchangeStrong(Atomic<T>* storage, T* expected, T desired,
- MemoryOrder success, MemoryOrder failure) {
- return GPR_ATM_INC_CAS_THEN(storage->compare_exchange_weak(
- *expected, desired, static_cast<std::memory_order>(success),
- static_cast<std::memory_order>(failure)));
-}
+ void Store(T val, MemoryOrder order) {
+ storage_.store(val, static_cast<std::memory_order>(order));
+ }
-template <typename T, typename Arg>
-T AtomicFetchAdd(Atomic<T>* storage, Arg arg,
- MemoryOrder order = MemoryOrder::SEQ_CST) {
- return GPR_ATM_INC_ADD_THEN(storage->fetch_add(
- static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
-}
+ bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success,
+ MemoryOrder failure) {
+ return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak(
+ *expected, desired, static_cast<std::memory_order>(success),
+ static_cast<std::memory_order>(failure)));
+ }
-template <typename T, typename Arg>
-T AtomicFetchSub(Atomic<T>* storage, Arg arg,
- MemoryOrder order = MemoryOrder::SEQ_CST) {
- return GPR_ATM_INC_ADD_THEN(storage->fetch_sub(
- static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
-}
+ bool CompareExchangeStrong(T* expected, T desired, MemoryOrder success,
+ MemoryOrder failure) {
+ return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_strong(
+ *expected, desired, static_cast<std::memory_order>(success),
+ static_cast<std::memory_order>(failure)));
+ }
-// Atomically increment a counter only if the counter value is not zero.
-// Returns true if increment took place; false if counter is zero.
-template <class T>
-bool AtomicIncrementIfNonzero(Atomic<T>* counter,
- MemoryOrder load_order = MemoryOrder::ACQ_REL) {
- T count = counter->load(static_cast<std::memory_order>(load_order));
- do {
- // If zero, we are done (without an increment). If not, we must do a CAS to
- // maintain the contract: do not increment the counter if it is already zero
- if (count == 0) {
- return false;
- }
- } while (!AtomicCompareExchangeWeak(
- counter, &count, count + 1,
- static_cast<std::memory_order>(MemoryOrder::ACQ_REL),
- static_cast<std::memory_order>(load_order)));
- return true;
-}
+ template <typename Arg>
+ T FetchAdd(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
+ return GPR_ATM_INC_ADD_THEN(storage_.fetch_add(
+ static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
+ }
+
+ template <typename Arg>
+ T FetchSub(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
+ return GPR_ATM_INC_ADD_THEN(storage_.fetch_sub(
+ static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
+ }
+
+ // Atomically increment a counter only if the counter value is not zero.
+ // Returns true if increment took place; false if counter is zero.
+ bool IncrementIfNonzero(MemoryOrder load_order = MemoryOrder::ACQUIRE) {
+ T count = storage_.load(static_cast<std::memory_order>(load_order));
+ do {
+ // If zero, we are done (without an increment). If not, we must do a CAS
+ // to maintain the contract: do not increment the counter if it is already
+ // zero
+ if (count == 0) {
+ return false;
+ }
+ } while (!CompareExchangeWeak(&count, count + 1, MemoryOrder::ACQ_REL,
+ load_order));
+ return true;
+ }
+
+ private:
+ std::atomic<T> storage_;
+};
} // namespace grpc_core
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 8148cfd..98a7ede 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -89,7 +89,7 @@
}
// Increases the ref-count by `n`.
- void Ref(Value n = 1) { AtomicFetchAdd(&value_, n, MemoryOrder::RELAXED); }
+ void Ref(Value n = 1) { value_.FetchAdd(n, MemoryOrder::RELAXED); }
void Ref(const DebugLocation& location, const char* reason, Value n = 1) {
#ifndef NDEBUG
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
@@ -105,7 +105,7 @@
// Similar to Ref() with an assert on the ref-count being non-zero.
void RefNonZero() {
#ifndef NDEBUG
- const Value prior = AtomicFetchAdd(&value_, 1, MemoryOrder::RELAXED);
+ const Value prior = value_.FetchAdd(1, MemoryOrder::RELAXED);
assert(prior > 0);
#else
Ref();
@@ -125,7 +125,7 @@
// Decrements the ref-count and returns true if the ref-count reaches 0.
bool Unref() {
- const Value prior = AtomicFetchSub(&value_, 1, MemoryOrder::ACQ_REL);
+ const Value prior = value_.FetchSub(1, MemoryOrder::ACQ_REL);
GPR_DEBUG_ASSERT(prior > 0);
return prior == 1;
}
@@ -142,7 +142,7 @@
}
private:
- Value get() const { return AtomicLoad(&value_, MemoryOrder::RELAXED); }
+ Value get() const { return value_.Load(MemoryOrder::RELAXED); }
#ifndef NDEBUG
TraceFlag* trace_flag_;
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index c2ee9d9..5f5f10d 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -53,9 +53,8 @@
static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
CallData* calld = static_cast<CallData*>(elem->call_data);
bool expected = false;
- if (!AtomicCompareExchangeStrong(&calld->filled_metadata, &expected, true,
- MemoryOrder::RELAXED,
- MemoryOrder::RELAXED)) {
+ if (!calld->filled_metadata.CompareExchangeStrong(
+ &expected, true, MemoryOrder::RELAXED, MemoryOrder::RELAXED)) {
return;
}
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);