Rename memory-alignment macros: the base rounding helper now takes an explicit alignment (GPR_ROUND_UP_TO_ALIGNMENT_SIZE(x, align), replacing GPR_ROUND_UP_TO_SPECIFIED_SIZE), and the old fixed-alignment form becomes GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE; GPR_ROUND_UP_TO_CACHELINE_SIZE is re-expressed in terms of the base helper.
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index a284e69..873db8e 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -66,12 +66,13 @@
#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
// Conversion between subchannel call and call stack.
-#define SUBCHANNEL_CALL_TO_CALL_STACK(call) \
- (grpc_call_stack*)((char*)(call) + \
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))
-#define CALL_STACK_TO_SUBCHANNEL_CALL(callstack) \
- (SubchannelCall*)(((char*)(call_stack)) - \
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) \
+ (grpc_call_stack*)((char*)(call) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
+ sizeof(SubchannelCall)))
+#define CALL_STACK_TO_SUBCHANNEL_CALL(call_stack)       \
+  (SubchannelCall*)(((char*)(call_stack)) -             \
+                    GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
+                        sizeof(SubchannelCall)))
namespace grpc_core {
@@ -151,10 +152,10 @@
size_t ConnectedSubchannel::GetInitialCallSizeEstimate(
size_t parent_data_size) const {
size_t allocation_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(SubchannelCall));
if (parent_data_size > 0) {
allocation_size +=
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
parent_data_size;
} else {
allocation_size += channel_stack_->call_stack_size;
@@ -178,8 +179,9 @@
void* SubchannelCall::GetParentData() {
grpc_channel_stack* chanstk = connected_subchannel_->channel_stack();
- return (char*)this + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(chanstk->call_stack_size);
+ return (char*)this +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(chanstk->call_stack_size);
}
grpc_call_stack* SubchannelCall::GetCallStack() {
diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc
index df956c7..7dfabbb 100644
--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -47,9 +47,9 @@
size_t grpc_channel_stack_size(const grpc_channel_filter** filters,
size_t filter_count) {
/* always need the header, and size for the channel elements */
- size_t size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count *
- sizeof(grpc_channel_element));
+ size_t size = GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(
+ filter_count * sizeof(grpc_channel_element));
size_t i;
GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 &&
@@ -57,18 +57,18 @@
/* add the size for each filter */
for (i = 0; i < filter_count; i++) {
- size += GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
+ size += GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
}
return size;
}
-#define CHANNEL_ELEMS_FROM_STACK(stk) \
- ((grpc_channel_element*)((char*)(stk) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
+#define CHANNEL_ELEMS_FROM_STACK(stk) \
+ ((grpc_channel_element*)((char*)(stk) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
sizeof(grpc_channel_stack))))
-#define CALL_ELEMS_FROM_STACK(stk) \
- ((grpc_call_element*)((char*)(stk) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
+#define CALL_ELEMS_FROM_STACK(stk) \
+ ((grpc_call_element*)((char*)(stk) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
sizeof(grpc_call_stack))))
grpc_channel_element* grpc_channel_stack_element(
@@ -92,8 +92,9 @@
const grpc_channel_args* channel_args, grpc_transport* optional_transport,
const char* name, grpc_channel_stack* stack) {
size_t call_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filter_count *
+ sizeof(grpc_call_element));
grpc_channel_element* elems;
grpc_channel_element_args args;
char* user_data;
@@ -104,8 +105,8 @@
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data = (reinterpret_cast<char*>(elems)) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count *
- sizeof(grpc_channel_element));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filter_count *
+ sizeof(grpc_channel_element));
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;
@@ -126,8 +127,9 @@
}
}
user_data +=
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
- call_size += GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
+ call_size +=
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
GPR_ASSERT(user_data > (char*)stack);
@@ -162,8 +164,9 @@
GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack);
- user_data = (reinterpret_cast<char*>(call_elems)) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
+ user_data =
+ (reinterpret_cast<char*>(call_elems)) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;
@@ -171,8 +174,8 @@
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- user_data +=
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
+ user_data += GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(
+ call_elems[i].filter->sizeof_call_data);
}
for (size_t i = 0; i < count; i++) {
grpc_error* error =
@@ -242,11 +245,11 @@
grpc_channel_element* elem) {
return reinterpret_cast<grpc_channel_stack*>(
reinterpret_cast<char*>(elem) -
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)));
}
grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) {
return reinterpret_cast<grpc_call_stack*>(
reinterpret_cast<char*>(elem) -
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call_stack)));
}
diff --git a/src/core/lib/gpr/alloc.cc b/src/core/lib/gpr/alloc.cc
index b601ad7..0d9f6f7 100644
--- a/src/core/lib/gpr/alloc.cc
+++ b/src/core/lib/gpr/alloc.cc
@@ -56,7 +56,7 @@
static void* platform_malloc_aligned(size_t size, size_t alignment) {
#if defined(GPR_HAS_ALIGNED_ALLOC)
- size = GPR_ROUND_UP_TO_SPECIFIED_SIZE(size, alignment);
+ size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size, alignment);
void* ret = aligned_alloc(alignment, size);
GPR_ASSERT(ret != nullptr);
return ret;
diff --git a/src/core/lib/gpr/alloc.h b/src/core/lib/gpr/alloc.h
index 2493c87..c77fbea 100644
--- a/src/core/lib/gpr/alloc.h
+++ b/src/core/lib/gpr/alloc.h
@@ -22,15 +22,13 @@
#include <grpc/support/port_platform.h>
-/// Given a size, round up to the next multiple of sizeof(void*).
+/// Given a size, round up to the next multiple of the given alignment
+/// (which must be a power of two).
-#define GPR_ROUND_UP_TO_ALIGNMENT_SIZE(x) \
- (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
+#define GPR_ROUND_UP_TO_ALIGNMENT_SIZE(x, align) \
+ (((x) + (align)-1u) & ~((align)-1u))
+
+#define GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(x) \
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE((x), GPR_MAX_ALIGNMENT)
#define GPR_ROUND_UP_TO_CACHELINE_SIZE(x) \
- (((x) + GPR_CACHELINE_SIZE - 1u) & ~(GPR_CACHELINE_SIZE - 1u))
-
-#define GPR_ROUND_UP_TO_SPECIFIED_SIZE(x, align) \
- (((x) + align - 1u) & ~(align - 1u))
-
-void* gpr_malloc_cacheline(size_t size);
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE((x), GPR_CACHELINE_SIZE)
#endif /* GRPC_CORE_LIB_GPR_ALLOC_H */
diff --git a/src/core/lib/gprpp/arena.cc b/src/core/lib/gprpp/arena.cc
index 5c344db..e1c7b29 100644
--- a/src/core/lib/gprpp/arena.cc
+++ b/src/core/lib/gprpp/arena.cc
@@ -67,7 +67,7 @@
Pair<Arena*, void*> Arena::CreateWithAlloc(size_t initial_size,
size_t alloc_size) {
static constexpr size_t base_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Arena));
auto* new_arena =
new (ArenaStorage(initial_size)) Arena(initial_size, alloc_size);
void* first_alloc = reinterpret_cast<char*>(new_arena) + base_size;
@@ -88,7 +88,7 @@
// sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
static constexpr size_t zone_base_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Zone));
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Zone));
size_t alloc_size = zone_base_size + size;
Zone* z = new (gpr_malloc_aligned(alloc_size, GPR_MAX_ALIGNMENT)) Zone();
{
diff --git a/src/core/lib/gprpp/arena.h b/src/core/lib/gprpp/arena.h
index 915cd5c..6c646c5 100644
--- a/src/core/lib/gprpp/arena.h
+++ b/src/core/lib/gprpp/arena.h
@@ -58,8 +58,8 @@
// Allocate \a size bytes from the arena.
void* Alloc(size_t size) {
static constexpr size_t base_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
- size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Arena));
+ size = GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(size);
size_t begin = total_used_.FetchAdd(size, MemoryOrder::RELAXED);
if (GPR_LIKELY(begin + size <= initial_zone_size_)) {
return reinterpret_cast<char*>(this) + base_size + begin;
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index bd14002..0a26872 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -260,10 +260,10 @@
#define CALL_STACK_FROM_CALL(call) \
(grpc_call_stack*)((char*)(call) + \
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)))
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)))
#define CALL_FROM_CALL_STACK(call_stack) \
(grpc_call*)(((char*)(call_stack)) - \
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)))
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)))
#define CALL_ELEM_FROM_CALL(call, idx) \
grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx)
@@ -329,7 +329,7 @@
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
size_t call_and_stack_size =
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size;
size_t call_alloc_size =
call_and_stack_size + (args->parent ? sizeof(child_call) : 0);
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index 29c1e56..4087065 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -115,7 +115,7 @@
}
size_t grpc_transport_stream_size(grpc_transport* transport) {
- return GPR_ROUND_UP_TO_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
+ return GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
}
void grpc_transport_destroy(grpc_transport* transport) {
diff --git a/test/core/util/memory_counters.cc b/test/core/util/memory_counters.cc
index 11107f6..60d22b1 100644
--- a/test/core/util/memory_counters.cc
+++ b/test/core/util/memory_counters.cc
@@ -54,9 +54,10 @@
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
void* ptr = g_old_allocs.malloc_fn(
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size)) + size);
*static_cast<size_t*>(ptr) = size;
- return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+ return static_cast<char*>(ptr) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
}
static void* guard_realloc(void* vptr, size_t size) {
@@ -67,23 +68,24 @@
guard_free(vptr);
return nullptr;
}
- void* ptr =
- static_cast<char*>(vptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+ void* ptr = static_cast<char*>(vptr) -
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
-*static_cast<gpr_atm*>(ptr));
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
ptr = g_old_allocs.realloc_fn(
- ptr, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
+ ptr, GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size)) + size);
*static_cast<size_t*>(ptr) = size;
- return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+ return static_cast<char*>(ptr) +
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
}
static void guard_free(void* vptr) {
if (vptr == nullptr) return;
- void* ptr =
- static_cast<char*>(vptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size_t));
+ void* ptr = static_cast<char*>(vptr) -
+ GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size_t));
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
-*static_cast<gpr_atm*>(ptr));
NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, -(gpr_atm)1);
@@ -93,7 +95,7 @@
// NB: We do not specify guard_malloc_aligned/guard_free_aligned methods. Since
// they are null, calls to gpr_malloc_aligned/gpr_free_aligned are executed as a
// wrapper over gpr_malloc/gpr_free, which do use guard_malloc/guard_free, and
-// thus there allocations are tracked as well.
+// thus their allocations are tracked as well.
struct gpr_allocation_functions g_guard_allocs = {
guard_malloc, nullptr, guard_realloc, guard_free, nullptr, nullptr};